From 0bd29dd7d61dae77b7820f79d46e8a52e74267c2 Mon Sep 17 00:00:00 2001 From: Barnabas Busa Date: Wed, 17 Jan 2024 14:43:22 +0100 Subject: [PATCH 01/33] fix: Update nethermind to expose host on 0.0.0.0 (#467) --- src/el/nethermind/nethermind_launcher.star | 1 + src/package_io/input_parser.star | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/el/nethermind/nethermind_launcher.star b/src/el/nethermind/nethermind_launcher.star index 0c7bdd8b4..c7deeb771 100644 --- a/src/el/nethermind/nethermind_launcher.star +++ b/src/el/nethermind/nethermind_launcher.star @@ -181,6 +181,7 @@ def get_config( "--Network.OnlyStaticPeers=true", "--Metrics.Enabled=true", "--Metrics.ExposePort={0}".format(METRICS_PORT_NUM), + "--Metrics.ExposeHost=0.0.0.0", ] if network not in constants.PUBLIC_NETWORKS: diff --git a/src/package_io/input_parser.star b/src/package_io/input_parser.star index e3efee204..73dac5c8c 100644 --- a/src/package_io/input_parser.star +++ b/src/package_io/input_parser.star @@ -7,7 +7,7 @@ genesis_constants = import_module( DEFAULT_EL_IMAGES = { "geth": "ethereum/client-go:latest", "erigon": "ethpandaops/erigon:devel", - "nethermind": "nethermind/nethermind:latest", + "nethermind": "nethermindeth/nethermind:master", "besu": "hyperledger/besu:latest", "reth": "ghcr.io/paradigmxyz/reth", "ethereumjs": "ethpandaops/ethereumjs:master", From b9523cb7083be78c96bb88a7ca86d142cb0eec1d Mon Sep 17 00:00:00 2001 From: Sam Calder-Mason Date: Thu, 18 Jan 2024 15:55:37 +1000 Subject: [PATCH 02/33] feat: Add Xatu Sentry (#466) Adds support for [Xatu](https://github.com/ethpandaops/xatu) in Sentry mode. Similarly to Ethereum Metrics Exporter, Xatu Sentry can be enabled globally, or per client pair. Note: This PR only enables "Sentry". Xatu Server/Clickhouse etc will need to run outside of Kurtosis for this to be useful. 
--------- Co-authored-by: Barnabas Busa --- README.md | 29 +++++ main.star | 5 + network_params.yaml | 2 + src/package_io/input_parser.star | 43 +++++++ src/participant.star | 2 + src/participant_network.star | 32 +++++ src/prometheus/prometheus_launcher.star | 20 ++++ src/static_files/static_files.star | 6 + src/xatu_sentry/xatu_sentry_context.star | 10 ++ src/xatu_sentry/xatu_sentry_launcher.star | 113 ++++++++++++++++++ .../xatu-sentry-config/config.yaml.tmpl | 63 ++++++++++ 11 files changed, 325 insertions(+) create mode 100644 src/xatu_sentry/xatu_sentry_context.star create mode 100644 src/xatu_sentry/xatu_sentry_launcher.star create mode 100644 static_files/xatu-sentry-config/config.yaml.tmpl diff --git a/README.md b/README.md index 3cbf859fc..65ccc43a6 100644 --- a/README.md +++ b/README.md @@ -231,6 +231,10 @@ participants: # Defaults to false ethereum_metrics_exporter_enabled: false + # Enables Xatu Sentry for this participant. Can be set globally. + # Defaults to false + xatu_sentry_enabled: false + # Count of nodes to spin up for this participant # Default to 1 count: 1 @@ -466,6 +470,31 @@ mev_params: # Optional parameters to send to the custom_flood script that sends reliable payloads custom_flood_params: interval_between_transactions: 1 + +# Enables Xatu Sentry for all participants +# Defaults to false +xatu_sentry_enabled: false + +# Xatu Sentry params +xatu_sentry_params: + # The image to use for Xatu Sentry + xatu_sentry_image: ethpandaops/xatu:latest + # GRPC Endpoint of Xatu Server to send events to + xatu_server_addr: localhost:8080 + # Enables TLS to Xatu Server + xatu_server_tls: false + # Headers to add on to Xatu Server requests + xatu_server_headers: {} + # Beacon event stream topics to subscribe to + beacon_subscriptions: + - attestation + - block + - chain_reorg + - finalized_checkpoint + - head + - voluntary_exit + - contribution_and_proof + - blob_sidecar ``` #### Example configurations diff --git a/main.star b/main.star index 
344d2e58f..41b56d46f 100644 --- a/main.star +++ b/main.star @@ -61,6 +61,7 @@ def run(plan, args={}): mev_params = args_with_right_defaults.mev_params parallel_keystore_generation = args_with_right_defaults.parallel_keystore_generation persistent = args_with_right_defaults.persistent + xatu_sentry_params = args_with_right_defaults.xatu_sentry_params grafana_datasource_config_template = read_file( static_files.GRAFANA_DATASOURCE_CONFIG_TEMPLATE_FILEPATH @@ -94,6 +95,7 @@ def run(plan, args={}): args_with_right_defaults.global_client_log_level, jwt_file, persistent, + xatu_sentry_params, parallel_keystore_generation, ) @@ -107,12 +109,14 @@ def run(plan, args={}): all_el_client_contexts = [] all_cl_client_contexts = [] all_ethereum_metrics_exporter_contexts = [] + all_xatu_sentry_contexts = [] for participant in all_participants: all_el_client_contexts.append(participant.el_client_context) all_cl_client_contexts.append(participant.cl_client_context) all_ethereum_metrics_exporter_contexts.append( participant.ethereum_metrics_exporter_context ) + all_xatu_sentry_contexts.append(participant.xatu_sentry_context) # Generate validator ranges validator_ranges_config_template = read_file( @@ -405,6 +409,7 @@ def run(plan, args={}): all_cl_client_contexts, prometheus_additional_metrics_jobs, all_ethereum_metrics_exporter_contexts, + all_xatu_sentry_contexts, ) plan.print("Launching grafana...") diff --git a/network_params.yaml b/network_params.yaml index 449a566f0..984ecf931 100644 --- a/network_params.yaml +++ b/network_params.yaml @@ -15,6 +15,7 @@ participants: validator_count: null snooper_enabled: false ethereum_metrics_exporter_enabled: false + xatu_sentry_enabled: false el_min_cpu: 0 el_max_cpu: 0 el_min_mem: 0 @@ -76,3 +77,4 @@ mev_params: mev_flood_seconds_per_bundle: 15 grafana_additional_dashboards: [] persistent: false +xatu_sentry_enabled: false diff --git a/src/package_io/input_parser.star b/src/package_io/input_parser.star index 73dac5c8c..b3852d532 100644 --- 
a/src/package_io/input_parser.star +++ b/src/package_io/input_parser.star @@ -56,6 +56,7 @@ ATTR_TO_BE_SKIPPED_AT_ROOT = ( "goomy_blob_params", "tx_spammer_params", "custom_flood_params", + "xatu_sentry_params", ) @@ -75,6 +76,7 @@ def input_parser(plan, input_args): result["disable_peer_scoring"] = False result["goomy_blob_params"] = get_default_goomy_blob_params() result["assertoor_params"] = get_default_assertoor_params() + result["xatu_sentry_params"] = get_default_xatu_sentry_params() result["persistent"] = False for attr in input_args: @@ -103,6 +105,10 @@ def input_parser(plan, input_args): for sub_attr in input_args["assertoor_params"]: sub_value = input_args["assertoor_params"][sub_attr] result["assertoor_params"][sub_attr] = sub_value + elif attr == "xatu_sentry_params": + for sub_attr in input_args["xatu_sentry_params"]: + sub_value = input_args["xatu_sentry_params"][sub_attr] + result["xatu_sentry_params"][sub_attr] = sub_value if result.get("disable_peer_scoring"): result = enrich_disable_peer_scoring(result) @@ -165,6 +171,7 @@ def input_parser(plan, input_args): ethereum_metrics_exporter_enabled=participant[ "ethereum_metrics_exporter_enabled" ], + xatu_sentry_enabled=participant["xatu_sentry_enabled"], prometheus_config=struct( scrape_interval=participant["prometheus_config"]["scrape_interval"], labels=participant["prometheus_config"]["labels"], @@ -249,10 +256,18 @@ def input_parser(plan, input_args): mev_type=result["mev_type"], snooper_enabled=result["snooper_enabled"], ethereum_metrics_exporter_enabled=result["ethereum_metrics_exporter_enabled"], + xatu_sentry_enabled=result["xatu_sentry_enabled"], parallel_keystore_generation=result["parallel_keystore_generation"], grafana_additional_dashboards=result["grafana_additional_dashboards"], disable_peer_scoring=result["disable_peer_scoring"], persistent=result["persistent"], + xatu_sentry_params=struct( + xatu_sentry_image=result["xatu_sentry_params"]["xatu_sentry_image"], + 
xatu_server_addr=result["xatu_sentry_params"]["xatu_server_addr"], + xatu_server_headers=result["xatu_sentry_params"]["xatu_server_headers"], + beacon_subscriptions=result["xatu_sentry_params"]["beacon_subscriptions"], + xatu_server_tls=result["xatu_sentry_params"]["xatu_server_tls"], + ), ) @@ -333,6 +348,8 @@ def parse_network_params(input_args): "ethereum_metrics_exporter_enabled" ] + xatu_sentry_enabled = participant["xatu_sentry_enabled"] + blobber_enabled = participant["blobber_enabled"] if blobber_enabled: # unless we are running lighthouse, we don't support blobber @@ -352,6 +369,11 @@ def parse_network_params(input_args): "ethereum_metrics_exporter_enabled" ] = default_ethereum_metrics_exporter_enabled + if xatu_sentry_enabled == False: + default_xatu_sentry_enabled = result["xatu_sentry_enabled"] + if default_xatu_sentry_enabled: + participant["xatu_sentry_enabled"] = default_xatu_sentry_enabled + validator_count = participant["validator_count"] if validator_count == None: default_validator_count = result["network_params"][ @@ -442,6 +464,7 @@ def default_input_args(): "global_client_log_level": "info", "snooper_enabled": False, "ethereum_metrics_exporter_enabled": False, + "xatu_sentry_enabled": False, "parallel_keystore_generation": False, "disable_peer_scoring": False, } @@ -501,6 +524,7 @@ def default_participant(): "validator_count": None, "snooper_enabled": False, "ethereum_metrics_exporter_enabled": False, + "xatu_sentry_enabled": False, "count": 1, "prometheus_config": { "scrape_interval": "15s", @@ -551,6 +575,25 @@ def get_default_assertoor_params(): } +def get_default_xatu_sentry_params(): + return { + "xatu_sentry_image": "ethpandaops/xatu:latest", + "xatu_server_addr": "localhost:8080", + "xatu_server_headers": {}, + "xatu_server_tls": False, + "beacon_subscriptions": [ + "attestation", + "block", + "chain_reorg", + "finalized_checkpoint", + "head", + "voluntary_exit", + "contribution_and_proof", + "blob_sidecar", + ], + } + + def 
get_default_custom_flood_params(): # this is a simple script that increases the balance of the coinbase address at a cadence return {"interval_between_transactions": 1} diff --git a/src/participant.star b/src/participant.star index 0780f0354..315a4a51c 100644 --- a/src/participant.star +++ b/src/participant.star @@ -5,6 +5,7 @@ def new_participant( cl_client_context, snooper_engine_context, ethereum_metrics_exporter_context, + xatu_sentry_context, ): return struct( el_client_type=el_client_type, @@ -13,4 +14,5 @@ def new_participant( cl_client_context=cl_client_context, snooper_engine_context=snooper_engine_context, ethereum_metrics_exporter_context=ethereum_metrics_exporter_context, + xatu_sentry_context=xatu_sentry_context, ) diff --git a/src/participant_network.star b/src/participant_network.star index b0ab404e4..cc0034ff0 100644 --- a/src/participant_network.star +++ b/src/participant_network.star @@ -31,6 +31,8 @@ ethereum_metrics_exporter = import_module( "./ethereum_metrics_exporter/ethereum_metrics_exporter_launcher.star" ) +xatu_sentry = import_module("./xatu_sentry/xatu_sentry_launcher.star") + genesis_constants = import_module( "./prelaunch_data_generator/genesis_constants/genesis_constants.star" ) @@ -61,6 +63,7 @@ def launch_participant_network( global_log_level, jwt_file, persistent, + xatu_sentry_params, parallel_keystore_generation=False, ): num_participants = len(participants) @@ -365,6 +368,7 @@ def launch_participant_network( all_snooper_engine_contexts = [] all_cl_client_contexts = [] all_ethereum_metrics_exporter_contexts = [] + all_xatu_sentry_contexts = [] preregistered_validator_keys_for_nodes = ( validator_data.per_node_keystores if network_params.network == "kurtosis" @@ -513,6 +517,29 @@ def launch_participant_network( all_ethereum_metrics_exporter_contexts.append(ethereum_metrics_exporter_context) + xatu_sentry_context = None + + if participant.xatu_sentry_enabled: + pair_name = "{0}-{1}-{2}".format(index_str, cl_client_type, 
el_client_type) + + xatu_sentry_service_name = "xatu-sentry-{0}".format(pair_name) + + xatu_sentry_context = xatu_sentry.launch( + plan, + xatu_sentry_service_name, + cl_client_context, + xatu_sentry_params, + network_params, + pair_name, + ) + plan.print( + "Successfully added {0} xatu sentry participants".format( + xatu_sentry_context + ) + ) + + all_xatu_sentry_contexts.append(xatu_sentry_context) + plan.print("Successfully added {0} CL participants".format(num_participants)) all_participants = [] @@ -533,6 +560,10 @@ def launch_participant_network( ethereum_metrics_exporter_context = all_ethereum_metrics_exporter_contexts[ index ] + xatu_sentry_context = None + + if participant.xatu_sentry_enabled: + xatu_sentry_context = all_xatu_sentry_contexts[index] participant_entry = participant_module.new_participant( el_client_type, @@ -541,6 +572,7 @@ def launch_participant_network( cl_client_context, snooper_engine_context, ethereum_metrics_exporter_context, + xatu_sentry_context, ) all_participants.append(participant_entry) diff --git a/src/prometheus/prometheus_launcher.star b/src/prometheus/prometheus_launcher.star index 61a6b9366..753edc6d8 100644 --- a/src/prometheus/prometheus_launcher.star +++ b/src/prometheus/prometheus_launcher.star @@ -25,12 +25,14 @@ def launch_prometheus( cl_client_contexts, additional_metrics_jobs, ethereum_metrics_exporter_contexts, + xatu_sentry_contexts, ): metrics_jobs = get_metrics_jobs( el_client_contexts, cl_client_contexts, additional_metrics_jobs, ethereum_metrics_exporter_contexts, + xatu_sentry_contexts, ) prometheus_url = prometheus.run( plan, metrics_jobs, MIN_CPU, MAX_CPU, MIN_MEMORY, MAX_MEMORY @@ -44,6 +46,7 @@ def get_metrics_jobs( cl_client_contexts, additional_metrics_jobs, ethereum_metrics_exporter_contexts, + xatu_sentry_contexts, ): metrics_jobs = [] # Adding execution clients metrics jobs @@ -159,6 +162,23 @@ def get_metrics_jobs( }, ) ) + # Adding Xatu Sentry metrics jobs + for context in xatu_sentry_contexts: + if 
context != None: + metrics_jobs.append( + new_metrics_job( + job_name="xatu-sentry-{0}".format(context.pair_name), + endpoint="{}:{}".format( + context.ip_addr, + context.metrics_port_num, + ), + metrics_path="/metrics", + labels={ + "pair": context.pair_name, + }, + ) + ) + # Adding additional metrics jobs for job in additional_metrics_jobs: if job == None: diff --git a/src/static_files/static_files.star b/src/static_files/static_files.star index 285eb6043..07a4745da 100644 --- a/src/static_files/static_files.star +++ b/src/static_files/static_files.star @@ -31,6 +31,12 @@ ASSERTOOR_TESTS_CONFIG_DIRPATH = ( STATIC_FILES_DIRPATH + ASSERTOOR_CONFIG_DIRPATH + "/tests" ) +# xatu-sentry config +XATU_SENTRY_CONFIG_DIRPATH = "/xatu-sentry-config" +XATU_SENTRY_CONFIG_TEMPLATE_FILEPATH = ( + STATIC_FILES_DIRPATH + XATU_SENTRY_CONFIG_DIRPATH + "/config.yaml.tmpl" +) + # Grafana config GRAFANA_CONFIG_DIRPATH = "/grafana-config" GRAFANA_DATASOURCE_CONFIG_TEMPLATE_FILEPATH = ( diff --git a/src/xatu_sentry/xatu_sentry_context.star b/src/xatu_sentry/xatu_sentry_context.star new file mode 100644 index 000000000..096e22f92 --- /dev/null +++ b/src/xatu_sentry/xatu_sentry_context.star @@ -0,0 +1,10 @@ +def new_xatu_sentry_context( + ip_addr, + metrics_port_num, + pair_name, +): + return struct( + ip_addr=ip_addr, + metrics_port_num=metrics_port_num, + pair_name=pair_name, + ) diff --git a/src/xatu_sentry/xatu_sentry_launcher.star b/src/xatu_sentry/xatu_sentry_launcher.star new file mode 100644 index 000000000..3236fcfe8 --- /dev/null +++ b/src/xatu_sentry/xatu_sentry_launcher.star @@ -0,0 +1,113 @@ +shared_utils = import_module("../shared_utils/shared_utils.star") +static_files = import_module("../static_files/static_files.star") +xatu_sentry_context = import_module("../xatu_sentry/xatu_sentry_context.star") + +HTTP_PORT_ID = "http" +METRICS_PORT_NUMBER = 9090 + +XATU_SENTRY_CONFIG_MOUNT_DIRPATH_ON_SERVICE = "/config" +XATU_SENTRY_CONFIG_FILENAME = "config.yaml" + +# The min/max 
CPU/memory that xatu-sentry can use +MIN_CPU = 10 +MAX_CPU = 1000 +MIN_MEMORY = 16 +MAX_MEMORY = 1024 + + +def launch( + plan, + xatu_sentry_service_name, + cl_client_context, + xatu_sentry_params, + network_params, + pair_name, +): + config_template = read_file(static_files.XATU_SENTRY_CONFIG_TEMPLATE_FILEPATH) + + template_data = new_config_template_data( + str(METRICS_PORT_NUMBER), + pair_name, + "http://{}:{}".format( + cl_client_context.ip_addr, + cl_client_context.http_port_num, + ), + xatu_sentry_params.xatu_server_addr, + network_params.network, + xatu_sentry_params.beacon_subscriptions, + xatu_sentry_params.xatu_server_headers, + xatu_sentry_params.xatu_server_tls, + ) + + template_and_data = shared_utils.new_template_and_data( + config_template, template_data + ) + + template_and_data_by_rel_dest_filepath = {} + + config_name = "{}-{}".format(xatu_sentry_service_name, XATU_SENTRY_CONFIG_FILENAME) + + template_and_data_by_rel_dest_filepath[config_name] = template_and_data + + config_files_artifact_name = plan.render_templates( + template_and_data_by_rel_dest_filepath, config_name + ) + + config_file_path = shared_utils.path_join( + XATU_SENTRY_CONFIG_MOUNT_DIRPATH_ON_SERVICE, + config_name, + ) + + xatu_sentry_service = plan.add_service( + xatu_sentry_service_name, + ServiceConfig( + image=xatu_sentry_params.xatu_sentry_image, + ports={ + HTTP_PORT_ID: shared_utils.new_port_spec( + METRICS_PORT_NUMBER, + shared_utils.TCP_PROTOCOL, + shared_utils.HTTP_APPLICATION_PROTOCOL, + ) + }, + files={ + XATU_SENTRY_CONFIG_MOUNT_DIRPATH_ON_SERVICE: config_files_artifact_name, + }, + cmd=[ + "sentry", + "--config", + config_file_path, + ], + min_cpu=MIN_CPU, + max_cpu=MAX_CPU, + min_memory=MIN_MEMORY, + max_memory=MAX_MEMORY, + ), + ) + + return xatu_sentry_context.new_xatu_sentry_context( + xatu_sentry_service.ip_address, + METRICS_PORT_NUMBER, + pair_name, + ) + + +def new_config_template_data( + metrics_port, + beacon_node_name, + beacon_node_addr, + 
xatu_server_addr, + network_name, + beacon_subscriptions, + xatu_server_headers, + xatu_server_tls, +): + return { + "MetricsPort": metrics_port, + "BeaconNodeName": beacon_node_name, + "BeaconNodeAddress": beacon_node_addr, + "XatuServerAddress": xatu_server_addr, + "EthereumNetworkName": network_name, + "BeaconSubscriptions": beacon_subscriptions, + "XatuServerHeaders": xatu_server_headers, + "XatuServerTLS": xatu_server_tls, + } diff --git a/static_files/xatu-sentry-config/config.yaml.tmpl b/static_files/xatu-sentry-config/config.yaml.tmpl new file mode 100644 index 000000000..5be3ab05c --- /dev/null +++ b/static_files/xatu-sentry-config/config.yaml.tmpl @@ -0,0 +1,63 @@ +logging: "debug" +metricsAddr: ":{{ .MetricsPort }}" + +name: "{{ .BeaconNodeName }}" + +ntpServer: time.google.com + +ethereum: + beaconNodeAddress: "{{ .BeaconNodeAddress }}" + overrideNetworkName: "{{ .EthereumNetworkName }}" + beaconSubscriptions: + {{- range .BeaconSubscriptions }} + - "{{ . }}" + {{- end }} + +forkChoice: + enabled: false + + onReOrgEvent: + enabled: false + + interval: + enabled: false + every: 30s + + at: + enabled: false + slotTimes: + - 4s + +attestationData: + enabled: false + + allCommittees: false + + interval: + enabled: false + every: 30s + + at: + enabled: false + slotTimes: + - 4s + +beaconCommittees: + enabled: true + +outputs: +- name: xatu-server + type: xatu + config: + address: "{{ .XatuServerAddress }}" + tls: {{ .XatuServerTLS }} + maxQueueSize: 51200 + batchTimeout: 1s + exportTimeout: 10s + maxExportBatchSize: 256 + {{- if .XatuServerHeaders }} + headers: + {{- range $key, $value := .XatuServerHeaders }} + {{ $key }}: "{{ $value }}" + {{- end }} + {{- end }} \ No newline at end of file From f8289cb49f68dd488635d2313c007ee7c2f4dbf3 Mon Sep 17 00:00:00 2001 From: Barnabas Busa Date: Fri, 19 Jan 2024 20:44:56 +0100 Subject: [PATCH 03/33] feat: enable syncing ephemery (#459) --- .../tests/{devnet-test.yaml => ephemery.yaml} | 4 +- README.md | 3 +- 
examples/README.md | 1 + examples/capella-mev.yaml | 18 ----- examples/dencun.yaml | 32 --------- examples/verkle-gen-nethermind.yaml | 32 --------- examples/verkle-gen.yaml | 17 ----- examples/verkle.yaml | 17 ----- main.star | 2 +- src/cl/lighthouse/lighthouse_launcher.star | 71 +++++++++++-------- src/cl/lodestar/lodestar_launcher.star | 20 ++++-- src/cl/nimbus/nimbus_launcher.star | 23 +++--- src/cl/prysm/prysm_launcher.star | 30 ++++++-- src/cl/teku/teku_launcher.star | 69 +++++++++++------- src/el/besu/besu_launcher.star | 2 +- src/el/erigon/erigon_launcher.star | 2 +- src/el/ethereumjs/ethereumjs_launcher.star | 2 +- src/el/geth/geth_launcher.star | 8 ++- src/el/nethermind/nethermind_launcher.star | 2 +- src/el/reth/reth_launcher.star | 2 +- src/package_io/constants.star | 11 +++ src/participant_network.star | 33 +++++++-- src/shared_utils/shared_utils.star | 4 +- 23 files changed, 190 insertions(+), 215 deletions(-) rename .github/tests/{devnet-test.yaml => ephemery.yaml} (87%) create mode 100644 examples/README.md delete mode 100644 examples/capella-mev.yaml delete mode 100644 examples/dencun.yaml delete mode 100644 examples/verkle-gen-nethermind.yaml delete mode 100644 examples/verkle-gen.yaml delete mode 100644 examples/verkle.yaml diff --git a/.github/tests/devnet-test.yaml b/.github/tests/ephemery.yaml similarity index 87% rename from .github/tests/devnet-test.yaml rename to .github/tests/ephemery.yaml index 72502f846..49118c7ff 100644 --- a/.github/tests/devnet-test.yaml +++ b/.github/tests/ephemery.yaml @@ -6,7 +6,7 @@ participants: - el_client_type: nethermind el_client_image: ethpandaops/nethermind:master cl_client_type: prysm - cl_client_image: ethpandaops/prysm:develop,ethpandaops/prysm-validator:develop + cl_client_image: gcr.io/prysmaticlabs/prysm/beacon-chain:latest,gcr.io/prysmaticlabs/prysm/validator:latest - el_client_type: erigon el_client_image: ethpandaops/erigon:devel cl_client_type: nimbus @@ -24,5 +24,5 @@ participants: 
cl_client_type: teku cl_client_image: ethpandaops/teku:master network_params: - network: "dencun-devnet-12" + network: "ephemery" additional_services: [] diff --git a/README.md b/README.md index 65ccc43a6..3645ae7f0 100644 --- a/README.md +++ b/README.md @@ -674,8 +674,7 @@ This note is from 2023-10-05 `flashbots/mev-boost-relay:0.27` and later support `capella_fork_epoch` at `0` but this seems to require a few flags enabled on the `lighthouse` beacon client including `--always-prefer-builder-payload` and `--disable-peer-scoring` -Users are recommended to use [`examples/capella-mev.yaml`](./examples/capella-mev.yaml); as inspiration for reliable payload -delivery. +Users are recommended to browse the example tests [`./.github/tests`](./.github/tests); as inspiration for different ways to use the package. ## Pre-funded accounts at Genesis diff --git a/examples/README.md b/examples/README.md new file mode 100644 index 000000000..8914b2b31 --- /dev/null +++ b/examples/README.md @@ -0,0 +1 @@ +Please find the example configurations in the [../.github/tests](../.github/tests) directory. 
diff --git a/examples/capella-mev.yaml b/examples/capella-mev.yaml deleted file mode 100644 index c8724cd75..000000000 --- a/examples/capella-mev.yaml +++ /dev/null @@ -1,18 +0,0 @@ -mev_type: full -participants: -- el_client_type: geth - cl_client_type: lighthouse - cl_client_image: ethpandaops/lighthouse:unstable-c55608b - beacon_extra_params: - - "--always-prefer-builder-payload" - - "--disable-peer-scoring" -network_config: - deneb_fork_epoch: 1 -mev_params: - mev_flood_seconds_per_bundle: 12 - mev_flood_extra_args: - - "--txsPerBundle=300" - mev_flood_image: flashbots/mev-flood:0.0.9 - mev_relay_image: flashbots/mev-boost-relay:0.28.0a7 - mev_boost_image: flashbots/mev-boost:1.6.4844.dev5 - mev_builder_image: flashbots/builder:1.13.2.4844.dev7-4d161de diff --git a/examples/dencun.yaml b/examples/dencun.yaml deleted file mode 100644 index bad793e98..000000000 --- a/examples/dencun.yaml +++ /dev/null @@ -1,32 +0,0 @@ -participants: - - el_client_type: geth - el_client_image: ethpandaops/geth:master-5b57727 - cl_client_type: lighthouse - cl_client_image: ethpandaops/lighthouse:proposer-signature-cache-29ecfc3 - - el_client_type: erigon - el_client_image: ethpandaops/erigon:devel-8cfafa4 - cl_client_type: lodestar - cl_client_image: ethpandaops/lodestar:blobs-inclproof-d5a5a47 - - el_client_type: nethermind - el_client_image: ethpandaops/nethermind:master-dcec565 - cl_client_type: teku - cl_client_image: ethpandaops/teku:master-16c4354 - - el_client_type: besu - el_client_image: ethpandaops/besu:main-be5cc68 - cl_client_type: teku - cl_client_image: ethpandaops/teku:master-16c4354 - - el_client_type: reth - el_client_image: ethpandaops/reth:main-b0c4d99 - cl_client_type: nimbus - cl_client_image: ethpandaops/nimbus:unstable-15147cc - - el_client_type: geth - el_client_image: ethpandaops/geth:master-5b57727 - cl_client_type: prysm - cl_client_image: ethpandaops/prysm:develop-381116a,ethpandaops/prysm-validator:develop-linux-amd64-381116a -network_params: - 
deneb_fork_epoch: 1 -additional_services: - - el_forkmon - - tx_spammer - - dora -snooper_enabled: true diff --git a/examples/verkle-gen-nethermind.yaml b/examples/verkle-gen-nethermind.yaml deleted file mode 100644 index 63da51762..000000000 --- a/examples/verkle-gen-nethermind.yaml +++ /dev/null @@ -1,32 +0,0 @@ -participants: - - el_client_type: geth - el_client_image: ethpandaops/geth:gballet-kaustinen-with-shapella-fc8f4b9 - cl_client_type: lighthouse - cl_client_image: ethpandaops/lighthouse:verkle-trees-capella-2ffb8a9 - - el_client_type: geth - el_client_image: ethpandaops/geth:gballet-kaustinen-with-shapella-fc8f4b9 - cl_client_type: lodestar - cl_client_image: ethpandaops/lodestar:g11tech-verge-815364b - - el_client_type: nethermind - el_client_image: nethermindeth/nethermind:kaustinen-648c6b8 - cl_client_type: lodestar - cl_client_image: ethpandaops/lodestar:g11tech-verge-815364b - validator_count: 0 - el_client_log_level: "debug" - - el_client_type: nethermind - el_client_image: nethermindeth/nethermind:kaustinen-648c6b8 - cl_client_type: lodestar - cl_client_image: ethpandaops/lodestar:g11tech-verge-815364b - validator_count: 0 - el_client_log_level: "debug" -network_params: - electra_fork_epoch: 0 - genesis_delay: 0 - seconds_per_slot: 8 -additional_services: - - el_forkmon - - tx_spammer - - dora - - prometheus_grafana -snooper_enabled: true -ethereum_metrics_exporter_enabled: true diff --git a/examples/verkle-gen.yaml b/examples/verkle-gen.yaml deleted file mode 100644 index 553a11f5a..000000000 --- a/examples/verkle-gen.yaml +++ /dev/null @@ -1,17 +0,0 @@ -participants: - - el_client_type: geth - el_client_image: ethpandaops/geth:gballet-kaustinen-with-shapella-fc8f4b9 - cl_client_type: lighthouse - cl_client_image: ethpandaops/lighthouse:verkle-trees-capella-2ffb8a9 - count: 2 - - el_client_type: geth - el_client_image: ethpandaops/geth:gballet-kaustinen-with-shapella-fc8f4b9 - cl_client_type: lodestar - cl_client_image: 
ethpandaops/lodestar:g11tech-verge-815364b -network_params: - electra_fork_epoch: 0 -additional_services: - - el_forkmon - - tx_spammer - - dora -snooper_enabled: true diff --git a/examples/verkle.yaml b/examples/verkle.yaml deleted file mode 100644 index dc9de4217..000000000 --- a/examples/verkle.yaml +++ /dev/null @@ -1,17 +0,0 @@ -participants: - - el_client_type: geth - el_client_image: ethpandaops/geth:transition-post-genesis-1d80ebd - cl_client_type: lighthouse - cl_client_image: ethpandaops/lighthouse:verkle-trees-capella-2ffb8a9 - count: 2 - - el_client_type: geth - el_client_image: ethpandaops/geth:transition-post-genesis-1d80ebd - cl_client_type: lodestar - cl_client_image: ethpandaops/lodestar:g11tech-verge-815364b -network_params: - electra_fork_epoch: 1 -additional_services: - - el_forkmon - - tx_spammer - - dora -snooper_enabled: true diff --git a/main.star b/main.star index 41b56d46f..1a24190c4 100644 --- a/main.star +++ b/main.star @@ -128,7 +128,7 @@ def run(plan, args={}): all_cl_client_contexts, args_with_right_defaults.participants, ) - if network_params.network == "kurtosis": + if network_params.network == constants.NETWORK_NAME.kurtosis: if network_params.deneb_fork_epoch != 0: plan.print("Launching 4788 contract deployer") el_uri = "http://{0}:{1}".format( diff --git a/src/cl/lighthouse/lighthouse_launcher.star b/src/cl/lighthouse/lighthouse_launcher.star index e2802a2aa..28b5bc15a 100644 --- a/src/cl/lighthouse/lighthouse_launcher.star +++ b/src/cl/lighthouse/lighthouse_launcher.star @@ -353,46 +353,59 @@ def get_beacon_config( "--metrics-allow-origin=*", "--metrics-port={0}".format(BEACON_METRICS_PORT_NUM), # ^^^^^^^^^^^^^^^^^^^ METRICS CONFIG ^^^^^^^^^^^^^^^^^^^^^ + # Enable this flag once we have https://github.com/sigp/lighthouse/issues/5054 fixed + # "--allow-insecure-genesis-sync", ] if network not in constants.PUBLIC_NETWORKS: cmd.append("--testnet-dir=" + constants.GENESIS_CONFIG_MOUNT_PATH_ON_CONTAINER) - else: - 
cmd.append("--network=" + network) - cmd.append("--checkpoint-sync-url=" + constants.CHECKPOINT_SYNC_URL[network]) - - if network == "kurtosis": - if boot_cl_client_ctxs != None: + if network == constants.NETWORK_NAME.kurtosis: + if boot_cl_client_ctxs != None: + cmd.append( + "--boot-nodes=" + + ",".join( + [ + ctx.enr + for ctx in boot_cl_client_ctxs[: constants.MAX_ENR_ENTRIES] + ] + ) + ) + cmd.append( + "--trusted-peers=" + + ",".join( + [ + ctx.peer_id + for ctx in boot_cl_client_ctxs[: constants.MAX_ENR_ENTRIES] + ] + ) + ) + elif network == constants.NETWORK_NAME.ephemery: + cmd.append( + "--checkpoint-sync-url=" + constants.CHECKPOINT_SYNC_URL[network] + ) cmd.append( "--boot-nodes=" - + ",".join( - [ - ctx.enr - for ctx in boot_cl_client_ctxs[: constants.MAX_ENR_ENTRIES] - ] + + shared_utils.get_devnet_enrs_list( + plan, el_cl_genesis_data.files_artifact_uuid ) ) + else: # Devnets + # TODO Remove once checkpoint sync is working for verkle + if constants.NETWORK_NAME.verkle not in network: + cmd.append( + "--checkpoint-sync-url=https://checkpoint-sync.{0}.ethpandaops.io".format( + network + ) + ) cmd.append( - "--trusted-peers=" - + ",".join( - [ - ctx.peer_id - for ctx in boot_cl_client_ctxs[: constants.MAX_ENR_ENTRIES] - ] + "--boot-nodes=" + + shared_utils.get_devnet_enrs_list( + plan, el_cl_genesis_data.files_artifact_uuid ) ) - elif network not in constants.PUBLIC_NETWORKS: - cmd.append( - "--checkpoint-sync-url=https://checkpoint-sync.{0}.ethpandaops.io".format( - network - ) - ) - cmd.append( - "--boot-nodes=" - + shared_utils.get_devnet_enrs_list( - plan, el_cl_genesis_data.files_artifact_uuid - ) - ) + else: # Public networks + cmd.append("--network=" + network) + cmd.append("--checkpoint-sync-url=" + constants.CHECKPOINT_SYNC_URL[network]) if len(extra_params) > 0: # this is a repeated, we convert it into Starlark diff --git a/src/cl/lodestar/lodestar_launcher.star b/src/cl/lodestar/lodestar_launcher.star index f205a9cba..21fbe2e8f 100644 --- 
a/src/cl/lodestar/lodestar_launcher.star +++ b/src/cl/lodestar/lodestar_launcher.star @@ -327,7 +327,7 @@ def get_beacon_config( + constants.GENESIS_CONFIG_MOUNT_PATH_ON_CONTAINER + "/genesis.ssz" ) - if network == "kurtosis": # kurtosis + if network == constants.NETWORK_NAME.kurtosis: if bootnode_contexts != None: cmd.append( "--bootnodes=" @@ -338,19 +338,29 @@ def get_beacon_config( ] ) ) - else: # devnet + elif network == constants.NETWORK_NAME.ephemery: + cmd.append("--checkpointSyncUrl=" + constants.CHECKPOINT_SYNC_URL[network]) cmd.append( - "--checkpointSyncUrl=https://checkpoint-sync.{0}.ethpandaops.io".format( - network + "--bootnodes=" + + shared_utils.get_devnet_enrs_list( + plan, el_cl_genesis_data.files_artifact_uuid ) ) + else: # Devnets + # TODO Remove once checkpoint sync is working for verkle + if constants.NETWORK_NAME.verkle not in network: + cmd.append( + "--checkpointSyncUrl=https://checkpoint-sync.{0}.ethpandaops.io".format( + network + ) + ) cmd.append( "--bootnodes=" + shared_utils.get_devnet_enrs_list( plan, el_cl_genesis_data.files_artifact_uuid ) ) - else: # public testnet + else: # Public testnet cmd.append("--network=" + network) cmd.append("--checkpointSyncUrl=" + constants.CHECKPOINT_SYNC_URL[network]) diff --git a/src/cl/nimbus/nimbus_launcher.star b/src/cl/nimbus/nimbus_launcher.star index acd8628cf..d8f9072b8 100644 --- a/src/cl/nimbus/nimbus_launcher.star +++ b/src/cl/nimbus/nimbus_launcher.star @@ -147,7 +147,7 @@ def launch( ) # Holesky has a bigger memory footprint, so it needs more memory - if launcher.network == "holesky": + if launcher.network == constants.NETWORK_NAME.holesky: holesky_beacon_memory_limit = 4096 bn_max_mem = ( int(bn_max_mem) if int(bn_max_mem) > 0 else holesky_beacon_memory_limit @@ -160,7 +160,7 @@ def launch( network_name = ( "devnets" - if launcher.network != "kurtosis" + if launcher.network != constants.NETWORK_NAME.kurtosis and launcher.network not in constants.PUBLIC_NETWORKS else launcher.network ) 
@@ -374,21 +374,20 @@ def get_beacon_config( if node_keystore_files != None and not split_mode_enabled: cmd.extend(validator_flags) - if network == "kurtosis": - if bootnode_contexts == None: - # Copied from https://github.com/status-im/nimbus-eth2/blob/67ab477a27e358d605e99bffeb67f98d18218eca/scripts/launch_local_testnet.sh#L417 - # See explanation there - cmd.append("--subscribe-all-subnets") - else: - for ctx in bootnode_contexts[: constants.MAX_ENR_ENTRIES]: - cmd.append("--bootstrap-node=" + ctx.enr) - cmd.append("--direct-peer=" + ctx.multiaddr) - elif network not in constants.PUBLIC_NETWORKS: + if network not in constants.PUBLIC_NETWORKS: cmd.append( "--bootstrap-file=" + constants.GENESIS_CONFIG_MOUNT_PATH_ON_CONTAINER + "/bootstrap_nodes.txt" ) + if network == constants.NETWORK_NAME.kurtosis: + if bootnode_contexts == None: + cmd.append("--subscribe-all-subnets") + else: + for ctx in bootnode_contexts[: constants.MAX_ENR_ENTRIES]: + cmd.append("--bootstrap-node=" + ctx.enr) + cmd.append("--direct-peer=" + ctx.multiaddr) + if len(extra_params) > 0: cmd.extend([param for param in extra_params]) diff --git a/src/cl/prysm/prysm_launcher.star b/src/cl/prysm/prysm_launcher.star index 5230b23b5..ff4cdf7fa 100644 --- a/src/cl/prysm/prysm_launcher.star +++ b/src/cl/prysm/prysm_launcher.star @@ -324,6 +324,7 @@ def get_beacon_config( "--monitoring-port={0}".format(BEACON_MONITORING_PORT_NUM) # ^^^^^^^^^^^^^^^^^^^ METRICS CONFIG ^^^^^^^^^^^^^^^^^^^^^ ] + if network not in constants.PUBLIC_NETWORKS: cmd.append("--p2p-static-id=true") cmd.append( @@ -336,22 +337,41 @@ def get_beacon_config( + constants.GENESIS_CONFIG_MOUNT_PATH_ON_CONTAINER + "/genesis.ssz", ) - if network == "kurtosis": # Kurtosis + if network == constants.NETWORK_NAME.kurtosis: if bootnode_contexts != None: for ctx in bootnode_contexts[: constants.MAX_ENR_ENTRIES]: cmd.append("--peer=" + ctx.multiaddr) cmd.append("--bootstrap-node=" + ctx.enr) - else: # Devnet + elif network == 
constants.NETWORK_NAME.ephemery: cmd.append( - "--checkpoint-sync-url=https://checkpoint-sync.{0}.ethpandaops.io".format( - network - ) + "--genesis-beacon-api-url=" + constants.CHECKPOINT_SYNC_URL[network] + ) + cmd.append( + "--checkpoint-sync-url=" + constants.CHECKPOINT_SYNC_URL[network] ) cmd.append( "--bootstrap-node=" + constants.GENESIS_CONFIG_MOUNT_PATH_ON_CONTAINER + "/boot_enr.yaml" ) + else: # Devnets + # TODO Remove once checkpoint sync is working for verkle + if constants.NETWORK_NAME.verkle not in network: + cmd.append( + "--genesis-beacon-api-url=https://checkpoint-sync.{0}.ethpandaops.io".format( + network + ) + ) + cmd.append( + "--checkpoint-sync-url=https://checkpoint-sync.{0}.ethpandaops.io".format( + network + ) + ) + cmd.append( + "--bootstrap-node=" + + constants.GENESIS_CONFIG_MOUNT_PATH_ON_CONTAINER + + "/boot_enr.yaml" + ) else: # Public network cmd.append("--{}".format(network)) cmd.append("--genesis-beacon-api-url=" + constants.CHECKPOINT_SYNC_URL[network]) diff --git a/src/cl/teku/teku_launcher.star b/src/cl/teku/teku_launcher.star index 4a36569c7..d2dfd47e6 100644 --- a/src/cl/teku/teku_launcher.star +++ b/src/cl/teku/teku_launcher.star @@ -370,46 +370,61 @@ def get_beacon_config( + el_client_context.client_name, ] + if node_keystore_files != None and not split_mode_enabled: + cmd.extend(validator_flags) + if network not in constants.PUBLIC_NETWORKS: cmd.append( "--initial-state=" + constants.GENESIS_CONFIG_MOUNT_PATH_ON_CONTAINER + "/genesis.ssz" ) - else: - cmd.append("--checkpoint-sync-url=" + constants.CHECKPOINT_SYNC_URL[network]) - - if node_keystore_files != None and not split_mode_enabled: - cmd.extend(validator_flags) - if network == "kurtosis": - if bootnode_contexts != None: + if network == constants.NETWORK_NAME.kurtosis: + if bootnode_contexts != None: + cmd.append( + "--p2p-discovery-bootnodes=" + + ",".join( + [ + ctx.enr + for ctx in bootnode_contexts[: constants.MAX_ENR_ENTRIES] + ] + ) + ) + cmd.append( + 
"--p2p-static-peers=" + + ",".join( + [ + ctx.multiaddr + for ctx in bootnode_contexts[: constants.MAX_ENR_ENTRIES] + ] + ) + ) + elif network == constants.NETWORK_NAME.ephemery: + cmd.append( + "--checkpoint-sync-url=" + constants.CHECKPOINT_SYNC_URL[network] + ) cmd.append( "--p2p-discovery-bootnodes=" - + ",".join( - [ctx.enr for ctx in bootnode_contexts[: constants.MAX_ENR_ENTRIES]] + + shared_utils.get_devnet_enrs_list( + plan, el_cl_genesis_data.files_artifact_uuid ) ) + else: # Devnets + # TODO Remove once checkpoint sync is working for verkle + if constants.NETWORK_NAME.verkle not in network: + cmd.append( + "--checkpoint-sync-url=https://checkpoint-sync.{0}.ethpandaops.io".format( + network + ) + ) cmd.append( - "--p2p-static-peers=" - + ",".join( - [ - ctx.multiaddr - for ctx in bootnode_contexts[: constants.MAX_ENR_ENTRIES] - ] + "--p2p-discovery-bootnodes=" + + shared_utils.get_devnet_enrs_list( + plan, el_cl_genesis_data.files_artifact_uuid ) ) - elif network not in constants.PUBLIC_NETWORKS: - cmd.append( - "--checkpoint-sync-url=https://checkpoint-sync.{0}.ethpandaops.io".format( - network - ) - ) - cmd.append( - "--p2p-discovery-bootnodes=" - + shared_utils.get_devnet_enrs_list( - plan, el_cl_genesis_data.files_artifact_uuid - ) - ) + else: # Public networks + cmd.append("--checkpoint-sync-url=" + constants.CHECKPOINT_SYNC_URL[network]) if len(extra_params) > 0: # we do the list comprehension as the default extra_params is a proto repeated string diff --git a/src/el/besu/besu_launcher.star b/src/el/besu/besu_launcher.star index b589436d9..fe3c8b3a4 100644 --- a/src/el/besu/besu_launcher.star +++ b/src/el/besu/besu_launcher.star @@ -198,7 +198,7 @@ def get_config( else: cmd.append("--network=" + network) - if network == "kurtosis": + if network == constants.NETWORK_NAME.kurtosis: if len(existing_el_clients) > 0: cmd.append( "--bootnodes=" diff --git a/src/el/erigon/erigon_launcher.star b/src/el/erigon/erigon_launcher.star index 711476412..2306ac3c5 
100644 --- a/src/el/erigon/erigon_launcher.star +++ b/src/el/erigon/erigon_launcher.star @@ -213,7 +213,7 @@ def get_config( size=el_volume_size, ) - if network == "kurtosis": + if network == constants.NETWORK_NAME.kurtosis: if len(existing_el_clients) > 0: cmd.append( "--bootnodes=" diff --git a/src/el/ethereumjs/ethereumjs_launcher.star b/src/el/ethereumjs/ethereumjs_launcher.star index a0a391a42..a0a0901f1 100644 --- a/src/el/ethereumjs/ethereumjs_launcher.star +++ b/src/el/ethereumjs/ethereumjs_launcher.star @@ -197,7 +197,7 @@ def get_config( else: cmd.append("--network=" + network) - if network == "kurtosis": + if network == constants.NETWORK_NAME.kurtosis: if len(existing_el_clients) > 0: cmd.append( "--bootnodes=" diff --git a/src/el/geth/geth_launcher.star b/src/el/geth/geth_launcher.star index df528ba9c..61e47160b 100644 --- a/src/el/geth/geth_launcher.star +++ b/src/el/geth/geth_launcher.star @@ -184,8 +184,10 @@ def get_config( el_volume_size, ): # TODO: Remove this once electra fork has path based storage scheme implemented - if electra_fork_epoch != None or "verkle" in network: - if electra_fork_epoch == 0 or "verkle-gen" in network: # verkle-gen + if electra_fork_epoch != None or constants.NETWORK_NAME.verkle in network: + if ( + electra_fork_epoch == 0 or constants.NETWORK_NAME.verkle + "-gen" in network + ): # verkle-gen init_datadir_cmd_str = "geth --datadir={0} --cache.preimages --override.prague={1} init {2}".format( EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER, final_genesis_timestamp, @@ -273,7 +275,7 @@ def get_config( if "--ws.api" in arg: cmd[index] = "--ws.api=admin,engine,net,eth,web3,debug,mev,flashbots" - if network == "kurtosis": + if network == constants.NETWORK_NAME.kurtosis: if len(existing_el_clients) > 0: cmd.append( "--bootnodes=" diff --git a/src/el/nethermind/nethermind_launcher.star b/src/el/nethermind/nethermind_launcher.star index c7deeb771..0cfcef765 100644 --- a/src/el/nethermind/nethermind_launcher.star +++ 
b/src/el/nethermind/nethermind_launcher.star @@ -194,7 +194,7 @@ def get_config( else: cmd.append("--config=" + network) - if network == "kurtosis": + if network == constants.NETWORK_NAME.kurtosis: if len(existing_el_clients) > 0: cmd.append( "--Network.StaticPeers=" diff --git a/src/el/reth/reth_launcher.star b/src/el/reth/reth_launcher.star index 17ff0bf33..cadca6348 100644 --- a/src/el/reth/reth_launcher.star +++ b/src/el/reth/reth_launcher.star @@ -198,7 +198,7 @@ def get_config( "--authrpc.addr=0.0.0.0", "--metrics=0.0.0.0:{0}".format(METRICS_PORT_NUM), ] - if network == "kurtosis": + if network == constants.NETWORK_NAME.kurtosis: if len(existing_el_clients) > 0: cmd.append( "--bootnodes=" diff --git a/src/package_io/constants.star b/src/package_io/constants.star index 2dd9209b9..a0055a3cc 100644 --- a/src/package_io/constants.star +++ b/src/package_io/constants.star @@ -53,6 +53,16 @@ CAPELLA_FORK_VERSION = "0x40000038" DENEB_FORK_VERSION = "0x50000038" ELECTRA_FORK_VERSION = "0x60000038" +NETWORK_NAME = struct( + mainnet="mainnet", + goerli="goerli", + sepolia="sepolia", + holesky="holesky", + ephemery="ephemery", + kurtosis="kurtosis", + verkle="verkle", +) + PUBLIC_NETWORKS = ( "mainnet", "goerli", @@ -72,6 +82,7 @@ CHECKPOINT_SYNC_URL = { "goerli": "https://checkpoint-sync.goerli.ethpandaops.io", "sepolia": "https://checkpoint-sync.sepolia.ethpandaops.io", "holesky": "https://checkpoint-sync.holesky.ethpandaops.io", + "ephemery": "https://checkpointz.bordel.wtf/", } GENESIS_VALIDATORS_ROOT = { diff --git a/src/participant_network.star b/src/participant_network.star index cc0034ff0..329dfba22 100644 --- a/src/participant_network.star +++ b/src/participant_network.star @@ -67,7 +67,7 @@ def launch_participant_network( parallel_keystore_generation=False, ): num_participants = len(participants) - if network_params.network == "kurtosis": + if network_params.network == constants.NETWORK_NAME.kurtosis: # We are running a kurtosis network plan.print("Generating 
cl validator key stores") validator_data = None @@ -168,6 +168,27 @@ def launch_participant_network( final_genesis_timestamp = constants.GENESIS_TIME[network_params.network] network_id = constants.NETWORK_ID[network_params.network] validator_data = None + elif network_params.network == constants.NETWORK_NAME.ephemery: + el_cl_genesis_data_uuid = plan.run_sh( + run="mkdir -p /network-configs/ && \ + curl -o latest.tar.gz https://ephemery.dev/latest.tar.gz && \ + tar xvzf latest.tar.gz -C /network-configs && \ + cat /network-configs/genesis_validators_root.txt", + image="badouralix/curl-jq", + store=[StoreSpec(src="/network-configs/", name="el_cl_genesis_data")], + ) + genesis_validators_root = el_cl_genesis_data_uuid.output + el_cl_data = el_cl_genesis_data.new_el_cl_genesis_data( + el_cl_genesis_data_uuid.files_artifacts[0], + genesis_validators_root, + ) + final_genesis_timestamp = shared_utils.read_genesis_timestamp_from_config( + plan, el_cl_genesis_data_uuid.files_artifacts[0] + ) + network_id = shared_utils.read_genesis_network_id_from_config( + plan, el_cl_genesis_data_uuid.files_artifacts[0] + ) + validator_data = None else: # We are running a devnet url = calculate_devnet_url(network_params.network) @@ -187,10 +208,10 @@ def launch_participant_network( genesis_validators_root, ) final_genesis_timestamp = shared_utils.read_genesis_timestamp_from_config( - plan, el_cl_genesis_uuid + plan, el_cl_genesis_data_uuid.files_artifacts[0] ) network_id = shared_utils.read_genesis_network_id_from_config( - plan, el_cl_genesis_uuid + plan, el_cl_genesis_data_uuid.files_artifacts[0] ) validator_data = None @@ -318,12 +339,12 @@ def launch_participant_network( plan.print("Launching CL network") prysm_password_relative_filepath = ( validator_data.prysm_password_relative_filepath - if network_params.network == "kurtosis" + if network_params.network == constants.NETWORK_NAME.kurtosis else None ) prysm_password_artifact_uuid = ( validator_data.prysm_password_artifact_uuid - 
if network_params.network == "kurtosis" + if network_params.network == constants.NETWORK_NAME.kurtosis else None ) cl_launchers = { @@ -371,7 +392,7 @@ def launch_participant_network( all_xatu_sentry_contexts = [] preregistered_validator_keys_for_nodes = ( validator_data.per_node_keystores - if network_params.network == "kurtosis" + if network_params.network == constants.NETWORK_NAME.kurtosis else None ) diff --git a/src/shared_utils/shared_utils.star b/src/shared_utils/shared_utils.star index 541913cae..52bafa711 100644 --- a/src/shared_utils/shared_utils.star +++ b/src/shared_utils/shared_utils.star @@ -114,7 +114,7 @@ def read_genesis_timestamp_from_config(plan, filename): packages=["PyYAML"], run=""" import yaml -with open("/network-configs/config.yaml", "r") as f: +with open("/network-configs/network-configs/config.yaml", "r") as f: yaml_data = yaml.safe_load(f) min_genesis_time = int(yaml_data.get("MIN_GENESIS_TIME", 0)) @@ -132,7 +132,7 @@ def read_genesis_network_id_from_config(plan, filename): packages=["PyYAML"], run=""" import yaml -with open("/network-configs/config.yaml", "r") as f: +with open("/network-configs/network-configs/config.yaml", "r") as f: yaml_data = yaml.safe_load(f) network_id = int(yaml_data.get("DEPOSIT_NETWORK_ID", 0)) print(network_id, end="") From e36027b91de0ae8943012ffd6ba776142d2e2d78 Mon Sep 17 00:00:00 2001 From: Barnabas Busa Date: Mon, 22 Jan 2024 15:09:00 +0100 Subject: [PATCH 04/33] fix: add CL genesis delay to final genesis time (#469) --- README.md | 2 +- network_params.yaml | 2 +- src/package_io/input_parser.star | 2 +- src/participant_network.star | 4 +++- 4 files changed, 6 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 3645ae7f0..c9ccd9977 100644 --- a/README.md +++ b/README.md @@ -281,7 +281,7 @@ network_params: # The number of pre-registered validators for genesis. 
If 0 or not specified then the value will be calculated from the participants preregistered_validator_count: 0 # How long you want the network to wait before starting up - genesis_delay: 120 + genesis_delay: 20 # Max churn rate for the network introduced by # EIP-7514 https:#eips.ethereum.org/EIPS/eip-7514 diff --git a/network_params.yaml b/network_params.yaml index 984ecf931..8f533ac35 100644 --- a/network_params.yaml +++ b/network_params.yaml @@ -44,7 +44,7 @@ network_params: question tent bar rely arctic volcano long crawl hungry vocal artwork sniff fantasy very lucky have athlete" preregistered_validator_count: 0 - genesis_delay: 120 + genesis_delay: 20 max_churn: 8 ejection_balance: 16000000000 capella_fork_epoch: 0 diff --git a/src/package_io/input_parser.star b/src/package_io/input_parser.star index b3852d532..b55d9fbd5 100644 --- a/src/package_io/input_parser.star +++ b/src/package_io/input_parser.star @@ -479,7 +479,7 @@ def default_network_params(): "network_id": "3151908", "deposit_contract_address": "0x4242424242424242424242424242424242424242", "seconds_per_slot": 12, - "genesis_delay": 120, + "genesis_delay": 20, "max_churn": 8, "ejection_balance": 16000000000, "eth1_follow_distance": 2048, diff --git a/src/participant_network.star b/src/participant_network.star index 329dfba22..c52410ab9 100644 --- a/src/participant_network.star +++ b/src/participant_network.star @@ -91,7 +91,9 @@ def launch_participant_network( # We need to send the same genesis time to both the EL and the CL to ensure that timestamp based forking works as expected final_genesis_timestamp = get_final_genesis_timestamp( plan, - CL_GENESIS_DATA_GENERATION_TIME + num_participants * CL_NODE_STARTUP_TIME, + network_params.genesis_delay + + CL_GENESIS_DATA_GENERATION_TIME + + num_participants * CL_NODE_STARTUP_TIME, ) # if preregistered validator count is 0 (default) then calculate the total number of validators from the participants From 26384eab07859b0c2343812669494cea388b38f6 Mon Sep 17 
00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 23 Jan 2024 10:46:32 +0000 Subject: [PATCH 05/33] chore(main): release 1.3.0 (#436) :robot: I have created a release *beep* *boop* --- ## [1.3.0](https://github.com/kurtosis-tech/ethereum-package/compare/1.2.0...1.3.0) (2024-01-22) ### Features * add assertoor to additional toolings ([#419](https://github.com/kurtosis-tech/ethereum-package/issues/419)) ([76dde3e](https://github.com/kurtosis-tech/ethereum-package/commit/76dde3ed421da0d7f8ba16f46565b07019be76c0)) * add devnets support ([#384](https://github.com/kurtosis-tech/ethereum-package/issues/384)) ([2bae099](https://github.com/kurtosis-tech/ethereum-package/commit/2bae09931ed1cdcfe499efaae420c981dabcea62)) * add pitfalls for persistent storage as a warning ([#441](https://github.com/kurtosis-tech/ethereum-package/issues/441)) ([69da8f0](https://github.com/kurtosis-tech/ethereum-package/commit/69da8f04fcfd5ce19365bd89ca73c13cbc40d76a)) * add support for testnets ([#437](https://github.com/kurtosis-tech/ethereum-package/issues/437)) ([5584cc8](https://github.com/kurtosis-tech/ethereum-package/commit/5584cc84c50ca9845c544810fb8331ec8fcdcbc8)) * Add Xatu Sentry ([#466](https://github.com/kurtosis-tech/ethereum-package/issues/466)) ([b9523cb](https://github.com/kurtosis-tech/ethereum-package/commit/b9523cb7083be78c96bb88a7ca86d142cb0eec1d)) * enable checkpoint sync for devnets ([#448](https://github.com/kurtosis-tech/ethereum-package/issues/448)) ([b367cfe](https://github.com/kurtosis-tech/ethereum-package/commit/b367cfe875900bdc8aa70dc8b1d8aebdbcf81593)) * enable persistence ([#422](https://github.com/kurtosis-tech/ethereum-package/issues/422)) ([8d40056](https://github.com/kurtosis-tech/ethereum-package/commit/8d400566aa54132dccaa7ff129adc12e547907a0)) * enable syncing ephemery ([#459](https://github.com/kurtosis-tech/ethereum-package/issues/459)) 
([f8289cb](https://github.com/kurtosis-tech/ethereum-package/commit/f8289cb49f68dd488635d2313c007ee7c2f4dbf3)) * enable syncing shadowforks ([#457](https://github.com/kurtosis-tech/ethereum-package/issues/457)) ([313a586](https://github.com/kurtosis-tech/ethereum-package/commit/313a586965efa6739e8d4055f1263a89d48ff499)) ### Bug Fixes * add CL genesis delay to final genesis time ([#469](https://github.com/kurtosis-tech/ethereum-package/issues/469)) ([e36027b](https://github.com/kurtosis-tech/ethereum-package/commit/e36027b91de0ae8943012ffd6ba776142d2e2d78)) * add prysm-multiarch upstream image ([#451](https://github.com/kurtosis-tech/ethereum-package/issues/451)) ([6feba23](https://github.com/kurtosis-tech/ethereum-package/commit/6feba237fbdfae021402ceeec89baa75df6d83d5)) * added support for boot enr file ([#456](https://github.com/kurtosis-tech/ethereum-package/issues/456)) ([fd26e5c](https://github.com/kurtosis-tech/ethereum-package/commit/fd26e5c31609b48e1d6718f72d295a27a7d84a49)) * bump max mem limit for nimbus on holesky ([#439](https://github.com/kurtosis-tech/ethereum-package/issues/439)) ([fb84787](https://github.com/kurtosis-tech/ethereum-package/commit/fb84787694faa86872828b92529f51e6c9ac7d44)) * dora template fix ([#452](https://github.com/kurtosis-tech/ethereum-package/issues/452)) ([f9243ea](https://github.com/kurtosis-tech/ethereum-package/commit/f9243ea8cdec8a0145206831c9c043269c80e863)) * enable ws for geth ([#446](https://github.com/kurtosis-tech/ethereum-package/issues/446)) ([d5bf451](https://github.com/kurtosis-tech/ethereum-package/commit/d5bf45150dc09432bb84b366d2deda8c6036afea)) * erigon chain should be set to dev ([#447](https://github.com/kurtosis-tech/ethereum-package/issues/447)) ([1f40d84](https://github.com/kurtosis-tech/ethereum-package/commit/1f40d8402666310cad81066852110aa20627471b)) * erigon command arg ([#454](https://github.com/kurtosis-tech/ethereum-package/issues/454)) 
([5ae56a1](https://github.com/kurtosis-tech/ethereum-package/commit/5ae56a17773122827b074963dee40a43a00478ea)) * fix typo ([#440](https://github.com/kurtosis-tech/ethereum-package/issues/440)) ([933a313](https://github.com/kurtosis-tech/ethereum-package/commit/933a3133bf9b1fe96ea3c537b26c3c8ced0a35e3)) * guid fix for besu/teku/erigon/nimbus ([#443](https://github.com/kurtosis-tech/ethereum-package/issues/443)) ([2283464](https://github.com/kurtosis-tech/ethereum-package/commit/2283464b614b0ade4aa98fccd842e8e4b23e188a)) * increase db size for geth ([#453](https://github.com/kurtosis-tech/ethereum-package/issues/453)) ([0c67998](https://github.com/kurtosis-tech/ethereum-package/commit/0c67998567a4ab60dd0355b734076ee47b988326)) * logging bug ([#462](https://github.com/kurtosis-tech/ethereum-package/issues/462)) ([f6098a1](https://github.com/kurtosis-tech/ethereum-package/commit/f6098a1572923655426f25eab936b7a0b9fbc116)) * parallel key generation ([#423](https://github.com/kurtosis-tech/ethereum-package/issues/423)) ([060fd8f](https://github.com/kurtosis-tech/ethereum-package/commit/060fd8fb3ed8e12be895a43912787313c1ad4a5f)) * re-add networkid ([#464](https://github.com/kurtosis-tech/ethereum-package/issues/464)) ([4d96409](https://github.com/kurtosis-tech/ethereum-package/commit/4d96409cdbd1a367fc1e924cb9183eadce4eeae7)) * typo ([#445](https://github.com/kurtosis-tech/ethereum-package/issues/445)) ([e61c58a](https://github.com/kurtosis-tech/ethereum-package/commit/e61c58a8c2944cbf2699bd75d25a2e63d8e0621c)) * Update nethermind to expose host on 0.0.0.0 ([#467](https://github.com/kurtosis-tech/ethereum-package/issues/467)) ([0bd29dd](https://github.com/kurtosis-tech/ethereum-package/commit/0bd29dd7d61dae77b7820f79d46e8a52e74267c2)) * use all enrs for nimbus via bootstrap file ([#450](https://github.com/kurtosis-tech/ethereum-package/issues/450)) ([bb5a0c1](https://github.com/kurtosis-tech/ethereum-package/commit/bb5a0c1b5b051b23b185cfd366a2dfed3f44d903)) --- This PR was 
generated with [Release Please](https://github.com/googleapis/release-please). See [documentation](https://github.com/googleapis/release-please#release-please). Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- CHANGELOG.md | 36 ++++++++++++++++++++++++++++++++++++ version.txt | 2 +- 2 files changed, 37 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 330c2e12b..624d28393 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,41 @@ # Changelog +## [1.3.0](https://github.com/kurtosis-tech/ethereum-package/compare/1.2.0...1.3.0) (2024-01-22) + + +### Features + +* add assertoor to additional toolings ([#419](https://github.com/kurtosis-tech/ethereum-package/issues/419)) ([76dde3e](https://github.com/kurtosis-tech/ethereum-package/commit/76dde3ed421da0d7f8ba16f46565b07019be76c0)) +* add devnets support ([#384](https://github.com/kurtosis-tech/ethereum-package/issues/384)) ([2bae099](https://github.com/kurtosis-tech/ethereum-package/commit/2bae09931ed1cdcfe499efaae420c981dabcea62)) +* add pitfalls for persistent storage as a warning ([#441](https://github.com/kurtosis-tech/ethereum-package/issues/441)) ([69da8f0](https://github.com/kurtosis-tech/ethereum-package/commit/69da8f04fcfd5ce19365bd89ca73c13cbc40d76a)) +* add support for testnets ([#437](https://github.com/kurtosis-tech/ethereum-package/issues/437)) ([5584cc8](https://github.com/kurtosis-tech/ethereum-package/commit/5584cc84c50ca9845c544810fb8331ec8fcdcbc8)) +* Add Xatu Sentry ([#466](https://github.com/kurtosis-tech/ethereum-package/issues/466)) ([b9523cb](https://github.com/kurtosis-tech/ethereum-package/commit/b9523cb7083be78c96bb88a7ca86d142cb0eec1d)) +* enable checkpoint sync for devnets ([#448](https://github.com/kurtosis-tech/ethereum-package/issues/448)) ([b367cfe](https://github.com/kurtosis-tech/ethereum-package/commit/b367cfe875900bdc8aa70dc8b1d8aebdbcf81593)) +* enable persistence 
([#422](https://github.com/kurtosis-tech/ethereum-package/issues/422)) ([8d40056](https://github.com/kurtosis-tech/ethereum-package/commit/8d400566aa54132dccaa7ff129adc12e547907a0)) +* enable syncing ephemery ([#459](https://github.com/kurtosis-tech/ethereum-package/issues/459)) ([f8289cb](https://github.com/kurtosis-tech/ethereum-package/commit/f8289cb49f68dd488635d2313c007ee7c2f4dbf3)) +* enable syncing shadowforks ([#457](https://github.com/kurtosis-tech/ethereum-package/issues/457)) ([313a586](https://github.com/kurtosis-tech/ethereum-package/commit/313a586965efa6739e8d4055f1263a89d48ff499)) + + +### Bug Fixes + +* add CL genesis delay to final genesis time ([#469](https://github.com/kurtosis-tech/ethereum-package/issues/469)) ([e36027b](https://github.com/kurtosis-tech/ethereum-package/commit/e36027b91de0ae8943012ffd6ba776142d2e2d78)) +* add prysm-multiarch upstream image ([#451](https://github.com/kurtosis-tech/ethereum-package/issues/451)) ([6feba23](https://github.com/kurtosis-tech/ethereum-package/commit/6feba237fbdfae021402ceeec89baa75df6d83d5)) +* added support for boot enr file ([#456](https://github.com/kurtosis-tech/ethereum-package/issues/456)) ([fd26e5c](https://github.com/kurtosis-tech/ethereum-package/commit/fd26e5c31609b48e1d6718f72d295a27a7d84a49)) +* bump max mem limit for nimbus on holesky ([#439](https://github.com/kurtosis-tech/ethereum-package/issues/439)) ([fb84787](https://github.com/kurtosis-tech/ethereum-package/commit/fb84787694faa86872828b92529f51e6c9ac7d44)) +* dora template fix ([#452](https://github.com/kurtosis-tech/ethereum-package/issues/452)) ([f9243ea](https://github.com/kurtosis-tech/ethereum-package/commit/f9243ea8cdec8a0145206831c9c043269c80e863)) +* enable ws for geth ([#446](https://github.com/kurtosis-tech/ethereum-package/issues/446)) ([d5bf451](https://github.com/kurtosis-tech/ethereum-package/commit/d5bf45150dc09432bb84b366d2deda8c6036afea)) +* erigon chain should be set to dev
([#447](https://github.com/kurtosis-tech/ethereum-package/issues/447)) ([1f40d84](https://github.com/kurtosis-tech/ethereum-package/commit/1f40d8402666310cad81066852110aa20627471b)) +* erigon command arg ([#454](https://github.com/kurtosis-tech/ethereum-package/issues/454)) ([5ae56a1](https://github.com/kurtosis-tech/ethereum-package/commit/5ae56a17773122827b074963dee40a43a00478ea)) +* fix typo ([#440](https://github.com/kurtosis-tech/ethereum-package/issues/440)) ([933a313](https://github.com/kurtosis-tech/ethereum-package/commit/933a3133bf9b1fe96ea3c537b26c3c8ced0a35e3)) +* guid fix for besu/teku/erigon/nimbus ([#443](https://github.com/kurtosis-tech/ethereum-package/issues/443)) ([2283464](https://github.com/kurtosis-tech/ethereum-package/commit/2283464b614b0ade4aa98fccd842e8e4b23e188a)) +* increase db size for geth ([#453](https://github.com/kurtosis-tech/ethereum-package/issues/453)) ([0c67998](https://github.com/kurtosis-tech/ethereum-package/commit/0c67998567a4ab60dd0355b734076ee47b988326)) +* logging bug ([#462](https://github.com/kurtosis-tech/ethereum-package/issues/462)) ([f6098a1](https://github.com/kurtosis-tech/ethereum-package/commit/f6098a1572923655426f25eab936b7a0b9fbc116)) +* parallel key generation ([#423](https://github.com/kurtosis-tech/ethereum-package/issues/423)) ([060fd8f](https://github.com/kurtosis-tech/ethereum-package/commit/060fd8fb3ed8e12be895a43912787313c1ad4a5f)) +* re-add networkid ([#464](https://github.com/kurtosis-tech/ethereum-package/issues/464)) ([4d96409](https://github.com/kurtosis-tech/ethereum-package/commit/4d96409cdbd1a367fc1e924cb9183eadce4eeae7)) +* typo ([#445](https://github.com/kurtosis-tech/ethereum-package/issues/445)) ([e61c58a](https://github.com/kurtosis-tech/ethereum-package/commit/e61c58a8c2944cbf2699bd75d25a2e63d8e0621c)) +* Update nethermind to expose host on 0.0.0.0 ([#467](https://github.com/kurtosis-tech/ethereum-package/issues/467)) 
([0bd29dd](https://github.com/kurtosis-tech/ethereum-package/commit/0bd29dd7d61dae77b7820f79d46e8a52e74267c2)) +* use all enrs for nimbus via bootstrap file ([#450](https://github.com/kurtosis-tech/ethereum-package/issues/450)) ([bb5a0c1](https://github.com/kurtosis-tech/ethereum-package/commit/bb5a0c1b5b051b23b185cfd366a2dfed3f44d903)) + ## [1.2.0](https://github.com/kurtosis-tech/ethereum-package/compare/1.1.0...1.2.0) (2024-01-03) diff --git a/version.txt b/version.txt index 26aaba0e8..f0bb29e76 100644 --- a/version.txt +++ b/version.txt @@ -1 +1 @@ -1.2.0 +1.3.0 From 5db6611ab831a92212a21859b42a911cd12bce0c Mon Sep 17 00:00:00 2001 From: Barnabas Busa Date: Mon, 29 Jan 2024 10:06:53 +0100 Subject: [PATCH 06/33] feat: enable custom resource limit per network (#471) --- .github/tests/tolerations.yaml | 36 ++++ README.md | 76 +++++++++ main.star | 2 + src/cl/lighthouse/lighthouse_launcher.star | 43 ++++- src/cl/lodestar/lodestar_launcher.star | 41 ++++- src/cl/nimbus/nimbus_launcher.star | 51 ++++-- src/cl/prysm/prysm_launcher.star | 42 ++++- src/cl/teku/teku_launcher.star | 44 +++-- src/el/besu/besu_launcher.star | 32 +++- src/el/erigon/erigon_launcher.star | 35 +++- src/el/ethereumjs/ethereumjs_launcher.star | 31 +++- src/el/geth/geth_launcher.star | 31 +++- src/el/nethermind/nethermind_launcher.star | 34 +++- src/el/reth/reth_launcher.star | 32 +++- src/package_io/constants.star | 184 +++++++++++++++++++++ src/package_io/input_parser.star | 49 ++++++ src/participant_network.star | 12 ++ 17 files changed, 680 insertions(+), 95 deletions(-) create mode 100644 .github/tests/tolerations.yaml diff --git a/.github/tests/tolerations.yaml b/.github/tests/tolerations.yaml new file mode 100644 index 000000000..140938834 --- /dev/null +++ b/.github/tests/tolerations.yaml @@ -0,0 +1,36 @@ +participants: + - el_client_type: reth + cl_client_type: teku + cl_split_mode_enabled: true + cl_tolerations: + - key: "node-role.kubernetes.io/master1" + operator: "Exists" + effect: 
"NoSchedule" + - key: "node-role.kubernetes.io/master2" + operator: "Exists" + effect: "NoSchedule" + el_tolerations: + - key: "node-role.kubernetes.io/master3" + operator: "Exists" + effect: "NoSchedule" + validator_tolerations: + - key: "node-role.kubernetes.io/master4" + operator: "Exists" + effect: "NoSchedule" + - el_client_type: reth + cl_client_type: teku + cl_split_mode_enabled: true + tolerations: + - key: "node-role.kubernetes.io/master5" + operator: "Exists" + effect: "NoSchedule" + - el_client_type: reth + cl_client_type: teku + cl_split_mode_enabled: true +additional_services: + - dora +global_tolerations: + - key: "node-role.kubernetes.io/master6" + value: "true" + operator: "Equal" + effect: "NoSchedule" diff --git a/README.md b/README.md index c9ccd9977..85c0fd7c9 100644 --- a/README.md +++ b/README.md @@ -64,6 +64,26 @@ To mitigate these issues, you can use the `el_client_volume_size` and `cl_client For optimal performance, we recommend using a cloud provider that allows you to provision Kubernetes clusters with fast persistent storage or self hosting your own Kubernetes cluster with fast persistent storage. +#### Taints and tolerations +It is possible to run the package on a Kubernetes cluster with taints and tolerations. This is done by adding the tolerations to the `tolerations` field in the `network_params.yaml` file. For example: +```yaml +participants: + - el_client_type: reth + cl_client_type: teku +global_tolerations: + - key: "node-role.kubernetes.io/master6" + value: "true" + operator: "Equal" + effect: "NoSchedule" +``` + +It is possible to define toleration globally, per participant or per container. The order of precedence is as follows: +1. Container (`el_tolerations`, `cl_tolerations`, `validator_tolerations`) +2. Participant (`tolerations`) +3. Global (`global_tolerations`) + +This feature is only available for Kubernetes. 
To learn more about taints and tolerations, please visit the [Kubernetes documentation](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/). + #### Tear down The testnet will reside in an [enclave][enclave] - an isolated, ephemeral environment. The enclave and its contents (e.g. running containers, files artifacts, etc) will persist until torn down. You can remove an enclave and its contents with: @@ -147,6 +167,17 @@ participants: # Example; el_extra_labels: {"ethereum-package.partition": "1"} el_extra_labels: {} + # A list of tolerations that will be passed to the EL client container + # Only works with Kubernetes + # Example: el_tolerations: + # - key: "key" + # operator: "Equal" + # value: "value" + # effect: "NoSchedule" + # toleration_seconds: 3600 + # Defaults to empty + el_tolerations: [] + # The type of CL client that should be started # Valid values are nimbus, lighthouse, lodestar, teku, and prysm cl_client_type: lighthouse @@ -178,6 +209,40 @@ participants: # Default values can be found in /src/package_io/constants.star VOLUME_SIZE cl_client_volume_size: 0 + # A list of tolerations that will be passed to the CL client container + # Only works with Kubernetes + # Example: cl_tolerations: + # - key: "key" + # operator: "Equal" + # value: "value" + # effect: "NoSchedule" + # toleration_seconds: 3600 + # Defaults to empty + cl_tolerations: [] + + # A list of tolerations that will be passed to the validator container + # Only works with Kubernetes + # Example: validator_tolerations: + # - key: "key" + # operator: "Equal" + # value: "value" + # effect: "NoSchedule" + # toleration_seconds: 3600 + # Defaults to empty + validator_tolerations: [] + + # A list of tolerations that will be passed to the EL/CL/validator containers + # This is to be used when you don't want to specify the tolerations for each container separately + # Only works with Kubernetes + # Example: tolerations: + # - key: "key" + # operator: "Equal" + # value: "value" + #
effect: "NoSchedule" + # toleration_seconds: 3600 + # Defaults to empty + tolerations: [] + # A list of optional extra params that will be passed to the CL client Beacon container for modifying its behaviour # If the client combines the Beacon & validator nodes (e.g. Teku, Nimbus), then this list will be passed to the combined Beacon-validator node beacon_extra_params: [] @@ -495,6 +560,17 @@ xatu_sentry_params: - voluntary_exit - contribution_and_proof - blob_sidecar + +# Global tolerations that will be passed to all containers (unless overridden by a more specific toleration) +# Only works with Kubernetes +# Example: tolerations: +# - key: "key" +# operator: "Equal" +# value: "value" +# effect: "NoSchedule" +# toleration_seconds: 3600 +# Defaults to empty +global_tolerations: [] ``` #### Example configurations diff --git a/main.star b/main.star index 1a24190c4..4a219d12d 100644 --- a/main.star +++ b/main.star @@ -62,6 +62,7 @@ def run(plan, args={}): parallel_keystore_generation = args_with_right_defaults.parallel_keystore_generation persistent = args_with_right_defaults.persistent xatu_sentry_params = args_with_right_defaults.xatu_sentry_params + global_tolerations = args_with_right_defaults.global_tolerations grafana_datasource_config_template = read_file( static_files.GRAFANA_DATASOURCE_CONFIG_TEMPLATE_FILEPATH @@ -96,6 +97,7 @@ def run(plan, args={}): jwt_file, persistent, xatu_sentry_params, + global_tolerations, parallel_keystore_generation, ) diff --git a/src/cl/lighthouse/lighthouse_launcher.star b/src/cl/lighthouse/lighthouse_launcher.star index 28b5bc15a..ed7096e84 100644 --- a/src/cl/lighthouse/lighthouse_launcher.star +++ b/src/cl/lighthouse/lighthouse_launcher.star @@ -28,9 +28,7 @@ BEACON_METRICS_PORT_NUM = 5054 # The min/max CPU/memory that the beacon node can use BEACON_MIN_CPU = 50 -BEACON_MAX_CPU = 1000 BEACON_MIN_MEMORY = 256 -BEACON_MAX_MEMORY = 1024 # ---------------------------------- Validator client ------------------------------------- 
VALIDATOR_KEYS_MOUNTPOINT_ON_CLIENTS = "/data/lighthouse/validator-keys" @@ -84,7 +82,7 @@ VALIDATOR_USED_PORTS = { ), } -LIGHTHOUSE_LOG_LEVELS = { +VERBOSITY_LEVELS = { constants.GLOBAL_CLIENT_LOG_LEVEL.error: "error", constants.GLOBAL_CLIENT_LOG_LEVEL.warn: "warn", constants.GLOBAL_CLIENT_LOG_LEVEL.info: "info", @@ -121,6 +119,10 @@ def launch( extra_validator_labels, persistent, cl_volume_size, + cl_tolerations, + validator_tolerations, + participant_tolerations, + global_tolerations, split_mode_enabled=False, ): beacon_service_name = "{0}".format(service_name) @@ -129,19 +131,34 @@ def launch( ) log_level = input_parser.get_client_log_level_or_default( - participant_log_level, global_log_level, LIGHTHOUSE_LOG_LEVELS + participant_log_level, global_log_level, VERBOSITY_LEVELS + ) + + tolerations = input_parser.get_client_tolerations( + cl_tolerations, participant_tolerations, global_tolerations ) - bn_min_cpu = int(bn_min_cpu) if int(bn_min_cpu) > 0 else BEACON_MIN_CPU - bn_max_cpu = int(bn_max_cpu) if int(bn_max_cpu) > 0 else BEACON_MAX_CPU - bn_min_mem = int(bn_min_mem) if int(bn_min_mem) > 0 else BEACON_MIN_MEMORY - bn_max_mem = int(bn_max_mem) if int(bn_max_mem) > 0 else BEACON_MAX_MEMORY network_name = ( "devnets" if launcher.network != "kurtosis" + and launcher.network != "ephemery" and launcher.network not in constants.PUBLIC_NETWORKS else launcher.network ) + + bn_min_cpu = int(bn_min_cpu) if int(bn_min_cpu) > 0 else BEACON_MIN_CPU + bn_max_cpu = ( + int(bn_max_cpu) + if int(bn_max_cpu) > 0 + else constants.RAM_CPU_OVERRIDES[network_name]["lighthouse_max_cpu"] + ) + bn_min_mem = int(bn_min_mem) if int(bn_min_mem) > 0 else BEACON_MIN_MEMORY + bn_max_mem = ( + int(bn_max_mem) + if int(bn_max_mem) > 0 + else constants.RAM_CPU_OVERRIDES[network_name]["lighthouse_max_mem"] + ) + cl_volume_size = ( int(cl_volume_size) if int(cl_volume_size) > 0 @@ -169,6 +186,7 @@ def launch( extra_beacon_labels, persistent, cl_volume_size, + tolerations, ) beacon_service = 
plan.add_service(beacon_service_name, beacon_config) @@ -203,7 +221,9 @@ def launch( v_max_cpu = int(v_max_cpu) if int(v_max_cpu) > 0 else VALIDATOR_MAX_CPU v_min_mem = int(v_min_mem) if int(v_min_mem) > 0 else VALIDATOR_MIN_MEMORY v_max_mem = int(v_max_mem) if int(v_max_mem) > 0 else VALIDATOR_MAX_MEMORY - + tolerations = input_parser.get_client_tolerations( + validator_tolerations, participant_tolerations, global_tolerations + ) validator_config = get_validator_config( launcher.el_cl_genesis_data, image, @@ -219,6 +239,7 @@ def launch( extra_validator_params, extra_validator_labels, persistent, + tolerations, ) validator_service = plan.add_service(validator_service_name, validator_config) @@ -297,6 +318,7 @@ def get_beacon_config( extra_labels, persistent, cl_volume_size, + tolerations, ): # If snooper is enabled use the snooper engine context, otherwise use the execution client context if snooper_enabled: @@ -445,6 +467,7 @@ def get_beacon_config( el_client_context.client_name, extra_labels, ), + tolerations=tolerations, ) @@ -463,6 +486,7 @@ def get_validator_config( extra_params, extra_labels, persistent, + tolerations, ): validator_keys_dirpath = shared_utils.path_join( VALIDATOR_KEYS_MOUNTPOINT_ON_CLIENTS, @@ -528,6 +552,7 @@ def get_validator_config( el_client_context.client_name, extra_labels, ), + tolerations=tolerations, ) diff --git a/src/cl/lodestar/lodestar_launcher.star b/src/cl/lodestar/lodestar_launcher.star index 21fbe2e8f..243218f4b 100644 --- a/src/cl/lodestar/lodestar_launcher.star +++ b/src/cl/lodestar/lodestar_launcher.star @@ -22,9 +22,7 @@ METRICS_PORT_NUM = 8008 # The min/max CPU/memory that the beacon node can use BEACON_MIN_CPU = 50 -BEACON_MAX_CPU = 1000 BEACON_MIN_MEMORY = 256 -BEACON_MAX_MEMORY = 1024 # ---------------------------------- Validator client ------------------------------------- VALIDATOR_KEYS_MOUNT_DIRPATH_ON_SERVICE_CONTAINER = "/validator-keys" @@ -63,7 +61,7 @@ VALIDATOR_USED_PORTS = { } -LODESTAR_LOG_LEVELS = { 
+VERBOSITY_LEVELS = { constants.GLOBAL_CLIENT_LOG_LEVEL.error: "error", constants.GLOBAL_CLIENT_LOG_LEVEL.warn: "warn", constants.GLOBAL_CLIENT_LOG_LEVEL.info: "info", @@ -100,6 +98,10 @@ def launch( extra_validator_labels, persistent, cl_volume_size, + cl_tolerations, + validator_tolerations, + participant_tolerations, + global_tolerations, split_mode_enabled=False, ): beacon_service_name = "{0}".format(service_name) @@ -107,20 +109,34 @@ def launch( service_name, VALIDATOR_SUFFIX_SERVICE_NAME ) log_level = input_parser.get_client_log_level_or_default( - participant_log_level, global_log_level, LODESTAR_LOG_LEVELS + participant_log_level, global_log_level, VERBOSITY_LEVELS ) - bn_min_cpu = int(bn_min_cpu) if int(bn_min_cpu) > 0 else BEACON_MIN_CPU - bn_max_cpu = int(bn_max_cpu) if int(bn_max_cpu) > 0 else BEACON_MAX_CPU - bn_min_mem = int(bn_min_mem) if int(bn_min_mem) > 0 else BEACON_MIN_MEMORY - bn_max_mem = int(bn_max_mem) if int(bn_max_mem) > 0 else BEACON_MAX_MEMORY + tolerations = input_parser.get_client_tolerations( + cl_tolerations, participant_tolerations, global_tolerations + ) network_name = ( "devnets" if launcher.network != "kurtosis" + and launcher.network != "ephemery" and launcher.network not in constants.PUBLIC_NETWORKS else launcher.network ) + + bn_min_cpu = int(bn_min_cpu) if int(bn_min_cpu) > 0 else BEACON_MIN_CPU + bn_max_cpu = ( + int(bn_max_cpu) + if int(bn_max_cpu) > 0 + else constants.RAM_CPU_OVERRIDES[network_name]["lodestar_max_cpu"] + ) + bn_min_mem = int(bn_min_mem) if int(bn_min_mem) > 0 else BEACON_MIN_MEMORY + bn_max_mem = ( + int(bn_max_mem) + if int(bn_max_mem) > 0 + else constants.RAM_CPU_OVERRIDES[network_name]["lodestar_max_mem"] + ) + cl_volume_size = ( int(cl_volume_size) if int(cl_volume_size) > 0 @@ -148,6 +164,7 @@ def launch( extra_beacon_labels, persistent, cl_volume_size, + tolerations, ) beacon_service = plan.add_service(beacon_service_name, beacon_config) @@ -183,6 +200,9 @@ def launch( v_max_cpu = int(v_max_cpu) if 
int(v_max_cpu) > 0 else VALIDATOR_MAX_CPU v_min_mem = int(v_min_mem) if int(v_min_mem) > 0 else VALIDATOR_MIN_MEMORY v_max_mem = int(v_max_mem) if int(v_max_mem) > 0 else VALIDATOR_MAX_MEMORY + tolerations = input_parser.get_client_tolerations( + validator_tolerations, participant_tolerations, global_tolerations + ) validator_config = get_validator_config( launcher.el_cl_genesis_data, image, @@ -198,6 +218,7 @@ def launch( extra_validator_params, extra_validator_labels, persistent, + tolerations, ) plan.add_service(validator_service_name, validator_config) @@ -268,6 +289,7 @@ def get_beacon_config( extra_labels, persistent, cl_volume_size, + tolerations, ): el_client_rpc_url_str = "http://{0}:{1}".format( el_client_context.ip_addr, @@ -397,6 +419,7 @@ def get_beacon_config( el_client_context.client_name, extra_labels, ), + tolerations=tolerations, ) @@ -415,6 +438,7 @@ def get_validator_config( extra_params, extra_labels, persistent, + tolerations, ): root_dirpath = shared_utils.path_join( VALIDATOR_DATA_DIRPATH_ON_SERVICE_CONTAINER, service_name @@ -478,6 +502,7 @@ def get_validator_config( el_client_context.client_name, extra_labels, ), + tolerations=tolerations, ) diff --git a/src/cl/nimbus/nimbus_launcher.star b/src/cl/nimbus/nimbus_launcher.star index d8f9072b8..8148725c2 100644 --- a/src/cl/nimbus/nimbus_launcher.star +++ b/src/cl/nimbus/nimbus_launcher.star @@ -23,9 +23,7 @@ BEACON_METRICS_PORT_NUM = 8008 # The min/max CPU/memory that the beacon node can use BEACON_MIN_CPU = 50 -BEACON_MAX_CPU = 1000 BEACON_MIN_MEMORY = 256 -BEACON_MAX_MEMORY = 1024 DEFAULT_BEACON_IMAGE_ENTRYPOINT = ["nimbus_beacon_node"] @@ -96,7 +94,7 @@ VALIDATOR_USED_PORTS = { ), } -NIMBUS_LOG_LEVELS = { +VERBOSITY_LEVELS = { constants.GLOBAL_CLIENT_LOG_LEVEL.error: "ERROR", constants.GLOBAL_CLIENT_LOG_LEVEL.warn: "WARN", constants.GLOBAL_CLIENT_LOG_LEVEL.info: "INFO", @@ -135,6 +133,10 @@ def launch( extra_validator_labels, persistent, cl_volume_size, + cl_tolerations, + 
validator_tolerations, + participant_tolerations, + global_tolerations, split_mode_enabled, ): beacon_service_name = "{0}".format(service_name) @@ -143,27 +145,34 @@ def launch( ) log_level = input_parser.get_client_log_level_or_default( - participant_log_level, global_log_level, NIMBUS_LOG_LEVELS + participant_log_level, global_log_level, VERBOSITY_LEVELS ) - # Holesky has a bigger memory footprint, so it needs more memory - if launcher.network == constants.NETWORK_NAME.holesky: - holesky_beacon_memory_limit = 4096 - bn_max_mem = ( - int(bn_max_mem) if int(bn_max_mem) > 0 else holesky_beacon_memory_limit - ) - - bn_min_cpu = int(bn_min_cpu) if int(bn_min_cpu) > 0 else BEACON_MIN_CPU - bn_max_cpu = int(bn_max_cpu) if int(bn_max_cpu) > 0 else BEACON_MAX_CPU - bn_min_mem = int(bn_min_mem) if int(bn_min_mem) > 0 else BEACON_MIN_MEMORY - bn_max_mem = int(bn_max_mem) if int(bn_max_mem) > 0 else BEACON_MAX_MEMORY + tolerations = input_parser.get_client_tolerations( + cl_tolerations, participant_tolerations, global_tolerations + ) network_name = ( "devnets" - if launcher.network != constants.NETWORK_NAME.kurtosis + if launcher.network != "kurtosis" + and launcher.network != "ephemery" and launcher.network not in constants.PUBLIC_NETWORKS else launcher.network ) + + bn_min_cpu = int(bn_min_cpu) if int(bn_min_cpu) > 0 else BEACON_MIN_CPU + bn_max_cpu = ( + int(bn_max_cpu) + if int(bn_max_cpu) > 0 + else constants.RAM_CPU_OVERRIDES[network_name]["nimbus_max_cpu"] + ) + bn_min_mem = int(bn_min_mem) if int(bn_min_mem) > 0 else BEACON_MIN_MEMORY + bn_max_mem = ( + int(bn_max_mem) + if int(bn_max_mem) > 0 + else constants.RAM_CPU_OVERRIDES[network_name]["nimbus_max_mem"] + ) + cl_volume_size = ( int(cl_volume_size) if int(cl_volume_size) > 0 @@ -192,6 +201,7 @@ def launch( split_mode_enabled, persistent, cl_volume_size, + tolerations, ) beacon_service = plan.add_service(beacon_service_name, beacon_config) @@ -232,7 +242,9 @@ def launch( v_max_cpu = int(v_max_cpu) if 
int(v_max_cpu) > 0 else VALIDATOR_MAX_CPU v_min_mem = int(v_min_mem) if int(v_min_mem) > 0 else VALIDATOR_MIN_MEMORY v_max_mem = int(v_max_mem) if int(v_max_mem) > 0 else VALIDATOR_MAX_MEMORY - + tolerations = input_parser.get_client_tolerations( + validator_tolerations, participant_tolerations, global_tolerations + ) validator_config = get_validator_config( launcher.el_cl_genesis_data, image, @@ -248,6 +260,7 @@ def launch( extra_validator_params, extra_validator_labels, persistent, + tolerations, ) validator_service = plan.add_service(validator_service_name, validator_config) @@ -302,6 +315,7 @@ def get_beacon_config( split_mode_enabled, persistent, cl_volume_size, + tolerations, ): validator_keys_dirpath = "" validator_secrets_dirpath = "" @@ -427,6 +441,7 @@ def get_beacon_config( extra_labels, ), user=User(uid=0, gid=0), + tolerations=tolerations, ) @@ -445,6 +460,7 @@ def get_validator_config( extra_params, extra_labels, persistent, + tolerations, ): validator_keys_dirpath = "" validator_secrets_dirpath = "" @@ -498,6 +514,7 @@ def get_validator_config( el_client_context.client_name, extra_labels, ), + tolerations=tolerations, ) diff --git a/src/cl/prysm/prysm_launcher.star b/src/cl/prysm/prysm_launcher.star index ff4cdf7fa..90b95cd65 100644 --- a/src/cl/prysm/prysm_launcher.star +++ b/src/cl/prysm/prysm_launcher.star @@ -26,9 +26,7 @@ BEACON_MONITORING_PORT_NUM = 8080 # The min/max CPU/memory that the beacon node can use BEACON_MIN_CPU = 100 -BEACON_MAX_CPU = 2000 BEACON_MIN_MEMORY = 256 -BEACON_MAX_MEMORY = 1024 # ---------------------------------- Validator client ------------------------------------- VALIDATOR_DATA_DIRPATH_ON_SERVICE_CONTAINER = "/data/prysm/validator-data/" @@ -75,7 +73,7 @@ VALIDATOR_NODE_USED_PORTS = { ), } -PRYSM_LOG_LEVELS = { +VERBOSITY_LEVELS = { constants.GLOBAL_CLIENT_LOG_LEVEL.error: "error", constants.GLOBAL_CLIENT_LOG_LEVEL.warn: "warn", constants.GLOBAL_CLIENT_LOG_LEVEL.info: "info", @@ -112,6 +110,10 @@ def launch( 
extra_validator_labels, persistent, cl_volume_size, + cl_tolerations, + validator_tolerations, + participant_tolerations, + global_tolerations, split_mode_enabled=False, ): split_images = images.split(IMAGE_SEPARATOR_DELIMITER) @@ -134,19 +136,34 @@ def launch( service_name, VALIDATOR_SUFFIX_SERVICE_NAME ) log_level = input_parser.get_client_log_level_or_default( - participant_log_level, global_log_level, PRYSM_LOG_LEVELS + participant_log_level, global_log_level, VERBOSITY_LEVELS + ) + + tolerations = input_parser.get_client_tolerations( + cl_tolerations, participant_tolerations, global_tolerations ) - bn_min_cpu = int(bn_min_cpu) if int(bn_min_cpu) > 0 else BEACON_MIN_CPU - bn_max_cpu = int(bn_max_cpu) if int(bn_max_cpu) > 0 else BEACON_MAX_CPU - bn_min_mem = int(bn_min_mem) if int(bn_min_mem) > 0 else BEACON_MIN_MEMORY - bn_max_mem = int(bn_max_mem) if int(bn_max_mem) > 0 else BEACON_MAX_MEMORY network_name = ( "devnets" if launcher.network != "kurtosis" + and launcher.network != "ephemery" and launcher.network not in constants.PUBLIC_NETWORKS else launcher.network ) + + bn_min_cpu = int(bn_min_cpu) if int(bn_min_cpu) > 0 else BEACON_MIN_CPU + bn_max_cpu = ( + int(bn_max_cpu) + if int(bn_max_cpu) > 0 + else constants.RAM_CPU_OVERRIDES[network_name]["prysm_max_cpu"] + ) + bn_min_mem = int(bn_min_mem) if int(bn_min_mem) > 0 else BEACON_MIN_MEMORY + bn_max_mem = ( + int(bn_max_mem) + if int(bn_max_mem) > 0 + else constants.RAM_CPU_OVERRIDES[network_name]["prysm_max_mem"] + ) + cl_volume_size = ( int(cl_volume_size) if int(cl_volume_size) > 0 @@ -173,6 +190,7 @@ def launch( extra_beacon_labels, persistent, cl_volume_size, + tolerations, ) beacon_service = plan.add_service(beacon_service_name, beacon_config) @@ -189,6 +207,9 @@ def launch( v_max_cpu = int(v_max_cpu) if int(v_max_cpu) > 0 else VALIDATOR_MAX_CPU v_min_mem = int(v_min_mem) if int(v_min_mem) > 0 else VALIDATOR_MIN_MEMORY v_max_mem = int(v_max_mem) if int(v_max_mem) > 0 else VALIDATOR_MAX_MEMORY + 
tolerations = input_parser.get_client_tolerations( + validator_tolerations, participant_tolerations, global_tolerations + ) validator_config = get_validator_config( launcher.el_cl_genesis_data, validator_image, @@ -207,6 +228,7 @@ def launch( launcher.prysm_password_relative_filepath, launcher.prysm_password_artifact_uuid, persistent, + tolerations, ) validator_service = plan.add_service(validator_service_name, validator_config) @@ -285,6 +307,7 @@ def get_beacon_config( extra_labels, persistent, cl_volume_size, + tolerations, ): # If snooper is enabled use the snooper engine context, otherwise use the execution client context if snooper_enabled: @@ -412,6 +435,7 @@ def get_beacon_config( el_client_context.client_name, extra_labels, ), + tolerations=tolerations, ) @@ -433,6 +457,7 @@ def get_validator_config( prysm_password_relative_filepath, prysm_password_artifact_uuid, persistent, + tolerations, ): validator_keys_dirpath = shared_utils.path_join( VALIDATOR_KEYS_MOUNT_DIRPATH_ON_SERVICE_CONTAINER, @@ -497,6 +522,7 @@ def get_validator_config( el_client_context.client_name, extra_labels, ), + tolerations=tolerations, ) diff --git a/src/cl/teku/teku_launcher.star b/src/cl/teku/teku_launcher.star index d2dfd47e6..24d6d95d0 100644 --- a/src/cl/teku/teku_launcher.star +++ b/src/cl/teku/teku_launcher.star @@ -23,9 +23,7 @@ BEACON_METRICS_PORT_NUM = 8008 # The min/max CPU/memory that the beacon node can use BEACON_MIN_CPU = 50 -BEACON_MAX_CPU = 1000 BEACON_MIN_MEMORY = 1024 -BEACON_MAX_MEMORY = 2048 BEACON_METRICS_PATH = "/metrics" # ---------------------------------- Validator client ------------------------------------- @@ -88,7 +86,7 @@ VALIDATOR_USED_PORTS = { ENTRYPOINT_ARGS = ["sh", "-c"] -TEKU_LOG_LEVELS = { +VERBOSITY_LEVELS = { constants.GLOBAL_CLIENT_LOG_LEVEL.error: "ERROR", constants.GLOBAL_CLIENT_LOG_LEVEL.warn: "WARN", constants.GLOBAL_CLIENT_LOG_LEVEL.info: "INFO", @@ -125,6 +123,10 @@ def launch( extra_validator_labels, persistent, cl_volume_size, + 
cl_tolerations, + validator_tolerations, + participant_tolerations, + global_tolerations, split_mode_enabled, ): beacon_service_name = "{0}".format(service_name) @@ -132,7 +134,11 @@ def launch( service_name, VALIDATOR_SUFFIX_SERVICE_NAME ) log_level = input_parser.get_client_log_level_or_default( - participant_log_level, global_log_level, TEKU_LOG_LEVELS + participant_log_level, global_log_level, VERBOSITY_LEVELS + ) + + tolerations = input_parser.get_client_tolerations( + cl_tolerations, participant_tolerations, global_tolerations ) extra_params = [param for param in extra_beacon_params] + [ @@ -146,17 +152,27 @@ def launch( int(bn_max_mem) if int(bn_max_mem) > 0 else holesky_beacon_memory_limit ) - bn_min_cpu = int(bn_min_cpu) if int(bn_min_cpu) > 0 else BEACON_MIN_CPU - bn_max_cpu = int(bn_max_cpu) if int(bn_max_cpu) > 0 else BEACON_MAX_CPU - bn_min_mem = int(bn_min_mem) if int(bn_min_mem) > 0 else BEACON_MIN_MEMORY - bn_max_mem = int(bn_max_mem) if int(bn_max_mem) > 0 else BEACON_MAX_MEMORY - network_name = ( "devnets" if launcher.network != "kurtosis" + and launcher.network != "ephemery" and launcher.network not in constants.PUBLIC_NETWORKS else launcher.network ) + + bn_min_cpu = int(bn_min_cpu) if int(bn_min_cpu) > 0 else BEACON_MIN_CPU + bn_max_cpu = ( + int(bn_max_cpu) + if int(bn_max_cpu) > 0 + else constants.RAM_CPU_OVERRIDES[network_name]["teku_max_cpu"] + ) + bn_min_mem = int(bn_min_mem) if int(bn_min_mem) > 0 else BEACON_MIN_MEMORY + bn_max_mem = ( + int(bn_max_mem) + if int(bn_max_mem) > 0 + else constants.RAM_CPU_OVERRIDES[network_name]["teku_max_mem"] + ) + cl_volume_size = ( int(cl_volume_size) if int(cl_volume_size) > 0 @@ -185,6 +201,7 @@ def launch( split_mode_enabled, persistent, cl_volume_size, + tolerations, ) beacon_service = plan.add_service(service_name, config) @@ -227,7 +244,9 @@ def launch( v_max_cpu = int(v_max_cpu) if int(v_max_cpu) > 0 else VALIDATOR_MAX_CPU v_min_mem = int(v_min_mem) if int(v_min_mem) > 0 else VALIDATOR_MIN_MEMORY 
v_max_mem = int(v_max_mem) if int(v_max_mem) > 0 else VALIDATOR_MAX_MEMORY - + tolerations = input_parser.get_client_tolerations( + validator_tolerations, participant_tolerations, global_tolerations + ) validator_config = get_validator_config( launcher.el_cl_genesis_data, image, @@ -244,6 +263,7 @@ def launch( extra_validator_params, extra_validator_labels, persistent, + tolerations, ) validator_service = plan.add_service(validator_service_name, validator_config) @@ -298,6 +318,7 @@ def get_beacon_config( split_mode_enabled, persistent, cl_volume_size, + tolerations, ): validator_keys_dirpath = "" validator_secrets_dirpath = "" @@ -466,6 +487,7 @@ def get_beacon_config( extra_labels, ), user=User(uid=0, gid=0), + tolerations=tolerations, ) @@ -485,6 +507,7 @@ def get_validator_config( extra_params, extra_labels, persistent, + tolerations, ): validator_keys_dirpath = "" validator_secrets_dirpath = "" @@ -549,6 +572,7 @@ def get_validator_config( el_client_context.client_name, extra_labels, ), + tolerations=tolerations, ) diff --git a/src/el/besu/besu_launcher.star b/src/el/besu/besu_launcher.star index fe3c8b3a4..12a282b27 100644 --- a/src/el/besu/besu_launcher.star +++ b/src/el/besu/besu_launcher.star @@ -50,7 +50,7 @@ USED_PORTS = { ENTRYPOINT_ARGS = ["sh", "-c"] -BESU_LOG_LEVELS = { +VERBOSITY_LEVELS = { constants.GLOBAL_CLIENT_LOG_LEVEL.error: "ERROR", constants.GLOBAL_CLIENT_LOG_LEVEL.warn: "WARN", constants.GLOBAL_CLIENT_LOG_LEVEL.info: "INFO", @@ -76,21 +76,38 @@ def launch( extra_labels, persistent, el_volume_size, + el_tolerations, + participant_tolerations, + global_tolerations, ): log_level = input_parser.get_client_log_level_or_default( - participant_log_level, global_log_level, BESU_LOG_LEVELS + participant_log_level, global_log_level, VERBOSITY_LEVELS + ) + tolerations = input_parser.get_client_tolerations( + el_tolerations, participant_tolerations, global_tolerations ) - el_min_cpu = int(el_min_cpu) if int(el_min_cpu) > 0 else EXECUTION_MIN_CPU - 
el_max_cpu = int(el_max_cpu) if int(el_max_cpu) > 0 else EXECUTION_MAX_CPU - el_min_mem = int(el_min_mem) if int(el_min_mem) > 0 else EXECUTION_MIN_MEMORY - el_max_mem = int(el_max_mem) if int(el_max_mem) > 0 else EXECUTION_MAX_MEMORY network_name = ( "devnets" if launcher.network != "kurtosis" + and launcher.network != "ephemery" and launcher.network not in constants.PUBLIC_NETWORKS else launcher.network ) + + el_min_cpu = int(el_min_cpu) if int(el_min_cpu) > 0 else EXECUTION_MIN_CPU + el_max_cpu = ( + int(el_max_cpu) + if int(el_max_cpu) > 0 + else constants.RAM_CPU_OVERRIDES[network_name]["besu_max_cpu"] + ) + el_min_mem = int(el_min_mem) if int(el_min_mem) > 0 else EXECUTION_MIN_MEMORY + el_max_mem = ( + int(el_max_mem) + if int(el_max_mem) > 0 + else constants.RAM_CPU_OVERRIDES[network_name]["besu_max_mem"] + ) + el_volume_size = ( el_volume_size if int(el_volume_size) > 0 @@ -118,6 +135,7 @@ def launch( extra_labels, persistent, el_volume_size, + tolerations, ) service = plan.add_service(service_name, config) @@ -161,6 +179,7 @@ def get_config( extra_labels, persistent, el_volume_size, + tolerations, ): cmd = [ "besu", @@ -255,6 +274,7 @@ def get_config( extra_labels, ), user=User(uid=0, gid=0), + tolerations=tolerations, ) diff --git a/src/el/erigon/erigon_launcher.star b/src/el/erigon/erigon_launcher.star index 2306ac3c5..cc503ec2e 100644 --- a/src/el/erigon/erigon_launcher.star +++ b/src/el/erigon/erigon_launcher.star @@ -18,9 +18,7 @@ METRICS_PORT_NUM = 9001 # The min/max CPU/memory that the execution node can use EXECUTION_MIN_CPU = 100 -EXECUTION_MAX_CPU = 1000 EXECUTION_MIN_MEMORY = 512 -EXECUTION_MAX_MEMORY = 2048 # Port IDs WS_RPC_PORT_ID = "ws-rpc" @@ -52,7 +50,7 @@ USED_PORTS = { ENTRYPOINT_ARGS = ["sh", "-c"] -ERIGON_LOG_LEVELS = { +VERBOSITY_LEVELS = { constants.GLOBAL_CLIENT_LOG_LEVEL.error: "1", constants.GLOBAL_CLIENT_LOG_LEVEL.warn: "2", constants.GLOBAL_CLIENT_LOG_LEVEL.info: "3", @@ -78,22 +76,38 @@ def launch( extra_labels, persistent, 
el_volume_size, + el_tolerations, + participant_tolerations, + global_tolerations, ): log_level = input_parser.get_client_log_level_or_default( - participant_log_level, global_log_level, ERIGON_LOG_LEVELS + participant_log_level, global_log_level, VERBOSITY_LEVELS + ) + tolerations = input_parser.get_client_tolerations( + el_tolerations, participant_tolerations, global_tolerations ) - - el_min_cpu = el_min_cpu if int(el_min_cpu) > 0 else EXECUTION_MIN_CPU - el_max_cpu = el_max_cpu if int(el_max_cpu) > 0 else EXECUTION_MAX_CPU - el_min_mem = el_min_mem if int(el_min_mem) > 0 else EXECUTION_MIN_MEMORY - el_max_mem = el_max_mem if int(el_max_mem) > 0 else EXECUTION_MAX_MEMORY network_name = ( "devnets" if launcher.network != "kurtosis" + and launcher.network != "ephemery" and launcher.network not in constants.PUBLIC_NETWORKS else launcher.network ) + + el_min_cpu = int(el_min_cpu) if int(el_min_cpu) > 0 else EXECUTION_MIN_CPU + el_max_cpu = ( + int(el_max_cpu) + if int(el_max_cpu) > 0 + else constants.RAM_CPU_OVERRIDES[network_name]["erigon_max_cpu"] + ) + el_min_mem = int(el_min_mem) if int(el_min_mem) > 0 else EXECUTION_MIN_MEMORY + el_max_mem = ( + int(el_max_mem) + if int(el_max_mem) > 0 + else constants.RAM_CPU_OVERRIDES[network_name]["erigon_max_mem"] + ) + el_volume_size = ( el_volume_size if int(el_volume_size) > 0 @@ -122,6 +136,7 @@ def launch( extra_labels, persistent, el_volume_size, + tolerations, ) service = plan.add_service(service_name, config) @@ -168,6 +183,7 @@ def get_config( extra_labels, persistent, el_volume_size, + tolerations, ): init_datadir_cmd_str = "erigon init --datadir={0} {1}".format( EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER, @@ -276,6 +292,7 @@ def get_config( extra_labels, ), user=User(uid=0, gid=0), + tolerations=tolerations, ) diff --git a/src/el/ethereumjs/ethereumjs_launcher.star b/src/el/ethereumjs/ethereumjs_launcher.star index a0a0901f1..55a9a926e 100644 --- a/src/el/ethereumjs/ethereumjs_launcher.star +++ 
b/src/el/ethereumjs/ethereumjs_launcher.star @@ -15,9 +15,7 @@ METRICS_PORT_NUM = 9001 # The min/max CPU/memory that the execution node can use EXECUTION_MIN_CPU = 100 -EXECUTION_MAX_CPU = 2000 EXECUTION_MIN_MEMORY = 256 -EXECUTION_MAX_MEMORY = 1024 # Port IDs RPC_PORT_ID = "rpc" @@ -80,22 +78,38 @@ def launch( extra_labels, persistent, el_volume_size, + el_tolerations, + participant_tolerations, + global_tolerations, ): log_level = input_parser.get_client_log_level_or_default( participant_log_level, global_log_level, VERBOSITY_LEVELS ) - - el_min_cpu = el_min_cpu if int(el_min_cpu) > 0 else EXECUTION_MIN_CPU - el_max_cpu = el_max_cpu if int(el_max_cpu) > 0 else EXECUTION_MAX_CPU - el_min_mem = el_min_mem if int(el_min_mem) > 0 else EXECUTION_MIN_MEMORY - el_max_mem = el_max_mem if int(el_max_mem) > 0 else EXECUTION_MAX_MEMORY + tolerations = input_parser.get_client_tolerations( + el_tolerations, participant_tolerations, global_tolerations + ) network_name = ( "devnets" if launcher.network != "kurtosis" + and launcher.network != "ephemery" and launcher.network not in constants.PUBLIC_NETWORKS else launcher.network ) + + el_min_cpu = int(el_min_cpu) if int(el_min_cpu) > 0 else EXECUTION_MIN_CPU + el_max_cpu = ( + int(el_max_cpu) + if int(el_max_cpu) > 0 + else constants.RAM_CPU_OVERRIDES[network_name]["ethereumjs_max_cpu"] + ) + el_min_mem = int(el_min_mem) if int(el_min_mem) > 0 else EXECUTION_MIN_MEMORY + el_max_mem = ( + int(el_max_mem) + if int(el_max_mem) > 0 + else constants.RAM_CPU_OVERRIDES[network_name]["ethereumjs_max_mem"] + ) + el_volume_size = ( el_volume_size if int(el_volume_size) > 0 @@ -123,6 +137,7 @@ def launch( extra_labels, persistent, el_volume_size, + tolerations, ) service = plan.add_service(service_name, config) @@ -165,6 +180,7 @@ def get_config( extra_labels, persistent, el_volume_size, + tolerations, ): cmd = [ "--dataDir=" + EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER, @@ -249,6 +265,7 @@ def get_config( cl_client_name, extra_labels, ), + 
tolerations=tolerations, ) diff --git a/src/el/geth/geth_launcher.star b/src/el/geth/geth_launcher.star index 61e47160b..727818f7c 100644 --- a/src/el/geth/geth_launcher.star +++ b/src/el/geth/geth_launcher.star @@ -17,9 +17,7 @@ METRICS_PORT_NUM = 9001 # The min/max CPU/memory that the execution node can use EXECUTION_MIN_CPU = 300 -EXECUTION_MAX_CPU = 2000 EXECUTION_MIN_MEMORY = 512 -EXECUTION_MAX_MEMORY = 2048 # Port IDs RPC_PORT_ID = "rpc" @@ -88,20 +86,38 @@ def launch( extra_labels, persistent, el_volume_size, + el_tolerations, + participant_tolerations, + global_tolerations, ): log_level = input_parser.get_client_log_level_or_default( participant_log_level, global_log_level, VERBOSITY_LEVELS ) - el_min_cpu = el_min_cpu if int(el_min_cpu) > 0 else EXECUTION_MIN_CPU - el_max_cpu = el_max_cpu if int(el_max_cpu) > 0 else EXECUTION_MAX_CPU - el_min_mem = el_min_mem if int(el_min_mem) > 0 else EXECUTION_MIN_MEMORY - el_max_mem = el_max_mem if int(el_max_mem) > 0 else EXECUTION_MAX_MEMORY + tolerations = input_parser.get_client_tolerations( + el_tolerations, participant_tolerations, global_tolerations + ) + network_name = ( "devnets" if launcher.network != "kurtosis" + and launcher.network != "ephemery" and launcher.network not in constants.PUBLIC_NETWORKS else launcher.network ) + + el_min_cpu = int(el_min_cpu) if int(el_min_cpu) > 0 else EXECUTION_MIN_CPU + el_max_cpu = ( + int(el_max_cpu) + if int(el_max_cpu) > 0 + else constants.RAM_CPU_OVERRIDES[network_name]["geth_max_cpu"] + ) + el_min_mem = int(el_min_mem) if int(el_min_mem) > 0 else EXECUTION_MIN_MEMORY + el_max_mem = ( + int(el_max_mem) + if int(el_max_mem) > 0 + else constants.RAM_CPU_OVERRIDES[network_name]["geth_max_mem"] + ) + el_volume_size = ( el_volume_size if int(el_volume_size) > 0 @@ -133,6 +149,7 @@ def launch( launcher.final_genesis_timestamp, persistent, el_volume_size, + tolerations, ) service = plan.add_service(service_name, config) @@ -182,6 +199,7 @@ def get_config( 
final_genesis_timestamp, persistent, el_volume_size, + tolerations, ): # TODO: Remove this once electra fork has path based storage scheme implemented if electra_fork_epoch != None or constants.NETWORK_NAME.verkle in network: @@ -336,6 +354,7 @@ def get_config( cl_client_name, extra_labels, ), + tolerations=tolerations, ) diff --git a/src/el/nethermind/nethermind_launcher.star b/src/el/nethermind/nethermind_launcher.star index 0cfcef765..e59bfcf1e 100644 --- a/src/el/nethermind/nethermind_launcher.star +++ b/src/el/nethermind/nethermind_launcher.star @@ -19,9 +19,7 @@ METRICS_PORT_NUM = 9001 # The min/max CPU/memory that the execution node can use EXECUTION_MIN_CPU = 100 -EXECUTION_MAX_CPU = 1000 EXECUTION_MIN_MEMORY = 512 -EXECUTION_MAX_MEMORY = 2048 # Port IDs RPC_PORT_ID = "rpc" @@ -50,7 +48,7 @@ USED_PORTS = { ), } -NETHERMIND_LOG_LEVELS = { +VERBOSITY_LEVELS = { constants.GLOBAL_CLIENT_LOG_LEVEL.error: "ERROR", constants.GLOBAL_CLIENT_LOG_LEVEL.warn: "WARN", constants.GLOBAL_CLIENT_LOG_LEVEL.info: "INFO", @@ -76,21 +74,38 @@ def launch( extra_labels, persistent, el_volume_size, + el_tolerations, + participant_tolerations, + global_tolerations, ): log_level = input_parser.get_client_log_level_or_default( - participant_log_level, global_log_level, NETHERMIND_LOG_LEVELS + participant_log_level, global_log_level, VERBOSITY_LEVELS + ) + tolerations = input_parser.get_client_tolerations( + el_tolerations, participant_tolerations, global_tolerations ) - el_min_cpu = el_min_cpu if int(el_min_cpu) > 0 else EXECUTION_MIN_CPU - el_max_cpu = el_max_cpu if int(el_max_cpu) > 0 else EXECUTION_MAX_CPU - el_min_mem = el_min_mem if int(el_min_mem) > 0 else EXECUTION_MIN_MEMORY - el_max_mem = el_max_mem if int(el_max_mem) > 0 else EXECUTION_MAX_MEMORY network_name = ( "devnets" if launcher.network != "kurtosis" + and launcher.network != "ephemery" and launcher.network not in constants.PUBLIC_NETWORKS else launcher.network ) + + el_min_cpu = int(el_min_cpu) if int(el_min_cpu) > 0 
else EXECUTION_MIN_CPU + el_max_cpu = ( + int(el_max_cpu) + if int(el_max_cpu) > 0 + else constants.RAM_CPU_OVERRIDES[network_name]["nethermind_max_cpu"] + ) + el_min_mem = int(el_min_mem) if int(el_min_mem) > 0 else EXECUTION_MIN_MEMORY + el_max_mem = ( + int(el_max_mem) + if int(el_max_mem) > 0 + else constants.RAM_CPU_OVERRIDES[network_name]["nethermind_max_mem"] + ) + el_volume_size = ( el_volume_size if int(el_volume_size) > 0 @@ -118,6 +133,7 @@ def launch( extra_labels, persistent, el_volume_size, + tolerations, ) service = plan.add_service(service_name, config) @@ -162,6 +178,7 @@ def get_config( extra_labels, persistent, el_volume_size, + tolerations, ): cmd = [ "--log=" + log_level, @@ -246,6 +263,7 @@ def get_config( cl_client_name, extra_labels, ), + tolerations=tolerations, ) diff --git a/src/el/reth/reth_launcher.star b/src/el/reth/reth_launcher.star index cadca6348..411f8ee15 100644 --- a/src/el/reth/reth_launcher.star +++ b/src/el/reth/reth_launcher.star @@ -13,9 +13,7 @@ METRICS_PORT_NUM = 9001 # The min/max CPU/memory that the execution node can use EXECUTION_MIN_CPU = 100 -EXECUTION_MAX_CPU = 1000 EXECUTION_MIN_MEMORY = 256 -EXECUTION_MAX_MEMORY = 1024 # Port IDs RPC_PORT_ID = "rpc" @@ -79,21 +77,37 @@ def launch( extra_labels, persistent, el_volume_size, + el_tolerations, + participant_tolerations, + global_tolerations, ): log_level = input_parser.get_client_log_level_or_default( participant_log_level, global_log_level, VERBOSITY_LEVELS ) - - el_min_cpu = el_min_cpu if int(el_min_cpu) > 0 else EXECUTION_MIN_CPU - el_max_cpu = el_max_cpu if int(el_max_cpu) > 0 else EXECUTION_MAX_CPU - el_min_mem = el_min_mem if int(el_min_mem) > 0 else EXECUTION_MIN_MEMORY - el_max_mem = el_max_mem if int(el_max_mem) > 0 else EXECUTION_MAX_MEMORY + tolerations = input_parser.get_client_tolerations( + el_tolerations, participant_tolerations, global_tolerations + ) network_name = ( "devnets" if launcher.network != "kurtosis" + and launcher.network != "ephemery" and 
launcher.network not in constants.PUBLIC_NETWORKS else launcher.network ) + + el_min_cpu = int(el_min_cpu) if int(el_min_cpu) > 0 else EXECUTION_MIN_CPU + el_max_cpu = ( + int(el_max_cpu) + if int(el_max_cpu) > 0 + else constants.RAM_CPU_OVERRIDES[network_name]["reth_max_cpu"] + ) + el_min_mem = int(el_min_mem) if int(el_min_mem) > 0 else EXECUTION_MIN_MEMORY + el_max_mem = ( + int(el_max_mem) + if int(el_max_mem) > 0 + else constants.RAM_CPU_OVERRIDES[network_name]["reth_max_mem"] + ) + el_volume_size = ( el_volume_size if int(el_volume_size) > 0 @@ -121,6 +135,7 @@ def launch( extra_labels, persistent, el_volume_size, + tolerations, ) service = plan.add_service(service_name, config) @@ -164,6 +179,7 @@ def get_config( extra_labels, persistent, el_volume_size, + tolerations, ): init_datadir_cmd_str = "reth init --datadir={0} --chain={1}".format( EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER, @@ -242,6 +258,7 @@ def get_config( persistent_key="data-{0}".format(service_name), size=el_volume_size, ) + return ServiceConfig( image=image, ports=USED_PORTS, @@ -261,6 +278,7 @@ def get_config( cl_client_name, extra_labels, ), + tolerations=tolerations, ) diff --git a/src/package_io/constants.star b/src/package_io/constants.star index a0055a3cc..150e89d6d 100644 --- a/src/package_io/constants.star +++ b/src/package_io/constants.star @@ -165,6 +165,19 @@ VOLUME_SIZE = { "nimbus_volume_size": 100000, # 100GB "lodestar_volume_size": 100000, # 100GB }, + "ephemery": { + "geth_volume_size": 5000, # 5GB + "erigon_volume_size": 3000, # 3GB + "nethermind_volume_size": 3000, # 3GB + "besu_volume_size": 3000, # 3GB + "reth_volume_size": 3000, # 3GB + "ethereumjs_volume_size": 3000, # 3GB + "prysm_volume_size": 1000, # 1GB + "lighthouse_volume_size": 1000, # 1GB + "teku_volume_size": 1000, # 1GB + "nimbus_volume_size": 1000, # 1GB + "lodestar_volume_size": 1000, # 1GB + }, "kurtosis": { "geth_volume_size": 5000, # 5GB "erigon_volume_size": 3000, # 3GB @@ -179,3 +192,174 @@ VOLUME_SIZE = 
{ "lodestar_volume_size": 1000, # 1GB }, } + +RAM_CPU_OVERRIDES = { + "mainnet": { + "geth_max_mem": 16384, # 16GB + "geth_max_cpu": 4000, # 4 cores + "erigon_max_mem": 16384, # 16GB + "erigon_max_cpu": 4000, # 4 cores + "nethermind_max_mem": 16384, # 16GB + "nethermind_max_cpu": 4000, # 4 cores + "besu_max_mem": 16384, # 16GB + "besu_max_cpu": 4000, # 4 cores + "reth_max_mem": 16384, # 16GB + "reth_max_cpu": 4000, # 4 cores + "ethereumjs_max_mem": 16384, # 16GB + "ethereumjs_max_cpu": 4000, # 4 cores + "prysm_max_mem": 16384, # 16GB + "prysm_max_cpu": 4000, # 4 cores + "lighthouse_max_mem": 16384, # 16GB + "lighthouse_max_cpu": 4000, # 4 cores + "teku_max_mem": 16384, # 16GB + "teku_max_cpu": 4000, # 4 cores + "nimbus_max_mem": 16384, # 16GB + "nimbus_max_cpu": 4000, # 4 cores + "lodestar_max_mem": 16384, # 16GB + "lodestar_max_cpu": 4000, # 4 cores + }, + "goerli": { + "geth_max_mem": 8192, # 8GB + "geth_max_cpu": 2000, # 2 cores + "erigon_max_mem": 8192, # 8GB + "erigon_max_cpu": 2000, # 2 cores + "nethermind_max_mem": 8192, # 8GB + "nethermind_max_cpu": 2000, # 2 cores + "besu_max_mem": 8192, # 8GB + "besu_max_cpu": 2000, # 2 cores + "reth_max_mem": 8192, # 8GB + "reth_max_cpu": 2000, # 2 cores + "ethereumjs_max_mem": 8192, # 8GB + "ethereumjs_max_cpu": 2000, # 2 cores + "prysm_max_mem": 8192, # 8GB + "prysm_max_cpu": 2000, # 2 cores + "lighthouse_max_mem": 8192, # 8GB + "lighthouse_max_cpu": 2000, # 2 cores + "teku_max_mem": 8192, # 8GB + "teku_max_cpu": 2000, # 2 cores + "nimbus_max_mem": 8192, # 8GB + "nimbus_max_cpu": 2000, # 2 cores + "lodestar_max_mem": 8192, # 8GB + "lodestar_max_cpu": 2000, # 2 cores + }, + "sepolia": { + "geth_max_mem": 4096, # 4GB + "geth_max_cpu": 1000, # 1 core + "erigon_max_mem": 4096, # 4GB + "erigon_max_cpu": 1000, # 1 core + "nethermind_max_mem": 4096, # 4GB + "nethermind_max_cpu": 1000, # 1 core + "besu_max_mem": 4096, # 4GB + "besu_max_cpu": 1000, # 1 core + "reth_max_mem": 4096, # 4GB + "reth_max_cpu": 1000, # 1 core + 
"ethereumjs_max_mem": 4096, # 4GB + "ethereumjs_max_cpu": 1000, # 1 core + "prysm_max_mem": 4096, # 4GB + "prysm_max_cpu": 1000, # 1 core + "lighthouse_max_mem": 4096, # 4GB + "lighthouse_max_cpu": 1000, # 1 core + "teku_max_mem": 4096, # 4GB + "teku_max_cpu": 1000, # 1 core + "nimbus_max_mem": 4096, # 4GB + "nimbus_max_cpu": 1000, # 1 core + "lodestar_max_mem": 4096, # 4GB + "lodestar_max_cpu": 1000, # 1 core + }, + "holesky": { + "geth_max_mem": 8192, # 8GB + "geth_max_cpu": 2000, # 2 cores + "erigon_max_mem": 8192, # 8GB + "erigon_max_cpu": 2000, # 2 cores + "nethermind_max_mem": 8192, # 8GB + "nethermind_max_cpu": 2000, # 2 cores + "besu_max_mem": 8192, # 8GB + "besu_max_cpu": 2000, # 2 cores + "reth_max_mem": 8192, # 8GB + "reth_max_cpu": 2000, # 2 cores + "ethereumjs_max_mem": 8192, # 8GB + "ethereumjs_max_cpu": 2000, # 2 cores + "prysm_max_mem": 8192, # 8GB + "prysm_max_cpu": 2000, # 2 cores + "lighthouse_max_mem": 8192, # 8GB + "lighthouse_max_cpu": 2000, # 2 cores + "teku_max_mem": 8192, # 8GB + "teku_max_cpu": 2000, # 2 cores + "nimbus_max_mem": 8192, # 8GB + "nimbus_max_cpu": 2000, # 2 cores + "lodestar_max_mem": 8192, # 8GB + "lodestar_max_cpu": 2000, # 2 cores + }, + "devnets": { + "geth_max_mem": 4096, # 4GB + "geth_max_cpu": 1000, # 1 core + "erigon_max_mem": 4096, # 4GB + "erigon_max_cpu": 1000, # 1 core + "nethermind_max_mem": 4096, # 4GB + "nethermind_max_cpu": 1000, # 1 core + "besu_max_mem": 4096, # 4GB + "besu_max_cpu": 1000, # 1 core + "reth_max_mem": 4096, # 4GB + "reth_max_cpu": 1000, # 1 core + "ethereumjs_max_mem": 4096, # 4GB + "ethereumjs_max_cpu": 1000, # 1 core + "prysm_max_mem": 4096, # 4GB + "prysm_max_cpu": 1000, # 1 core + "lighthouse_max_mem": 4096, # 4GB + "lighthouse_max_cpu": 1000, # 1 core + "teku_max_mem": 4096, # 4GB + "teku_max_cpu": 1000, # 1 core + "nimbus_max_mem": 4096, # 4GB + "nimbus_max_cpu": 1000, # 1 core + "lodestar_max_mem": 4096, # 4GB + "lodestar_max_cpu": 1000, # 1 core + }, + "ephemery": { + "geth_max_mem": 
1024, # 1GB + "geth_max_cpu": 1000, # 1 core + "erigon_max_mem": 1024, # 1GB + "erigon_max_cpu": 1000, # 1 core + "nethermind_max_mem": 1024, # 1GB + "nethermind_max_cpu": 1000, # 1 core + "besu_max_mem": 1024, # 1GB + "besu_max_cpu": 1000, # 1 core + "reth_max_mem": 1024, # 1GB + "reth_max_cpu": 1000, # 1 core + "ethereumjs_max_mem": 1024, # 1GB + "ethereumjs_max_cpu": 1000, # 1 core + "prysm_max_mem": 1024, # 1GB + "prysm_max_cpu": 1000, # 1 core + "lighthouse_max_mem": 1024, # 1GB + "lighthouse_max_cpu": 1000, # 1 core + "teku_max_mem": 1024, # 1GB + "teku_max_cpu": 1000, # 1 core + "nimbus_max_mem": 1024, # 1GB + "nimbus_max_cpu": 1000, # 1 core + "lodestar_max_mem": 1024, # 1GB + "lodestar_max_cpu": 1000, # 1 core + }, + "kurtosis": { + "geth_max_mem": 1024, # 1GB + "geth_max_cpu": 1000, # 1 core + "erigon_max_mem": 1024, # 1GB + "erigon_max_cpu": 1000, # 1 core + "nethermind_max_mem": 1024, # 1GB + "nethermind_max_cpu": 1000, # 1 core + "besu_max_mem": 1024, # 1GB + "besu_max_cpu": 1000, # 1 core + "reth_max_mem": 1024, # 1GB + "reth_max_cpu": 1000, # 1 core + "ethereumjs_max_mem": 1024, # 1GB + "ethereumjs_max_cpu": 1000, # 1 core + "prysm_max_mem": 1024, # 1GB + "prysm_max_cpu": 1000, # 1 core + "lighthouse_max_mem": 1024, # 1GB + "lighthouse_max_cpu": 1000, # 1 core + "teku_max_mem": 1024, # 1GB + "teku_max_cpu": 1000, # 1 core + "nimbus_max_mem": 1024, # 1GB + "nimbus_max_cpu": 1000, # 1 core + "lodestar_max_mem": 1024, # 1GB + "lodestar_max_cpu": 1000, # 1 core + }, +} diff --git a/src/package_io/input_parser.star b/src/package_io/input_parser.star index b55d9fbd5..cc0cb91e0 100644 --- a/src/package_io/input_parser.star +++ b/src/package_io/input_parser.star @@ -78,6 +78,7 @@ def input_parser(plan, input_args): result["assertoor_params"] = get_default_assertoor_params() result["xatu_sentry_params"] = get_default_xatu_sentry_params() result["persistent"] = False + result["global_tolerations"] = [] for attr in input_args: value = input_args[attr] @@ 
-143,11 +144,15 @@ def input_parser(plan, input_args): el_extra_params=participant["el_extra_params"], el_extra_env_vars=participant["el_extra_env_vars"], el_extra_labels=participant["el_extra_labels"], + el_tolerations=participant["el_tolerations"], cl_client_type=participant["cl_client_type"], cl_client_image=participant["cl_client_image"], cl_client_log_level=participant["cl_client_log_level"], cl_client_volume_size=participant["cl_client_volume_size"], cl_split_mode_enabled=participant["cl_split_mode_enabled"], + cl_tolerations=participant["cl_tolerations"], + tolerations=participant["tolerations"], + validator_tolerations=participant["validator_tolerations"], beacon_extra_params=participant["beacon_extra_params"], beacon_extra_labels=participant["beacon_extra_labels"], validator_extra_params=participant["validator_extra_params"], @@ -268,6 +273,7 @@ def input_parser(plan, input_args): beacon_subscriptions=result["xatu_sentry_params"]["beacon_subscriptions"], xatu_server_tls=result["xatu_sentry_params"]["xatu_server_tls"], ), + global_tolerations=result["global_tolerations"], ) @@ -454,6 +460,44 @@ def get_client_log_level_or_default( return log_level +def get_client_tolerations( + specific_container_toleration, participant_tolerations, global_tolerations +): + toleration_list = [] + tolerations = [] + tolerations = specific_container_toleration if specific_container_toleration else [] + if not tolerations: + tolerations = participant_tolerations if participant_tolerations else [] + if not tolerations: + tolerations = global_tolerations if global_tolerations else [] + + if tolerations != []: + for toleration_data in tolerations: + if toleration_data.get("toleration_seconds"): + toleration_list.append( + Toleration( + key=toleration_data.get("key", ""), + value=toleration_data.get("value", ""), + operator=toleration_data.get("operator", ""), + effect=toleration_data.get("effect", ""), + toleration_seconds=toleration_data.get("toleration_seconds"), + ) + ) + # 
Gyani has to fix this in the future + # https://github.com/kurtosis-tech/kurtosis/issues/2093 + else: + toleration_list.append( + Toleration( + key=toleration_data.get("key", ""), + value=toleration_data.get("value", ""), + operator=toleration_data.get("operator", ""), + effect=toleration_data.get("effect", ""), + ) + ) + + return toleration_list + + def default_input_args(): network_params = default_network_params() participants = [default_participant()] @@ -467,6 +511,7 @@ def default_input_args(): "xatu_sentry_enabled": False, "parallel_keystore_generation": False, "disable_peer_scoring": False, + "global_tolerations": [], } @@ -499,11 +544,15 @@ def default_participant(): "el_extra_params": [], "el_extra_env_vars": {}, "el_extra_labels": {}, + "el_tolerations": [], "cl_client_type": "lighthouse", "cl_client_image": "", "cl_client_log_level": "", "cl_client_volume_size": 0, "cl_split_mode_enabled": False, + "cl_tolerations": [], + "validator_tolerations": [], + "tolerations": [], "beacon_extra_params": [], "beacon_extra_labels": {}, "validator_extra_params": [], diff --git a/src/participant_network.star b/src/participant_network.star index c52410ab9..f722732c3 100644 --- a/src/participant_network.star +++ b/src/participant_network.star @@ -64,6 +64,7 @@ def launch_participant_network( jwt_file, persistent, xatu_sentry_params, + global_tolerations, parallel_keystore_generation=False, ): num_participants = len(participants) @@ -327,6 +328,9 @@ def launch_participant_network( participant.el_extra_labels, persistent, participant.el_client_volume_size, + participant.el_tolerations, + participant.tolerations, + global_tolerations, ) # Add participant el additional prometheus metrics @@ -474,6 +478,10 @@ def launch_participant_network( participant.validator_extra_labels, persistent, participant.cl_client_volume_size, + participant.cl_tolerations, + participant.validator_tolerations, + participant.tolerations, + global_tolerations, participant.cl_split_mode_enabled, ) 
else: @@ -506,6 +514,10 @@ def launch_participant_network( participant.validator_extra_labels, persistent, participant.cl_client_volume_size, + participant.cl_tolerations, + participant.validator_tolerations, + participant.tolerations, + global_tolerations, participant.cl_split_mode_enabled, ) From 01868fcb604852cf66474fc9de9a53a7b87b7bc3 Mon Sep 17 00:00:00 2001 From: Barnabas Busa Date: Wed, 31 Jan 2024 22:19:06 +0100 Subject: [PATCH 07/33] fix: add more prefund addresses for verkle-gen (#482) --- src/participant_network.star | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/participant_network.star b/src/participant_network.star index f722732c3..e366e0fcf 100644 --- a/src/participant_network.star +++ b/src/participant_network.star @@ -125,7 +125,7 @@ def launch_participant_network( elif network_params.electra_fork_epoch != None: if network_params.electra_fork_epoch == 0: ethereum_genesis_generator_image = ( - "ethpandaops/ethereum-genesis-generator:4.0.0-rc.3" + "ethpandaops/ethereum-genesis-generator:4.0.0-rc.5" ) else: ethereum_genesis_generator_image = ( From 2d8a143f753eaa3ec13abe4ebbb57bf82548b3fb Mon Sep 17 00:00:00 2001 From: pk910 Date: Thu, 1 Feb 2024 08:58:31 +0100 Subject: [PATCH 08/33] feat: add support for custom assertoor images & use assertoor image with verkle support for verkle chains (#483) --- README.md | 4 ++++ main.star | 1 + src/assertoor/assertoor_launcher.star | 17 +++++++++++++++-- src/package_io/input_parser.star | 2 ++ 4 files changed, 22 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 85c0fd7c9..afe70ee1c 100644 --- a/README.md +++ b/README.md @@ -385,6 +385,10 @@ goomy_blob_params: # Configuration place for the assertoor testing tool - https:#github.com/ethpandaops/assertoor assertoor_params: + # Assertoor docker image to use + # Leave blank to use the default image according to your network params + image: "" + # Check chain stability # This check monitors the chain and succeeds if: # - all 
clients are synced diff --git a/main.star b/main.star index 4a219d12d..a955690e2 100644 --- a/main.star +++ b/main.star @@ -390,6 +390,7 @@ def run(plan, args={}): assertoor_config_template, all_participants, args_with_right_defaults.participants, + network_params, assertoor_params, ) plan.print("Successfully launched assertoor") diff --git a/src/assertoor/assertoor_launcher.star b/src/assertoor/assertoor_launcher.star index 5589a03db..c794dc069 100644 --- a/src/assertoor/assertoor_launcher.star +++ b/src/assertoor/assertoor_launcher.star @@ -34,6 +34,7 @@ def launch_assertoor( config_template, participant_contexts, participant_configs, + network_params, assertoor_params, ): all_client_info = [] @@ -88,18 +89,30 @@ def launch_assertoor( config = get_config( config_files_artifact_name, tests_config_artifacts_name, + network_params, + assertoor_params, ) plan.add_service(SERVICE_NAME, config) -def get_config(config_files_artifact_name, tests_config_artifacts_name): +def get_config( + config_files_artifact_name, + tests_config_artifacts_name, + network_params, + assertoor_params, +): config_file_path = shared_utils.path_join( ASSERTOOR_CONFIG_MOUNT_DIRPATH_ON_SERVICE, ASSERTOOR_CONFIG_FILENAME, ) - IMAGE_NAME = "ethpandaops/assertoor:master" + if assertoor_params.image != "": + IMAGE_NAME = assertoor_params.image + elif network_params.electra_fork_epoch != None: + IMAGE_NAME = "ethpandaops/assertoor:verkle-support" + else: + IMAGE_NAME = "ethpandaops/assertoor:master" return ServiceConfig( image=IMAGE_NAME, diff --git a/src/package_io/input_parser.star b/src/package_io/input_parser.star index cc0cb91e0..211a9175f 100644 --- a/src/package_io/input_parser.star +++ b/src/package_io/input_parser.star @@ -236,6 +236,7 @@ def input_parser(plan, input_args): goomy_blob_args=result["goomy_blob_params"]["goomy_blob_args"], ), assertoor_params=struct( + image=result["assertoor_params"]["image"], run_stability_check=result["assertoor_params"]["run_stability_check"], 
run_block_proposal_check=result["assertoor_params"][ "run_block_proposal_check" @@ -614,6 +615,7 @@ def get_default_goomy_blob_params(): def get_default_assertoor_params(): return { + "image": "", "run_stability_check": True, "run_block_proposal_check": True, "run_lifecycle_test": False, From bbe0b16e948fc50f51273e2f0ab91503603e9fc9 Mon Sep 17 00:00:00 2001 From: pk910 Date: Thu, 1 Feb 2024 09:28:35 +0100 Subject: [PATCH 09/33] fix: use latest stable image for assertoor (#484) --- src/assertoor/assertoor_launcher.star | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/assertoor/assertoor_launcher.star b/src/assertoor/assertoor_launcher.star index c794dc069..9a41d8e31 100644 --- a/src/assertoor/assertoor_launcher.star +++ b/src/assertoor/assertoor_launcher.star @@ -112,7 +112,7 @@ def get_config( elif network_params.electra_fork_epoch != None: IMAGE_NAME = "ethpandaops/assertoor:verkle-support" else: - IMAGE_NAME = "ethpandaops/assertoor:master" + IMAGE_NAME = "ethpandaops/assertoor:latest" return ServiceConfig( image=IMAGE_NAME, From abdfc2c3e73550069c2fbe0df5202f7f227a00cd Mon Sep 17 00:00:00 2001 From: Barnabas Busa Date: Thu, 1 Feb 2024 10:53:22 +0100 Subject: [PATCH 10/33] feat: we no longer need 4788 deployer (#485) --- README.md | 1 - main.star | 15 --- .../eip4788_deployment_launcher.star | 38 ------ src/eip4788_deployment/sender.py | 122 ------------------ src/participant_network.star | 2 +- 5 files changed, 1 insertion(+), 177 deletions(-) delete mode 100644 src/eip4788_deployment/eip4788_deployment_launcher.star delete mode 100644 src/eip4788_deployment/sender.py diff --git a/README.md b/README.md index afe70ee1c..7fb2f0f49 100644 --- a/README.md +++ b/README.md @@ -769,7 +769,6 @@ Here's a table of where the keys are used | 1 | blob_spammer | ✅ | | As the sender of blobs | | 3 | transaction_spammer | ✅ | | To spam transactions with | | 4 | goomy_blob | ✅ | | As the sender of blobs | -| 5 | eip4788_deployment | ✅ | | As contract 
deployer | | 6 | mev_flood | ✅ | | As the contract owner | | 7 | mev_flood | ✅ | | As the user_key | | 8 | assertoor | ✅ | ✅ | As the funding for tests | diff --git a/main.star b/main.star index a955690e2..9ccb88719 100644 --- a/main.star +++ b/main.star @@ -34,9 +34,6 @@ mev_flood = import_module("./src/mev/mev_flood/mev_flood_launcher.star") mev_custom_flood = import_module( "./src/mev/mev_custom_flood/mev_custom_flood_launcher.star" ) -eip4788_deployment = import_module( - "./src/eip4788_deployment/eip4788_deployment_launcher.star" -) broadcaster = import_module("./src/broadcaster/broadcaster.star") assertoor = import_module("./src/assertoor/assertoor_launcher.star") @@ -130,18 +127,6 @@ def run(plan, args={}): all_cl_client_contexts, args_with_right_defaults.participants, ) - if network_params.network == constants.NETWORK_NAME.kurtosis: - if network_params.deneb_fork_epoch != 0: - plan.print("Launching 4788 contract deployer") - el_uri = "http://{0}:{1}".format( - all_el_client_contexts[0].ip_addr, - all_el_client_contexts[0].rpc_port_num, - ) - eip4788_deployment.deploy_eip4788_contract_in_background( - plan, - genesis_constants.PRE_FUNDED_ACCOUNTS[5].private_key, - el_uri, - ) fuzz_target = "http://{0}:{1}".format( all_el_client_contexts[0].ip_addr, diff --git a/src/eip4788_deployment/eip4788_deployment_launcher.star b/src/eip4788_deployment/eip4788_deployment_launcher.star deleted file mode 100644 index 421a30f52..000000000 --- a/src/eip4788_deployment/eip4788_deployment_launcher.star +++ /dev/null @@ -1,38 +0,0 @@ -PYTHON_IMAGE = "ethpandaops/python-web3" -EIP4788_DEPLOYMENT_SERVICE_NAME = "eip4788-contract-deployment" - -# The min/max CPU/memory that deployer can use -MIN_CPU = 10 -MAX_CPU = 100 -MIN_MEMORY = 10 -MAX_MEMORY = 300 - - -def deploy_eip4788_contract_in_background(plan, sender_key, el_uri): - sender_script = plan.upload_files( - src="./sender.py", name="eip4788-deployment-sender" - ) - - plan.add_service( - name=EIP4788_DEPLOYMENT_SERVICE_NAME, 
- config=ServiceConfig( - image=PYTHON_IMAGE, - files={"/tmp": sender_script}, - cmd=["/bin/sh", "-c", "touch /tmp/sender.log && tail -f /tmp/sender.log"], - env_vars={ - "SENDER_PRIVATE_KEY": sender_key, - "EL_RPC_URI": el_uri, - }, - min_cpu=MIN_CPU, - max_cpu=MAX_CPU, - min_memory=MIN_MEMORY, - max_memory=MAX_MEMORY, - ), - ) - - plan.exec( - service_name=EIP4788_DEPLOYMENT_SERVICE_NAME, - recipe=ExecRecipe( - ["/bin/sh", "-c", "nohup python /tmp/sender.py > /dev/null 2>&1 &"] - ), - ) diff --git a/src/eip4788_deployment/sender.py b/src/eip4788_deployment/sender.py deleted file mode 100644 index fe6273389..000000000 --- a/src/eip4788_deployment/sender.py +++ /dev/null @@ -1,122 +0,0 @@ -""" -this script deploys the contract used by eip4788. It has been presigned and the contract uses a deterministic deployment. - -""" - -from web3 import Web3 -from web3.middleware import construct_sign_and_send_raw_middleware -import os -import time -import logging -from decimal import Decimal - -VALUE_TO_SEND = 0x9184 - -logging.basicConfig(filename="/tmp/sender.log", - filemode='a', - format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s', - datefmt='%H:%M:%S', - level=logging.INFO) - - -def eip4788_deployment(): - # this is the 5th prefunded address - sender = os.getenv("SENDER_PRIVATE_KEY", "7da08f856b5956d40a72968f93396f6acff17193f013e8053f6fbb6c08c194d6") - # this is the 4788 presigned contract deployer - receiver = "0x0B799C86a49DEeb90402691F1041aa3AF2d3C875" - signed_4788_deployment_tx = os.getenv("SIGNED_4788_DEPLOYMENT_TX", "f8838085e8d4a510008303d0908080b86a60618060095f395ff33373fffffffffffffffffffffffffffffffffffffffe14604d57602036146024575f5ffd5b5f35801560495762001fff810690815414603c575f5ffd5b62001fff01545f5260205ff35b5f5ffd5b62001fff42064281555f359062001fff0155001b820539851b9b6eb1f0") - el_uri = os.getenv("EL_RPC_URI", 'http://0.0.0.0:53913') - logging.info(f"Using sender {sender} receiver {receiver} and el_uri {el_uri}") - - w3 = 
Web3(Web3.HTTPProvider(el_uri)) - # sleep for 10s before checking again - time.sleep(10) - - # Check if the chain has started before submitting transactions - block = w3.eth.get_block('latest') - - logging.info(f"Latest block number: {block.number}") - if block.number >1: - logging.info("Chain has started, proceeding with Funding") - # Import sender account - sender_account = w3.eth.account.from_key(sender) - # Prepare to Construct and sign transaction - w3.middleware_onion.add(construct_sign_and_send_raw_middleware(sender_account)) - - # Prepare funding transaction - logging.info("Preparing funding tx") - transaction = { - "from": sender_account.address, - "to": receiver, - "value": w3.to_wei(Decimal('1000.0'), 'ether'), # Sending 1000 Ether - "gasPrice": w3.eth.gas_price, - 'nonce': w3.eth.get_transaction_count(sender_account.address) - } - - # Estimate gas - logging.info("Estimating gas") - estimated_gas = w3.eth.estimate_gas(transaction) - - # Set gas value - transaction["gas"] = estimated_gas - - # Send transaction - logging.debug(f"Sending deployment tx: {transaction}") - tx_hash = w3.eth.send_transaction(transaction) - - time.sleep(10) - # Wait for the transaction to be mined - funding_tx = w3.eth.get_transaction(tx_hash) - logging.debug(f"Funding Txhash: {tx_hash.hex()}") - logging.info(f"Genesis funder Balance: {w3.eth.get_balance(sender_account.address)}") - logging.info(f"4788 deployer Balance: {w3.eth.get_balance(receiver)}") - - if funding_tx["from"] == sender_account.address: - logging.info("Funding tx mined successfully") - logging.info("Deploying signed tx") - # Prepare deployment transaction - deployment_tx_hash = w3.eth.send_raw_transaction(signed_4788_deployment_tx) - - # Sleep before checking - time.sleep(10) - deployment_tx = w3.eth.get_transaction(deployment_tx_hash) - logging.debug(f"Deployment Txhash: {deployment_tx.hash.hex()}") - - # Sleep before checking - time.sleep(10) - - logging.info(f"4788 deployer Balance: 
{w3.eth.get_balance(receiver)}") - assert deployment_tx["from"] == receiver - - # Check if contract has been deployed - eip4788_code = w3.eth.get_code('0x000F3df6D732807Ef1319fB7B8bB8522d0Beac02') - if eip4788_code != "": - logging.info(f"Contract deployed: {eip4788_code.hex()}") - logging.info("Deployment tx mined successfully") - - # Exit script - return True - else: - logging.info("Deployment failed, restarting script") - return False - else: - logging.info("Funding failed, restarting script") - return False - else: - logging.info("Chain has not started, restarting script") - return False - -def run_till_deployed(): - deployment_status = False - while deployment_status is False: - try: - deployment_status = eip4788_deployment() - except Exception as e: - logging.error(e) - logging.error("restarting deployment as previous one failed") - - - -if __name__ == "__main__": - run_till_deployed() - logging.info("Deployment complete, exiting script") diff --git a/src/participant_network.star b/src/participant_network.star index e366e0fcf..f45d83656 100644 --- a/src/participant_network.star +++ b/src/participant_network.star @@ -119,7 +119,7 @@ def launch_participant_network( and network_params.electra_fork_epoch == None ): ethereum_genesis_generator_image = ( - "ethpandaops/ethereum-genesis-generator:2.0.8" + "ethpandaops/ethereum-genesis-generator:2.0.11" ) # we are running electra - experimental elif network_params.electra_fork_epoch != None: From 79dc5e19713d3f898f6255394290497d016f32d5 Mon Sep 17 00:00:00 2001 From: Parithosh Jayanthi Date: Thu, 1 Feb 2024 12:19:12 +0100 Subject: [PATCH 11/33] fix: bump verkle genesis generator (#486) go-verkle depns changes, we need this bump to be compatible for the current verkle tests --- src/participant_network.star | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/participant_network.star b/src/participant_network.star index f45d83656..456f187e8 100644 --- a/src/participant_network.star +++ 
b/src/participant_network.star @@ -125,7 +125,7 @@ def launch_participant_network( elif network_params.electra_fork_epoch != None: if network_params.electra_fork_epoch == 0: ethereum_genesis_generator_image = ( - "ethpandaops/ethereum-genesis-generator:4.0.0-rc.5" + "ethpandaops/ethereum-genesis-generator:4.0.0-rc.6" ) else: ethereum_genesis_generator_image = ( From 1e543e873c06e86a6448f8e88c53fb1bde35338e Mon Sep 17 00:00:00 2001 From: Barnabas Busa Date: Fri, 2 Feb 2024 12:00:41 +0100 Subject: [PATCH 12/33] feat: add verkle-gen-devnet-3 (#487) --- .../{verkle-gen-devnet-2.yaml => verkle-gen-devnet-3.yaml} | 6 +++--- .github/tests/verkle-gen.yaml | 5 +++-- 2 files changed, 6 insertions(+), 5 deletions(-) rename .github/tests/{verkle-gen-devnet-2.yaml => verkle-gen-devnet-3.yaml} (60%) diff --git a/.github/tests/verkle-gen-devnet-2.yaml b/.github/tests/verkle-gen-devnet-3.yaml similarity index 60% rename from .github/tests/verkle-gen-devnet-2.yaml rename to .github/tests/verkle-gen-devnet-3.yaml index ded906b52..818ae0bcd 100644 --- a/.github/tests/verkle-gen-devnet-2.yaml +++ b/.github/tests/verkle-gen-devnet-3.yaml @@ -1,13 +1,13 @@ participants: - el_client_type: geth - el_client_image: ethpandaops/geth:gballet-kaustinen-with-shapella-fc8f4b9 + el_client_image: ethpandaops/geth:kaustinen-with-shapella-6d7b22c cl_client_type: lighthouse cl_client_image: ethpandaops/lighthouse:verkle-trees-capella-2ffb8a9 count: 2 - el_client_type: geth - el_client_image: ethpandaops/geth:gballet-kaustinen-with-shapella-fc8f4b9 + el_client_image: ethpandaops/geth:kaustinen-with-shapella-6d7b22c cl_client_type: lodestar cl_client_image: ethpandaops/lodestar:g11tech-verge-815364b network_params: - network: verkle-gen-devnet-2 + network: verkle-gen-devnet-3 diff --git a/.github/tests/verkle-gen.yaml b/.github/tests/verkle-gen.yaml index 553a11f5a..5164562da 100644 --- a/.github/tests/verkle-gen.yaml +++ b/.github/tests/verkle-gen.yaml @@ -1,13 +1,14 @@ participants: - el_client_type: 
geth - el_client_image: ethpandaops/geth:gballet-kaustinen-with-shapella-fc8f4b9 + el_client_image: ethpandaops/geth:kaustinen-with-shapella-6d7b22c cl_client_type: lighthouse cl_client_image: ethpandaops/lighthouse:verkle-trees-capella-2ffb8a9 count: 2 - el_client_type: geth - el_client_image: ethpandaops/geth:gballet-kaustinen-with-shapella-fc8f4b9 + el_client_image: ethpandaops/geth:kaustinen-with-shapella-6d7b22c cl_client_type: lodestar cl_client_image: ethpandaops/lodestar:g11tech-verge-815364b + count: 2 network_params: electra_fork_epoch: 0 additional_services: From b3418cf1545378d4b412966b9c33f650141aec04 Mon Sep 17 00:00:00 2001 From: franjoespejo Date: Fri, 2 Feb 2024 12:54:49 +0100 Subject: [PATCH 13/33] feat: blockscout support with sc verification (#481) addresses #372 --------- Co-authored-by: franjoespejo --- .github/tests/mix-with-tools-mev.yaml | 1 + .github/tests/mix-with-tools.yaml | 1 + README.md | 1 + main.star | 12 ++- src/blockscout/blockscout_launcher.star | 131 ++++++++++++++++++++++++ 5 files changed, 145 insertions(+), 1 deletion(-) create mode 100644 src/blockscout/blockscout_launcher.star diff --git a/.github/tests/mix-with-tools-mev.yaml b/.github/tests/mix-with-tools-mev.yaml index d20500b15..51aaccc45 100644 --- a/.github/tests/mix-with-tools-mev.yaml +++ b/.github/tests/mix-with-tools-mev.yaml @@ -21,6 +21,7 @@ additional_services: - goomy_blob - custom_flood - blobscan + - blockscout ethereum_metrics_exporter_enabled: true snooper_enabled: true mev_type: full diff --git a/.github/tests/mix-with-tools.yaml b/.github/tests/mix-with-tools.yaml index 2853d3a18..52afabd67 100644 --- a/.github/tests/mix-with-tools.yaml +++ b/.github/tests/mix-with-tools.yaml @@ -21,5 +21,6 @@ additional_services: - goomy_blob - custom_flood - blobscan + - blockscout ethereum_metrics_exporter_enabled: true snooper_enabled: true diff --git a/README.md b/README.md index 7fb2f0f49..1c5a832cd 100644 --- a/README.md +++ b/README.md @@ -458,6 +458,7 @@ 
additional_services: - custom_flood - goomy_blob - el_forkmon + - blockscout - beacon_metrics_gazer - dora - full_beaconchain_explorer diff --git a/main.star b/main.star index 9ccb88719..bef06e591 100644 --- a/main.star +++ b/main.star @@ -25,6 +25,7 @@ blobscan = import_module("./src/blobscan/blobscan_launcher.star") full_beaconchain_explorer = import_module( "./src/full_beaconchain/full_beaconchain_launcher.star" ) +blockscout = import_module("./src/blockscout/blockscout_launcher.star") prometheus = import_module("./src/prometheus/prometheus_launcher.star") grafana = import_module("./src/grafana/grafana_launcher.star") mev_boost = import_module("./src/mev/mev_boost/mev_boost_launcher.star") @@ -68,7 +69,6 @@ def run(plan, args={}): static_files.GRAFANA_DASHBOARD_PROVIDERS_CONFIG_TEMPLATE_FILEPATH ) prometheus_additional_metrics_jobs = [] - raw_jwt_secret = read_file(static_files.JWT_PATH_FILEPATH) jwt_file = plan.upload_files( src=static_files.JWT_PATH_FILEPATH, @@ -326,6 +326,12 @@ def run(plan, args={}): beacon_metrics_gazer_prometheus_metrics_job ) plan.print("Successfully launched beacon metrics gazer") + elif additional_service == "blockscout": + plan.print("Launching blockscout") + blockscout_sc_verif_url = blockscout.launch_blockscout( + plan, all_el_client_contexts, persistent + ) + plan.print("Successfully launched blockscout") elif additional_service == "dora": plan.print("Launching dora") dora_config_template = read_file(static_files.DORA_CONFIG_TEMPLATE_FILEPATH) @@ -434,8 +440,12 @@ def run(plan, args={}): user=GRAFANA_USER, password=GRAFANA_PASSWORD, ) + output = struct( grafana_info=grafana_info, + blockscout_sc_verif_url=None + if ("blockscout" in args_with_right_defaults.additional_services) == False + else blockscout_sc_verif_url, all_participants=all_participants, final_genesis_timestamp=final_genesis_timestamp, genesis_validators_root=genesis_validators_root, diff --git a/src/blockscout/blockscout_launcher.star 
b/src/blockscout/blockscout_launcher.star new file mode 100644 index 000000000..23ce9a366 --- /dev/null +++ b/src/blockscout/blockscout_launcher.star @@ -0,0 +1,131 @@ +shared_utils = import_module("../shared_utils/shared_utils.star") +constants = import_module("../package_io/constants.star") +postgres = import_module("github.com/kurtosis-tech/postgres-package/main.star") + +IMAGE_NAME_BLOCKSCOUT = "blockscout/blockscout:6.0.0" +IMAGE_NAME_BLOCKSCOUT_VERIF = "ghcr.io/blockscout/smart-contract-verifier:v1.6.0" + +SERVICE_NAME_BLOCKSCOUT = "blockscout" + +HTTP_PORT_ID = "http" +HTTP_PORT_NUMBER = 4000 +HTTP_PORT_NUMBER_VERIF = 8050 + +BLOCKSCOUT_MIN_CPU = 100 +BLOCKSCOUT_MAX_CPU = 1000 +BLOCKSCOUT_MIN_MEMORY = 1024 +BLOCKSCOUT_MAX_MEMORY = 2048 + +BLOCKSCOUT_VERIF_MIN_CPU = 10 +BLOCKSCOUT_VERIF_MAX_CPU = 1000 +BLOCKSCOUT_VERIF_MIN_MEMORY = 10 +BLOCKSCOUT_VERIF_MAX_MEMORY = 1024 + +USED_PORTS = { + HTTP_PORT_ID: shared_utils.new_port_spec( + HTTP_PORT_NUMBER, + shared_utils.TCP_PROTOCOL, + shared_utils.HTTP_APPLICATION_PROTOCOL, + ) +} + +VERIF_USED_PORTS = { + HTTP_PORT_ID: shared_utils.new_port_spec( + HTTP_PORT_NUMBER_VERIF, + shared_utils.TCP_PROTOCOL, + shared_utils.HTTP_APPLICATION_PROTOCOL, + ) +} + + +def launch_blockscout(plan, el_client_contexts, persistent): + postgres_output = postgres.run( + plan, + service_name="{}-postgres".format(SERVICE_NAME_BLOCKSCOUT), + database="blockscout", + extra_configs=["max_connections=1000"], + persistent=persistent, + ) + + el_client_context = el_client_contexts[0] + el_client_rpc_url = "http://{}:{}/".format( + el_client_context.ip_addr, el_client_context.rpc_port_num + ) + el_client_name = el_client_context.client_name + + config_verif = get_config_verif() + verif_service_name = "{}-verif".format(SERVICE_NAME_BLOCKSCOUT) + verif_service = plan.add_service(verif_service_name, config_verif) + verif_url = "http://{}:{}/api".format( + verif_service.hostname, verif_service.ports["http"].number + ) + + config_backend = 
get_config_backend( + postgres_output, el_client_rpc_url, verif_url, el_client_name + ) + blockscout_service = plan.add_service(SERVICE_NAME_BLOCKSCOUT, config_backend) + plan.print(blockscout_service) + + blockscout_url = "http://{}:{}".format( + blockscout_service.hostname, blockscout_service.ports["http"].number + ) + + return blockscout_url + + +def get_config_verif(): + return ServiceConfig( + image=IMAGE_NAME_BLOCKSCOUT_VERIF, + ports=VERIF_USED_PORTS, + env_vars={ + "SMART_CONTRACT_VERIFIER__SERVER__HTTP__ADDR": "0.0.0.0:{}".format( + HTTP_PORT_NUMBER_VERIF + ) + }, + min_cpu=BLOCKSCOUT_VERIF_MIN_CPU, + max_cpu=BLOCKSCOUT_VERIF_MAX_CPU, + min_memory=BLOCKSCOUT_VERIF_MIN_MEMORY, + max_memory=BLOCKSCOUT_VERIF_MAX_MEMORY, + ) + + +def get_config_backend(postgres_output, el_client_rpc_url, verif_url, el_client_name): + database_url = "{protocol}://{user}:{password}@{hostname}:{port}/{database}".format( + protocol="postgresql", + user=postgres_output.user, + password=postgres_output.password, + hostname=postgres_output.service.hostname, + port=postgres_output.port.number, + database=postgres_output.database, + ) + + return ServiceConfig( + image=IMAGE_NAME_BLOCKSCOUT, + ports=USED_PORTS, + cmd=[ + "/bin/sh", + "-c", + 'bin/blockscout eval "Elixir.Explorer.ReleaseTasks.create_and_migrate()" && bin/blockscout start', + ], + env_vars={ + "ETHEREUM_JSONRPC_VARIANT": el_client_name, + "ETHEREUM_JSONRPC_HTTP_URL": el_client_rpc_url, + "ETHEREUM_JSONRPC_TRACE_URL": el_client_rpc_url, + "DATABASE_URL": database_url, + "COIN": "ETH", + "MICROSERVICE_SC_VERIFIER_ENABLED": "true", + "MICROSERVICE_SC_VERIFIER_URL": verif_url, + "MICROSERVICE_SC_VERIFIER_TYPE": "sc_verifier", + "INDEXER_DISABLE_PENDING_TRANSACTIONS_FETCHER": "true", + "ECTO_USE_SSL": "false", + "NETWORK": "Kurtosis", + "SUBNETWORK": "Kurtosis", + "API_V2_ENABLED": "true", + "PORT": "{}".format(HTTP_PORT_NUMBER), + "SECRET_KEY_BASE": "56NtB48ear7+wMSf0IQuWDAAazhpb31qyc7GiyspBP2vh7t5zlCsF5QDv76chXeN", + }, + 
min_cpu=BLOCKSCOUT_MIN_CPU, + max_cpu=BLOCKSCOUT_MAX_CPU, + min_memory=BLOCKSCOUT_MIN_MEMORY, + max_memory=BLOCKSCOUT_MAX_MEMORY, + ) From d5966991653ad48094cf71d3c01612349a651877 Mon Sep 17 00:00:00 2001 From: pk910 Date: Thu, 8 Feb 2024 11:46:09 +0100 Subject: [PATCH 14/33] feat: improve built-in assertoor tests (#488) Improves the built-in assertoor tests to make them more resilient against non-critical failures. --- .../tests/all-opcodes-transaction-test.yaml | 212 +++++++++++++++++- .../tests/blob-transactions-test.yaml | 39 ++-- .../tests/eoa-transactions-test.yaml | 49 ++-- .../tests/validator-lifecycle-test.yaml | 81 ++++--- 4 files changed, 308 insertions(+), 73 deletions(-) diff --git a/static_files/assertoor-config/tests/all-opcodes-transaction-test.yaml b/static_files/assertoor-config/tests/all-opcodes-transaction-test.yaml index 1458e9450..5bae29e82 100644 --- a/static_files/assertoor-config/tests/all-opcodes-transaction-test.yaml +++ b/static_files/assertoor-config/tests/all-opcodes-transaction-test.yaml @@ -10,13 +10,14 @@ tasks: minClientCount: 1 - name: generate_transaction - title: "Generate all-opcodes test transaction" + title: "Execute all opcodes as contract deployment" config: feeCap: 5000000000 # 5 gwei gasLimit: 1000000 contractDeployment: true callData: 
"602a61053901600052600160206000a1602a61053902600052600260206000a1602a61053903600052600360206000a1602a61053904600052600460206000a17fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffd661053905600052600560206000a1602a61053906600052600660206000a17fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffd661053907600052600760206000a1610400602a61053908600052600860206000a1610400602a61053909600052600960206000a1602a6105390a600052600a60206000a1602a6105390b600052600b60206000a1602a61053910600052601060206000a1602a61053911600052601160206000a17fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffd661053912600052601260206000a17fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffd661053913600052601360206000a1602a61053914600052601460206000a161053915600052601560206000a1602a61053916600052601660206000a1602a61053917600052601760206000a1602a61053918600052601860206000a161053919600052601960206000a1602a6105391a600052601a60206000a1602a6105391b600052601b60206000a1602a6105391c600052601c60206000a17fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff6105391d600052601d60206000a16105396000526020600020600052602060206000a130600052603060206000a13031600052603160206000a132600052603260206000a133600052603360206000a134600052603460206000a1600035600052603560206000a136600052603660206000a160146002600137603760206000a138600052603860206000a160146002600139603960206000a13a600052603a60206000a1303b600052603b60206000a1601460026001303c603c60206000a1686000388082803990f360b81b600052600960006000f0808060005260f060206000a167100000000000000060005260205f5f5f5f855af13d600052603d60206000a13d600060003e603e60206000a13f600052603f60206000a16001430340600052604060206000a141600052604160206000a142600052604260206000a143600052604360206000a144600052604460206000a145600052604560206000a146600052604660206000a147600052604760206000a148600052604860206000a1610539600150600052605060206000a1600151600052605160206000a1610539600052605260206000a1610539600053605360206000a1610539600155605560206000a160015
4600052605460206000a16642424242424242600052600f5801566710000000000000006000525b605660206000a1602a61053911600f5801576710000000000000006000525b605760206000a158600052605860206000a159600052605960206000a15a600052605a60206000a16001600052606060206000a1610102600052606160206000a162010203600052606260206000a16301020304600052606360206000a1640102030405600052606460206000a165010203040506600052606560206000a16601020304050607600052606660206000a1670102030405060708600052606760206000a168010203040506070809600052606860206000a16901020304050607080910600052606960206000a16a0102030405060708091011600052606a60206000a16b010203040506070809101112600052606b60206000a16c01020304050607080910111213600052606c60206000a16d0102030405060708091011121314600052606d60206000a16e010203040506070809101112131415600052606e60206000a16f01020304050607080910111213141516600052606f60206000a1700102030405060708091011121314151617600052607060206000a171010203040506070809101112131415161718600052607160206000a17201020304050607080910111213141516171819600052607260206000a1730102030405060708091011121314151617181920600052607360206000a174010203040506070809101112131415161718192021600052607460206000a17501020304050607080910111213141516171819202122600052607560206000a1760102030405060708091011121314151617181920212223600052607660206000a177010203040506070809101112131415161718192021222324600052607760206000a17801020304050607080910111213141516171819202122232425600052607860206000a1790102030405060708091011121314151617181920212223242526600052607960206000a17a010203040506070809101112131415161718192021222324252627600052607a60206000a17b01020304050607080910111213141516171819202122232425262728600052607b60206000a17c0102030405060708091011121314151617181920212223242526272829600052607c60206000a17d010203040506070809101112131415161718192021222324252627282930600052607d60206000a17e01020304050607080910111213141516171819202122232425262728293031600052607e60206000a17f0102030405060708091011121314151617181920212223242526272829303132600052607f60206000a16000601160226033604
46055606660776088609960aa60bb60cc60dd60ee60ff80600052608060206000a181600052608160206000a182600052608260206000a183600052608360206000a184600052608460206000a185600052608560206000a186600052608660206000a187600052608760206000a188600052608860206000a189600052608960206000a18a600052608a60206000a18b600052608b60206000a18c600052608c60206000a18d600052608d60206000a18e600052608e60206000a18f600052608f60206000a1604290600052609060206000a1604291600052609160206000a1604292600052609260206000a1604293600052609360206000a1604294600052609460206000a1604295600052609560206000a1604296600052609660206000a1604297600052609760206000a1604298600052609860206000a1604299600052609960206000a160429a600052609a60206000a160429b600052609b60206000a160429c600052609c60206000a160429d600052609d60206000a160429e600052609e60206000a160429f600052609f60206000a161133760005260206000a060a160206000a1601160a260206000a26022601160a360206000a360336022601160a460206000a4686000388082803990f360b81b600052600960006000f08060005260f060206000a160205f5f5f5f85612710f160005260f160206000a160205f5f5f5f85612710f260005260f260206000a160205f5f5f845af460005260f460206000a1686000388082803990f360b81b6000526000600960006000f58060005260f560206000a160205f5f5f84612710fa60005260fa60206000a16000388082803990f3" failOnReject: true + contractAddressResultVar: "testContractAddr" expectEvents: - { topic0: "0x0000000000000000000000000000000000000000000000000000000000000001", data: "0x0000000000000000000000000000000000000000000000000000000000000563" } - { topic0: "0x0000000000000000000000000000000000000000000000000000000000000002", data: "0x000000000000000000000000000000000000000000000000000000000000db5a" } @@ -69,6 +70,215 @@ tasks: - { topic0: "0x0000000000000000000000000000000000000000000000000000000000000059", data: "0x0000000000000000000000000000000000000000000000000000000000000040" } configVars: privateKey: "walletPrivkey" +- name: generate_transaction + title: "Execute all opcodes as contract call" + config: + feeCap: 5000000000 # 5 gwei + gasLimit: 1000000 + 
callData: "1337133713371337" + failOnReject: true + expectEvents: + - { topic0: "0x0000000000000000000000000000000000000000000000000000000000000001", data: "0x0000000000000000000000000000000000000000000000000000000000000563" } + - { topic0: "0x0000000000000000000000000000000000000000000000000000000000000002", data: "0x000000000000000000000000000000000000000000000000000000000000db5a" } + - { topic0: "0x0000000000000000000000000000000000000000000000000000000000000003", data: "0x000000000000000000000000000000000000000000000000000000000000050f" } + - { topic0: "0x0000000000000000000000000000000000000000000000000000000000000004", data: "0x000000000000000000000000000000000000000000000000000000000000001f" } + - { topic0: "0x0000000000000000000000000000000000000000000000000000000000000005", data: "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe1" } + - { topic0: "0x0000000000000000000000000000000000000000000000000000000000000006", data: "0x0000000000000000000000000000000000000000000000000000000000000023" } + - { topic0: "0x0000000000000000000000000000000000000000000000000000000000000007", data: "0x0000000000000000000000000000000000000000000000000000000000000023" } + - { topic0: "0x0000000000000000000000000000000000000000000000000000000000000008", data: "0x0000000000000000000000000000000000000000000000000000000000000163" } + - { topic0: "0x0000000000000000000000000000000000000000000000000000000000000009", data: "0x000000000000000000000000000000000000000000000000000000000000035a" } + - { topic0: "0x000000000000000000000000000000000000000000000000000000000000000a", data: "0x22216e0e1bc703e8543d93e59e8f927277d1501d039f87b7aeb01fd538f0ee71" } + - { topic0: "0x000000000000000000000000000000000000000000000000000000000000000b", data: "0x000000000000000000000000000000000000000000000000000000000000002a" } + - { topic0: "0x0000000000000000000000000000000000000000000000000000000000000010", data: "0x0000000000000000000000000000000000000000000000000000000000000000" } + 
- { topic0: "0x0000000000000000000000000000000000000000000000000000000000000011", data: "0x0000000000000000000000000000000000000000000000000000000000000001" } + - { topic0: "0x0000000000000000000000000000000000000000000000000000000000000012", data: "0x0000000000000000000000000000000000000000000000000000000000000000" } + - { topic0: "0x0000000000000000000000000000000000000000000000000000000000000013", data: "0x0000000000000000000000000000000000000000000000000000000000000001" } + - { topic0: "0x0000000000000000000000000000000000000000000000000000000000000014", data: "0x0000000000000000000000000000000000000000000000000000000000000000" } + - { topic0: "0x0000000000000000000000000000000000000000000000000000000000000015", data: "0x0000000000000000000000000000000000000000000000000000000000000000" } + - { topic0: "0x0000000000000000000000000000000000000000000000000000000000000016", data: "0x0000000000000000000000000000000000000000000000000000000000000028" } + - { topic0: "0x0000000000000000000000000000000000000000000000000000000000000017", data: "0x000000000000000000000000000000000000000000000000000000000000053b" } + - { topic0: "0x0000000000000000000000000000000000000000000000000000000000000018", data: "0x0000000000000000000000000000000000000000000000000000000000000513" } + - { topic0: "0x0000000000000000000000000000000000000000000000000000000000000019", data: "0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffac6" } + - { topic0: "0x000000000000000000000000000000000000000000000000000000000000001a", data: "0x0000000000000000000000000000000000000000000000000000000000000000" } + - { topic0: "0x000000000000000000000000000000000000000000000000000000000000001b", data: "0x0000000000000000000000000000000000000000000000000000000000000000" } + - { topic0: "0x000000000000000000000000000000000000000000000000000000000000001c", data: "0x0000000000000000000000000000000000000000000000000000000000000000" } + - { topic0: 
"0x000000000000000000000000000000000000000000000000000000000000001d", data: "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" } + - { topic0: "0x0000000000000000000000000000000000000000000000000000000000000020", data: "0x64525377d0e4fdc0b5cb83d111f37debd7efc1f40a572ff8a92bbeeb587a5603" } + - { topic0: "0x0000000000000000000000000000000000000000000000000000000000000031", data: "0x0000000000000000000000000000000000000000000000000000000000000000" } + - { topic0: "0x0000000000000000000000000000000000000000000000000000000000000034", data: "0x0000000000000000000000000000000000000000000000000000000000000000" } + - { topic0: "0x0000000000000000000000000000000000000000000000000000000000000035", data: "0x1337133713371337000000000000000000000000000000000000000000000000" } + - { topic0: "0x0000000000000000000000000000000000000000000000000000000000000036", data: "0x0000000000000000000000000000000000000000000000000000000000000008" } + - { topic0: "0x0000000000000000000000000000000000000000000000000000000000000037", data: "0x0013371337133700000000000000000000000000000000000000000000000008" } + - { topic0: "0x0000000000000000000000000000000000000000000000000000000000000038", data: "0x0000000000000000000000000000000000000000000000000000000000000a38" } + - { topic0: "0x0000000000000000000000000000000000000000000000000000000000000039", data: "0x0061053901600052600160206000a1602a610539020000000000000000000a38" } + - { topic0: "0x000000000000000000000000000000000000000000000000000000000000003b", data: "0x0000000000000000000000000000000000000000000000000000000000000a38" } + - { topic0: "0x000000000000000000000000000000000000000000000000000000000000003c", data: "0x0061053901600052600160206000a1602a610539020000000000000000000a38" } + - { topic0: "0x000000000000000000000000000000000000000000000000000000000000003d", data: "0x0000000000000000000000000000000000000000000000000000000000000009" } + - { topic0: 
"0x000000000000000000000000000000000000000000000000000000000000003e", data: "0x6000388082803990f30000000000000000000000000000000000000000000009" } + - { topic0: "0x000000000000000000000000000000000000000000000000000000000000003f", data: "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" } + - { topic0: "0x0000000000000000000000000000000000000000000000000000000000000047", data: "0x0000000000000000000000000000000000000000000000000000000000000000" } + - { topic0: "0x0000000000000000000000000000000000000000000000000000000000000050", data: "0x0000000000000000000000000000000000000000000000000000000000000539" } + - { topic0: "0x0000000000000000000000000000000000000000000000000000000000000051", data: "0x0000000000000000000000000000000000000000000000000000000000053900" } + - { topic0: "0x0000000000000000000000000000000000000000000000000000000000000052", data: "0x0000000000000000000000000000000000000000000000000000000000000539" } + - { topic0: "0x0000000000000000000000000000000000000000000000000000000000000053", data: "0x3900000000000000000000000000000000000000000000000000000000000539" } + - { topic0: "0x0000000000000000000000000000000000000000000000000000000000000054", data: "0x0000000000000000000000000000000000000000000000000000000000000539" } + - { topic0: "0x0000000000000000000000000000000000000000000000000000000000000055", data: "0x3900000000000000000000000000000000000000000000000000000000000539" } + - { topic0: "0x0000000000000000000000000000000000000000000000000000000000000056", data: "0x0000000000000000000000000000000000000000000000000042424242424242" } + - { topic0: "0x0000000000000000000000000000000000000000000000000000000000000057", data: "0x0000000000000000000000000000000000000000000000000042424242424242" } + - { topic0: "0x0000000000000000000000000000000000000000000000000000000000000058", data: "0x0000000000000000000000000000000000000000000000000000000000000435" } + - { topic0: 
"0x0000000000000000000000000000000000000000000000000000000000000059", data: "0x0000000000000000000000000000000000000000000000000000000000000040" } + configVars: + privateKey: "walletPrivkey" + targetAddress: "testContractAddr" + +- name: generate_transaction + title: "Generate test transaction with STOP opcode" + config: + feeCap: 5000000000 # 5 gwei + gasLimit: 100000 + contractDeployment: true + callData: "006000388082803990f3" + awaitReceipt: false + configVars: + privateKey: "walletPrivkey" +- name: generate_transaction + title: "Generate test transaction with RETURN opcode" + config: + feeCap: 5000000000 # 5 gwei + gasLimit: 100000 + contractDeployment: true + callData: "61053960005260206000f3" + awaitReceipt: false + configVars: + privateKey: "walletPrivkey" +- name: generate_transaction + title: "Generate test transaction with REVERT opcode" + config: + feeCap: 5000000000 # 5 gwei + gasLimit: 100000 + contractDeployment: true + callData: "61053960005260206000fd" + awaitReceipt: false + configVars: + privateKey: "walletPrivkey" +- name: generate_transaction + title: "Generate test transaction with INVALID opcode" + config: + feeCap: 5000000000 # 5 gwei + gasLimit: 100000 + contractDeployment: true + callData: "610539600052fe" + awaitReceipt: false + configVars: + privateKey: "walletPrivkey" + + +# test precompiles +# deploy transient storage contract +- name: generate_transaction + title: "Deploy precompiles test contract" + config: + feeCap: 5000000000 # 5 gwei + gasLimit: 2500000 + contractDeployment: true + callData: 
"608060405234801561000f575f80fd5b5060015f1b60405161002090610081565b8190604051809103905ff590508015801561003d573d5f803e3d5ffd5b505f806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555061008e565b610cbc806113d083390190565b6113358061009b5f395ff3fe608060405234801561000f575f80fd5b5060043610610060575f3560e01c80630a8e8e01146100645780631ad7be821461006e57806366e41cb7146100785780636b59084d146100825780638f0d282d1461008c578063a7deec9214610096575b5f80fd5b61006c6100a0565b005b610076610163565b005b61008061026f565b005b61008a61033c565b005b61009461044a565b005b61009e61056b565b005b5f60036113376040516020016100b6919061093f565b6040516020818303038152906040526040516100d291906109c5565b602060405180830381855afa1580156100ed573d5f803e3d5ffd5b5050506040515160601b6bffffffffffffffffffffffff1916905060037f02f3e89081ef16f09f0e2ffdcf090ded6d9b3873ccd94513b60b4e667132f2d58260405160200161013c9190610a04565b6040516020818303038152906040526040516101589190610a76565b60405180910390a250565b5f805f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16633148f14f634fb110d263fcde41b26604c01db400b0c96040518463ffffffff1660e01b81526004016101d093929190610b53565b6020604051808303815f875af11580156101ec573d5f803e3d5ffd5b505050506040513d601f19601f820116820180604052508101906102109190610bc3565b905060057f02f3e89081ef16f09f0e2ffdcf090ded6d9b3873ccd94513b60b4e667132f2d5825f1b6040516020016102489190610a04565b6040516020818303038152906040526040516102649190610a76565b60405180910390a250565b5f6002611337604051602001610285919061093f565b6040516020818303038152906040526040516102a191906109c5565b602060405180830381855afa1580156102bc573d5f803e3d5ffd5b5050506040513d601f19601f820116820180604052508101906102df9190610c18565b905060027f02f3e89081ef16f09f0e2ffdcf090ded6d9b3873ccd94513b60b4e667132f2d5826040516020016103159190610a04565b6040516020818303038152906040526040516103319190610a76565b60405180910390a250565b5f60017f345d9e6eb0778ac44a2803c
061bf16a9cbd04495237b69fc85ad7ab2e256d9ee601c7f198177033ef6625421cd1b7ef6036264face53da5da4d7f2948aef3edf7e3f957f5c8fcf4db887386224512af70a8bc50d678069359c4d208a496d3a47339c78106040515f81526020016040526040516103c09493929190610d3e565b6020604051602081039080840390855afa1580156103e0573d5f803e3d5ffd5b50505060206040510351905060017f02f3e89081ef16f09f0e2ffdcf090ded6d9b3873ccd94513b60b4e667132f2d58260601b6040516020016104239190610dcc565b60405160208183030381529060405260405161043f9190610a76565b60405180910390a250565b5f7f13371337133713371337133713371337133713371337133713371337133713375f1b90505f805f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663840f6120836040516020016104be9190610a04565b6040516020818303038152906040526040518263ffffffff1660e01b81526004016104e99190610a76565b5f604051808303815f875af1158015610504573d5f803e3d5ffd5b505050506040513d5f823e3d601f19601f8201168201806040525081019061052c9190610f04565b905060047f02f3e89081ef16f09f0e2ffdcf090ded6d9b3873ccd94513b60b4e667132f2d58260405161055f9190610a76565b60405180910390a25050565b5f600c9050610578610898565b7f48c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5815f600281106105ac576105ab610f4b565b5b6020020181815250507fd182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b816001600281106105ea576105e9610f4b565b5b6020020181815250506105fb6108ba565b7f6162630000000000000000000000000000000000000000000000000000000000815f6004811061062f5761062e610f4b565b5b6020020181815250505f8160016004811061064d5761064c610f4b565b5b6020020181815250505f8160026004811061066b5761066a610f4b565b5b6020020181815250505f8160036004811061068957610688610f4b565b5b60200201818152505061069a6108dc565b7f0300000000000000000000000000000000000000000000000000000000000000815f600281106106ce576106cd610f4b565b5b602002019077ffffffffffffffffffffffffffffffffffffffffffffffff1916908177ffffffffffffffffffffffffffffffffffffffffffffffff1916815250505f8160016002811061072457610723610f4b565b5b602002019077fffffffffffffffffff
fffffffffffffffffffffffffffff1916908177ffffffffffffffffffffffffffffffffffffffffffffffff1916815250505f805f9054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166373fe73148686868660016040518663ffffffff1660e01b81526004016107c89594939291906111a4565b6040805180830381865afa1580156107e2573d5f803e3d5ffd5b505050506040513d601f19601f8201168201806040525081019061080691906112a9565b905060097f02f3e89081ef16f09f0e2ffdcf090ded6d9b3873ccd94513b60b4e667132f2d5825f6002811061083e5761083d610f4b565b5b60200201518360016002811061085757610856610f4b565b5b602002015160405160200161086d9291906112d4565b6040516020818303038152906040526040516108899190610a76565b60405180910390a25050505050565b6040518060400160405280600290602082028036833780820191505090505090565b6040518060800160405280600490602082028036833780820191505090505090565b6040518060400160405280600290602082028036833780820191505090505090565b5f61ffff82169050919050565b5f8160f01b9050919050565b5f6109218261090b565b9050919050565b610939610934826108fe565b610917565b82525050565b5f61094a8284610928565b60028201915081905092915050565b5f81519050919050565b5f81905092915050565b5f5b8381101561098a57808201518184015260208101905061096f565b5f8484015250505050565b5f61099f82610959565b6109a98185610963565b93506109b981856020860161096d565b80840191505092915050565b5f6109d08284610995565b915081905092915050565b5f819050919050565b5f819050919050565b6109fe6109f9826109db565b6109e4565b82525050565b5f610a0f82846109ed565b60208201915081905092915050565b5f82825260208201905092915050565b5f601f19601f8301169050919050565b5f610a4882610959565b610a528185610a1e565b9350610a6281856020860161096d565b610a6b81610a2e565b840191505092915050565b5f6020820190508181035f830152610a8e8184610a3e565b905092915050565b5f819050919050565b5f819050919050565b5f819050919050565b5f610acb610ac6610ac184610a96565b610aa8565b610a9f565b9050919050565b610adb81610ab1565b82525050565b5f819050919050565b5f610b04610aff610afa84610ae1565b610aa8565b610a9f565b9050919050565b610b1481610aea565b82525050565b5f8
19050919050565b5f610b3d610b38610b3384610b1a565b610aa8565b610a9f565b9050919050565b610b4d81610b23565b82525050565b5f606082019050610b665f830186610ad2565b610b736020830185610b0b565b610b806040830184610b44565b949350505050565b5f604051905090565b5f80fd5b5f80fd5b610ba281610a9f565b8114610bac575f80fd5b50565b5f81519050610bbd81610b99565b92915050565b5f60208284031215610bd857610bd7610b91565b5b5f610be584828501610baf565b91505092915050565b610bf7816109db565b8114610c01575f80fd5b50565b5f81519050610c1281610bee565b92915050565b5f60208284031215610c2d57610c2c610b91565b5b5f610c3a84828501610c04565b91505092915050565b5f819050919050565b5f815f1b9050919050565b5f610c71610c6c610c6784610c43565b610c4c565b6109db565b9050919050565b610c8181610c57565b82525050565b5f819050919050565b5f60ff82169050919050565b5f610cb6610cb1610cac84610c87565b610aa8565b610c90565b9050919050565b610cc681610c9c565b82525050565b5f819050919050565b5f610cef610cea610ce584610ccc565b610c4c565b6109db565b9050919050565b610cff81610cd5565b82525050565b5f819050919050565b5f610d28610d23610d1e84610d05565b610c4c565b6109db565b9050919050565b610d3881610d0e565b82525050565b5f608082019050610d515f830187610c78565b610d5e6020830186610cbd565b610d6b6040830185610cf6565b610d786060830184610d2f565b95945050505050565b5f7fffffffffffffffffffffffffffffffffffffffff00000000000000000000000082169050919050565b5f819050919050565b610dc6610dc182610d81565b610dac565b82525050565b5f610dd78284610db5565b60148201915081905092915050565b5f80fd5b5f80fd5b7f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b610e2482610a2e565b810181811067ffffffffffffffff82111715610e4357610e42610dee565b5b80604052505050565b5f610e55610b88565b9050610e618282610e1b565b919050565b5f67ffffffffffffffff821115610e8057610e7f610dee565b5b610e8982610a2e565b9050602081019050919050565b5f610ea8610ea384610e66565b610e4c565b905082815260208101848484011115610ec457610ec3610dea565b5b610ecf84828561096d565b509392505050565b5f82601f830112610eeb57610eea610de6565b5b8151610efb848260208601610e96565b91505092915050565
b5f60208284031215610f1957610f18610b91565b5b5f82015167ffffffffffffffff811115610f3657610f35610b95565b5b610f4284828501610ed7565b91505092915050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52603260045260245ffd5b5f63ffffffff82169050919050565b610f9081610f78565b82525050565b5f60029050919050565b5f81905092915050565b5f819050919050565b610fbc816109db565b82525050565b5f610fcd8383610fb3565b60208301905092915050565b5f602082019050919050565b610fee81610f96565b610ff88184610fa0565b925061100382610faa565b805f5b8381101561103357815161101a8782610fc2565b965061102583610fd9565b925050600181019050611006565b505050505050565b5f60049050919050565b5f81905092915050565b5f819050919050565b5f602082019050919050565b61106d8161103b565b6110778184611045565b92506110828261104f565b805f5b838110156110b25781516110998782610fc2565b96506110a483611058565b925050600181019050611085565b505050505050565b5f60029050919050565b5f81905092915050565b5f819050919050565b5f7fffffffffffffffff00000000000000000000000000000000000000000000000082169050919050565b61110b816110d7565b82525050565b5f61111c8383611102565b60208301905092915050565b5f602082019050919050565b61113d816110ba565b61114781846110c4565b9250611152826110ce565b805f5b838110156111825781516111698782611111565b965061117483611128565b925050600181019050611155565b505050505050565b5f8115159050919050565b61119e8161118a565b82525050565b5f610140820190506111b85f830188610f87565b6111c56020830187610fe5565b6111d26060830186611064565b6111df60e0830185611134565b6111ed610120830184611195565b9695505050505050565b5f67ffffffffffffffff82111561121157611210610dee565b5b602082029050919050565b5f80fd5b5f61123261122d846111f7565b610e4c565b9050806020840283018581111561124c5761124b61121c565b5b835b8181101561127557806112618882610c04565b84526020840193505060208101905061124e565b5050509392505050565b5f82601f83011261129357611292610de6565b5b60026112a0848285611220565b91505092915050565b5f604082840312156112be576112bd610b91565b5b5f6112cb8482850161127f565b91505092915050565b5f6112df82856109ed565b6020820191506112ef8284610
9ed565b602082019150819050939250505056fea2646970667358221220092eb79db8ed3e8ec4659bb3d1243bdd463b22f9ef4c8107374640408b69f4a964736f6c63430008180033608060405234801561000f575f80fd5b50610c9f8061001d5f395ff3fe608060405234801561000f575f80fd5b5060043610610060575f3560e01c80633148f14f1461006457806373fe731414610094578063840f6120146100c4578063914a900a146100f4578063bf45767a14610112578063f707cb7414610130575b5f80fd5b61007e600480360381019061007991906103a8565b61014e565b60405161008b9190610407565b60405180910390f35b6100ae60048036038101906100a991906107b0565b610199565b6040516100bb91906108ce565b60405180910390f35b6100de60048036038101906100d99190610997565b6102bf565b6040516100eb9190610a58565b60405180910390f35b6100fc610333565b6040516101099190610ab7565b60405180910390f35b61011a610338565b6040516101279190610ab7565b60405180910390f35b61013861033d565b6040516101459190610ab7565b60405180910390f35b5f60405160208152602080820152602060408201528460608201528360808201528260a082015260c05160208160c0845f60055f19f161018c575f80fd5b8051925050509392505050565b6101a1610342565b6101a9610342565b5f87875f600281106101be576101bd610ad0565b5b6020020151886001600281106101d7576101d6610ad0565b5b6020020151885f600481106101ef576101ee610ad0565b5b60200201518960016004811061020857610207610ad0565b5b60200201518a60026004811061022157610220610ad0565b5b60200201518b60036004811061023a57610239610ad0565b5b60200201518b5f6002811061025257610251610ad0565b5b60200201518c60016002811061026b5761026a610ad0565b5b60200201518c60405160200161028a9a99989796959493929190610bb6565b604051602081830303815290604052905060408260d56020840160095f19fa6102b1575f80fd5b819250505095945050505050565b60605f825167ffffffffffffffff8111156102dd576102dc61046d565b5b6040519080825280601f01601f19166020018201604052801561030f5781602001600182028036833780820191505090505b5090508251806020830182602087015f60045af161032957fe5b5080915050919050565b600981565b600481565b600581565b6040518060400160405280600290602082028036833780820191505090505090565b5f604051905090565b5f80fd5b5f80fd5b5f819050919050565b6103878
1610375565b8114610391575f80fd5b50565b5f813590506103a28161037e565b92915050565b5f805f606084860312156103bf576103be61036d565b5b5f6103cc86828701610394565b93505060206103dd86828701610394565b92505060406103ee86828701610394565b9150509250925092565b61040181610375565b82525050565b5f60208201905061041a5f8301846103f8565b92915050565b5f63ffffffff82169050919050565b61043881610420565b8114610442575f80fd5b50565b5f813590506104538161042f565b92915050565b5f80fd5b5f601f19601f8301169050919050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b6104a38261045d565b810181811067ffffffffffffffff821117156104c2576104c161046d565b5b80604052505050565b5f6104d4610364565b90506104e0828261049a565b919050565b5f67ffffffffffffffff8211156104ff576104fe61046d565b5b602082029050919050565b5f80fd5b5f819050919050565b6105208161050e565b811461052a575f80fd5b50565b5f8135905061053b81610517565b92915050565b5f61055361054e846104e5565b6104cb565b9050806020840283018581111561056d5761056c61050a565b5b835b818110156105965780610582888261052d565b84526020840193505060208101905061056f565b5050509392505050565b5f82601f8301126105b4576105b3610459565b5b60026105c1848285610541565b91505092915050565b5f67ffffffffffffffff8211156105e4576105e361046d565b5b602082029050919050565b5f6106016105fc846105ca565b6104cb565b9050806020840283018581111561061b5761061a61050a565b5b835b818110156106445780610630888261052d565b84526020840193505060208101905061061d565b5050509392505050565b5f82601f83011261066257610661610459565b5b600461066f8482856105ef565b91505092915050565b5f67ffffffffffffffff8211156106925761069161046d565b5b602082029050919050565b5f7fffffffffffffffff00000000000000000000000000000000000000000000000082169050919050565b6106d18161069d565b81146106db575f80fd5b50565b5f813590506106ec816106c8565b92915050565b5f6107046106ff84610678565b6104cb565b9050806020840283018581111561071e5761071d61050a565b5b835b81811015610747578061073388826106de565b845260208401935050602081019050610720565b5050509392505050565b5f82601f83011261076557610764610459565b5b600261077
28482856106f2565b91505092915050565b5f8115159050919050565b61078f8161077b565b8114610799575f80fd5b50565b5f813590506107aa81610786565b92915050565b5f805f805f61014086880312156107ca576107c961036d565b5b5f6107d788828901610445565b95505060206107e8888289016105a0565b94505060606107f98882890161064e565b93505060e061080a88828901610751565b92505061012061081c8882890161079c565b9150509295509295909350565b5f60029050919050565b5f81905092915050565b5f819050919050565b61084f8161050e565b82525050565b5f6108608383610846565b60208301905092915050565b5f602082019050919050565b61088181610829565b61088b8184610833565b92506108968261083d565b805f5b838110156108c65781516108ad8782610855565b96506108b88361086c565b925050600181019050610899565b505050505050565b5f6040820190506108e15f830184610878565b92915050565b5f80fd5b5f67ffffffffffffffff8211156109055761090461046d565b5b61090e8261045d565b9050602081019050919050565b828183375f83830152505050565b5f61093b610936846108eb565b6104cb565b905082815260208101848484011115610957576109566108e7565b5b61096284828561091b565b509392505050565b5f82601f83011261097e5761097d610459565b5b813561098e848260208601610929565b91505092915050565b5f602082840312156109ac576109ab61036d565b5b5f82013567ffffffffffffffff8111156109c9576109c8610371565b5b6109d58482850161096a565b91505092915050565b5f81519050919050565b5f82825260208201905092915050565b5f5b83811015610a155780820151818401526020810190506109fa565b5f8484015250505050565b5f610a2a826109de565b610a3481856109e8565b9350610a448185602086016109f8565b610a4d8161045d565b840191505092915050565b5f6020820190508181035f830152610a708184610a20565b905092915050565b5f73ffffffffffffffffffffffffffffffffffffffff82169050919050565b5f610aa182610a78565b9050919050565b610ab181610a97565b82525050565b5f602082019050610aca5f830184610aa8565b92915050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52603260045260245ffd5b5f8160e01b9050919050565b5f610b1382610afd565b9050919050565b610b2b610b2682610420565b610b09565b82525050565b5f819050919050565b610b4b610b468261050e565b610b31565b82525050565b5
f819050919050565b610b6b610b668261069d565b610b51565b82525050565b5f8160f81b9050919050565b5f610b8782610b71565b9050919050565b5f610b9882610b7d565b9050919050565b610bb0610bab8261077b565b610b8e565b82525050565b5f610bc1828d610b1a565b600482019150610bd1828c610b3a565b602082019150610be1828b610b3a565b602082019150610bf1828a610b3a565b602082019150610c018289610b3a565b602082019150610c118288610b3a565b602082019150610c218287610b3a565b602082019150610c318286610b5a565b600882019150610c418285610b5a565b600882019150610c518284610b9f565b6001820191508190509b9a505050505050505050505056fea26469706673582212205a5ef194d0db70e01a3c706dd56c7be60a9e1f132cbfdb006a802283a583b9a064736f6c63430008180033" + failOnReject: true + contractAddressResultVar: "precompilesTestContractAddr" + configVars: + privateKey: "walletPrivkey" +# check precompiles +- name: generate_transaction + title: "Call precompiles test contract: test1() ecrecover" + config: + feeCap: 5000000000 # 5 gwei + gasLimit: 200000 + callData: "0x6b59084d" + expectEvents: + - { topic0: "0x02f3e89081ef16f09f0e2ffdcf090ded6d9b3873ccd94513b60b4e667132f2d5", topic1: "0x0000000000000000000000000000000000000000000000000000000000000001", data: "0x00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000014be3f73d5867365d38da69e1a90f2c4cf026100ff000000000000000000000000" } + configVars: + targetAddress: "precompilesTestContractAddr" + privateKey: "walletPrivkey" +- name: generate_transaction + title: "Call precompiles test contract: test2() sha256" + config: + feeCap: 5000000000 # 5 gwei + gasLimit: 200000 + callData: "0x66e41cb7" + expectEvents: + - { topic0: "0x02f3e89081ef16f09f0e2ffdcf090ded6d9b3873ccd94513b60b4e667132f2d5", topic1: "0x0000000000000000000000000000000000000000000000000000000000000002", data: 
"0x00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000020158760c856e5ea1ba97e2e2a456736c4bf30d964559afa6d748cf05694a636ff" } + configVars: + targetAddress: "precompilesTestContractAddr" + privateKey: "walletPrivkey" +- name: generate_transaction + title: "Call precompiles test contract: test3() ripemd160" + config: + feeCap: 5000000000 # 5 gwei + gasLimit: 200000 + callData: "0x0a8e8e01" + expectEvents: + - { topic0: "0x02f3e89081ef16f09f0e2ffdcf090ded6d9b3873ccd94513b60b4e667132f2d5", topic1: "0x0000000000000000000000000000000000000000000000000000000000000003", data: "0x00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000020224d2bd5251d8f9faa114eb0826e371d1236fda1000000000000000000000000" } + configVars: + targetAddress: "precompilesTestContractAddr" + privateKey: "walletPrivkey" +- name: generate_transaction + title: "Call precompiles test contract: test4() identity" + config: + feeCap: 5000000000 # 5 gwei + gasLimit: 200000 + callData: "0x8f0d282d" + expectEvents: + - { topic0: "0x02f3e89081ef16f09f0e2ffdcf090ded6d9b3873ccd94513b60b4e667132f2d5", topic1: "0x0000000000000000000000000000000000000000000000000000000000000004", data: "0x000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000201337133713371337133713371337133713371337133713371337133713371337" } + configVars: + targetAddress: "precompilesTestContractAddr" + privateKey: "walletPrivkey" +- name: generate_transaction + title: "Call precompiles test contract: test5() modExp" + config: + feeCap: 5000000000 # 5 gwei + gasLimit: 200000 + callData: "0x1ad7be82" + expectEvents: + - { topic0: "0x02f3e89081ef16f09f0e2ffdcf090ded6d9b3873ccd94513b60b4e667132f2d5", topic1: "0x0000000000000000000000000000000000000000000000000000000000000005", data: 
"0x00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000307943feabf46" } + configVars: + targetAddress: "precompilesTestContractAddr" + privateKey: "walletPrivkey" +- name: generate_transaction + title: "Call precompiles test contract: test9() blake2F" + config: + feeCap: 5000000000 # 5 gwei + gasLimit: 200000 + callData: "0xa7deec92" + expectEvents: + - { topic0: "0x02f3e89081ef16f09f0e2ffdcf090ded6d9b3873ccd94513b60b4e667132f2d5", topic1: "0x0000000000000000000000000000000000000000000000000000000000000009", data: "0x00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000040ba80a53f981c4d0d6a2797b69f12f6e94c212f14685ac4b74b12bb6fdbffa2d17d87c5392aab792dc252d5de4533cc9518d38aa8dbf1925ab92386edd4009923" } + configVars: + targetAddress: "precompilesTestContractAddr" + privateKey: "walletPrivkey" + + + +# deploy zk proof contract +- name: generate_transaction + title: "Deploy test zk proof contract" + config: + feeCap: 5000000000 # 5 gwei + gasLimit: 2500000 + contractDeployment: true + callData: 
"608060405234801561000f575f80fd5b5060405161001c90610079565b604051809103905ff080158015610035573d5f803e3d5ffd5b505f806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550610086565b611c1e8061078a83390190565b6106f7806100935f395ff3fe608060405234801561000f575f80fd5b5060043610610033575f3560e01c8062c80d8114610037578063e4886e5014610067575b5f80fd5b610051600480360381019061004c91906103ca565b610085565b60405161005e919061045a565b60405180910390f35b61006f610163565b60405161007c91906104ed565b60405180910390f35b5f805f8054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16631e8e1e1385856040518363ffffffff1660e01b81526004016100e1929190610637565b602060405180830381865afa1580156100fc573d5f803e3d5ffd5b505050506040513d601f19601f820116820180604052508101906101209190610696565b90507fb6030296ed6d28f593d0c257c080ef243cd60eb419e82d45d9e530c59391510281604051610151919061045a565b60405180910390a18091505092915050565b5f8054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b5f604051905090565b5f80fd5b5f80fd5b5f80fd5b5f80fd5b5f601f19601f8301169050919050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b6101e58261019f565b810181811067ffffffffffffffff82111715610204576102036101af565b5b80604052505050565b5f610216610186565b905061022282826101dc565b919050565b5f67ffffffffffffffff821115610241576102406101af565b5b61024a8261019f565b9050602081019050919050565b828183375f83830152505050565b5f61027761027284610227565b61020d565b9050828152602081018484840111156102935761029261019b565b5b61029e848285610257565b509392505050565b5f82601f8301126102ba576102b9610197565b5b81356102ca848260208601610265565b91505092915050565b5f67ffffffffffffffff8211156102ed576102ec6101af565b5b602082029050602081019050919050565b5f80fd5b5f819050919050565b61031481610302565b811461031e575f80fd5b50565b5f8135905061032f8161030b565b92915050565b5f610347610342846102d3565b61020d565b9050808382526020820190506
020840283018581111561036a576103696102fe565b5b835b81811015610393578061037f8882610321565b84526020840193505060208101905061036c565b5050509392505050565b5f82601f8301126103b1576103b0610197565b5b81356103c1848260208601610335565b91505092915050565b5f80604083850312156103e0576103df61018f565b5b5f83013567ffffffffffffffff8111156103fd576103fc610193565b5b610409858286016102a6565b925050602083013567ffffffffffffffff81111561042a57610429610193565b5b6104368582860161039d565b9150509250929050565b5f8115159050919050565b61045481610440565b82525050565b5f60208201905061046d5f83018461044b565b92915050565b5f73ffffffffffffffffffffffffffffffffffffffff82169050919050565b5f819050919050565b5f6104b56104b06104ab84610473565b610492565b610473565b9050919050565b5f6104c68261049b565b9050919050565b5f6104d7826104bc565b9050919050565b6104e7816104cd565b82525050565b5f6020820190506105005f8301846104de565b92915050565b5f81519050919050565b5f82825260208201905092915050565b5f5b8381101561053d578082015181840152602081019050610522565b5f8484015250505050565b5f61055282610506565b61055c8185610510565b935061056c818560208601610520565b6105758161019f565b840191505092915050565b5f81519050919050565b5f82825260208201905092915050565b5f819050602082019050919050565b6105b281610302565b82525050565b5f6105c383836105a9565b60208301905092915050565b5f602082019050919050565b5f6105e582610580565b6105ef818561058a565b93506105fa8361059a565b805f5b8381101561062a57815161061188826105b8565b975061061c836105cf565b9250506001810190506105fd565b5085935050505092915050565b5f6040820190508181035f83015261064f8185610548565b9050818103602083015261066381846105db565b90509392505050565b61067581610440565b811461067f575f80fd5b50565b5f815190506106908161066c565b92915050565b5f602082840312156106ab576106aa61018f565b5b5f6106b884828501610682565b9150509291505056fea2646970667358221220088f04d06811a654c4621ef9c53e2ba3ed7768ddb71492f34243795727ab3c9e64736f6c63430008180033608060405234801561000f575f80fd5b50611c018061001d5f395ff3fe608060405234801561000f575f80fd5b5060043610610029575f3560e01c80631e8e1e131461002d5
75b5f80fd5b61004760048036038101906100429190611b22565b61005d565b6040516100549190611bb2565b60405180910390f35b5f611875565b5f80600184845f805b8215610096578284059150848202860390508495508094508282028403905082935080925061006c565b60018411156100a3575f80fd5b5f8612156100b15788860195505b85965050505050505092915050565b6040518160208402830181516020830192505f5b82841015610117578185527f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f00000018451830991506020850194506020840193506100d4565b6101417f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000183610063565b91506020850394506020840393508592505b828411156101bd577f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f00000018551830990507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001845183099150808452602085039450602084039350610153565b81845250505050505050565b7f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000181106101f8575f805260205ff35b50565b61032081511461020d575f805260205ff35b61021b6102608201516101c9565b6102296102808201516101c9565b6102376102a08201516101c9565b6102456102c08201516101c9565b6102536102e08201516101c9565b6102616103008201516101c9565b61026f6103208201516101c9565b50565b5f8060208501516102c085015260208301516102e08501526020800183015161030085015260406020018301516103208501526060602001830151610340850152608060200183015161036085015260a06020018301516103808501527f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000160e06102c08601200690508060208501527f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f00000016020808601200660408501527f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001604060e0850120065f8501527f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000160c06101208501200691508160608501527f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000182820960a08501527f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000182830991507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000182830991507f30644e72e131a029b85045b68
181585d2833e84879b9709143e1f593f000000182830991507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000182830991507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000182830991508160808501527f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000180600184030106915081610260850152816102808501527f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000160e0610260850120068060c08601527f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000181820992508260e08601527f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f00000018184099250826101008601527f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f00000018184099250826101208601527f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f00000018184099250826101408601527f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f00000018184099250826101608601527f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000160806101e086012006610180860152505050505050565b60017f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001807f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f00000018360608601510301066020096102a0830152610660600261028084016100c0565b610260820151600191507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001816102a0850151096102a0840152505050565b5f7f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001807f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000160208601516102a086015109830301069050806101a0830152505050565b5f805f7f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f00000016101a08601516103208601510892507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000160208601516102c08601510991507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001610260850151830891507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f00000016040860151830891507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000160208601516102e08601510990507f30644e72e131a029b85045b68181585d2
833e84879b9709143e1f593f0000001610280850151820890507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f00000016040860151820890507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000181830991507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000160408601516102a08601510890507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000181830991507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001610300850151830991507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f00000015f860151830991507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f00000015f8601516102a08701510990507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f00000015f860151820990507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000181830891507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001827f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f00000018501030692507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000161028086015184099250826101c08601525050505050565b81518152602082015160208201525050565b6040518151815260208201516020820152825160408201526020830151606082015260408260808360066107d05a03fa80610a58575f805260205ff35b50505050565b5f604051835181526020840151602082015284604082015260408160608360076107d05a03fa915081610a93575f805260205ff35b825160408201526020830151606082015260408360808360066107d05a03fa915081610ac1575f805260205ff35b5050505050565b5f60405183815284602082015285604082015260408160608360076107d05a03fa915081610af8575f805260205ff35b825160408201526020830151606082015260408360808360066107d05a03fa915081610b26575f805260205ff35b505050505050565b5f60405183815284602082015285604082015260408360608360076107d05a03fa915081610b5e575f805260205ff35b505050505050565b6101e08201610b796101e0830182610a09565b610b8d610180840151610220840183610a5e565b505050565b5f8061022084017f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000160c0860151610260860151099250610c14837f02e54e0275445e269e1d5b593c671c5a3c778
7deb99d877420be33f7376385397f28999a24cf41cb7f5477b05417ec5f4f2d460a89caae0e07e3eaeba5ac56812584610b2e565b7f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000161028085015184099250610c8b837f09742d1fa784edca3549812b0121572ca9dbb35dd4684045d772408865fa1e4b7f0ed42b61c48fffe8fb7797ad3542b49fc39a4f2cc51d40babd0e31362ac7e6d884610ac8565b7f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000160c0860151610280860151099250610d06837f2a375c17bc0f07607aba1517c4c844c2e824e02ce712b253ba48b5258f9421707f2677e46f1525ed41b063ecfc461eb535202c78c510191bab9456e0c064da455484610ac8565b7f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000160c08601516102a0860151099250610d81837f08c64ec72e440f97910f7526a262f94ad8ac64b8d327d516e7fdbfe6f9e0005e7f024e03e3f4293766318b713ef6eb4b4d2e4a80e8d9d1329256c51a494d1f5da084610ac8565b60c08501519250610dd4837f2b62733ca9c47ebbcae7d4ceb2db867887ae2ef66118980cc79874ca1d0e22147f058f9d58ff1bdff84c5c0f49b4a39b5dedd652e91514acbf4b0ecbbdfc25786884610ac8565b7f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000160a08601516102608601510892507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f00000016040860151840892507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000160a086015160020991507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001610280850151830891507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f00000016040860151830891507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000182840992507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000160a086015160030991507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f00000016102a0850151830891507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f00000016040860151830891507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000182840992507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f00000015f860151840992507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000160c086015
1840992507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f00000015f8601516102a08701510991507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f00000015f860151830991507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000160c0860151830991507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000182840892507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001610180860151840892506110ac8360e0860183610a5e565b7f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f00000016102c085015160208701510992507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001610260850151840892507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f00000016040860151840892507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f00000016102e085015160208701510991507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001610280850151830891507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f00000016040860151830891507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000182840992507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f00000015f860151840992507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000160c0860151840992507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f00000016020860151840992507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001610300850151840992507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001837f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000103069250611315837f24c147567dafd4191e1a6b6a0454ba621801ab61e998a38939bdbb64d664760e7f29b6c1b3e9065435d429d078311433f67a86ea5361159e8f740541899c94c0a884610ac8565b611323610120850182610a1b565b6080850151925061133983610160860183610a5e565b7f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001838409925061136e836101a0860183610a5e565b61138060e08601516020860183610a5e565b6113936101008601516060860183610a5e565b6113a661012086015160a0860183610a5e565b6113f76101408601517
f0e65e2f35d97f40565921b1fdef35f329fa140b6149b718a493217dfa926d9477f2d6c9d3011d9e5b219e4005594f20926562891671887abea906656470d3f1e2184610ac8565b6114486101608601517f0f555be3adf1604edc317b91425104be179f4152774a5b8c0941aeb92ac4abf87f1dbde0c37609ac6a5ab8f393f5c494df1ff84e28c46e93fc04d69f6f0e0d87bc84610ac8565b6101c085015192507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f00000018060c087015161032087015109840892507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f00000018060e087015161026087015109840892507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f00000018061010087015161028087015109840892507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001806101208701516102a087015109840892507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001806101408701516102c087015109840892507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001806101608701516102e087015109840892507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f00000018061018087015161030087015109840892507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001837f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000103069250611608836002600184610ac8565b6060850151925061161e836101e0860183610a5e565b7f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000160608601516101808701510992507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f00000017f09c532c6306b93d29678200d47c0b2a99c18d51b838eeb1d3eed4c533bb512d0840992506116a283610220860183610a5e565b5050505050565b5f6040516101e0830151815260206101e08401015160208201527f26186a2d65ee4d2f9c9a5b91f86597d35f192cd120caf7e935d8443d1938e23d60408201527f30441fd1b5d3370482c42152a8899027716989a6996c2535bc9f7fee8aaef79e60608201527f1970ea81dd6992adfbc571effb03503adbbb6a857f578403c6c40e22d65b3c0260808201527f054793348f12c0cf5622c340573cb277586319de359ab9389778f689786b1e4860a082015261022083015160c08201526020610220840101517f30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47817f30644e72e13
1a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47030690508060e08301527f198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c26101008301527f1800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed6101208301527f090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b6101408301527f12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa6101608301526020826101808460086107d05a03fa825181169350505050919050565b6040516102c08101604052611889846101fb565b611894838286610272565b61189d816105fa565b6118a7838261069e565b6118b18185610700565b6118bb8185610b66565b6118c58185610b92565b6118ce816116a9565b6102c08203604052805f5260205ff35b5f604051905090565b5f80fd5b5f80fd5b5f80fd5b5f80fd5b5f601f19601f8301169050919050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b61193d826118f7565b810181811067ffffffffffffffff8211171561195c5761195b611907565b5b80604052505050565b5f61196e6118de565b905061197a8282611934565b919050565b5f67ffffffffffffffff82111561199957611998611907565b5b6119a2826118f7565b9050602081019050919050565b828183375f83830152505050565b5f6119cf6119ca8461197f565b611965565b9050828152602081018484840111156119eb576119ea6118f3565b5b6119f68482856119af565b509392505050565b5f82601f830112611a1257611a116118ef565b5b8135611a228482602086016119bd565b91505092915050565b5f67ffffffffffffffff821115611a4557611a44611907565b5b602082029050602081019050919050565b5f80fd5b5f819050919050565b611a6c81611a5a565b8114611a76575f80fd5b50565b5f81359050611a8781611a63565b92915050565b5f611a9f611a9a84611a2b565b611965565b90508083825260208201905060208402830185811115611ac257611ac1611a56565b5b835b81811015611aeb5780611ad78882611a79565b845260208401935050602081019050611ac4565b5050509392505050565b5f82601f830112611b0957611b086118ef565b5b8135611b19848260208601611a8d565b91505092915050565b5f8060408385031215611b3857611b376118e7565b5b5f83013567ffffffffffffffff811115611b5557611b546118eb565b5b611b61858286016119fe565b925050602083013567ffffffffffffffff811115611b8257611b816118eb5
65b5b611b8e85828601611af5565b9150509250929050565b5f8115159050919050565b611bac81611b98565b82525050565b5f602082019050611bc55f830184611ba3565b9291505056fea2646970667358221220c386006c749b86961627b412b0eecae599db372813a409cbbf4078a254aa84de64736f6c63430008180033" + failOnReject: true + contractAddressResultVar: "zkProofContractAddr" + configVars: + privateKey: "walletPrivkey" +# check proof +- name: generate_transaction + title: "Call test zk proof verification" + config: + feeCap: 5000000000 # 5 gwei + gasLimit: 1000000 + callData: "0x00c80d81000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000003800000000000000000000000000000000000000000000000000000000000000320001e50e3518a0332a2715e62492b3e7e53c30ccb4e019227bdee4342d42ea18e29e4035a088e27c0379bc0b7fa298d6e111263aa9532e4b04e084afc492c0a5c0ac16f7e6b5a4365d9cdc8e722b8ba275e414a167965a1275505f2c0557a448526253d37b2b56fbdc7a653cb0b751dbe4ed2b0864ecbdcdd3f912761bdcbfcb424bc4fa98ded41603f82fc9378708971b2b249c9e4068148e009ba904fc6181104fa3e91c821d2c004f30df7d127d3df56561926f0bfa9496a55840e7ab8aa8a23f2b298e0bb72df59936c4184d5a865211b28771b4243f57797409307d0b0250e7f3300b3b96d1f246db13c412a94163ec30f363c03ff9b6b287c484f00acdf21c454551599290e96d1a50c80ec4fac99c3edc18f1e63ca94cf0fde08d9cb2b27d69c5a1fec263a669aa8f0d21213ba6711c99fc4f49f8161eab74fb21be46e1585b25bfd0a1d9494d07484388242a8b96c9222e9df99512a1f5b1f4a7812ae0374769e26f20c8af66ba9bb2a0ab95b4bcb4bbd9a2a8b89956fb72ec817b6c21d4bb305780ced6b622e28b8ce816108a22d246a9d1b7d26dc5f3bb534bf4f3f0139139999d43aeeecf1ffa3016903093c3ca306a4aec045fb689c16c0b43b63047ba614c3ab41ac5db347b2070e64e727f671a987a3713c69d1eaf0e3acc96a06f49359839708466ca6cfecb4f636adc0fa4483a132276be8a9cce5a17899f1002d4015bd025ab6417fdd6f61b2fdf2056ae5e8c6492e4ddf084d96acd4014f2f1436fa9d696a63a9bf010dbb6f382c98e18b9bbf14de8d7a3e0ad6aec6b4b425eab9097a5a920803f039ded725975f2678261918e187bed0b38aef9647013e2c6425f639f1b070031961d7d2200508dcbf6e467f4dcc9
a421352fe8c1f77b219cf3b89fd8224ad0f9806ed47ee51d19954c1281e04ea56dbc356eee07961171ea4b98f6bbca1b111a51d87fe368190a19d90f26dbb50792880413e9c6142b81bb8da3cbb6b8910466b16f1be9e01b91122a59afe236cb36bb5c5deb96cff721f0a174b28049774af6ff38af5ee56c5f4290318fb6c657d68496dc3a0ea7f592cce39822655a0b29026f1522b98470217ed6b7696f227a5c125eec5721c53f600000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000008" + configVars: + targetAddress: "zkProofContractAddr" + privateKey: "walletPrivkey" + - name: run_tasks_concurrent title: "Check chain stability" diff --git a/static_files/assertoor-config/tests/blob-transactions-test.yaml b/static_files/assertoor-config/tests/blob-transactions-test.yaml index 2ba810cd9..1627368ad 100644 --- a/static_files/assertoor-config/tests/blob-transactions-test.yaml +++ b/static_files/assertoor-config/tests/blob-transactions-test.yaml @@ -12,16 +12,17 @@ tasks: minClientCount: 1 # check if all client pairs propose blocks with blob transactions -- name: run_tasks_concurrent +- name: run_task_background title: "Check if all clients propose blocks with blob transactions" config: - succeedTaskCount: 1 - tasks: - - name: generate_blob_transactions + onBackgroundComplete: fail + backgroundTask: + name: generate_blob_transactions title: "Generate 2 blob transactions per block" config: childWallets: 5 - limitPending: 2 + walletSeed: "blob-test" + limitPending: 4 limitPerBlock: 2 randomTarget: true amount: 1000000 @@ -29,10 +30,10 @@ tasks: blobSidecars: 1 configVars: privateKey: "walletPrivkey" - - - name: run_task_matrix + foregroundTask: + name: run_task_matrix title: "Check transaction inclusion with all client pairs" - timeout: 1h + timeout: 30m configVars: matrixValues: "validatorPairNames" config: @@ -40,11 +41,11 @@ tasks: matrixVar: "validatorPairName" task: name: check_consensus_block_proposals - title: "Wait for block proposal with >= 2 blobs from ${validatorPairName}" + title: 
"Wait for block proposal with >= 1 blobs from ${validatorPairName}" configVars: validatorNamePattern: "validatorPairName" config: - minBlobCount: 2 + minBlobCount: 1 - name: "sleep" title: "Wait 30sec to clear up pending transactions" config: @@ -63,16 +64,17 @@ tasks: title: "Check if blob transactions can be sent via ${clientPairName}" config: tasks: - - name: run_tasks_concurrent + - name: run_task_background title: "Check if blob transactions can be sent via ${clientPairName}" config: - succeedTaskCount: 1 - tasks: - - name: generate_blob_transactions + onBackgroundComplete: fail + backgroundTask: + name: generate_blob_transactions title: "Generate 2 blob transactions per block and send via ${clientPairName}" config: childWallets: 5 - limitPending: 2 + walletSeed: "blob-test" + limitPending: 4 limitPerBlock: 2 randomTarget: true amount: 1000000 @@ -81,10 +83,11 @@ tasks: configVars: privateKey: "walletPrivkey" clientPattern: "clientPairName" - - name: check_consensus_block_proposals - title: "Wait for block proposal with >= 2 blobs" + foregroundTask: + name: check_consensus_block_proposals + title: "Wait for block proposal with >= 1 blobs" config: - minBlobCount: 2 + minBlobCount: 1 - name: "sleep" title: "Wait 30sec to clear up pending transactions" config: diff --git a/static_files/assertoor-config/tests/eoa-transactions-test.yaml b/static_files/assertoor-config/tests/eoa-transactions-test.yaml index e05c37246..2abb9bc16 100644 --- a/static_files/assertoor-config/tests/eoa-transactions-test.yaml +++ b/static_files/assertoor-config/tests/eoa-transactions-test.yaml @@ -12,12 +12,12 @@ tasks: minClientCount: 1 # check if all client pairs propose blocks with legacy EOA transactions -- name: run_tasks_concurrent +- name: run_task_background title: "Check if all clients propose blocks with legacy EOA transactions" config: - succeedTaskCount: 1 - tasks: - - name: generate_eoa_transactions + onBackgroundComplete: fail + backgroundTask: + name: 
generate_eoa_transactions title: "Generate 10 EOA transaction per block" config: childWallets: 10 @@ -30,9 +30,10 @@ tasks: configVars: privateKey: "walletPrivkey" - - name: run_task_matrix + foregroundTask: + name: run_task_matrix title: "Check transaction inclusion with all client pairs" - timeout: 1h + timeout: 30m configVars: matrixValues: "validatorPairNames" config: @@ -63,12 +64,12 @@ tasks: title: "Check if legacy EOA transactions can be sent via ${clientPairName}" config: tasks: - - name: run_tasks_concurrent + - name: run_task_background title: "Check if legacy EOA transactions can be sent via ${clientPairName}" config: - succeedTaskCount: 1 - tasks: - - name: generate_eoa_transactions + onBackgroundComplete: fail + backgroundTask: + name: generate_eoa_transactions title: "Generate 10 EOA transaction per block and send via ${clientPairName}" config: childWallets: 10 @@ -81,7 +82,9 @@ tasks: configVars: privateKey: "walletPrivkey" clientPattern: "clientPairName" - - name: check_consensus_block_proposals + + foregroundTask: + name: check_consensus_block_proposals title: "Wait for block proposal with >= 5 transactions" config: minTransactionCount: 5 @@ -91,12 +94,12 @@ tasks: duration: 30s # check if all client pairs propose blocks with dynfee EOA transactions -- name: run_tasks_concurrent +- name: run_task_background title: "Check if all clients propose blocks with dynfee EOA transactions" config: - succeedTaskCount: 1 - tasks: - - name: generate_eoa_transactions + onBackgroundComplete: fail + backgroundTask: + name: generate_eoa_transactions title: "Generate 10 EOA transaction per block" config: childWallets: 10 @@ -108,9 +111,10 @@ tasks: configVars: privateKey: "walletPrivkey" - - name: run_task_matrix + foregroundTask: + name: run_task_matrix title: "Check transaction inclusion with all client pairs" - timeout: 1h + timeout: 30m configVars: matrixValues: "validatorPairNames" config: @@ -141,12 +145,12 @@ tasks: title: "Check if dynfee EOA transactions 
can be sent via ${clientPairName}" config: tasks: - - name: run_tasks_concurrent + - name: run_task_background title: "Check if dynfee EOA transactions can be sent via ${clientPairName}" config: - succeedTaskCount: 1 - tasks: - - name: generate_eoa_transactions + onBackgroundComplete: fail + backgroundTask: + name: generate_eoa_transactions title: "Generate 10 EOA transaction per block and send via ${clientPairName}" config: childWallets: 10 @@ -158,7 +162,8 @@ tasks: configVars: privateKey: "walletPrivkey" clientPattern: "clientPairName" - - name: check_consensus_block_proposals + foregroundTask: + name: check_consensus_block_proposals title: "Wait for block proposal with >= 5 transactions" config: minTransactionCount: 5 diff --git a/static_files/assertoor-config/tests/validator-lifecycle-test.yaml b/static_files/assertoor-config/tests/validator-lifecycle-test.yaml index 4342f56b9..ce3d8446c 100644 --- a/static_files/assertoor-config/tests/validator-lifecycle-test.yaml +++ b/static_files/assertoor-config/tests/validator-lifecycle-test.yaml @@ -19,6 +19,7 @@ tasks: title: "Generate 300 deposits" config: limitTotal: 300 + limitPerSlot: 20 depositContract: "0x4242424242424242424242424242424242424242" configVars: walletPrivkey: "walletPrivkey" @@ -62,11 +63,12 @@ tasks: minUnfinalizedEpochs: 5 # check if all client pairs propose blocks with bls changes during un-finality -- name: run_tasks_concurrent +- name: run_task_background title: "Check if all clients propose blocks with BLS changes during un-finality" config: - tasks: - - name: generate_bls_changes + onBackgroundComplete: failOrIgnore + backgroundTask: + name: generate_bls_changes title: "Generate 50 BLS changes (1 bls change per slot)" config: limitTotal: 50 @@ -76,7 +78,8 @@ tasks: configVars: mnemonic: "validatorMnemonic" - - name: run_task_matrix + foregroundTask: + name: run_task_matrix title: "Check bls change inclusion with all client pairs" timeout: 1h configVars: @@ -93,11 +96,12 @@ tasks: 
minBlsChangeCount: 1 # check if all client pairs propose blocks with exits during un-finality -- name: run_tasks_concurrent +- name: run_task_background title: "Check if all clients propose blocks with exits during un-finality" config: - tasks: - - name: generate_exits + onBackgroundComplete: failOrIgnore + backgroundTask: + name: generate_exits title: "Generate 50 Voluntary Exits (1 exit per slot)" config: limitTotal: 50 @@ -106,7 +110,8 @@ tasks: configVars: mnemonic: "validatorMnemonic" - - name: run_task_matrix + foregroundTask: + name: run_task_matrix title: "Check exit inclusion with all client pairs" timeout: 1h configVars: @@ -123,11 +128,12 @@ tasks: minExitCount: 1 # check if all client pairs propose blocks with attester slashings -- name: run_tasks_concurrent +- name: run_task_background title: "Check if all clients propose blocks with attester slashings during un-finality" config: - tasks: - - name: generate_slashings + onBackgroundComplete: failOrIgnore + backgroundTask: + name: generate_slashings title: "Generate 1 attester slashing per slot" config: slashingType: "attester" @@ -138,7 +144,8 @@ tasks: configVars: mnemonic: "validatorMnemonic" - - name: run_task_matrix + foregroundTask: + name: run_task_matrix title: "Check slashing inclusion with all client pairs" timeout: 1h configVars: @@ -155,11 +162,12 @@ tasks: minAttesterSlashingCount: 1 # check if all client pairs propose blocks with proposer slashings -- name: run_tasks_concurrent +- name: run_task_background title: "Check if all clients propose blocks with proposer slashings during un-finality" config: - tasks: - - name: generate_slashings + onBackgroundComplete: failOrIgnore + backgroundTask: + name: generate_slashings title: "Generate 1 proposer slashing per slot" config: slashingType: "proposer" @@ -170,7 +178,8 @@ tasks: configVars: mnemonic: "validatorMnemonic" - - name: run_task_matrix + foregroundTask: + name: run_task_matrix title: "Check slashing inclusion with all client pairs" 
timeout: 1h configVars: @@ -200,11 +209,12 @@ tasks: maxUnfinalizedEpochs: 4 # check if all client pairs propose blocks with bls changes during finality -- name: run_tasks_concurrent +- name: run_task_background title: "Check if all clients propose blocks with BLS changes during finality" config: - tasks: - - name: generate_bls_changes + onBackgroundComplete: failOrIgnore + backgroundTask: + name: generate_bls_changes title: "Generate 1 bls change per slot" config: startIndex: 150 @@ -215,7 +225,8 @@ tasks: configVars: mnemonic: "validatorMnemonic" - - name: run_task_matrix + foregroundTask: + name: run_task_matrix title: "Check bls change inclusion with all client pairs" timeout: 1h configVars: @@ -232,11 +243,12 @@ tasks: minBlsChangeCount: 1 # check if all client pairs propose blocks with exits during finality -- name: run_tasks_concurrent +- name: run_task_background title: "Check if all clients propose blocks with exits during finality" config: - tasks: - - name: generate_exits + onBackgroundComplete: failOrIgnore + backgroundTask: + name: generate_exits title: "Generate 1 exit per slot" config: startIndex: 150 @@ -246,7 +258,8 @@ tasks: configVars: mnemonic: "validatorMnemonic" - - name: run_task_matrix + foregroundTask: + name: run_task_matrix title: "Check exit inclusion with all client pairs" timeout: 1h configVars: @@ -263,11 +276,12 @@ tasks: minExitCount: 1 # check if all client pairs propose blocks with attester slashings -- name: run_tasks_concurrent +- name: run_task_background title: "Check if all clients propose blocks with attester slashings during finality" config: - tasks: - - name: generate_slashings + onBackgroundComplete: failOrIgnore + backgroundTask: + name: generate_slashings title: "Generate 1 attester slashing per slot" config: slashingType: "attester" @@ -278,7 +292,8 @@ tasks: configVars: mnemonic: "validatorMnemonic" - - name: run_task_matrix + foregroundTask: + name: run_task_matrix title: "Check slashing inclusion with all client 
pairs" timeout: 1h configVars: @@ -295,11 +310,12 @@ tasks: minAttesterSlashingCount: 1 # check if all client pairs propose blocks with proposer slashings -- name: run_tasks_concurrent +- name: run_task_background title: "Check if all clients propose blocks with proposer slashings during finality" config: - tasks: - - name: generate_slashings + onBackgroundComplete: failOrIgnore + backgroundTask: + name: generate_slashings title: "Generate 1 proposer slashing per slot" config: slashingType: "proposer" @@ -310,7 +326,8 @@ tasks: configVars: mnemonic: "validatorMnemonic" - - name: run_task_matrix + foregroundTask: + name: run_task_matrix title: "Check slashing inclusion with all client pairs" timeout: 1h configVars: From 631eaf3e621c90d5b546a1c005d8e31e06263aa4 Mon Sep 17 00:00:00 2001 From: Anton Date: Fri, 9 Feb 2024 10:28:45 +0200 Subject: [PATCH 15/33] feat: Add suave-enabled geth support (#489) Very minor addition to enable running suave-enabled geth (a.k.a. `suave-execution-geth`) instead of the stock geths. 
--- src/el/geth/geth_launcher.star | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/el/geth/geth_launcher.star b/src/el/geth/geth_launcher.star index 727818f7c..7b0ab6941 100644 --- a/src/el/geth/geth_launcher.star +++ b/src/el/geth/geth_launcher.star @@ -66,6 +66,7 @@ VERBOSITY_LEVELS = { } BUILDER_IMAGE_STR = "builder" +SUAVE_ENABLED_GETH_IMAGE_STR = "suave" def launch( @@ -293,6 +294,13 @@ def get_config( if "--ws.api" in arg: cmd[index] = "--ws.api=admin,engine,net,eth,web3,debug,mev,flashbots" + if SUAVE_ENABLED_GETH_IMAGE_STR in image: + for index, arg in enumerate(cmd): + if "--http.api" in arg: + cmd[index] = "--http.api=admin,engine,net,eth,web3,debug,suavex" + if "--ws.api" in arg: + cmd[index] = "--ws.api=admin,engine,net,eth,web3,debug,suavex" + if network == constants.NETWORK_NAME.kurtosis: if len(existing_el_clients) > 0: cmd.append( From b788b18eead00622ab960a4853c8e24b09c16a26 Mon Sep 17 00:00:00 2001 From: Barnabas Busa Date: Fri, 9 Feb 2024 20:25:09 +0100 Subject: [PATCH 16/33] feat: enable shadowforking (#475) Co-authored-by: pk910 Co-authored-by: parithosh --- .../holesky-shadowfork-verkle.yaml_norun | 17 ++ .github/tests/holesky-shadowfork.yaml_norun | 12 ++ ...devnet-3.yaml => verkle-gen-devnet-4.yaml} | 6 +- .github/tests/verkle-gen.yaml | 4 +- .github/tests/verkle.yaml | 4 +- README.md | 33 +++- network_params.yaml | 9 + src/cl/lighthouse/lighthouse_launcher.star | 13 +- src/cl/lodestar/lodestar_launcher.star | 13 +- src/cl/nimbus/nimbus_launcher.star | 13 +- src/cl/prysm/prysm_launcher.star | 13 +- src/cl/teku/teku_launcher.star | 20 ++- src/el/besu/besu_launcher.star | 13 +- src/el/erigon/erigon_launcher.star | 18 +- src/el/ethereumjs/ethereumjs_launcher.star | 8 +- src/el/geth/geth_launcher.star | 48 ++++-- src/el/nethermind/nethermind_launcher.star | 20 ++- src/el/reth/reth_launcher.star | 9 +- src/package_io/constants.star | 16 ++ src/package_io/input_parser.star | 29 +++- src/participant_network.star | 155 
++++++++++++++++-- .../el_cl_genesis/el_cl_genesis_data.star | 4 + .../el_cl_genesis_generator.star | 40 ++++- src/shared_utils/shared_utils.star | 16 ++ src/static_files/static_files.star | 2 + .../el-cl/values.env.tmpl | 3 + 26 files changed, 419 insertions(+), 119 deletions(-) create mode 100644 .github/tests/holesky-shadowfork-verkle.yaml_norun create mode 100644 .github/tests/holesky-shadowfork.yaml_norun rename .github/tests/{verkle-gen-devnet-3.yaml => verkle-gen-devnet-4.yaml} (62%) diff --git a/.github/tests/holesky-shadowfork-verkle.yaml_norun b/.github/tests/holesky-shadowfork-verkle.yaml_norun new file mode 100644 index 000000000..84c79cad6 --- /dev/null +++ b/.github/tests/holesky-shadowfork-verkle.yaml_norun @@ -0,0 +1,17 @@ +participants: + - el_client_type: geth + el_client_image: ethpandaops/geth:transition-post-genesis-04b0304 + cl_client_type: lighthouse + cl_client_image: ethpandaops/lighthouse:verkle-trees-capella-2ffb8a9 + - el_client_type: geth + el_client_image: ethpandaops/geth:transition-post-genesis-04b0304 + cl_client_type: lodestar + cl_client_image: ethpandaops/lodestar:g11tech-verge-815364b +network_params: + electra_fork_epoch: 1 + network: holesky-shadowfork-verkle + genesis_delay: 300 +additional_services: + - dora +snooper_enabled: true +persistent: true diff --git a/.github/tests/holesky-shadowfork.yaml_norun b/.github/tests/holesky-shadowfork.yaml_norun new file mode 100644 index 000000000..f26bd9c8c --- /dev/null +++ b/.github/tests/holesky-shadowfork.yaml_norun @@ -0,0 +1,12 @@ +participants: + - el_client_type: geth + el_client_image: ethereum/client-go:v1.13.11 + cl_client_type: teku + cl_client_image: consensys/teku:24.1.1 +network_params: + dencun_fork_epoch: 1 + network: holesky-shadowfork +additional_services: + - dora +snooper_enabled: true +persistent: true diff --git a/.github/tests/verkle-gen-devnet-3.yaml b/.github/tests/verkle-gen-devnet-4.yaml similarity index 62% rename from .github/tests/verkle-gen-devnet-3.yaml 
rename to .github/tests/verkle-gen-devnet-4.yaml index 818ae0bcd..2416a7a92 100644 --- a/.github/tests/verkle-gen-devnet-3.yaml +++ b/.github/tests/verkle-gen-devnet-4.yaml @@ -1,13 +1,13 @@ participants: - el_client_type: geth - el_client_image: ethpandaops/geth:kaustinen-with-shapella-6d7b22c + el_client_image: ethpandaops/geth:kaustinen-with-shapella-0b110bd cl_client_type: lighthouse cl_client_image: ethpandaops/lighthouse:verkle-trees-capella-2ffb8a9 count: 2 - el_client_type: geth - el_client_image: ethpandaops/geth:kaustinen-with-shapella-6d7b22c + el_client_image: ethpandaops/geth:kaustinen-with-shapella-0b110bd cl_client_type: lodestar cl_client_image: ethpandaops/lodestar:g11tech-verge-815364b network_params: - network: verkle-gen-devnet-3 + network: verkle-gen-devnet-4 diff --git a/.github/tests/verkle-gen.yaml b/.github/tests/verkle-gen.yaml index 5164562da..bc50e1f07 100644 --- a/.github/tests/verkle-gen.yaml +++ b/.github/tests/verkle-gen.yaml @@ -1,11 +1,11 @@ participants: - el_client_type: geth - el_client_image: ethpandaops/geth:kaustinen-with-shapella-6d7b22c + el_client_image: ethpandaops/geth:kaustinen-with-shapella-0b110bd cl_client_type: lighthouse cl_client_image: ethpandaops/lighthouse:verkle-trees-capella-2ffb8a9 count: 2 - el_client_type: geth - el_client_image: ethpandaops/geth:kaustinen-with-shapella-6d7b22c + el_client_image: ethpandaops/geth:kaustinen-with-shapella-0b110bd cl_client_type: lodestar cl_client_image: ethpandaops/lodestar:g11tech-verge-815364b count: 2 diff --git a/.github/tests/verkle.yaml b/.github/tests/verkle.yaml index dc9de4217..fbcfe87c0 100644 --- a/.github/tests/verkle.yaml +++ b/.github/tests/verkle.yaml @@ -1,11 +1,11 @@ participants: - el_client_type: geth - el_client_image: ethpandaops/geth:transition-post-genesis-1d80ebd + el_client_image: ethpandaops/geth:transition-post-genesis-04b0304 cl_client_type: lighthouse cl_client_image: ethpandaops/lighthouse:verkle-trees-capella-2ffb8a9 count: 2 - el_client_type: 
geth - el_client_image: ethpandaops/geth:transition-post-genesis-1d80ebd + el_client_image: ethpandaops/geth:transition-post-genesis-04b0304 cl_client_type: lodestar cl_client_image: ethpandaops/lodestar:g11tech-verge-815364b network_params: diff --git a/README.md b/README.md index 1c5a832cd..3bd75fcf1 100644 --- a/README.md +++ b/README.md @@ -64,6 +64,27 @@ To mitigate these issues, you can use the `el_client_volume_size` and `cl_client For optimal performance, we recommend using a cloud provider that allows you to provision Kubernetes clusters with fast persistent storage or self hosting your own Kubernetes cluster with fast persistent storage. +### Shadowforking +In order to enable shadowfork capabilities, you can use the `network_params.network` flag. The expected value is the name of the network you want to shadowfork followed by `-shadowfork`. Please note that `persistent` configuration parameter has to be enabled for shadowforks to work! Current limitation on k8s is it is only working on a single node cluster. For example, to shadowfork the Holesky testnet, you can use the following command: +```yaml +... +network_params: + network: "holesky-shadowfork" +persistent: true +... +``` + +##### Shadowforking custom verkle networks +In order to enable shadowfork capabilities for verkle networks, you need to define electra and mention verkle in the network name after shadowfork. +```yaml +... +network_params: + electra_fork_epoch: 1 + network: "holesky-shadowfork-verkle" +persistent: true +... +``` + #### Taints and tolerations It is possible to run the package on a Kubernetes cluster with taints and tolerations. This is done by adding the tolerations to the `tolerations` field in the `network_params.yaml` file. 
For example: ```yaml @@ -218,7 +239,7 @@ participants: # effect: "NoSchedule" # toleration_seconds: 3600 # Defaults to empty - el_tolerations: [] + cl_tolerations: [] # A list of tolerations that will be passed to the validator container # Only works with Kubernetes @@ -362,7 +383,7 @@ network_params: # Defaults to 2048 eth1_follow_distance: 2048 - # The epoch at which the capella and deneb forks are set to occur. + # The epoch at which the capella/deneb/electra forks are set to occur. capella_fork_epoch: 0 deneb_fork_epoch: 500 electra_fork_epoch: null @@ -373,6 +394,14 @@ network_params: # You can sync any devnet by setting this to the network name (e.g. "dencun-devnet-12", "verkle-gen-devnet-2") network: "kurtosis" + # The number of epochs to wait validators to be able to withdraw + # Defaults to 256 epochs ~27 hours + min_validator_withdrawability_delay: 256 + + # The period of the shard committee + # Defaults to 256 epoch ~27 hours + shard_committee_period: 256 + # Configuration place for transaction spammer - https:#github.com/MariusVanDerWijden/tx-fuzz tx_spammer_params: # A list of optional extra params that will be passed to the TX Spammer container for modifying its behaviour diff --git a/network_params.yaml b/network_params.yaml index 8f533ac35..2c025b322 100644 --- a/network_params.yaml +++ b/network_params.yaml @@ -4,9 +4,13 @@ participants: el_client_log_level: "" el_extra_params: [] el_extra_labels: {} + el_tolerations: [] cl_client_type: lighthouse cl_client_image: sigp/lighthouse:latest cl_client_log_level: "" + cl_tolerations: [] + validator_tolerations: [] + tolerations: [] beacon_extra_params: [] beacon_extra_labels: {} validator_extra_params: [] @@ -50,6 +54,10 @@ network_params: capella_fork_epoch: 0 deneb_fork_epoch: 4 electra_fork_epoch: null + network: kurtosis + min_validator_withdrawability_delay: 256 + shard_committee_period: 256 + additional_services: - tx_spammer - blob_spammer @@ -78,3 +86,4 @@ mev_params: 
grafana_additional_dashboards: [] persistent: false xatu_sentry_enabled: false +global_tolerations: [] diff --git a/src/cl/lighthouse/lighthouse_launcher.star b/src/cl/lighthouse/lighthouse_launcher.star index ed7096e84..53b68d9ff 100644 --- a/src/cl/lighthouse/lighthouse_launcher.star +++ b/src/cl/lighthouse/lighthouse_launcher.star @@ -138,13 +138,7 @@ def launch( cl_tolerations, participant_tolerations, global_tolerations ) - network_name = ( - "devnets" - if launcher.network != "kurtosis" - and launcher.network != "ephemery" - and launcher.network not in constants.PUBLIC_NETWORKS - else launcher.network - ) + network_name = shared_utils.get_network_name(launcher.network) bn_min_cpu = int(bn_min_cpu) if int(bn_min_cpu) > 0 else BEACON_MIN_CPU bn_max_cpu = ( @@ -381,7 +375,10 @@ def get_beacon_config( if network not in constants.PUBLIC_NETWORKS: cmd.append("--testnet-dir=" + constants.GENESIS_CONFIG_MOUNT_PATH_ON_CONTAINER) - if network == constants.NETWORK_NAME.kurtosis: + if ( + network == constants.NETWORK_NAME.kurtosis + or constants.NETWORK_NAME.shadowfork in network + ): if boot_cl_client_ctxs != None: cmd.append( "--boot-nodes=" diff --git a/src/cl/lodestar/lodestar_launcher.star b/src/cl/lodestar/lodestar_launcher.star index 243218f4b..f12b6c473 100644 --- a/src/cl/lodestar/lodestar_launcher.star +++ b/src/cl/lodestar/lodestar_launcher.star @@ -116,13 +116,7 @@ def launch( cl_tolerations, participant_tolerations, global_tolerations ) - network_name = ( - "devnets" - if launcher.network != "kurtosis" - and launcher.network != "ephemery" - and launcher.network not in constants.PUBLIC_NETWORKS - else launcher.network - ) + network_name = shared_utils.get_network_name(launcher.network) bn_min_cpu = int(bn_min_cpu) if int(bn_min_cpu) > 0 else BEACON_MIN_CPU bn_max_cpu = ( @@ -349,7 +343,10 @@ def get_beacon_config( + constants.GENESIS_CONFIG_MOUNT_PATH_ON_CONTAINER + "/genesis.ssz" ) - if network == constants.NETWORK_NAME.kurtosis: + if ( + network == 
constants.NETWORK_NAME.kurtosis + or constants.NETWORK_NAME.shadowfork in network + ): if bootnode_contexts != None: cmd.append( "--bootnodes=" diff --git a/src/cl/nimbus/nimbus_launcher.star b/src/cl/nimbus/nimbus_launcher.star index 8148725c2..0c066d917 100644 --- a/src/cl/nimbus/nimbus_launcher.star +++ b/src/cl/nimbus/nimbus_launcher.star @@ -152,13 +152,7 @@ def launch( cl_tolerations, participant_tolerations, global_tolerations ) - network_name = ( - "devnets" - if launcher.network != "kurtosis" - and launcher.network != "ephemery" - and launcher.network not in constants.PUBLIC_NETWORKS - else launcher.network - ) + network_name = shared_utils.get_network_name(launcher.network) bn_min_cpu = int(bn_min_cpu) if int(bn_min_cpu) > 0 else BEACON_MIN_CPU bn_max_cpu = ( @@ -394,7 +388,10 @@ def get_beacon_config( + constants.GENESIS_CONFIG_MOUNT_PATH_ON_CONTAINER + "/bootstrap_nodes.txt" ) - if network == constants.NETWORK_NAME.kurtosis: + if ( + network == constants.NETWORK_NAME.kurtosis + or constants.NETWORK_NAME.shadowfork in network + ): if bootnode_contexts == None: cmd.append("--subscribe-all-subnets") else: diff --git a/src/cl/prysm/prysm_launcher.star b/src/cl/prysm/prysm_launcher.star index 90b95cd65..dd86ce1cc 100644 --- a/src/cl/prysm/prysm_launcher.star +++ b/src/cl/prysm/prysm_launcher.star @@ -143,13 +143,7 @@ def launch( cl_tolerations, participant_tolerations, global_tolerations ) - network_name = ( - "devnets" - if launcher.network != "kurtosis" - and launcher.network != "ephemery" - and launcher.network not in constants.PUBLIC_NETWORKS - else launcher.network - ) + network_name = shared_utils.get_network_name(launcher.network) bn_min_cpu = int(bn_min_cpu) if int(bn_min_cpu) > 0 else BEACON_MIN_CPU bn_max_cpu = ( @@ -360,7 +354,10 @@ def get_beacon_config( + constants.GENESIS_CONFIG_MOUNT_PATH_ON_CONTAINER + "/genesis.ssz", ) - if network == constants.NETWORK_NAME.kurtosis: + if ( + network == constants.NETWORK_NAME.kurtosis + or 
constants.NETWORK_NAME.shadowfork in network + ): if bootnode_contexts != None: for ctx in bootnode_contexts[: constants.MAX_ENR_ENTRIES]: cmd.append("--peer=" + ctx.multiaddr) diff --git a/src/cl/teku/teku_launcher.star b/src/cl/teku/teku_launcher.star index 24d6d95d0..0fd47bd5d 100644 --- a/src/cl/teku/teku_launcher.star +++ b/src/cl/teku/teku_launcher.star @@ -152,13 +152,7 @@ def launch( int(bn_max_mem) if int(bn_max_mem) > 0 else holesky_beacon_memory_limit ) - network_name = ( - "devnets" - if launcher.network != "kurtosis" - and launcher.network != "ephemery" - and launcher.network not in constants.PUBLIC_NETWORKS - else launcher.network - ) + network_name = shared_utils.get_network_name(launcher.network) bn_min_cpu = int(bn_min_cpu) if int(bn_min_cpu) > 0 else BEACON_MIN_CPU bn_max_cpu = ( @@ -400,7 +394,10 @@ def get_beacon_config( + constants.GENESIS_CONFIG_MOUNT_PATH_ON_CONTAINER + "/genesis.ssz" ) - if network == constants.NETWORK_NAME.kurtosis: + if ( + network == constants.NETWORK_NAME.kurtosis + or constants.NETWORK_NAME.shadowfork in network + ): if bootnode_contexts != None: cmd.append( "--p2p-discovery-bootnodes=" @@ -430,6 +427,13 @@ def get_beacon_config( plan, el_cl_genesis_data.files_artifact_uuid ) ) + elif constants.NETWORK_NAME.shadowfork in network: + cmd.append( + "--p2p-discovery-bootnodes=" + + shared_utils.get_devnet_enrs_list( + plan, el_cl_genesis_data.files_artifact_uuid + ) + ) else: # Devnets # TODO Remove once checkpoint sync is working for verkle if constants.NETWORK_NAME.verkle not in network: diff --git a/src/el/besu/besu_launcher.star b/src/el/besu/besu_launcher.star index 12a282b27..ac117880f 100644 --- a/src/el/besu/besu_launcher.star +++ b/src/el/besu/besu_launcher.star @@ -87,13 +87,7 @@ def launch( el_tolerations, participant_tolerations, global_tolerations ) - network_name = ( - "devnets" - if launcher.network != "kurtosis" - and launcher.network != "ephemery" - and launcher.network not in constants.PUBLIC_NETWORKS - 
else launcher.network - ) + network_name = shared_utils.get_network_name(launcher.network) el_min_cpu = int(el_min_cpu) if int(el_min_cpu) > 0 else EXECUTION_MIN_CPU el_max_cpu = ( @@ -208,7 +202,10 @@ def get_config( "--metrics-host=0.0.0.0", "--metrics-port={0}".format(METRICS_PORT_NUM), ] - if network not in constants.PUBLIC_NETWORKS: + if ( + network not in constants.PUBLIC_NETWORKS + or constants.NETWORK_NAME.shadowfork in network + ): cmd.append( "--genesis-file=" + constants.GENESIS_CONFIG_MOUNT_PATH_ON_CONTAINER diff --git a/src/el/erigon/erigon_launcher.star b/src/el/erigon/erigon_launcher.star index cc503ec2e..cfee870c0 100644 --- a/src/el/erigon/erigon_launcher.star +++ b/src/el/erigon/erigon_launcher.star @@ -87,13 +87,7 @@ def launch( el_tolerations, participant_tolerations, global_tolerations ) - network_name = ( - "devnets" - if launcher.network != "kurtosis" - and launcher.network != "ephemery" - and launcher.network not in constants.PUBLIC_NETWORKS - else launcher.network - ) + network_name = shared_utils.get_network_name(launcher.network) el_min_cpu = int(el_min_cpu) if int(el_min_cpu) > 0 else EXECUTION_MIN_CPU el_max_cpu = ( @@ -134,6 +128,7 @@ def launch( extra_params, extra_env_vars, extra_labels, + launcher.cancun_time, persistent, el_volume_size, tolerations, @@ -181,6 +176,7 @@ def get_config( extra_params, extra_env_vars, extra_labels, + cancun_time, persistent, el_volume_size, tolerations, @@ -195,6 +191,11 @@ def get_config( "--chain={0}".format( network if network in constants.PUBLIC_NETWORKS else "dev" ), + "{0}".format( + "--override.cancun=" + str(cancun_time) + if constants.NETWORK_NAME.shadowfork in network + else "" + ), "--networkid={0}".format(networkid), "--log.console.verbosity=" + verbosity_level, "--datadir=" + EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER, @@ -296,10 +297,11 @@ def get_config( ) -def new_erigon_launcher(el_cl_genesis_data, jwt_file, network, networkid): +def new_erigon_launcher(el_cl_genesis_data, jwt_file, 
network, networkid, cancun_time): return struct( el_cl_genesis_data=el_cl_genesis_data, jwt_file=jwt_file, network=network, networkid=networkid, + cancun_time=cancun_time, ) diff --git a/src/el/ethereumjs/ethereumjs_launcher.star b/src/el/ethereumjs/ethereumjs_launcher.star index 55a9a926e..aa0eaeb63 100644 --- a/src/el/ethereumjs/ethereumjs_launcher.star +++ b/src/el/ethereumjs/ethereumjs_launcher.star @@ -89,13 +89,7 @@ def launch( el_tolerations, participant_tolerations, global_tolerations ) - network_name = ( - "devnets" - if launcher.network != "kurtosis" - and launcher.network != "ephemery" - and launcher.network not in constants.PUBLIC_NETWORKS - else launcher.network - ) + network_name = shared_utils.get_network_name(launcher.network) el_min_cpu = int(el_min_cpu) if int(el_min_cpu) > 0 else EXECUTION_MIN_CPU el_max_cpu = ( diff --git a/src/el/geth/geth_launcher.star b/src/el/geth/geth_launcher.star index 7b0ab6941..830beb98e 100644 --- a/src/el/geth/geth_launcher.star +++ b/src/el/geth/geth_launcher.star @@ -98,13 +98,7 @@ def launch( el_tolerations, participant_tolerations, global_tolerations ) - network_name = ( - "devnets" - if launcher.network != "kurtosis" - and launcher.network != "ephemery" - and launcher.network not in constants.PUBLIC_NETWORKS - else launcher.network - ) + network_name = shared_utils.get_network_name(launcher.network) el_min_cpu = int(el_min_cpu) if int(el_min_cpu) > 0 else EXECUTION_MIN_CPU el_max_cpu = ( @@ -147,7 +141,8 @@ def launch( extra_labels, launcher.capella_fork_epoch, launcher.electra_fork_epoch, - launcher.final_genesis_timestamp, + launcher.cancun_time, + launcher.prague_time, persistent, el_volume_size, tolerations, @@ -197,19 +192,22 @@ def get_config( extra_labels, capella_fork_epoch, electra_fork_epoch, - final_genesis_timestamp, + cancun_time, + prague_time, persistent, el_volume_size, tolerations, ): # TODO: Remove this once electra fork has path based storage scheme implemented - if electra_fork_epoch != None 
or constants.NETWORK_NAME.verkle in network: + if ( + electra_fork_epoch != None or constants.NETWORK_NAME.verkle in network + ) and constants.NETWORK_NAME.shadowfork not in network: if ( electra_fork_epoch == 0 or constants.NETWORK_NAME.verkle + "-gen" in network ): # verkle-gen init_datadir_cmd_str = "geth --datadir={0} --cache.preimages --override.prague={1} init {2}".format( EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER, - final_genesis_timestamp, + prague_time, constants.GENESIS_CONFIG_MOUNT_PATH_ON_CONTAINER + "/genesis.json", ) else: # verkle @@ -224,6 +222,8 @@ def get_config( EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER, constants.GENESIS_CONFIG_MOUNT_PATH_ON_CONTAINER + "/genesis.json", ) + elif constants.NETWORK_NAME.shadowfork in network: + init_datadir_cmd_str = "echo shadowfork" else: init_datadir_cmd_str = "geth init --state.scheme=path --datadir={0} {1}".format( EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER, @@ -239,6 +239,7 @@ def get_config( "--state.scheme=path" if electra_fork_epoch == None and "verkle" not in network + and constants.NETWORK_NAME.shadowfork not in network # for now and "--builder" not in extra_params and capella_fork_epoch == 0 else "" @@ -251,13 +252,18 @@ def get_config( ), # Override prague fork timestamp if electra_fork_epoch == 0 "{0}".format( - "--override.prague=" + final_genesis_timestamp + "--override.prague=" + str(prague_time) if electra_fork_epoch == 0 or "verkle-gen" in network else "" ), "{0}".format( "--{}".format(network) if network in constants.PUBLIC_NETWORKS else "" ), + "{0}".format( + "--override.cancun=" + str(cancun_time) + if constants.NETWORK_NAME.shadowfork in network + else "" + ), "--networkid={0}".format(networkid), "--verbosity=" + verbosity_level, "--datadir=" + EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER, @@ -301,7 +307,10 @@ def get_config( if "--ws.api" in arg: cmd[index] = "--ws.api=admin,engine,net,eth,web3,debug,suavex" - if network == constants.NETWORK_NAME.kurtosis: + if ( + network == 
constants.NETWORK_NAME.kurtosis + or constants.NETWORK_NAME.shadowfork in network + ): if len(existing_el_clients) > 0: cmd.append( "--bootnodes=" @@ -312,6 +321,13 @@ def get_config( ] ) ) + if ( + constants.NETWORK_NAME.shadowfork in network and "verkle" in network + ): # verkle shadowfork + cmd.append("--override.prague=" + str(prague_time)) + cmd.append("--override.overlay-stride=10000") + cmd.append("--override.blockproof=true") + cmd.append("--clear.verkle.costs=true") elif network not in constants.PUBLIC_NETWORKS: cmd.append( "--bootnodes=" @@ -371,8 +387,9 @@ def new_geth_launcher( jwt_file, network, networkid, - final_genesis_timestamp, capella_fork_epoch, + cancun_time, + prague_time, electra_fork_epoch=None, ): return struct( @@ -380,7 +397,8 @@ def new_geth_launcher( jwt_file=jwt_file, network=network, networkid=networkid, - final_genesis_timestamp=final_genesis_timestamp, capella_fork_epoch=capella_fork_epoch, + cancun_time=cancun_time, + prague_time=prague_time, electra_fork_epoch=electra_fork_epoch, ) diff --git a/src/el/nethermind/nethermind_launcher.star b/src/el/nethermind/nethermind_launcher.star index e59bfcf1e..ba7be24a7 100644 --- a/src/el/nethermind/nethermind_launcher.star +++ b/src/el/nethermind/nethermind_launcher.star @@ -85,13 +85,7 @@ def launch( el_tolerations, participant_tolerations, global_tolerations ) - network_name = ( - "devnets" - if launcher.network != "kurtosis" - and launcher.network != "ephemery" - and launcher.network not in constants.PUBLIC_NETWORKS - else launcher.network - ) + network_name = shared_utils.get_network_name(launcher.network) el_min_cpu = int(el_min_cpu) if int(el_min_cpu) > 0 else EXECUTION_MIN_CPU el_max_cpu = ( @@ -208,10 +202,20 @@ def get_config( + constants.GENESIS_CONFIG_MOUNT_PATH_ON_CONTAINER + "/chainspec.json" ) + elif constants.NETWORK_NAME.shadowfork in network: + cmd.append( + "--Init.ChainSpecPath=" + + constants.GENESIS_CONFIG_MOUNT_PATH_ON_CONTAINER + + "/chainspec.json" + ) + 
cmd.append("--config=" + network) else: cmd.append("--config=" + network) - if network == constants.NETWORK_NAME.kurtosis: + if ( + network == constants.NETWORK_NAME.kurtosis + or constants.NETWORK_NAME.shadowfork in network + ): if len(existing_el_clients) > 0: cmd.append( "--Network.StaticPeers=" diff --git a/src/el/reth/reth_launcher.star b/src/el/reth/reth_launcher.star index 411f8ee15..8faf1d6ed 100644 --- a/src/el/reth/reth_launcher.star +++ b/src/el/reth/reth_launcher.star @@ -87,13 +87,8 @@ def launch( tolerations = input_parser.get_client_tolerations( el_tolerations, participant_tolerations, global_tolerations ) - network_name = ( - "devnets" - if launcher.network != "kurtosis" - and launcher.network != "ephemery" - and launcher.network not in constants.PUBLIC_NETWORKS - else launcher.network - ) + + network_name = shared_utils.get_network_name(launcher.network) el_min_cpu = int(el_min_cpu) if int(el_min_cpu) > 0 else EXECUTION_MIN_CPU el_max_cpu = ( diff --git a/src/package_io/constants.star b/src/package_io/constants.star index 150e89d6d..2e9f1c06e 100644 --- a/src/package_io/constants.star +++ b/src/package_io/constants.star @@ -53,6 +53,13 @@ CAPELLA_FORK_VERSION = "0x40000038" DENEB_FORK_VERSION = "0x50000038" ELECTRA_FORK_VERSION = "0x60000038" +ETHEREUM_GENESIS_GENERATOR = struct( + bellatrix_genesis="ethpandaops/ethereum-genesis-generator:1.3.15", # EOL + capella_genesis="ethpandaops/ethereum-genesis-generator:2.0.12", # Default + verkle_support_genesis="ethpandaops/ethereum-genesis-generator:3.0.0-rc.19", # soon to be deneb genesis + verkle_genesis="ethpandaops/ethereum-genesis-generator:4.0.0-rc.6", +) + NETWORK_NAME = struct( mainnet="mainnet", goerli="goerli", @@ -61,6 +68,7 @@ NETWORK_NAME = struct( ephemery="ephemery", kurtosis="kurtosis", verkle="verkle", + shadowfork="shadowfork", ) PUBLIC_NETWORKS = ( @@ -92,6 +100,14 @@ GENESIS_VALIDATORS_ROOT = { "holesky": "0x9143aa7c615a7f7115e2b6aac319c03529df8242ae705fba9df39b79c59fa8b1", } 
+DEPOSIT_CONTRACT_ADDRESS = { + "mainnet": "0x00000000219ab540356cBB839Cbe05303d7705Fa", + "goerli": "0xff50ed3d0ec03aC01D4C79aAd74928BFF48a7b2b", + "sepolia": "0x7f02C3E3c98b133055B8B348B2Ac625669Ed295D", + "holesky": "0x4242424242424242424242424242424242424242", + "ephemery": "0x4242424242424242424242424242424242424242", +} + GENESIS_TIME = { "mainnet": 1606824023, "goerli": 1616508000, diff --git a/src/package_io/input_parser.star b/src/package_io/input_parser.star index 211a9175f..4be39d044 100644 --- a/src/package_io/input_parser.star +++ b/src/package_io/input_parser.star @@ -30,7 +30,7 @@ NIMBUS_NODE_NAME = "nimbus" # Placeholder value for the deneb fork epoch if electra is being run # TODO: This is a hack, and should be removed once we electra is rebased on deneb -HIGH_DENEB_VALUE_FORK_VERKLE = 20000 +HIGH_DENEB_VALUE_FORK_VERKLE = 2000000000 # MEV Params FLASHBOTS_MEV_BOOST_PORT = 18550 @@ -66,7 +66,10 @@ def input_parser(plan, input_args): # add default eth2 input params result["mev_type"] = None result["mev_params"] = get_default_mev_params() - if result["network_params"]["network"] == "kurtosis": + if ( + result["network_params"]["network"] == constants.NETWORK_NAME.kurtosis + or constants.NETWORK_NAME.shadowfork in result["network_params"]["network"] + ): result["additional_services"] = DEFAULT_ADDITIONAL_SERVICES else: result["additional_services"] = [] @@ -80,6 +83,12 @@ def input_parser(plan, input_args): result["persistent"] = False result["global_tolerations"] = [] + if constants.NETWORK_NAME.shadowfork in result["network_params"]["network"]: + shadow_base = result["network_params"]["network"].split("-shadowfork")[0] + result["network_params"][ + "deposit_contract_address" + ] = constants.DEPOSIT_CONTRACT_ADDRESS[shadow_base] + for attr in input_args: value = input_args[attr] # if its inserted we use the value inserted @@ -209,6 +218,10 @@ def input_parser(plan, input_args): deneb_fork_epoch=result["network_params"]["deneb_fork_epoch"], 
electra_fork_epoch=result["network_params"]["electra_fork_epoch"], network=result["network_params"]["network"], + min_validator_withdrawability_delay=result["network_params"][ + "min_validator_withdrawability_delay" + ], + shard_committee_period=result["network_params"]["shard_committee_period"], ), mev_params=struct( mev_relay_image=result["mev_params"]["mev_relay_image"], @@ -406,7 +419,10 @@ def parse_network_params(input_args): "deposit_contract_address is empty or spaces it needs to be of non zero length" ) - if result["network_params"]["network"] == "kurtosis": + if ( + result["network_params"]["network"] == "kurtosis" + or constants.NETWORK_NAME.shadowfork in result["network_params"]["network"] + ): if ( result["network_params"]["preregistered_validator_keys_mnemonic"].strip() == "" @@ -431,7 +447,10 @@ def parse_network_params(input_args): ): fail("electra can only happen with capella genesis not bellatrix") - if result["network_params"]["network"] == "kurtosis": + if ( + result["network_params"]["network"] == constants.NETWORK_NAME.kurtosis + or constants.NETWORK_NAME.shadowfork in result["network_params"]["network"] + ): if MIN_VALIDATORS > actual_num_validators: fail( "We require at least {0} validators but got {1}".format( @@ -533,6 +552,8 @@ def default_network_params(): "deneb_fork_epoch": 500, "electra_fork_epoch": None, "network": "kurtosis", + "min_validator_withdrawability_delay": 256, + "shard_committee_period": 256, } diff --git a/src/participant_network.star b/src/participant_network.star index 456f187e8..2b159e7d2 100644 --- a/src/participant_network.star +++ b/src/participant_network.star @@ -8,6 +8,9 @@ el_cl_genesis_data_generator = import_module( el_cl_genesis_data = import_module( "./prelaunch_data_generator/el_cl_genesis/el_cl_genesis_data.star" ) + +input_parser = import_module("./package_io/input_parser.star") + shared_utils = import_module("./shared_utils/shared_utils.star") static_files = 
import_module("./static_files/static_files.star") @@ -67,9 +70,127 @@ def launch_participant_network( global_tolerations, parallel_keystore_generation=False, ): + network_id = network_params.network_id num_participants = len(participants) - if network_params.network == constants.NETWORK_NAME.kurtosis: - # We are running a kurtosis network + latest_block = "" + cancun_time = 0 + prague_time = 0 + shadowfork_block = "latest" + if ( + constants.NETWORK_NAME.shadowfork in network_params.network + and ("verkle" in network_params.network) + and ("holesky" in network_params.network) + ): + shadowfork_block = "793312" # Hardcodes verkle shadowfork block for holesky + + if ( + network_params.network == constants.NETWORK_NAME.kurtosis + or constants.NETWORK_NAME.shadowfork in network_params.network + ): + if ( + constants.NETWORK_NAME.shadowfork in network_params.network + ): # shadowfork requires some preparation + base_network = shared_utils.get_network_name(network_params.network) + # overload the network name to remove the shadowfork suffix + if constants.NETWORK_NAME.ephemery in base_network: + chain_id = plan.run_sh( + run="curl -s https://ephemery.dev/latest/config.yaml | yq .DEPOSIT_CHAIN_ID | tr -d '\n'", + image="linuxserver/yq", + ) + network_id = chain_id.output + else: + network_id = constants.NETWORK_ID[ + base_network + ] # overload the network id to match the network name + latest_block = plan.run_sh( # fetch the latest block + run="mkdir -p /shadowfork && \ + curl -o /shadowfork/latest_block.json https://ethpandaops-ethereum-node-snapshots.ams3.digitaloceanspaces.com/" + + base_network + + "/geth/" + + shadowfork_block + + "/_snapshot_eth_getBlockByNumber.json", + image="badouralix/curl-jq", + store=[StoreSpec(src="/shadowfork", name="latest_blocks")], + ) + + # maybe we can do the copy in the same step as the fetch? 
+ for index, participant in enumerate(participants): + tolerations = input_parser.get_client_tolerations( + participant.el_tolerations, + participant.tolerations, + global_tolerations, + ) + cl_client_type = participant.cl_client_type + el_client_type = participant.el_client_type + + # Zero-pad the index using the calculated zfill value + index_str = shared_utils.zfill_custom( + index + 1, len(str(len(participants))) + ) + + el_service_name = "el-{0}-{1}-{2}".format( + index_str, el_client_type, cl_client_type + ) + shadowfork_data = plan.add_service( + name="shadowfork-{0}".format(el_service_name), + config=ServiceConfig( + image="alpine:3.19.1", + cmd=[ + "apk add --no-cache curl tar zstd && curl -s -L https://ethpandaops-ethereum-node-snapshots.ams3.digitaloceanspaces.com/" + + base_network + + "/" + + el_client_type + + "/" + + shadowfork_block + + "/snapshot.tar.zst" + + " | tar -I zstd -xvf - -C /data/" + + el_client_type + + "/execution-data" + + " && touch /tmp/finished" + + " && tail -f /dev/null" + ], + entrypoint=["/bin/sh", "-c"], + files={ + "/data/" + + el_client_type + + "/execution-data": Directory( + persistent_key="data-{0}".format(el_service_name), + size=constants.VOLUME_SIZE[base_network][ + el_client_type + "_volume_size" + ], + ), + }, + env_vars={ + "RCLONE_CONFIG_MYS3_TYPE": "s3", + "RCLONE_CONFIG_MYS3_PROVIDER": "DigitalOcean", + "RCLONE_CONFIG_MYS3_ENDPOINT": "https://ams3.digitaloceanspaces.com", + }, + tolerations=tolerations, + ), + ) + for index, participant in enumerate(participants): + cl_client_type = participant.cl_client_type + el_client_type = participant.el_client_type + + # Zero-pad the index using the calculated zfill value + index_str = shared_utils.zfill_custom( + index + 1, len(str(len(participants))) + ) + + el_service_name = "el-{0}-{1}-{2}".format( + index_str, el_client_type, cl_client_type + ) + plan.wait( + service_name="shadowfork-{0}".format(el_service_name), + recipe=ExecRecipe(command=["cat", "/tmp/finished"]), + 
field="code", + assertion="==", + target_value=0, + interval="1s", + timeout="6h", # 6 hours should be enough for the biggest network + ) + + # We are running a kurtosis or shadowfork network plan.print("Generating cl validator key stores") validator_data = None if not parallel_keystore_generation: @@ -87,8 +208,6 @@ def launch_participant_network( plan.print(json.indent(json.encode(validator_data))) - network_id = network_params.network_id - # We need to send the same genesis time to both the EL and the CL to ensure that timestamp based forking works as expected final_genesis_timestamp = get_final_genesis_timestamp( plan, @@ -105,13 +224,14 @@ def launch_participant_network( total_number_of_validator_keys += participant.validator_count plan.print("Generating EL CL data") + # we are running bellatrix genesis (deprecated) - will be removed in the future if ( network_params.capella_fork_epoch > 0 and network_params.electra_fork_epoch == None ): ethereum_genesis_generator_image = ( - "ethpandaops/ethereum-genesis-generator:1.3.15" + constants.ETHEREUM_GENESIS_GENERATOR.bellatrix_genesis ) # we are running capella genesis - default behavior elif ( @@ -119,17 +239,17 @@ def launch_participant_network( and network_params.electra_fork_epoch == None ): ethereum_genesis_generator_image = ( - "ethpandaops/ethereum-genesis-generator:2.0.11" + constants.ETHEREUM_GENESIS_GENERATOR.capella_genesis ) # we are running electra - experimental elif network_params.electra_fork_epoch != None: if network_params.electra_fork_epoch == 0: ethereum_genesis_generator_image = ( - "ethpandaops/ethereum-genesis-generator:4.0.0-rc.6" + constants.ETHEREUM_GENESIS_GENERATOR.verkle_genesis ) else: ethereum_genesis_generator_image = ( - "ethpandaops/ethereum-genesis-generator:3.0.0-rc.18" + constants.ETHEREUM_GENESIS_GENERATOR.verkle_support_genesis ) else: fail( @@ -145,7 +265,7 @@ def launch_participant_network( ethereum_genesis_generator_image, el_cl_genesis_config_template, 
final_genesis_timestamp, - network_params.network_id, + network_id, network_params.deposit_contract_address, network_params.seconds_per_slot, network_params.preregistered_validator_keys_mnemonic, @@ -157,6 +277,9 @@ def launch_participant_network( network_params.capella_fork_epoch, network_params.deneb_fork_epoch, network_params.electra_fork_epoch, + latest_block.files_artifacts[0] if latest_block != "" else "", + network_params.min_validator_withdrawability_delay, + network_params.shard_committee_period, ) elif network_params.network in constants.PUBLIC_NETWORKS: # We are running a public network @@ -167,6 +290,8 @@ def launch_participant_network( el_cl_data = el_cl_genesis_data.new_el_cl_genesis_data( dummy.files_artifacts[0], constants.GENESIS_VALIDATORS_ROOT[network_params.network], + cancun_time, + prague_time, ) final_genesis_timestamp = constants.GENESIS_TIME[network_params.network] network_id = constants.NETWORK_ID[network_params.network] @@ -184,6 +309,8 @@ def launch_participant_network( el_cl_data = el_cl_genesis_data.new_el_cl_genesis_data( el_cl_genesis_data_uuid.files_artifacts[0], genesis_validators_root, + cancun_time, + prague_time, ) final_genesis_timestamp = shared_utils.read_genesis_timestamp_from_config( plan, el_cl_genesis_data_uuid.files_artifacts[0] @@ -209,6 +336,8 @@ def launch_participant_network( el_cl_data = el_cl_genesis_data.new_el_cl_genesis_data( el_cl_genesis_data_uuid.files_artifacts[0], genesis_validators_root, + cancun_time, + prague_time, ) final_genesis_timestamp = shared_utils.read_genesis_timestamp_from_config( plan, el_cl_genesis_data_uuid.files_artifacts[0] @@ -225,8 +354,9 @@ def launch_participant_network( jwt_file, network_params.network, network_id, - final_genesis_timestamp, network_params.capella_fork_epoch, + el_cl_data.cancun_time, + el_cl_data.prague_time, network_params.electra_fork_epoch, ), "launch_method": geth.launch, @@ -237,8 +367,9 @@ def launch_participant_network( jwt_file, network_params.network, 
network_id, - final_genesis_timestamp, network_params.capella_fork_epoch, + el_cl_data.cancun_time, + el_cl_data.prague_time, network_params.electra_fork_epoch, ), "launch_method": geth.launch, @@ -257,6 +388,7 @@ def launch_participant_network( jwt_file, network_params.network, network_id, + el_cl_data.cancun_time, ), "launch_method": erigon.launch, }, @@ -399,6 +531,7 @@ def launch_participant_network( preregistered_validator_keys_for_nodes = ( validator_data.per_node_keystores if network_params.network == constants.NETWORK_NAME.kurtosis + or constants.NETWORK_NAME.shadowfork in network_params.network else None ) diff --git a/src/prelaunch_data_generator/el_cl_genesis/el_cl_genesis_data.star b/src/prelaunch_data_generator/el_cl_genesis/el_cl_genesis_data.star index 8e0a834f7..154f89728 100644 --- a/src/prelaunch_data_generator/el_cl_genesis/el_cl_genesis_data.star +++ b/src/prelaunch_data_generator/el_cl_genesis/el_cl_genesis_data.star @@ -1,8 +1,12 @@ def new_el_cl_genesis_data( files_artifact_uuid, genesis_validators_root, + cancun_time, + prague_time, ): return struct( files_artifact_uuid=files_artifact_uuid, genesis_validators_root=genesis_validators_root, + cancun_time=cancun_time, + prague_time=prague_time, ) diff --git a/src/prelaunch_data_generator/el_cl_genesis/el_cl_genesis_generator.star b/src/prelaunch_data_generator/el_cl_genesis/el_cl_genesis_generator.star index d4093c146..d2919bfd5 100644 --- a/src/prelaunch_data_generator/el_cl_genesis/el_cl_genesis_generator.star +++ b/src/prelaunch_data_generator/el_cl_genesis/el_cl_genesis_generator.star @@ -6,6 +6,7 @@ constants = import_module("../../package_io/constants.star") GENESIS_VALUES_PATH = "/opt" GENESIS_VALUES_FILENAME = "values.env" +SHADOWFORK_FILEPATH = "/shadowfork" def generate_el_cl_genesis_data( @@ -25,7 +26,16 @@ def generate_el_cl_genesis_data( capella_fork_epoch, deneb_fork_epoch, electra_fork_epoch, + latest_block, + min_validator_withdrawability_delay, + shard_committee_period, ): + 
files = {} + shadowfork_file = "" + if latest_block != "": + files[SHADOWFORK_FILEPATH] = latest_block + shadowfork_file = SHADOWFORK_FILEPATH + "/shadowfork/latest_block.json" + template_data = new_env_file_for_el_cl_genesis_data( genesis_unix_timestamp, network_id, @@ -40,6 +50,9 @@ def generate_el_cl_genesis_data( capella_fork_epoch, deneb_fork_epoch, electra_fork_epoch, + shadowfork_file, + min_validator_withdrawability_delay, + shard_committee_period, ) genesis_generation_template = shared_utils.new_template_and_data( genesis_generation_config_yml_template, template_data @@ -55,10 +68,12 @@ def generate_el_cl_genesis_data( genesis_values_and_dest_filepath, "genesis-el-cl-env-file" ) + files[GENESIS_VALUES_PATH] = genesis_generation_config_artifact_name + genesis = plan.run_sh( run="cp /opt/values.env /config/values.env && ./entrypoint.sh all && mkdir /network-configs && mv /data/custom_config_data/* /network-configs/", image=image, - files={GENESIS_VALUES_PATH: genesis_generation_config_artifact_name}, + files=files, store=[ StoreSpec(src="/network-configs/", name="el_cl_genesis_data"), StoreSpec( @@ -75,8 +90,23 @@ def generate_el_cl_genesis_data( wait=None, ) + cancun_time = plan.run_sh( + run="jq .config.cancunTime /data/network-configs/genesis.json | tr -d '\n'", + image="badouralix/curl-jq", + files={"/data": genesis.files_artifacts[0]}, + ) + + prague_time = plan.run_sh( + run="jq .config.pragueTime /data/network-configs/genesis.json | tr -d '\n'", + image="badouralix/curl-jq", + files={"/data": genesis.files_artifacts[0]}, + ) + result = el_cl_genesis_data.new_el_cl_genesis_data( - genesis.files_artifacts[0], genesis_validators_root.output + genesis.files_artifacts[0], + genesis_validators_root.output, + cancun_time.output, + prague_time.output, ) return result @@ -96,6 +126,9 @@ def new_env_file_for_el_cl_genesis_data( capella_fork_epoch, deneb_fork_epoch, electra_fork_epoch, + shadowfork_file, + min_validator_withdrawability_delay, + 
shard_committee_period, ): return { "UnixTimestamp": genesis_unix_timestamp, @@ -116,4 +149,7 @@ def new_env_file_for_el_cl_genesis_data( "CapellaForkVersion": constants.CAPELLA_FORK_VERSION, "DenebForkVersion": constants.DENEB_FORK_VERSION, "ElectraForkVersion": constants.ELECTRA_FORK_VERSION, + "ShadowForkFile": shadowfork_file, + "MinValidatorWithdrawabilityDelay": min_validator_withdrawability_delay, + "ShardCommitteePeriod": shard_committee_period, } diff --git a/src/shared_utils/shared_utils.star b/src/shared_utils/shared_utils.star index 52bafa711..da9f63b79 100644 --- a/src/shared_utils/shared_utils.star +++ b/src/shared_utils/shared_utils.star @@ -139,3 +139,19 @@ print(network_id, end="") """, ) return value.output + + +def get_network_name(network): + network_name = network + if ( + network != constants.NETWORK_NAME.kurtosis + and network != constants.NETWORK_NAME.ephemery + and constants.NETWORK_NAME.shadowfork not in network + and network not in constants.PUBLIC_NETWORKS + ): + network_name = "devnets" + + if constants.NETWORK_NAME.shadowfork in network: + network_name = network.split("-shadowfork")[0] + + return network_name diff --git a/src/static_files/static_files.star b/src/static_files/static_files.star index 07a4745da..40eb2254e 100644 --- a/src/static_files/static_files.star +++ b/src/static_files/static_files.star @@ -68,3 +68,5 @@ CL_GENESIS_GENERATION_MNEMONICS_TEMPLATE_FILEPATH = ( ) JWT_PATH_FILEPATH = STATIC_FILES_DIRPATH + "/jwt/jwtsecret" + +SHADOWFORK_FILEPATH = "/network-configs/latest_block.json" diff --git a/static_files/genesis-generation-config/el-cl/values.env.tmpl b/static_files/genesis-generation-config/el-cl/values.env.tmpl index 45057d8ae..cc420a4ed 100644 --- a/static_files/genesis-generation-config/el-cl/values.env.tmpl +++ b/static_files/genesis-generation-config/el-cl/values.env.tmpl @@ -21,3 +21,6 @@ export GENESIS_DELAY={{ .GenesisDelay }} export MAX_CHURN={{ .MaxChurn }} export EJECTION_BALANCE={{ .EjectionBalance }} 
export ETH1_FOLLOW_DISTANCE={{ .Eth1FollowDistance }} +export SHADOW_FORK_FILE={{ .ShadowForkFile }} +export MIN_VALIDATOR_WITHDRAWABILITY_DELAY={{ .MinValidatorWithdrawabilityDelay }} +export SHARD_COMMITTEE_PERIOD={{ .ShardCommitteePeriod }} From 5602a02b0e38b7a267aa53f2410652aaaf021c0c Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 12 Feb 2024 17:46:50 +0530 Subject: [PATCH 17/33] chore(main): release 1.4.0 (#473) :robot: I have created a release *beep* *boop* --- ## [1.4.0](https://github.com/kurtosis-tech/ethereum-package/compare/1.3.0...1.4.0) (2024-02-09) ### Features * Add suave-enabled geth support ([#489](https://github.com/kurtosis-tech/ethereum-package/issues/489)) ([631eaf3](https://github.com/kurtosis-tech/ethereum-package/commit/631eaf3e621c90d5b546a1c005d8e31e06263aa4)) * add support for custom assertoor images & use assertoor image with verkle support for verkle chains ([#483](https://github.com/kurtosis-tech/ethereum-package/issues/483)) ([2d8a143](https://github.com/kurtosis-tech/ethereum-package/commit/2d8a143f753eaa3ec13abe4ebbb57bf82548b3fb)) * add verkle-gen-devnet-3 ([#487](https://github.com/kurtosis-tech/ethereum-package/issues/487)) ([1e543e8](https://github.com/kurtosis-tech/ethereum-package/commit/1e543e873c06e86a6448f8e88c53fb1bde35338e)) * blockscout support with sc verification ([#481](https://github.com/kurtosis-tech/ethereum-package/issues/481)) ([b3418cf](https://github.com/kurtosis-tech/ethereum-package/commit/b3418cf1545378d4b412966b9c33f650141aec04)) * enable custom resource limit per network ([#471](https://github.com/kurtosis-tech/ethereum-package/issues/471)) ([5db6611](https://github.com/kurtosis-tech/ethereum-package/commit/5db6611ab831a92212a21859b42a911cd12bce0c)) * enable shadowforking ([#475](https://github.com/kurtosis-tech/ethereum-package/issues/475)) 
([b788b18](https://github.com/kurtosis-tech/ethereum-package/commit/b788b18eead00622ab960a4853c8e24b09c16a26)) * improve built-in assertoor tests ([#488](https://github.com/kurtosis-tech/ethereum-package/issues/488)) ([d596699](https://github.com/kurtosis-tech/ethereum-package/commit/d5966991653ad48094cf71d3c01612349a651877)) * we no longer need 4788 deployer ([#485](https://github.com/kurtosis-tech/ethereum-package/issues/485)) ([abdfc2c](https://github.com/kurtosis-tech/ethereum-package/commit/abdfc2c3e73550069c2fbe0df5202f7f227a00cd)) ### Bug Fixes * add more prefund addresses for verkle-gen ([#482](https://github.com/kurtosis-tech/ethereum-package/issues/482)) ([01868fc](https://github.com/kurtosis-tech/ethereum-package/commit/01868fcb604852cf66474fc9de9a53a7b87b7bc3)) * bump verkle genesis generator ([#486](https://github.com/kurtosis-tech/ethereum-package/issues/486)) ([79dc5e1](https://github.com/kurtosis-tech/ethereum-package/commit/79dc5e19713d3f898f6255394290497d016f32d5)) * use latest stable image for assertoor ([#484](https://github.com/kurtosis-tech/ethereum-package/issues/484)) ([bbe0b16](https://github.com/kurtosis-tech/ethereum-package/commit/bbe0b16e948fc50f51273e2f0ab91503603e9fc9)) --- This PR was generated with [Release Please](https://github.com/googleapis/release-please). See [documentation](https://github.com/googleapis/release-please#release-please). 
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- CHANGELOG.md | 21 +++++++++++++++++++++ version.txt | 2 +- 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 624d28393..9211c8b72 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,26 @@ # Changelog +## [1.4.0](https://github.com/kurtosis-tech/ethereum-package/compare/1.3.0...1.4.0) (2024-02-09) + + +### Features + +* Add suave-enabled geth support ([#489](https://github.com/kurtosis-tech/ethereum-package/issues/489)) ([631eaf3](https://github.com/kurtosis-tech/ethereum-package/commit/631eaf3e621c90d5b546a1c005d8e31e06263aa4)) +* add support for custom assertoor images & use assertoor image with verkle support for verkle chains ([#483](https://github.com/kurtosis-tech/ethereum-package/issues/483)) ([2d8a143](https://github.com/kurtosis-tech/ethereum-package/commit/2d8a143f753eaa3ec13abe4ebbb57bf82548b3fb)) +* add verkle-gen-devnet-3 ([#487](https://github.com/kurtosis-tech/ethereum-package/issues/487)) ([1e543e8](https://github.com/kurtosis-tech/ethereum-package/commit/1e543e873c06e86a6448f8e88c53fb1bde35338e)) +* blockscout support with sc verification ([#481](https://github.com/kurtosis-tech/ethereum-package/issues/481)) ([b3418cf](https://github.com/kurtosis-tech/ethereum-package/commit/b3418cf1545378d4b412966b9c33f650141aec04)) +* enable custom resource limit per network ([#471](https://github.com/kurtosis-tech/ethereum-package/issues/471)) ([5db6611](https://github.com/kurtosis-tech/ethereum-package/commit/5db6611ab831a92212a21859b42a911cd12bce0c)) +* enable shadowforking ([#475](https://github.com/kurtosis-tech/ethereum-package/issues/475)) ([b788b18](https://github.com/kurtosis-tech/ethereum-package/commit/b788b18eead00622ab960a4853c8e24b09c16a26)) +* improve built-in assertoor tests ([#488](https://github.com/kurtosis-tech/ethereum-package/issues/488)) 
([d596699](https://github.com/kurtosis-tech/ethereum-package/commit/d5966991653ad48094cf71d3c01612349a651877)) +* we no longer need 4788 deployer ([#485](https://github.com/kurtosis-tech/ethereum-package/issues/485)) ([abdfc2c](https://github.com/kurtosis-tech/ethereum-package/commit/abdfc2c3e73550069c2fbe0df5202f7f227a00cd)) + + +### Bug Fixes + +* add more prefund addresses for verkle-gen ([#482](https://github.com/kurtosis-tech/ethereum-package/issues/482)) ([01868fc](https://github.com/kurtosis-tech/ethereum-package/commit/01868fcb604852cf66474fc9de9a53a7b87b7bc3)) +* bump verkle genesis generator ([#486](https://github.com/kurtosis-tech/ethereum-package/issues/486)) ([79dc5e1](https://github.com/kurtosis-tech/ethereum-package/commit/79dc5e19713d3f898f6255394290497d016f32d5)) +* use latest stable image for assertoor ([#484](https://github.com/kurtosis-tech/ethereum-package/issues/484)) ([bbe0b16](https://github.com/kurtosis-tech/ethereum-package/commit/bbe0b16e948fc50f51273e2f0ab91503603e9fc9)) + ## [1.3.0](https://github.com/kurtosis-tech/ethereum-package/compare/1.2.0...1.3.0) (2024-01-22) diff --git a/version.txt b/version.txt index f0bb29e76..88c5fb891 100644 --- a/version.txt +++ b/version.txt @@ -1 +1 @@ -1.3.0 +1.4.0 From 316d42fbaeb2d7bc1d580823a6c70b1c2dfe3746 Mon Sep 17 00:00:00 2001 From: Barnabas Busa Date: Mon, 12 Feb 2024 13:31:55 +0100 Subject: [PATCH 18/33] feat!: add node selectors features (#491) Co-authored-by: pk910 Co-authored-by: parithosh Co-authored-by: Gyanendra Mishra --- .github/tests/node-selectors.yaml | 13 ++++++ README.md | 12 ++++++ main.star | 34 +++++++++++++-- network_params.yaml | 2 + src/assertoor/assertoor_launcher.star | 4 ++ .../beacon_metrics_gazer_launcher.star | 11 ++++- src/blob_spammer/blob_spammer.star | 4 ++ src/blobber/blobber_launcher.star | 24 +++++++++-- src/blobscan/blobscan_launcher.star | 41 ++++++++++++++++--- src/blockscout/blockscout_launcher.star | 24 ++++++++--- src/broadcaster/broadcaster.star | 10 
+++-- src/cl/lighthouse/lighthouse_launcher.star | 8 ++++ src/cl/lodestar/lodestar_launcher.star | 7 ++++ src/cl/nimbus/nimbus_launcher.star | 7 ++++ src/cl/prysm/prysm_launcher.star | 7 ++++ src/cl/teku/teku_launcher.star | 14 +++---- src/dora/dora_launcher.star | 4 ++ src/el/besu/besu_launcher.star | 11 +++-- src/el/erigon/erigon_launcher.star | 11 +++-- src/el/ethereumjs/ethereumjs_launcher.star | 11 +++-- src/el/geth/geth_launcher.star | 11 +++-- src/el/nethermind/nethermind_launcher.star | 11 +++-- src/el/reth/reth_launcher.star | 11 +++-- src/el_forkmon/el_forkmon_launcher.star | 9 +++- .../ethereum_metrics_exporter_launcher.star | 2 + .../full_beaconchain_launcher.star | 12 ++++++ src/goomy_blob/goomy_blob.star | 4 ++ src/grafana/grafana_launcher.star | 4 ++ src/mev/mev_boost/mev_boost_launcher.star | 24 +++++++++-- .../mev_custom_flood_launcher.star | 10 ++++- src/mev/mev_flood/mev_flood_launcher.star | 10 ++++- src/mev/mev_relay/mev_relay_launcher.star | 7 ++++ src/mev/mock_mev/mock_mev_launcher.star | 10 ++++- src/package_io/input_parser.star | 22 ++++++++++ src/participant_network.star | 35 +++++++++++----- src/prometheus/prometheus_launcher.star | 9 +++- src/snooper/snooper_engine_launcher.star | 7 ++-- .../transaction_spammer.star | 17 +++++++- src/xatu_sentry/xatu_sentry_launcher.star | 2 + 39 files changed, 386 insertions(+), 90 deletions(-) create mode 100644 .github/tests/node-selectors.yaml diff --git a/.github/tests/node-selectors.yaml b/.github/tests/node-selectors.yaml new file mode 100644 index 000000000..a011f73dc --- /dev/null +++ b/.github/tests/node-selectors.yaml @@ -0,0 +1,13 @@ +participants: + - el_client_type: reth + cl_client_type: teku + cl_split_mode_enabled: true + node_selectors: { + "kubernetes.io/hostname": testing-1, + } + - el_client_type: reth + cl_client_type: teku + cl_split_mode_enabled: true +global_node_selectors: { + "kubernetes.io/hostname": testing-2, +} diff --git a/README.md b/README.md index 3bd75fcf1..ecb40bcea 
100644 --- a/README.md +++ b/README.md @@ -264,6 +264,12 @@ participants: # Defaults to empty tolerations: [] + # Node selector + # Only works with Kubernetes + # Example: node_selectors: { "disktype": "ssd" } + # Defaults to empty + node_selectors: {} + # A list of optional extra params that will be passed to the CL client Beacon container for modifying its behaviour # If the client combines the Beacon & validator nodes (e.g. Teku, Nimbus), then this list will be passed to the combined Beacon-validator node beacon_extra_params: [] @@ -605,6 +611,12 @@ xatu_sentry_params: # toleration_seconds: 3600 # Defaults to empty global_tolerations: [] + +# Global node selector that will be passed to all containers (unless overridden by a more specific node selector) +# Only works with Kubernetes +# Example: node_selectors: { "disktype": "ssd" } +# Defaults to empty +node_selectors: {} ``` #### Example configurations diff --git a/main.star b/main.star index bef06e591..7ef2fd7ff 100644 --- a/main.star +++ b/main.star @@ -61,6 +61,7 @@ def run(plan, args={}): persistent = args_with_right_defaults.persistent xatu_sentry_params = args_with_right_defaults.xatu_sentry_params global_tolerations = args_with_right_defaults.global_tolerations + global_node_selectors = args_with_right_defaults.global_node_selectors grafana_datasource_config_template = read_file( static_files.GRAFANA_DATASOURCE_CONFIG_TEMPLATE_FILEPATH @@ -95,6 +96,7 @@ def run(plan, args={}): persistent, xatu_sentry_params, global_tolerations, + global_node_selectors, parallel_keystore_generation, ) @@ -137,7 +139,9 @@ def run(plan, args={}): if "broadcaster" in args_with_right_defaults.additional_services: args_with_right_defaults.additional_services.remove("broadcaster") broadcaster_service = broadcaster.launch_broadcaster( - plan, all_el_client_contexts + plan, + all_el_client_contexts, + global_node_selectors, ) fuzz_target = "http://{0}:{1}".format( broadcaster_service.ip_address, @@ -170,6 +174,7 @@ def run(plan, 
args={}): beacon_uri, raw_jwt_secret, args_with_right_defaults.global_client_log_level, + global_node_selectors, ) mev_endpoints.append(endpoint) elif ( @@ -195,6 +200,7 @@ def run(plan, args={}): fuzz_target, contract_owner.private_key, normal_user.private_key, + global_node_selectors, ) epoch_recipe = GetHttpRequestRecipe( endpoint="/eth/v2/beacon/blocks/head", @@ -218,6 +224,7 @@ def run(plan, args={}): builder_uri, network_params.seconds_per_slot, persistent, + global_node_selectors, ) mev_flood.spam_in_background( plan, @@ -238,7 +245,8 @@ def run(plan, args={}): ) if args_with_right_defaults.participants[index].validator_count != 0: mev_boost_launcher = mev_boost.new_mev_boost_launcher( - MEV_BOOST_SHOULD_CHECK_RELAY, mev_endpoints + MEV_BOOST_SHOULD_CHECK_RELAY, + mev_endpoints, ) mev_boost_service_name = "{0}-{1}-{2}-{3}".format( input_parser.MEV_BOOST_SERVICE_NAME_PREFIX, @@ -252,6 +260,7 @@ def run(plan, args={}): mev_boost_service_name, network_params.network_id, mev_params.mev_boost_image, + global_node_selectors, ) all_mevboost_contexts.append(mev_boost_context) @@ -275,6 +284,7 @@ def run(plan, args={}): fuzz_target, tx_spammer_params, network_params.electra_fork_epoch, + global_node_selectors, ) plan.print("Successfully launched transaction spammer") elif additional_service == "blob_spammer": @@ -287,6 +297,7 @@ def run(plan, args={}): network_params.deneb_fork_epoch, network_params.seconds_per_slot, network_params.genesis_delay, + global_node_selectors, ) plan.print("Successfully launched blob spammer") elif additional_service == "goomy_blob": @@ -299,6 +310,7 @@ def run(plan, args={}): all_cl_client_contexts[0], network_params.seconds_per_slot, goomy_blob_params, + global_node_selectors, ) plan.print("Successfully launched goomy the blob spammer") # We need a way to do time.sleep @@ -309,7 +321,10 @@ def run(plan, args={}): static_files.EL_FORKMON_CONFIG_TEMPLATE_FILEPATH ) el_forkmon.launch_el_forkmon( - plan, el_forkmon_config_template, 
all_el_client_contexts + plan, + el_forkmon_config_template, + all_el_client_contexts, + global_node_selectors, ) plan.print("Successfully launched execution layer forkmon") elif additional_service == "beacon_metrics_gazer": @@ -319,6 +334,7 @@ def run(plan, args={}): plan, all_cl_client_contexts, network_params, + global_node_selectors, ) ) launch_prometheus_grafana = True @@ -329,7 +345,10 @@ def run(plan, args={}): elif additional_service == "blockscout": plan.print("Launching blockscout") blockscout_sc_verif_url = blockscout.launch_blockscout( - plan, all_el_client_contexts, persistent + plan, + all_el_client_contexts, + persistent, + global_node_selectors, ) plan.print("Successfully launched blockscout") elif additional_service == "dora": @@ -342,6 +361,7 @@ def run(plan, args={}): el_cl_data_files_artifact_uuid, network_params.electra_fork_epoch, network_params.network, + global_node_selectors, ) plan.print("Successfully launched dora") elif additional_service == "blobscan": @@ -352,6 +372,7 @@ def run(plan, args={}): all_el_client_contexts, network_params.network_id, persistent, + global_node_selectors, ) plan.print("Successfully launched blobscan") elif additional_service == "full_beaconchain_explorer": @@ -365,6 +386,7 @@ def run(plan, args={}): all_cl_client_contexts, all_el_client_contexts, persistent, + global_node_selectors, ) plan.print("Successfully launched full-beaconchain-explorer") elif additional_service == "prometheus_grafana": @@ -383,6 +405,7 @@ def run(plan, args={}): args_with_right_defaults.participants, network_params, assertoor_params, + global_node_selectors, ) plan.print("Successfully launched assertoor") elif additional_service == "custom_flood": @@ -392,6 +415,7 @@ def run(plan, args={}): genesis_constants.PRE_FUNDED_ACCOUNTS[0].address, fuzz_target, args_with_right_defaults.custom_flood_params, + global_node_selectors, ) else: fail("Invalid additional service %s" % (additional_service)) @@ -404,6 +428,7 @@ def run(plan, args={}): 
prometheus_additional_metrics_jobs, all_ethereum_metrics_exporter_contexts, all_xatu_sentry_contexts, + global_node_selectors, ) plan.print("Launching grafana...") @@ -412,6 +437,7 @@ def run(plan, args={}): grafana_datasource_config_template, grafana_dashboards_config_template, prometheus_private_url, + global_node_selectors, additional_dashboards=args_with_right_defaults.grafana_additional_dashboards, ) plan.print("Successfully launched grafana") diff --git a/network_params.yaml b/network_params.yaml index 2c025b322..d03330642 100644 --- a/network_params.yaml +++ b/network_params.yaml @@ -11,6 +11,7 @@ participants: cl_tolerations: [] validator_tolerations: [] tolerations: [] + node_selectors: {} beacon_extra_params: [] beacon_extra_labels: {} validator_extra_params: [] @@ -87,3 +88,4 @@ grafana_additional_dashboards: [] persistent: false xatu_sentry_enabled: false global_tolerations: [] +global_node_selectors: {} diff --git a/src/assertoor/assertoor_launcher.star b/src/assertoor/assertoor_launcher.star index 9a41d8e31..94a73d0cc 100644 --- a/src/assertoor/assertoor_launcher.star +++ b/src/assertoor/assertoor_launcher.star @@ -36,6 +36,7 @@ def launch_assertoor( participant_configs, network_params, assertoor_params, + global_node_selectors, ): all_client_info = [] validator_client_info = [] @@ -91,6 +92,7 @@ def launch_assertoor( tests_config_artifacts_name, network_params, assertoor_params, + global_node_selectors, ) plan.add_service(SERVICE_NAME, config) @@ -101,6 +103,7 @@ def get_config( tests_config_artifacts_name, network_params, assertoor_params, + node_selectors, ): config_file_path = shared_utils.path_join( ASSERTOOR_CONFIG_MOUNT_DIRPATH_ON_SERVICE, @@ -127,6 +130,7 @@ def get_config( max_cpu=MAX_CPU, min_memory=MIN_MEMORY, max_memory=MAX_MEMORY, + node_selectors=node_selectors, ) diff --git a/src/beacon_metrics_gazer/beacon_metrics_gazer_launcher.star b/src/beacon_metrics_gazer/beacon_metrics_gazer_launcher.star index bce574e50..2d0b6739c 100644 --- 
a/src/beacon_metrics_gazer/beacon_metrics_gazer_launcher.star +++ b/src/beacon_metrics_gazer/beacon_metrics_gazer_launcher.star @@ -31,10 +31,16 @@ MIN_MEMORY = 20 MAX_MEMORY = 300 -def launch_beacon_metrics_gazer(plan, cl_client_contexts, network_params): +def launch_beacon_metrics_gazer( + plan, + cl_client_contexts, + network_params, + global_node_selectors, +): config = get_config( cl_client_contexts[0].ip_addr, cl_client_contexts[0].http_port_num, + global_node_selectors, ) beacon_metrics_gazer_service = plan.add_service(SERVICE_NAME, config) @@ -51,7 +57,7 @@ def launch_beacon_metrics_gazer(plan, cl_client_contexts, network_params): ) -def get_config(ip_addr, http_port_num): +def get_config(ip_addr, http_port_num, node_selectors): config_file_path = shared_utils.path_join( BEACON_METRICS_GAZER_CONFIG_MOUNT_DIRPATH_ON_SERVICE, BEACON_METRICS_GAZER_CONFIG_FILENAME, @@ -76,4 +82,5 @@ def get_config(ip_addr, http_port_num): max_cpu=MAX_CPU, min_memory=MIN_MEMORY, max_memory=MAX_MEMORY, + node_selectors=node_selectors, ) diff --git a/src/blob_spammer/blob_spammer.star b/src/blob_spammer/blob_spammer.star index 10cadc5cf..c294e60ff 100644 --- a/src/blob_spammer/blob_spammer.star +++ b/src/blob_spammer/blob_spammer.star @@ -18,6 +18,7 @@ def launch_blob_spammer( deneb_fork_epoch, seconds_per_slot, genesis_delay, + global_node_selectors, ): config = get_config( prefunded_addresses, @@ -26,6 +27,7 @@ def launch_blob_spammer( deneb_fork_epoch, seconds_per_slot, genesis_delay, + global_node_selectors, ) plan.add_service(SERVICE_NAME, config) @@ -37,6 +39,7 @@ def get_config( deneb_fork_epoch, seconds_per_slot, genesis_delay, + node_selectors, ): dencunTime = (deneb_fork_epoch * 32 * seconds_per_slot) + genesis_delay return ServiceConfig( @@ -68,4 +71,5 @@ def get_config( max_cpu=MAX_CPU, min_memory=MIN_MEMORY, max_memory=MAX_MEMORY, + node_selectors=node_selectors, ) diff --git a/src/blobber/blobber_launcher.star b/src/blobber/blobber_launcher.star index 
0f2873ace..8d9438f97 100644 --- a/src/blobber/blobber_launcher.star +++ b/src/blobber/blobber_launcher.star @@ -35,11 +35,22 @@ MIN_MEMORY = 10 MAX_MEMORY = 300 -def launch(plan, service_name, node_keystore_files, beacon_http_url, extra_params): +def launch( + plan, + service_name, + node_keystore_files, + beacon_http_url, + extra_params, + node_selectors, +): blobber_service_name = "{0}".format(service_name) blobber_config = get_config( - service_name, node_keystore_files, beacon_http_url, extra_params + service_name, + node_keystore_files, + beacon_http_url, + extra_params, + node_selectors, ) blobber_service = plan.add_service(blobber_service_name, blobber_config) @@ -49,7 +60,13 @@ def launch(plan, service_name, node_keystore_files, beacon_http_url, extra_param ) -def get_config(service_name, node_keystore_files, beacon_http_url, extra_params): +def get_config( + service_name, + node_keystore_files, + beacon_http_url, + extra_params, + node_selectors, +): validator_root_dirpath = shared_utils.path_join( VALIDATOR_KEYS_MOUNTPOINT_ON_CLIENTS, node_keystore_files.raw_root_dirpath, @@ -78,4 +95,5 @@ def get_config(service_name, node_keystore_files, beacon_http_url, extra_params) max_cpu=MAX_CPU, min_memory=MIN_MEMORY, max_memory=MAX_MEMORY, + node_selectors=node_selectors, ) diff --git a/src/blobscan/blobscan_launcher.star b/src/blobscan/blobscan_launcher.star index a8a22ea81..2a5c90591 100644 --- a/src/blobscan/blobscan_launcher.star +++ b/src/blobscan/blobscan_launcher.star @@ -59,7 +59,9 @@ def launch_blobscan( el_client_contexts, chain_id, persistent, + global_node_selectors, ): + node_selectors = global_node_selectors beacon_node_rpc_uri = "http://{0}:{1}".format( cl_client_contexts[0].ip_addr, cl_client_contexts[0].http_port_num ) @@ -75,24 +77,43 @@ def launch_blobscan( min_memory=POSTGRES_MIN_MEMORY, max_memory=POSTGRES_MAX_MEMORY, persistent=persistent, + node_selectors=node_selectors, + ) + api_config = get_api_config( + postgres_output.url, + 
beacon_node_rpc_uri, + chain_id, + node_selectors, ) - api_config = get_api_config(postgres_output.url, beacon_node_rpc_uri, chain_id) blobscan_config = plan.add_service(API_SERVICE_NAME, api_config) blobscan_api_url = "http://{0}:{1}".format( blobscan_config.ip_address, blobscan_config.ports[HTTP_PORT_ID].number ) - web_config = get_web_config(postgres_output.url, beacon_node_rpc_uri, chain_id) + web_config = get_web_config( + postgres_output.url, + beacon_node_rpc_uri, + chain_id, + node_selectors, + ) plan.add_service(WEB_SERVICE_NAME, web_config) indexer_config = get_indexer_config( - beacon_node_rpc_uri, execution_node_rpc_uri, blobscan_api_url + beacon_node_rpc_uri, + execution_node_rpc_uri, + blobscan_api_url, + node_selectors, ) plan.add_service(INDEXER_SERVICE_NAME, indexer_config) -def get_api_config(database_url, beacon_node_rpc, chain_id): +def get_api_config( + database_url, + beacon_node_rpc, + chain_id, + node_selectors, +): IMAGE_NAME = "blossomlabs/blobscan:stable" return ServiceConfig( @@ -121,10 +142,11 @@ def get_api_config(database_url, beacon_node_rpc, chain_id): max_cpu=API_MAX_CPU, min_memory=API_MIN_MEMORY, max_memory=API_MAX_MEMORY, + node_selectors=node_selectors, ) -def get_web_config(database_url, beacon_node_rpc, chain_id): +def get_web_config(database_url, beacon_node_rpc, chain_id, node_selectors): # TODO: https://github.com/kurtosis-tech/kurtosis/issues/1861 # Configure NEXT_PUBLIC_BEACON_BASE_URL and NEXT_PUBLIC_EXPLORER_BASE env vars # once retrieving external URLs from services are supported in Kurtosis. 
@@ -145,10 +167,16 @@ def get_web_config(database_url, beacon_node_rpc, chain_id): max_cpu=WEB_MAX_CPU, min_memory=WEB_MIN_MEMORY, max_memory=WEB_MAX_MEMORY, + node_selectors=node_selectors, ) -def get_indexer_config(beacon_node_rpc, execution_node_rpc, blobscan_api_url): +def get_indexer_config( + beacon_node_rpc, + execution_node_rpc, + blobscan_api_url, + node_selectors, +): IMAGE_NAME = "blossomlabs/blobscan-indexer:master" return ServiceConfig( @@ -165,4 +193,5 @@ def get_indexer_config(beacon_node_rpc, execution_node_rpc, blobscan_api_url): max_cpu=INDEX_MAX_CPU, min_memory=INDEX_MIN_MEMORY, max_memory=INDEX_MAX_MEMORY, + node_selectors=node_selectors, ) diff --git a/src/blockscout/blockscout_launcher.star b/src/blockscout/blockscout_launcher.star index 23ce9a366..5c515d9d7 100644 --- a/src/blockscout/blockscout_launcher.star +++ b/src/blockscout/blockscout_launcher.star @@ -38,13 +38,19 @@ VERIF_USED_PORTS = { } -def launch_blockscout(plan, el_client_contexts, persistent): +def launch_blockscout( + plan, + el_client_contexts, + persistent, + global_node_selectors, +): postgres_output = postgres.run( plan, service_name="{}-postgres".format(SERVICE_NAME_BLOCKSCOUT), database="blockscout", extra_configs=["max_connections=1000"], persistent=persistent, + node_selectors=global_node_selectors, ) el_client_context = el_client_contexts[0] @@ -53,7 +59,7 @@ def launch_blockscout(plan, el_client_contexts, persistent): ) el_client_name = el_client_context.client_name - config_verif = get_config_verif() + config_verif = get_config_verif(global_node_selectors) verif_service_name = "{}-verif".format(SERVICE_NAME_BLOCKSCOUT) verif_service = plan.add_service(verif_service_name, config_verif) verif_url = "http://{}:{}/api".format( @@ -61,7 +67,11 @@ def launch_blockscout(plan, el_client_contexts, persistent): ) config_backend = get_config_backend( - postgres_output, el_client_rpc_url, verif_url, el_client_name + postgres_output, + el_client_rpc_url, + verif_url, + 
el_client_name, + global_node_selectors, ) blockscout_service = plan.add_service(SERVICE_NAME_BLOCKSCOUT, config_backend) plan.print(blockscout_service) @@ -73,7 +83,7 @@ def launch_blockscout(plan, el_client_contexts, persistent): return blockscout_url -def get_config_verif(): +def get_config_verif(node_selectors): return ServiceConfig( image=IMAGE_NAME_BLOCKSCOUT_VERIF, ports=VERIF_USED_PORTS, @@ -86,10 +96,13 @@ def get_config_verif(): max_cpu=BLOCKSCOUT_VERIF_MAX_CPU, min_memory=BLOCKSCOUT_VERIF_MIN_MEMORY, max_memory=BLOCKSCOUT_VERIF_MAX_MEMORY, + node_selectors=node_selectors, ) -def get_config_backend(postgres_output, el_client_rpc_url, verif_url, el_client_name): +def get_config_backend( + postgres_output, el_client_rpc_url, verif_url, el_client_name, node_selectors +): database_url = "{protocol}://{user}:{password}@{hostname}:{port}/{database}".format( protocol="postgresql", user=postgres_output.user, @@ -128,4 +141,5 @@ def get_config_backend(postgres_output, el_client_rpc_url, verif_url, el_client_ max_cpu=BLOCKSCOUT_MAX_CPU, min_memory=BLOCKSCOUT_MIN_MEMORY, max_memory=BLOCKSCOUT_MAX_MEMORY, + node_selectors=node_selectors, ) diff --git a/src/broadcaster/broadcaster.star b/src/broadcaster/broadcaster.star index 5a26b56bb..1cb671598 100644 --- a/src/broadcaster/broadcaster.star +++ b/src/broadcaster/broadcaster.star @@ -9,12 +9,15 @@ MIN_MEMORY = 128 MAX_MEMORY = 2048 -def launch_broadcaster(plan, all_el_client_contexts): - config = get_config(all_el_client_contexts) +def launch_broadcaster(plan, all_el_client_contexts, global_node_selectors): + config = get_config(all_el_client_contexts, global_node_selectors) return plan.add_service(SERVICE_NAME, config) -def get_config(all_el_client_contexts): +def get_config( + all_el_client_contexts, + node_selectors, +): return ServiceConfig( image=IMAGE_NAME, cmd=[ @@ -25,4 +28,5 @@ def get_config(all_el_client_contexts): max_cpu=MAX_CPU, min_memory=MIN_MEMORY, max_memory=MAX_MEMORY, + 
node_selectors=node_selectors, ) diff --git a/src/cl/lighthouse/lighthouse_launcher.star b/src/cl/lighthouse/lighthouse_launcher.star index 53b68d9ff..c22694847 100644 --- a/src/cl/lighthouse/lighthouse_launcher.star +++ b/src/cl/lighthouse/lighthouse_launcher.star @@ -123,6 +123,7 @@ def launch( validator_tolerations, participant_tolerations, global_tolerations, + node_selectors, split_mode_enabled=False, ): beacon_service_name = "{0}".format(service_name) @@ -181,6 +182,7 @@ def launch( persistent, cl_volume_size, tolerations, + node_selectors, ) beacon_service = plan.add_service(beacon_service_name, beacon_config) @@ -197,6 +199,7 @@ def launch( node_keystore_files, beacon_http_url, blobber_extra_params, + node_selectors, ) blobber_service = plan.add_service(blobber_service_name, blobber_config) @@ -234,6 +237,7 @@ def launch( extra_validator_labels, persistent, tolerations, + node_selectors, ) validator_service = plan.add_service(validator_service_name, validator_config) @@ -313,6 +317,7 @@ def get_beacon_config( persistent, cl_volume_size, tolerations, + node_selectors, ): # If snooper is enabled use the snooper engine context, otherwise use the execution client context if snooper_enabled: @@ -465,6 +470,7 @@ def get_beacon_config( extra_labels, ), tolerations=tolerations, + node_selectors=node_selectors, ) @@ -484,6 +490,7 @@ def get_validator_config( extra_labels, persistent, tolerations, + node_selectors, ): validator_keys_dirpath = shared_utils.path_join( VALIDATOR_KEYS_MOUNTPOINT_ON_CLIENTS, @@ -550,6 +557,7 @@ def get_validator_config( extra_labels, ), tolerations=tolerations, + node_selectors=node_selectors, ) diff --git a/src/cl/lodestar/lodestar_launcher.star b/src/cl/lodestar/lodestar_launcher.star index f12b6c473..4ca3abd5b 100644 --- a/src/cl/lodestar/lodestar_launcher.star +++ b/src/cl/lodestar/lodestar_launcher.star @@ -102,6 +102,7 @@ def launch( validator_tolerations, participant_tolerations, global_tolerations, + node_selectors, 
split_mode_enabled=False, ): beacon_service_name = "{0}".format(service_name) @@ -159,6 +160,7 @@ def launch( persistent, cl_volume_size, tolerations, + node_selectors, ) beacon_service = plan.add_service(beacon_service_name, beacon_config) @@ -213,6 +215,7 @@ def launch( extra_validator_labels, persistent, tolerations, + node_selectors, ) plan.add_service(validator_service_name, validator_config) @@ -284,6 +287,7 @@ def get_beacon_config( persistent, cl_volume_size, tolerations, + node_selectors, ): el_client_rpc_url_str = "http://{0}:{1}".format( el_client_context.ip_addr, @@ -417,6 +421,7 @@ def get_beacon_config( extra_labels, ), tolerations=tolerations, + node_selectors=node_selectors, ) @@ -436,6 +441,7 @@ def get_validator_config( extra_labels, persistent, tolerations, + node_selectors, ): root_dirpath = shared_utils.path_join( VALIDATOR_DATA_DIRPATH_ON_SERVICE_CONTAINER, service_name @@ -500,6 +506,7 @@ def get_validator_config( extra_labels, ), tolerations=tolerations, + node_selectors=node_selectors, ) diff --git a/src/cl/nimbus/nimbus_launcher.star b/src/cl/nimbus/nimbus_launcher.star index 0c066d917..71a286c56 100644 --- a/src/cl/nimbus/nimbus_launcher.star +++ b/src/cl/nimbus/nimbus_launcher.star @@ -137,6 +137,7 @@ def launch( validator_tolerations, participant_tolerations, global_tolerations, + node_selectors, split_mode_enabled, ): beacon_service_name = "{0}".format(service_name) @@ -196,6 +197,7 @@ def launch( persistent, cl_volume_size, tolerations, + node_selectors, ) beacon_service = plan.add_service(beacon_service_name, beacon_config) @@ -255,6 +257,7 @@ def launch( extra_validator_labels, persistent, tolerations, + node_selectors, ) validator_service = plan.add_service(validator_service_name, validator_config) @@ -310,6 +313,7 @@ def get_beacon_config( persistent, cl_volume_size, tolerations, + node_selectors, ): validator_keys_dirpath = "" validator_secrets_dirpath = "" @@ -439,6 +443,7 @@ def get_beacon_config( ), user=User(uid=0, gid=0), 
tolerations=tolerations, + node_selectors=node_selectors, ) @@ -458,6 +463,7 @@ def get_validator_config( extra_labels, persistent, tolerations, + node_selectors, ): validator_keys_dirpath = "" validator_secrets_dirpath = "" @@ -512,6 +518,7 @@ def get_validator_config( extra_labels, ), tolerations=tolerations, + node_selectors=node_selectors, ) diff --git a/src/cl/prysm/prysm_launcher.star b/src/cl/prysm/prysm_launcher.star index dd86ce1cc..697694d6f 100644 --- a/src/cl/prysm/prysm_launcher.star +++ b/src/cl/prysm/prysm_launcher.star @@ -114,6 +114,7 @@ def launch( validator_tolerations, participant_tolerations, global_tolerations, + node_selectors, split_mode_enabled=False, ): split_images = images.split(IMAGE_SEPARATOR_DELIMITER) @@ -185,6 +186,7 @@ def launch( persistent, cl_volume_size, tolerations, + node_selectors, ) beacon_service = plan.add_service(beacon_service_name, beacon_config) @@ -223,6 +225,7 @@ def launch( launcher.prysm_password_artifact_uuid, persistent, tolerations, + node_selectors, ) validator_service = plan.add_service(validator_service_name, validator_config) @@ -302,6 +305,7 @@ def get_beacon_config( persistent, cl_volume_size, tolerations, + node_selectors, ): # If snooper is enabled use the snooper engine context, otherwise use the execution client context if snooper_enabled: @@ -433,6 +437,7 @@ def get_beacon_config( extra_labels, ), tolerations=tolerations, + node_selectors=node_selectors, ) @@ -455,6 +460,7 @@ def get_validator_config( prysm_password_artifact_uuid, persistent, tolerations, + node_selectors, ): validator_keys_dirpath = shared_utils.path_join( VALIDATOR_KEYS_MOUNT_DIRPATH_ON_SERVICE_CONTAINER, @@ -520,6 +526,7 @@ def get_validator_config( extra_labels, ), tolerations=tolerations, + node_selectors=node_selectors, ) diff --git a/src/cl/teku/teku_launcher.star b/src/cl/teku/teku_launcher.star index 0fd47bd5d..80aae97b8 100644 --- a/src/cl/teku/teku_launcher.star +++ b/src/cl/teku/teku_launcher.star @@ -127,6 +127,7 @@ def 
launch( validator_tolerations, participant_tolerations, global_tolerations, + node_selectors, split_mode_enabled, ): beacon_service_name = "{0}".format(service_name) @@ -145,13 +146,6 @@ def launch( param for param in extra_validator_params ] - # Holesky has a bigger memory footprint, so it needs more memory - if launcher.network == "holesky": - holesky_beacon_memory_limit = 4096 - bn_max_mem = ( - int(bn_max_mem) if int(bn_max_mem) > 0 else holesky_beacon_memory_limit - ) - network_name = shared_utils.get_network_name(launcher.network) bn_min_cpu = int(bn_min_cpu) if int(bn_min_cpu) > 0 else BEACON_MIN_CPU @@ -196,6 +190,7 @@ def launch( persistent, cl_volume_size, tolerations, + node_selectors, ) beacon_service = plan.add_service(service_name, config) @@ -258,6 +253,7 @@ def launch( extra_validator_labels, persistent, tolerations, + node_selectors, ) validator_service = plan.add_service(validator_service_name, validator_config) @@ -313,6 +309,7 @@ def get_beacon_config( persistent, cl_volume_size, tolerations, + node_selectors, ): validator_keys_dirpath = "" validator_secrets_dirpath = "" @@ -492,6 +489,7 @@ def get_beacon_config( ), user=User(uid=0, gid=0), tolerations=tolerations, + node_selectors=node_selectors, ) @@ -512,6 +510,7 @@ def get_validator_config( extra_labels, persistent, tolerations, + node_selectors, ): validator_keys_dirpath = "" validator_secrets_dirpath = "" @@ -577,6 +576,7 @@ def get_validator_config( extra_labels, ), tolerations=tolerations, + node_selectors=node_selectors, ) diff --git a/src/dora/dora_launcher.star b/src/dora/dora_launcher.star index d862b7ceb..568fd230d 100644 --- a/src/dora/dora_launcher.star +++ b/src/dora/dora_launcher.star @@ -34,6 +34,7 @@ def launch_dora( el_cl_data_files_artifact_uuid, electra_fork_epoch, network, + global_node_selectors, ): all_cl_client_info = [] for index, client in enumerate(cl_client_contexts): @@ -62,6 +63,7 @@ def launch_dora( el_cl_data_files_artifact_uuid, electra_fork_epoch, network, + 
global_node_selectors, ) plan.add_service(SERVICE_NAME, config) @@ -72,6 +74,7 @@ def get_config( el_cl_data_files_artifact_uuid, electra_fork_epoch, network, + node_selectors, ): config_file_path = shared_utils.path_join( DORA_CONFIG_MOUNT_DIRPATH_ON_SERVICE, @@ -97,6 +100,7 @@ def get_config( max_cpu=MAX_CPU, min_memory=MIN_MEMORY, max_memory=MAX_MEMORY, + node_selectors=node_selectors, ) diff --git a/src/el/besu/besu_launcher.star b/src/el/besu/besu_launcher.star index ac117880f..7b6900719 100644 --- a/src/el/besu/besu_launcher.star +++ b/src/el/besu/besu_launcher.star @@ -76,16 +76,12 @@ def launch( extra_labels, persistent, el_volume_size, - el_tolerations, - participant_tolerations, - global_tolerations, + tolerations, + node_selectors, ): log_level = input_parser.get_client_log_level_or_default( participant_log_level, global_log_level, VERBOSITY_LEVELS ) - tolerations = input_parser.get_client_tolerations( - el_tolerations, participant_tolerations, global_tolerations - ) network_name = shared_utils.get_network_name(launcher.network) @@ -130,6 +126,7 @@ def launch( persistent, el_volume_size, tolerations, + node_selectors, ) service = plan.add_service(service_name, config) @@ -174,6 +171,7 @@ def get_config( persistent, el_volume_size, tolerations, + node_selectors, ): cmd = [ "besu", @@ -272,6 +270,7 @@ def get_config( ), user=User(uid=0, gid=0), tolerations=tolerations, + node_selectors=node_selectors, ) diff --git a/src/el/erigon/erigon_launcher.star b/src/el/erigon/erigon_launcher.star index cfee870c0..02539e8d0 100644 --- a/src/el/erigon/erigon_launcher.star +++ b/src/el/erigon/erigon_launcher.star @@ -76,16 +76,12 @@ def launch( extra_labels, persistent, el_volume_size, - el_tolerations, - participant_tolerations, - global_tolerations, + tolerations, + node_selectors, ): log_level = input_parser.get_client_log_level_or_default( participant_log_level, global_log_level, VERBOSITY_LEVELS ) - tolerations = input_parser.get_client_tolerations( - 
el_tolerations, participant_tolerations, global_tolerations - ) network_name = shared_utils.get_network_name(launcher.network) @@ -132,6 +128,7 @@ def launch( persistent, el_volume_size, tolerations, + node_selectors, ) service = plan.add_service(service_name, config) @@ -180,6 +177,7 @@ def get_config( persistent, el_volume_size, tolerations, + node_selectors, ): init_datadir_cmd_str = "erigon init --datadir={0} {1}".format( EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER, @@ -294,6 +292,7 @@ def get_config( ), user=User(uid=0, gid=0), tolerations=tolerations, + node_selectors=node_selectors, ) diff --git a/src/el/ethereumjs/ethereumjs_launcher.star b/src/el/ethereumjs/ethereumjs_launcher.star index aa0eaeb63..d15e17717 100644 --- a/src/el/ethereumjs/ethereumjs_launcher.star +++ b/src/el/ethereumjs/ethereumjs_launcher.star @@ -78,16 +78,12 @@ def launch( extra_labels, persistent, el_volume_size, - el_tolerations, - participant_tolerations, - global_tolerations, + tolerations, + node_selectors, ): log_level = input_parser.get_client_log_level_or_default( participant_log_level, global_log_level, VERBOSITY_LEVELS ) - tolerations = input_parser.get_client_tolerations( - el_tolerations, participant_tolerations, global_tolerations - ) network_name = shared_utils.get_network_name(launcher.network) @@ -132,6 +128,7 @@ def launch( persistent, el_volume_size, tolerations, + node_selectors, ) service = plan.add_service(service_name, config) @@ -175,6 +172,7 @@ def get_config( persistent, el_volume_size, tolerations, + node_selectors, ): cmd = [ "--dataDir=" + EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER, @@ -260,6 +258,7 @@ def get_config( extra_labels, ), tolerations=tolerations, + node_selectors=node_selectors, ) diff --git a/src/el/geth/geth_launcher.star b/src/el/geth/geth_launcher.star index 830beb98e..e2a61c044 100644 --- a/src/el/geth/geth_launcher.star +++ b/src/el/geth/geth_launcher.star @@ -87,16 +87,12 @@ def launch( extra_labels, persistent, el_volume_size, - 
el_tolerations, - participant_tolerations, - global_tolerations, + tolerations, + node_selectors, ): log_level = input_parser.get_client_log_level_or_default( participant_log_level, global_log_level, VERBOSITY_LEVELS ) - tolerations = input_parser.get_client_tolerations( - el_tolerations, participant_tolerations, global_tolerations - ) network_name = shared_utils.get_network_name(launcher.network) @@ -146,6 +142,7 @@ def launch( persistent, el_volume_size, tolerations, + node_selectors, ) service = plan.add_service(service_name, config) @@ -197,6 +194,7 @@ def get_config( persistent, el_volume_size, tolerations, + node_selectors, ): # TODO: Remove this once electra fork has path based storage scheme implemented if ( @@ -379,6 +377,7 @@ def get_config( extra_labels, ), tolerations=tolerations, + node_selectors=node_selectors, ) diff --git a/src/el/nethermind/nethermind_launcher.star b/src/el/nethermind/nethermind_launcher.star index ba7be24a7..9a90592ea 100644 --- a/src/el/nethermind/nethermind_launcher.star +++ b/src/el/nethermind/nethermind_launcher.star @@ -74,16 +74,12 @@ def launch( extra_labels, persistent, el_volume_size, - el_tolerations, - participant_tolerations, - global_tolerations, + tolerations, + node_selectors, ): log_level = input_parser.get_client_log_level_or_default( participant_log_level, global_log_level, VERBOSITY_LEVELS ) - tolerations = input_parser.get_client_tolerations( - el_tolerations, participant_tolerations, global_tolerations - ) network_name = shared_utils.get_network_name(launcher.network) @@ -128,6 +124,7 @@ def launch( persistent, el_volume_size, tolerations, + node_selectors, ) service = plan.add_service(service_name, config) @@ -173,6 +170,7 @@ def get_config( persistent, el_volume_size, tolerations, + node_selectors, ): cmd = [ "--log=" + log_level, @@ -268,6 +266,7 @@ def get_config( extra_labels, ), tolerations=tolerations, + node_selectors=node_selectors, ) diff --git a/src/el/reth/reth_launcher.star 
b/src/el/reth/reth_launcher.star index 8faf1d6ed..c391aea86 100644 --- a/src/el/reth/reth_launcher.star +++ b/src/el/reth/reth_launcher.star @@ -77,16 +77,12 @@ def launch( extra_labels, persistent, el_volume_size, - el_tolerations, - participant_tolerations, - global_tolerations, + tolerations, + node_selectors, ): log_level = input_parser.get_client_log_level_or_default( participant_log_level, global_log_level, VERBOSITY_LEVELS ) - tolerations = input_parser.get_client_tolerations( - el_tolerations, participant_tolerations, global_tolerations - ) network_name = shared_utils.get_network_name(launcher.network) @@ -131,6 +127,7 @@ def launch( persistent, el_volume_size, tolerations, + node_selectors, ) service = plan.add_service(service_name, config) @@ -175,6 +172,7 @@ def get_config( persistent, el_volume_size, tolerations, + node_selectors, ): init_datadir_cmd_str = "reth init --datadir={0} --chain={1}".format( EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER, @@ -274,6 +272,7 @@ def get_config( extra_labels, ), tolerations=tolerations, + node_selectors=node_selectors, ) diff --git a/src/el_forkmon/el_forkmon_launcher.star b/src/el_forkmon/el_forkmon_launcher.star index 0f0beceee..a03df2999 100644 --- a/src/el_forkmon/el_forkmon_launcher.star +++ b/src/el_forkmon/el_forkmon_launcher.star @@ -30,6 +30,7 @@ def launch_el_forkmon( plan, config_template, el_client_contexts, + global_node_selectors, ): all_el_client_info = [] for client in el_client_contexts: @@ -52,12 +53,15 @@ def launch_el_forkmon( template_and_data_by_rel_dest_filepath, "el-forkmon-config" ) - config = get_config(config_files_artifact_name) + config = get_config( + config_files_artifact_name, + global_node_selectors, + ) plan.add_service(SERVICE_NAME, config) -def get_config(config_files_artifact_name): +def get_config(config_files_artifact_name, node_selectors): config_file_path = shared_utils.path_join( EL_FORKMON_CONFIG_MOUNT_DIRPATH_ON_SERVICE, EL_FORKMON_CONFIG_FILENAME ) @@ -72,6 +76,7 @@ def 
get_config(config_files_artifact_name): max_cpu=MAX_CPU, min_memory=MIN_MEMORY, max_memory=MAX_MEMORY, + node_selectors=node_selectors, ) diff --git a/src/ethereum_metrics_exporter/ethereum_metrics_exporter_launcher.star b/src/ethereum_metrics_exporter/ethereum_metrics_exporter_launcher.star index 608ccd6df..ba5321cc7 100644 --- a/src/ethereum_metrics_exporter/ethereum_metrics_exporter_launcher.star +++ b/src/ethereum_metrics_exporter/ethereum_metrics_exporter_launcher.star @@ -22,6 +22,7 @@ def launch( ethereum_metrics_exporter_service_name, el_client_context, cl_client_context, + node_selectors, ): exporter_service = plan.add_service( ethereum_metrics_exporter_service_name, @@ -52,6 +53,7 @@ def launch( max_cpu=MAX_CPU, min_memory=MIN_MEMORY, max_memory=MAX_MEMORY, + node_selectors=node_selectors, ), ) diff --git a/src/full_beaconchain/full_beaconchain_launcher.star b/src/full_beaconchain/full_beaconchain_launcher.star index 1c22e232d..f0f87d37d 100644 --- a/src/full_beaconchain/full_beaconchain_launcher.star +++ b/src/full_beaconchain/full_beaconchain_launcher.star @@ -97,7 +97,9 @@ def launch_full_beacon( cl_client_contexts, el_client_contexts, persistent, + global_node_selectors, ): + node_selectors = global_node_selectors postgres_output = postgres.run( plan, service_name="beaconchain-postgres", @@ -110,6 +112,7 @@ def launch_full_beacon( min_memory=POSTGRES_MIN_MEMORY, max_memory=POSTGRES_MAX_MEMORY, persistent=persistent, + node_selectors=node_selectors, ) redis_output = redis.run( plan, @@ -119,6 +122,7 @@ def launch_full_beacon( max_cpu=REDIS_MAX_CPU, min_memory=REDIS_MIN_MEMORY, max_memory=REDIS_MAX_MEMORY, + node_selectors=node_selectors, ) # TODO perhaps create a new service for the littlebigtable little_bigtable = plan.add_service( @@ -134,6 +138,7 @@ def launch_full_beacon( max_cpu=LITTLE_BIGTABLE_MAX_CPU, min_memory=LITTLE_BIGTABLE_MIN_MEMORY, max_memory=LITTLE_BIGTABLE_MAX_MEMORY, + node_selectors=node_selectors, ), ) @@ -178,6 +183,7 @@ def 
launch_full_beacon( max_cpu=INIT_MAX_CPU, min_memory=INIT_MIN_MEMORY, max_memory=INIT_MAX_MEMORY, + node_selectors=node_selectors, ), ) @@ -224,6 +230,7 @@ def launch_full_beacon( max_cpu=INDEXER_MAX_CPU, min_memory=INDEXER_MIN_MEMORY, max_memory=INDEXER_MAX_MEMORY, + node_selectors=node_selectors, ), ) # Start the eth1indexer @@ -250,6 +257,7 @@ def launch_full_beacon( max_cpu=ETH1INDEXER_MAX_CPU, min_memory=ETH1INDEXER_MIN_MEMORY, max_memory=ETH1INDEXER_MAX_MEMORY, + node_selectors=node_selectors, ), ) @@ -269,6 +277,7 @@ def launch_full_beacon( max_cpu=REWARDSEXPORTER_MAX_CPU, min_memory=REWARDSEXPORTER_MIN_MEMORY, max_memory=REWARDSEXPORTER_MAX_MEMORY, + node_selectors=node_selectors, ), ) @@ -291,6 +300,7 @@ def launch_full_beacon( max_cpu=STATISTICS_MAX_CPU, min_memory=STATISTICS_MIN_MEMORY, max_memory=STATISTICS_MAX_MEMORY, + node_selectors=node_selectors, ), ) @@ -310,6 +320,7 @@ def launch_full_beacon( max_cpu=FDU_MAX_CPU, min_memory=FDU_MIN_MEMORY, max_memory=FDU_MAX_MEMORY, + node_selectors=node_selectors, ), ) @@ -337,6 +348,7 @@ def launch_full_beacon( max_cpu=FRONTEND_MAX_CPU, min_memory=FRONTEND_MIN_MEMORY, max_memory=FRONTEND_MAX_MEMORY, + node_selectors=node_selectors, ), ) diff --git a/src/goomy_blob/goomy_blob.star b/src/goomy_blob/goomy_blob.star index d62c2c756..b6f55c6f2 100644 --- a/src/goomy_blob/goomy_blob.star +++ b/src/goomy_blob/goomy_blob.star @@ -17,6 +17,7 @@ def launch_goomy_blob( cl_client_context, seconds_per_slot, goomy_blob_params, + global_node_selectors, ): config = get_config( prefunded_addresses, @@ -24,6 +25,7 @@ def launch_goomy_blob( cl_client_context, seconds_per_slot, goomy_blob_params.goomy_blob_args, + global_node_selectors, ) plan.add_service(SERVICE_NAME, config) @@ -34,6 +36,7 @@ def get_config( cl_client_context, seconds_per_slot, goomy_blob_args, + node_selectors, ): goomy_cli_args = [] for index, client in enumerate(el_client_contexts): @@ -77,4 +80,5 @@ def get_config( max_cpu=MAX_CPU, min_memory=MIN_MEMORY, 
max_memory=MAX_MEMORY, + node_selectors=node_selectors, ) diff --git a/src/grafana/grafana_launcher.star b/src/grafana/grafana_launcher.star index b7f335214..eec063713 100644 --- a/src/grafana/grafana_launcher.star +++ b/src/grafana/grafana_launcher.star @@ -50,6 +50,7 @@ def launch_grafana( datasource_config_template, dashboard_providers_config_template, prometheus_private_url, + global_node_selectors, additional_dashboards=[], ): ( @@ -73,6 +74,7 @@ def launch_grafana( config = get_config( grafana_config_artifacts_uuid, merged_dashboards_artifact_name, + global_node_selectors, ) plan.add_service(SERVICE_NAME, config) @@ -127,6 +129,7 @@ def get_grafana_config_dir_artifact_uuid( def get_config( grafana_config_artifacts_name, grafana_dashboards_artifacts_name, + node_selectors, ): return ServiceConfig( image=IMAGE_NAME, @@ -146,6 +149,7 @@ def get_config( max_cpu=MAX_CPU, min_memory=MIN_MEMORY, max_memory=MAX_MEMORY, + node_selectors=node_selectors, ) diff --git a/src/mev/mev_boost/mev_boost_launcher.star b/src/mev/mev_boost/mev_boost_launcher.star index 904344692..a54da92a4 100644 --- a/src/mev/mev_boost/mev_boost_launcher.star +++ b/src/mev/mev_boost/mev_boost_launcher.star @@ -23,8 +23,20 @@ MIN_MEMORY = 16 MAX_MEMORY = 256 -def launch(plan, mev_boost_launcher, service_name, network_id, mev_boost_image): - config = get_config(mev_boost_launcher, network_id, mev_boost_image) +def launch( + plan, + mev_boost_launcher, + service_name, + network_id, + mev_boost_image, + global_node_selectors, +): + config = get_config( + mev_boost_launcher, + network_id, + mev_boost_image, + global_node_selectors, + ) mev_boost_service = plan.add_service(service_name, config) @@ -33,7 +45,12 @@ def launch(plan, mev_boost_launcher, service_name, network_id, mev_boost_image): ) -def get_config(mev_boost_launcher, network_id, mev_boost_image): +def get_config( + mev_boost_launcher, + network_id, + mev_boost_image, + node_selectors, +): command = ["mev-boost"] if 
mev_boost_launcher.should_check_relay: @@ -60,6 +77,7 @@ def get_config(mev_boost_launcher, network_id, mev_boost_image): max_cpu=MAX_CPU, min_memory=MIN_MEMORY, max_memory=MAX_MEMORY, + node_selectors=node_selectors, ) diff --git a/src/mev/mev_custom_flood/mev_custom_flood_launcher.star b/src/mev/mev_custom_flood/mev_custom_flood_launcher.star index 928682624..ce062b214 100644 --- a/src/mev/mev_custom_flood/mev_custom_flood_launcher.star +++ b/src/mev/mev_custom_flood/mev_custom_flood_launcher.star @@ -8,7 +8,14 @@ MIN_MEMORY = 128 MAX_MEMORY = 1024 -def spam_in_background(plan, sender_key, receiver_key, el_uri, params): +def spam_in_background( + plan, + sender_key, + receiver_key, + el_uri, + params, + global_node_selectors, +): sender_script = plan.upload_files(src="./sender.py", name="mev-custom-flood-sender") plan.add_service( @@ -26,6 +33,7 @@ def spam_in_background(plan, sender_key, receiver_key, el_uri, params): max_cpu=MAX_CPU, min_memory=MIN_MEMORY, max_memory=MAX_MEMORY, + node_selectors=global_node_selectors, ), ) diff --git a/src/mev/mev_flood/mev_flood_launcher.star b/src/mev/mev_flood/mev_flood_launcher.star index 1715e9e4a..7a8218409 100644 --- a/src/mev/mev_flood/mev_flood_launcher.star +++ b/src/mev/mev_flood/mev_flood_launcher.star @@ -12,7 +12,14 @@ def prefixed_address(address): return "0x" + address -def launch_mev_flood(plan, image, el_uri, contract_owner, normal_user): +def launch_mev_flood( + plan, + image, + el_uri, + contract_owner, + normal_user, + global_node_selectors, +): plan.add_service( name="mev-flood", config=ServiceConfig( @@ -22,6 +29,7 @@ def launch_mev_flood(plan, image, el_uri, contract_owner, normal_user): max_cpu=MAX_CPU, min_memory=MIN_MEMORY, max_memory=MAX_MEMORY, + node_selectors=global_node_selectors, ), ) diff --git a/src/mev/mev_relay/mev_relay_launcher.star b/src/mev/mev_relay/mev_relay_launcher.star index e56ec46bd..bb857912e 100644 --- a/src/mev/mev_relay/mev_relay_launcher.star +++ 
b/src/mev/mev_relay/mev_relay_launcher.star @@ -48,7 +48,9 @@ def launch_mev_relay( builder_uri, seconds_per_slot, persistent, + global_node_selectors, ): + node_selectors = global_node_selectors redis = redis_module.run( plan, service_name="mev-relay-redis", @@ -56,6 +58,7 @@ def launch_mev_relay( max_cpu=REDIS_MAX_CPU, min_memory=REDIS_MIN_MEMORY, max_memory=REDIS_MAX_MEMORY, + node_selectors=node_selectors, ) # making the password postgres as the relay expects it to be postgres postgres = postgres_module.run( @@ -70,6 +73,7 @@ def launch_mev_relay( max_cpu=POSTGRES_MAX_CPU, min_memory=POSTGRES_MIN_MEMORY, max_memory=POSTGRES_MAX_MEMORY, + node_selectors=node_selectors, ) network_name = NETWORK_ID_TO_NAME.get(network_id, network_id) @@ -110,6 +114,7 @@ def launch_mev_relay( max_cpu=RELAY_MAX_CPU, min_memory=RELAY_MIN_MEMORY, max_memory=RELAY_MAX_MEMORY, + node_selectors=node_selectors, ), ) @@ -145,6 +150,7 @@ def launch_mev_relay( max_cpu=RELAY_MAX_CPU, min_memory=RELAY_MIN_MEMORY, max_memory=RELAY_MAX_MEMORY, + node_selectors=node_selectors, ), ) @@ -177,6 +183,7 @@ def launch_mev_relay( max_cpu=RELAY_MAX_CPU, min_memory=RELAY_MIN_MEMORY, max_memory=RELAY_MAX_MEMORY, + node_selectors=node_selectors, ), ) diff --git a/src/mev/mock_mev/mock_mev_launcher.star b/src/mev/mock_mev/mock_mev_launcher.star index a7205f61b..226831673 100644 --- a/src/mev/mock_mev/mock_mev_launcher.star +++ b/src/mev/mock_mev/mock_mev_launcher.star @@ -10,7 +10,14 @@ MIN_MEMORY = 128 MAX_MEMORY = 1024 -def launch_mock_mev(plan, el_uri, beacon_uri, jwt_secret, global_client_log_level): +def launch_mock_mev( + plan, + el_uri, + beacon_uri, + jwt_secret, + global_client_log_level, + global_node_selectors, +): mock_builder = plan.add_service( name=MOCK_MEV_SERVICE_NAME, config=ServiceConfig( @@ -31,6 +38,7 @@ def launch_mock_mev(plan, el_uri, beacon_uri, jwt_secret, global_client_log_leve max_cpu=MAX_CPU, min_memory=MIN_MEMORY, max_memory=MAX_MEMORY, + node_selectors=global_node_selectors, ), 
) return "http://{0}@{1}:{2}".format( diff --git a/src/package_io/input_parser.star b/src/package_io/input_parser.star index 4be39d044..edcb2b149 100644 --- a/src/package_io/input_parser.star +++ b/src/package_io/input_parser.star @@ -81,7 +81,15 @@ def input_parser(plan, input_args): result["assertoor_params"] = get_default_assertoor_params() result["xatu_sentry_params"] = get_default_xatu_sentry_params() result["persistent"] = False + result["parallel_keystore_generation"] = False result["global_tolerations"] = [] + result["global_node_selectors"] = {} + + if constants.NETWORK_NAME.shadowfork in result["network_params"]["network"]: + shadow_base = result["network_params"]["network"].split("-shadowfork")[0] + result["network_params"][ + "deposit_contract_address" + ] = constants.DEPOSIT_CONTRACT_ADDRESS[shadow_base] if constants.NETWORK_NAME.shadowfork in result["network_params"]["network"]: shadow_base = result["network_params"]["network"].split("-shadowfork")[0] @@ -162,6 +170,7 @@ def input_parser(plan, input_args): cl_tolerations=participant["cl_tolerations"], tolerations=participant["tolerations"], validator_tolerations=participant["validator_tolerations"], + node_selectors=participant["node_selectors"], beacon_extra_params=participant["beacon_extra_params"], beacon_extra_labels=participant["beacon_extra_labels"], validator_extra_params=participant["validator_extra_params"], @@ -288,6 +297,7 @@ def input_parser(plan, input_args): xatu_server_tls=result["xatu_sentry_params"]["xatu_server_tls"], ), global_tolerations=result["global_tolerations"], + global_node_selectors=result["global_node_selectors"], ) @@ -518,6 +528,15 @@ def get_client_tolerations( return toleration_list +def get_client_node_selectors(participant_node_selectors, global_node_selectors): + node_selectors = {} + node_selectors = participant_node_selectors if participant_node_selectors else {} + if node_selectors == {}: + node_selectors = global_node_selectors if global_node_selectors else {} + 
+ return node_selectors + + def default_input_args(): network_params = default_network_params() participants = [default_participant()] @@ -575,6 +594,7 @@ def default_participant(): "cl_tolerations": [], "validator_tolerations": [], "tolerations": [], + "node_selectors": {}, "beacon_extra_params": [], "beacon_extra_labels": {}, "validator_extra_params": [], @@ -603,6 +623,8 @@ def default_participant(): }, "blobber_enabled": False, "blobber_extra_params": [], + "global_tolerations": [], + "global_node_selectors": {}, } diff --git a/src/participant_network.star b/src/participant_network.star index 2b159e7d2..13bb1e97a 100644 --- a/src/participant_network.star +++ b/src/participant_network.star @@ -68,6 +68,7 @@ def launch_participant_network( persistent, xatu_sentry_params, global_tolerations, + global_node_selectors, parallel_keystore_generation=False, ): network_id = network_params.network_id @@ -113,13 +114,17 @@ def launch_participant_network( store=[StoreSpec(src="/shadowfork", name="latest_blocks")], ) - # maybe we can do the copy in the same step as the fetch? 
for index, participant in enumerate(participants): tolerations = input_parser.get_client_tolerations( participant.el_tolerations, participant.tolerations, global_tolerations, ) + node_selectors = input_parser.get_client_node_selectors( + participant.node_selectors, + global_node_selectors, + ) + cl_client_type = participant.cl_client_type el_client_type = participant.el_client_type @@ -160,12 +165,8 @@ def launch_participant_network( ], ), }, - env_vars={ - "RCLONE_CONFIG_MYS3_TYPE": "s3", - "RCLONE_CONFIG_MYS3_PROVIDER": "DigitalOcean", - "RCLONE_CONFIG_MYS3_ENDPOINT": "https://ams3.digitaloceanspaces.com", - }, tolerations=tolerations, + node_selectors=node_selectors, ), ) for index, participant in enumerate(participants): @@ -423,7 +424,13 @@ def launch_participant_network( for index, participant in enumerate(participants): cl_client_type = participant.cl_client_type el_client_type = participant.el_client_type - + node_selectors = input_parser.get_client_node_selectors( + participant.node_selectors, + global_node_selectors, + ) + tolerations = input_parser.get_client_tolerations( + participant.el_tolerations, participant.tolerations, global_tolerations + ) if el_client_type not in el_launchers: fail( "Unsupported launcher '{0}', need one of '{1}'".format( @@ -460,9 +467,8 @@ def launch_participant_network( participant.el_extra_labels, persistent, participant.el_client_volume_size, - participant.el_tolerations, - participant.tolerations, - global_tolerations, + tolerations, + node_selectors, ) # Add participant el additional prometheus metrics @@ -538,6 +544,10 @@ def launch_participant_network( for index, participant in enumerate(participants): cl_client_type = participant.cl_client_type el_client_type = participant.el_client_type + node_selectors = input_parser.get_client_node_selectors( + participant.node_selectors, + global_node_selectors, + ) if cl_client_type not in cl_launchers: fail( @@ -574,6 +584,7 @@ def launch_participant_network( plan, 
snooper_service_name, el_client_context, + node_selectors, ) plan.print( "Successfully added {0} snooper participants".format( @@ -615,6 +626,7 @@ def launch_participant_network( participant.validator_tolerations, participant.tolerations, global_tolerations, + node_selectors, participant.cl_split_mode_enabled, ) else: @@ -651,6 +663,7 @@ def launch_participant_network( participant.validator_tolerations, participant.tolerations, global_tolerations, + node_selectors, participant.cl_split_mode_enabled, ) @@ -676,6 +689,7 @@ def launch_participant_network( ethereum_metrics_exporter_service_name, el_client_context, cl_client_context, + node_selectors, ) plan.print( "Successfully added {0} ethereum metrics exporter participants".format( @@ -699,6 +713,7 @@ def launch_participant_network( xatu_sentry_params, network_params, pair_name, + node_selectors, ) plan.print( "Successfully added {0} xatu sentry participants".format( diff --git a/src/prometheus/prometheus_launcher.star b/src/prometheus/prometheus_launcher.star index 753edc6d8..4e66e8505 100644 --- a/src/prometheus/prometheus_launcher.star +++ b/src/prometheus/prometheus_launcher.star @@ -26,6 +26,7 @@ def launch_prometheus( additional_metrics_jobs, ethereum_metrics_exporter_contexts, xatu_sentry_contexts, + global_node_selectors, ): metrics_jobs = get_metrics_jobs( el_client_contexts, @@ -35,7 +36,13 @@ def launch_prometheus( xatu_sentry_contexts, ) prometheus_url = prometheus.run( - plan, metrics_jobs, MIN_CPU, MAX_CPU, MIN_MEMORY, MAX_MEMORY + plan, + metrics_jobs, + MIN_CPU, + MAX_CPU, + MIN_MEMORY, + MAX_MEMORY, + node_selectors=global_node_selectors, ) return prometheus_url diff --git a/src/snooper/snooper_engine_launcher.star b/src/snooper/snooper_engine_launcher.star index 24bc94b2e..636f0edab 100644 --- a/src/snooper/snooper_engine_launcher.star +++ b/src/snooper/snooper_engine_launcher.star @@ -25,10 +25,10 @@ MIN_MEMORY = 10 MAX_MEMORY = 300 -def launch(plan, service_name, el_client_context): +def 
launch(plan, service_name, el_client_context, node_selectors): snooper_service_name = "{0}".format(service_name) - snooper_config = get_config(service_name, el_client_context) + snooper_config = get_config(service_name, el_client_context, node_selectors) snooper_service = plan.add_service(snooper_service_name, snooper_config) snooper_http_port = snooper_service.ports[SNOOPER_ENGINE_RPC_PORT_ID] @@ -37,7 +37,7 @@ def launch(plan, service_name, el_client_context): ) -def get_config(service_name, el_client_context): +def get_config(service_name, el_client_context, node_selectors): engine_rpc_port_num = "http://{0}:{1}".format( el_client_context.ip_addr, el_client_context.engine_rpc_port_num, @@ -58,4 +58,5 @@ def get_config(service_name, el_client_context): max_cpu=MAX_CPU, min_memory=MIN_MEMORY, max_memory=MAX_MEMORY, + node_selectors=node_selectors, ) diff --git a/src/transaction_spammer/transaction_spammer.star b/src/transaction_spammer/transaction_spammer.star index 8aa3c4e71..275492a6d 100644 --- a/src/transaction_spammer/transaction_spammer.star +++ b/src/transaction_spammer/transaction_spammer.star @@ -9,18 +9,30 @@ MAX_MEMORY = 300 def launch_transaction_spammer( - plan, prefunded_addresses, el_uri, tx_spammer_params, electra_fork_epoch + plan, + prefunded_addresses, + el_uri, + tx_spammer_params, + electra_fork_epoch, + global_node_selectors, ): config = get_config( prefunded_addresses, el_uri, tx_spammer_params.tx_spammer_extra_args, electra_fork_epoch, + global_node_selectors, ) plan.add_service(SERVICE_NAME, config) -def get_config(prefunded_addresses, el_uri, tx_spammer_extra_args, electra_fork_epoch): +def get_config( + prefunded_addresses, + el_uri, + tx_spammer_extra_args, + electra_fork_epoch, + node_selectors, +): # Temp hack to use the old tx-fuzz image until we can get the new one working if electra_fork_epoch != None: tx_spammer_image = "ethpandaops/tx-fuzz:kaustinen-281adbc" @@ -43,4 +55,5 @@ def get_config(prefunded_addresses, el_uri, 
tx_spammer_extra_args, electra_fork_ max_cpu=MAX_CPU, min_memory=MIN_MEMORY, max_memory=MAX_MEMORY, + node_selectors=node_selectors, ) diff --git a/src/xatu_sentry/xatu_sentry_launcher.star b/src/xatu_sentry/xatu_sentry_launcher.star index 3236fcfe8..39077ec37 100644 --- a/src/xatu_sentry/xatu_sentry_launcher.star +++ b/src/xatu_sentry/xatu_sentry_launcher.star @@ -22,6 +22,7 @@ def launch( xatu_sentry_params, network_params, pair_name, + node_selectors, ): config_template = read_file(static_files.XATU_SENTRY_CONFIG_TEMPLATE_FILEPATH) @@ -81,6 +82,7 @@ def launch( max_cpu=MAX_CPU, min_memory=MIN_MEMORY, max_memory=MAX_MEMORY, + node_selectors=node_selectors, ), ) From 9ceae9c74405db4e1ab6e02de541577d078434ae Mon Sep 17 00:00:00 2001 From: pk910 Date: Tue, 20 Feb 2024 08:29:07 +0100 Subject: [PATCH 19/33] feat: enable api in assertoor config (#495) enable new assertoor api for easier integration with other tools --- static_files/assertoor-config/config.yaml.tmpl | 2 ++ 1 file changed, 2 insertions(+) diff --git a/static_files/assertoor-config/config.yaml.tmpl b/static_files/assertoor-config/config.yaml.tmpl index 3d6b3dfda..e43e27a3e 100644 --- a/static_files/assertoor-config/config.yaml.tmpl +++ b/static_files/assertoor-config/config.yaml.tmpl @@ -14,6 +14,8 @@ web: enabled: true debug: true pprof: true + api: + enabled: true validatorNames: inventoryYaml: "/validator-ranges/validator-ranges.yaml" From e48483a130ba227dafd0d0fd9ee66c6cecc3bfce Mon Sep 17 00:00:00 2001 From: Bharath Vedartham Date: Tue, 20 Feb 2024 20:29:09 +0530 Subject: [PATCH 20/33] feat: parameterize mev-boost args (#400) Adds 2 new fields to the config file under the mev_params field: mev_boost_image: mev_boost_args: The behaviour is such that, `mev-boost-args` overrides all the args of mev-boost. No extra params are added. 
Resolves https://github.com/kurtosis-tech/ethereum-package/issues/289 --------- Co-authored-by: Barnabas Busa Co-authored-by: franjoespejo Co-authored-by: Gyanendra Mishra Co-authored-by: Sam Calder-Mason Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: pk910 Co-authored-by: Parithosh Jayanthi Co-authored-by: franjoespejo Co-authored-by: Anton --- README.md | 4 +++- main.star | 1 + network_params.yaml | 2 ++ src/mev/mev_boost/mev_boost_launcher.star | 8 ++++---- src/package_io/input_parser.star | 2 ++ 5 files changed, 12 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index ecb40bcea..3b1773563 100644 --- a/README.md +++ b/README.md @@ -543,7 +543,7 @@ mev_type: null # Parameters if MEV is used mev_params: - # The image to use for MEV boot relay + # The image to use for MEV boost relay mev_relay_image: flashbots/mev-boost-relay # The image to use for the builder mev_builder_image: ethpandaops/flashbots-builder:main @@ -551,6 +551,8 @@ mev_params: mev_builder_cl_image: sigp/lighthouse:latest # The image to use for mev-boost mev_boost_image: flashbots/mev-boost + # Parameters for MEV Boost. 
This overrides all arguments of the mev-boost container + mev_boost_args: [] # Extra parameters to send to the API mev_relay_api_extra_args: [] # Extra parameters to send to the housekeeper diff --git a/main.star b/main.star index 7ef2fd7ff..b1ebc4dbf 100644 --- a/main.star +++ b/main.star @@ -260,6 +260,7 @@ def run(plan, args={}): mev_boost_service_name, network_params.network_id, mev_params.mev_boost_image, + mev_params.mev_boost_args, global_node_selectors, ) all_mevboost_contexts.append(mev_boost_context) diff --git a/network_params.yaml b/network_params.yaml index d03330642..35d2078d1 100644 --- a/network_params.yaml +++ b/network_params.yaml @@ -84,6 +84,8 @@ mev_params: mev_flood_image: flashbots/mev-flood mev_flood_extra_args: [] mev_flood_seconds_per_bundle: 15 + mev_boost_image: flashbots/mev-boost + mev_boost_args: ["mev-boost", "--relay-check"] grafana_additional_dashboards: [] persistent: false xatu_sentry_enabled: false diff --git a/src/mev/mev_boost/mev_boost_launcher.star b/src/mev/mev_boost/mev_boost_launcher.star index a54da92a4..f92cd1525 100644 --- a/src/mev/mev_boost/mev_boost_launcher.star +++ b/src/mev/mev_boost/mev_boost_launcher.star @@ -29,12 +29,14 @@ def launch( service_name, network_id, mev_boost_image, + mev_boost_args, global_node_selectors, ): config = get_config( mev_boost_launcher, network_id, mev_boost_image, + mev_boost_args, global_node_selectors, ) @@ -49,12 +51,10 @@ def get_config( mev_boost_launcher, network_id, mev_boost_image, + mev_boost_args, node_selectors, ): - command = ["mev-boost"] - - if mev_boost_launcher.should_check_relay: - command.append("-relay-check") + command = mev_boost_args return ServiceConfig( image=mev_boost_image, diff --git a/src/package_io/input_parser.star b/src/package_io/input_parser.star index edcb2b149..4c79f41e4 100644 --- a/src/package_io/input_parser.star +++ b/src/package_io/input_parser.star @@ -237,6 +237,7 @@ def input_parser(plan, input_args): 
mev_builder_image=result["mev_params"]["mev_builder_image"], mev_builder_cl_image=result["mev_params"]["mev_builder_cl_image"], mev_boost_image=result["mev_params"]["mev_boost_image"], + mev_boost_args=result["mev_params"]["mev_boost_args"], mev_relay_api_extra_args=result["mev_params"]["mev_relay_api_extra_args"], mev_relay_housekeeper_extra_args=result["mev_params"][ "mev_relay_housekeeper_extra_args" @@ -634,6 +635,7 @@ def get_default_mev_params(): "mev_builder_image": "flashbots/builder:latest", "mev_builder_cl_image": "sigp/lighthouse:latest", "mev_boost_image": "flashbots/mev-boost", + "mev_boost_args": ["mev-boost", "--relay-check"], "mev_relay_api_extra_args": [], "mev_relay_housekeeper_extra_args": [], "mev_relay_website_extra_args": [], From fe2de7e5a5e2446ebb0a0b191f5aa6783e132426 Mon Sep 17 00:00:00 2001 From: pk910 Date: Thu, 22 Feb 2024 15:43:00 +0100 Subject: [PATCH 21/33] feat: allow more detailed additional test configurations in assertoor_params (#498) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds the ability do use complex objects that are passed through to the assertoor config in json format. It allows defining assertoor tests like this via the kurtosis params: ``` assertoor_params: run_stability_check: false run_block_proposal_check: true tests: - file: https://raw.githubusercontent.com/ethpandaops/assertoor-test/master/assertoor-tests/block-proposal-check.yaml timeout: 2h config: someCustomTestConfig: "some value" ``` The old way to specify the link to the test only is still supported: ``` assertoor_params: run_stability_check: false run_block_proposal_check: true tests: - "https://raw.githubusercontent.com/ethpandaops/assertoor-test/master/assertoor-tests/block-proposal-check.yaml" ``` Effectively both formats can be used. The downstream implementation is unfortunately not super nice 😅 It embeds the test configs as json objects within the yaml configuration. 
This works fine as yaml inherits the json syntax. --- README.md | 4 ++++ src/assertoor/assertoor_launcher.star | 15 ++++++++++++++- static_files/assertoor-config/config.yaml.tmpl | 2 +- 3 files changed, 19 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 3b1773563..ade5cd475 100644 --- a/README.md +++ b/README.md @@ -473,8 +473,12 @@ assertoor_params: run_lifecycle_test: false # Run additional tests from external test definitions + # Entries may be simple strings (link to the test file) or dictionaries with more flexibility # eg: # - https://raw.githubusercontent.com/ethpandaops/assertoor/master/example/tests/block-proposal-check.yaml + # - file: "https://raw.githubusercontent.com/ethpandaops/assertoor/master/example/tests/block-proposal-check.yaml" + # config: + # someCustomTestConfig: "some value" tests: [] diff --git a/src/assertoor/assertoor_launcher.star b/src/assertoor/assertoor_launcher.star index 94a73d0cc..35c206412 100644 --- a/src/assertoor/assertoor_launcher.star +++ b/src/assertoor/assertoor_launcher.star @@ -137,6 +137,19 @@ def get_config( def new_config_template_data( listen_port_num, client_info, validator_client_info, assertoor_params ): + additional_tests = [] + for index, testcfg in enumerate(assertoor_params.tests): + if type(testcfg) == "dict": + additional_tests.append(json.encode(testcfg)) + else: + additional_tests.append( + json.encode( + { + "file": testcfg, + } + ) + ) + return { "ListenPortNum": listen_port_num, "ClientInfo": client_info, @@ -147,7 +160,7 @@ def new_config_template_data( "RunTransactionTest": assertoor_params.run_transaction_test, "RunBlobTransactionTest": assertoor_params.run_blob_transaction_test, "RunOpcodesTransactionTest": assertoor_params.run_opcodes_transaction_test, - "AdditionalTests": assertoor_params.tests, + "AdditionalTests": additional_tests, } diff --git a/static_files/assertoor-config/config.yaml.tmpl b/static_files/assertoor-config/config.yaml.tmpl index e43e27a3e..1feeaa584 100644 
--- a/static_files/assertoor-config/config.yaml.tmpl +++ b/static_files/assertoor-config/config.yaml.tmpl @@ -51,5 +51,5 @@ externalTests: - file: /tests/validator-lifecycle-test.yaml {{ end }} {{ range $test := .AdditionalTests }} -- file: "{{ $test }}" +- {{ $test }} {{- end }} From 90da2c33a77b4a0ac620ae665899963256a1ae0a Mon Sep 17 00:00:00 2001 From: Luca Winter | Serenita <70237279+eth2353@users.noreply.github.com> Date: Tue, 27 Feb 2024 10:18:48 +0100 Subject: [PATCH 22/33] feat: separate validator clients from CL clients (#497) Separates the validator clients more cleanly from the CL clients. This then allows the use of different combinations of CL/VC clients, e.g. Teku VC with Lodestar CL. The only VC that doesn't work with different beacon nodes is Prysm at the moment. The `use_separate_validator_client` flag defaults to false for CL clients that can run validators in the same process as the CL (preserving the way the `cl_split_mode_enabled` worked before this PR). I believe this can be quite useful to test different VC<->CL combinations for compatibility. 
--------- Co-authored-by: Barnabas Busa --- .github/tests/dencun-devnet-12.yaml | 2 +- .github/tests/ephemery.yaml | 2 +- .github/tests/mix-persistence-k8s.yaml | 4 +- .github/tests/mix-persistence.yaml | 4 +- .github/tests/mixed-cl-vc.yml | 10 + .github/tests/node-selectors.yaml | 4 +- .github/tests/split-nimbus.yaml | 12 +- .github/tests/split-teku.yaml | 12 +- .github/tests/tolerations.yaml | 6 +- README.md | 33 ++- main.star | 3 + src/cl/cl_client_context.star | 2 - src/cl/lighthouse/lighthouse_launcher.star | 170 +--------------- src/cl/lodestar/lodestar_launcher.star | 147 +------------- src/cl/nimbus/nimbus_launcher.star | 170 +--------------- src/cl/prysm/prysm_launcher.star | 189 +----------------- src/cl/teku/teku_launcher.star | 189 +----------------- src/package_io/constants.star | 8 + src/package_io/input_parser.star | 62 ++++-- src/participant.star | 4 + src/participant_network.star | 101 ++++++++-- src/prometheus/prometheus_launcher.star | 56 +++--- src/validator_client/lighthouse.star | 104 ++++++++++ src/validator_client/lodestar.star | 98 +++++++++ src/validator_client/nimbus.star | 79 ++++++++ src/validator_client/prysm.star | 92 +++++++++ src/validator_client/shared.star | 16 ++ src/validator_client/teku.star | 87 ++++++++ .../validator_client_context.star | 10 + .../validator_client_launcher.star | 179 +++++++++++++++++ 30 files changed, 910 insertions(+), 945 deletions(-) create mode 100644 .github/tests/mixed-cl-vc.yml create mode 100644 src/validator_client/lighthouse.star create mode 100644 src/validator_client/lodestar.star create mode 100644 src/validator_client/nimbus.star create mode 100644 src/validator_client/prysm.star create mode 100644 src/validator_client/shared.star create mode 100644 src/validator_client/teku.star create mode 100644 src/validator_client/validator_client_context.star create mode 100644 src/validator_client/validator_client_launcher.star diff --git a/.github/tests/dencun-devnet-12.yaml 
b/.github/tests/dencun-devnet-12.yaml index fd267a728..755ae74c6 100644 --- a/.github/tests/dencun-devnet-12.yaml +++ b/.github/tests/dencun-devnet-12.yaml @@ -6,7 +6,7 @@ participants: - el_client_type: nethermind el_client_image: ethpandaops/nethermind:master cl_client_type: prysm - cl_client_image: gcr.io/prysmaticlabs/prysm/beacon-chain:latest,gcr.io/prysmaticlabs/prysm/validator:latest + cl_client_image: gcr.io/prysmaticlabs/prysm/beacon-chain:latest - el_client_type: erigon el_client_image: ethpandaops/erigon:devel cl_client_type: nimbus diff --git a/.github/tests/ephemery.yaml b/.github/tests/ephemery.yaml index 49118c7ff..ac54f2ed9 100644 --- a/.github/tests/ephemery.yaml +++ b/.github/tests/ephemery.yaml @@ -6,7 +6,7 @@ participants: - el_client_type: nethermind el_client_image: ethpandaops/nethermind:master cl_client_type: prysm - cl_client_image: gcr.io/prysmaticlabs/prysm/beacon-chain:latest,gcr.io/prysmaticlabs/prysm/validator:latest + cl_client_image: gcr.io/prysmaticlabs/prysm/beacon-chain:latest - el_client_type: erigon el_client_image: ethpandaops/erigon:devel cl_client_type: nimbus diff --git a/.github/tests/mix-persistence-k8s.yaml b/.github/tests/mix-persistence-k8s.yaml index 252af09b7..4c6d8443e 100644 --- a/.github/tests/mix-persistence-k8s.yaml +++ b/.github/tests/mix-persistence-k8s.yaml @@ -1,13 +1,13 @@ participants: - el_client_type: geth cl_client_type: teku - cl_split_mode_enabled: true + use_separate_validator_client: true - el_client_type: nethermind cl_client_type: prysm - el_client_type: erigon cl_client_type: nimbus cl_client_image: ethpandaops/nimbus:unstable - cl_split_mode_enabled: true + use_separate_validator_client: true - el_client_type: besu cl_client_type: lighthouse - el_client_type: reth diff --git a/.github/tests/mix-persistence.yaml b/.github/tests/mix-persistence.yaml index 252af09b7..4c6d8443e 100644 --- a/.github/tests/mix-persistence.yaml +++ b/.github/tests/mix-persistence.yaml @@ -1,13 +1,13 @@ participants: - 
el_client_type: geth cl_client_type: teku - cl_split_mode_enabled: true + use_separate_validator_client: true - el_client_type: nethermind cl_client_type: prysm - el_client_type: erigon cl_client_type: nimbus cl_client_image: ethpandaops/nimbus:unstable - cl_split_mode_enabled: true + use_separate_validator_client: true - el_client_type: besu cl_client_type: lighthouse - el_client_type: reth diff --git a/.github/tests/mixed-cl-vc.yml b/.github/tests/mixed-cl-vc.yml new file mode 100644 index 000000000..bf20528b6 --- /dev/null +++ b/.github/tests/mixed-cl-vc.yml @@ -0,0 +1,10 @@ +participants: + - el_client_type: geth + cl_client_type: teku + use_separate_validator_client: true + validator_client_type: lodestar + - el_client_type: besu + cl_client_type: nimbus + use_separate_validator_client: true + validator_client_type: lighthouse +additional_services: [] diff --git a/.github/tests/node-selectors.yaml b/.github/tests/node-selectors.yaml index a011f73dc..fdd34e48e 100644 --- a/.github/tests/node-selectors.yaml +++ b/.github/tests/node-selectors.yaml @@ -1,13 +1,13 @@ participants: - el_client_type: reth cl_client_type: teku - cl_split_mode_enabled: true + use_separate_validator_client: true node_selectors: { "kubernetes.io/hostname": testing-1, } - el_client_type: reth cl_client_type: teku - cl_split_mode_enabled: true + use_separate_validator_client: true global_node_selectors: { "kubernetes.io/hostname": testing-2, } diff --git a/.github/tests/split-nimbus.yaml b/.github/tests/split-nimbus.yaml index d44805ff4..21720cf85 100644 --- a/.github/tests/split-nimbus.yaml +++ b/.github/tests/split-nimbus.yaml @@ -2,26 +2,26 @@ participants: - el_client_type: geth cl_client_type: nimbus cl_client_image: ethpandaops/nimbus:unstable - cl_split_mode_enabled: true + use_separate_validator_client: true validator_count: 0 - el_client_type: nethermind cl_client_type: nimbus - cl_split_mode_enabled: true + use_separate_validator_client: true cl_client_image: 
ethpandaops/nimbus:unstable - el_client_type: erigon cl_client_type: nimbus - cl_split_mode_enabled: true + use_separate_validator_client: true cl_client_image: ethpandaops/nimbus:unstable - el_client_type: besu cl_client_type: nimbus - cl_split_mode_enabled: true + use_separate_validator_client: true cl_client_image: ethpandaops/nimbus:unstable - el_client_type: reth cl_client_type: nimbus - cl_split_mode_enabled: true + use_separate_validator_client: true cl_client_image: ethpandaops/nimbus:unstable - el_client_type: ethereumjs cl_client_type: nimbus - cl_split_mode_enabled: true + use_separate_validator_client: true cl_client_image: ethpandaops/nimbus:unstable additional_services: [] diff --git a/.github/tests/split-teku.yaml b/.github/tests/split-teku.yaml index 8b99808c6..c5dbe4bae 100644 --- a/.github/tests/split-teku.yaml +++ b/.github/tests/split-teku.yaml @@ -1,21 +1,21 @@ participants: - el_client_type: geth cl_client_type: teku - cl_split_mode_enabled: true + use_separate_validator_client: true validator_count: 0 - el_client_type: nethermind cl_client_type: teku - cl_split_mode_enabled: true + use_separate_validator_client: true - el_client_type: erigon cl_client_type: teku - cl_split_mode_enabled: true + use_separate_validator_client: true - el_client_type: besu cl_client_type: teku - cl_split_mode_enabled: true + use_separate_validator_client: true - el_client_type: reth cl_client_type: teku - cl_split_mode_enabled: true + use_separate_validator_client: true - el_client_type: ethereumjs cl_client_type: teku - cl_split_mode_enabled: true + use_separate_validator_client: true additional_services: [] diff --git a/.github/tests/tolerations.yaml b/.github/tests/tolerations.yaml index 140938834..5635d2f6f 100644 --- a/.github/tests/tolerations.yaml +++ b/.github/tests/tolerations.yaml @@ -1,7 +1,7 @@ participants: - el_client_type: reth cl_client_type: teku - cl_split_mode_enabled: true + use_separate_validator_client: true cl_tolerations: - key: 
"node-role.kubernetes.io/master1" operator: "Exists" @@ -19,14 +19,14 @@ participants: effect: "NoSchedule" - el_client_type: reth cl_client_type: teku - cl_split_mode_enabled: true + use_separate_validator_client: true tolerations: - key: "node-role.kubernetes.io/master5" operator: "Exists" effect: "NoSchedule" - el_client_type: reth cl_client_type: teku - cl_split_mode_enabled: true + use_separate_validator_client: true additional_services: - dora global_tolerations: diff --git a/README.md b/README.md index ade5cd475..199a7803d 100644 --- a/README.md +++ b/README.md @@ -56,7 +56,7 @@ When running on a public testnet using a cloud provider's Kubernetes cluster, th 1. State Growth: The growth of the state might be faster than anticipated. This could potentially lead to issues if the default parameters become insufficient over time. It's important to monitor state growth and adjust parameters as necessary. -2. Persistent Storage Speed: Most cloud providers provision their Kubernetes clusters with relatively slow persistent storage by default. This can cause performance issues, particularly with Ethereum Light (EL) clients. +2. Persistent Storage Speed: Most cloud providers provision their Kubernetes clusters with relatively slow persistent storage by default. This can cause performance issues, particularly with Execution Layer (EL) clients. 3. Network Syncing: The disk speed provided by cloud providers may not be sufficient to sync with networks that have high demands, such as the mainnet. This could lead to syncing issues and delays. 
@@ -203,27 +203,40 @@ participants: # Valid values are nimbus, lighthouse, lodestar, teku, and prysm cl_client_type: lighthouse - # The Docker image that should be used for the EL client; leave blank to use the default for the client type - # Defaults by client (note that Prysm is different in that it requires two images - a Beacon and a validator - separated by a comma): + # The Docker image that should be used for the CL client; leave blank to use the default for the client type + # Defaults by client: # - lighthouse: sigp/lighthouse:latest # - teku: consensys/teku:latest # - nimbus: statusim/nimbus-eth2:multiarch-latest - # - prysm: gcr.io/prysmaticlabs/prysm/beacon-chain:latest,gcr.io/prysmaticlabs/prysm/validator:latest + # - prysm: gcr.io/prysmaticlabs/prysm/beacon-chain:latest # - lodestar: chainsafe/lodestar:next cl_client_image: "" - # The log level string that this participant's EL client should log at + # The log level string that this participant's CL client should log at # If this is emptystring then the global `logLevel` parameter's value will be translated into a string appropriate for the client (e.g. if # global `logLevel` = `info` then Teku would receive `INFO`, Prysm would receive `info`, etc.) # If this is not emptystring, then this value will override the global `logLevel` setting to allow for fine-grained control # over a specific participant's logging cl_client_log_level: "" - # A list of optional extra params that will be passed to the CL to run separate Beacon and validator nodes - # Only possible for nimbus or teku - # Please note that in order to get it to work with Nimbus, you have to use `ethpandaops/nimbus:unstable` as the image (default upstream image does not yet support this out of the box) - # Defaults to false - cl_split_mode_enabled: false + # Whether to use a separate validator client attached to the CL client. 
+ # Defaults to false for clients that can run both in one process (Teku, Nimbus) + use_separate_validator_client: true/false + + # The type of validator client that should be used + # Valid values are nimbus, lighthouse, lodestar, teku, and prysm + # ( The prysm validator only works with a prysm CL client ) + # Defaults to matching the chosen CL client (cl_client_type) + validator_client_type: "" + + # The Docker image that should be used for the separate validator client + # Defaults by client: + # - lighthouse: sigp/lighthouse:latest + # - lodestar: chainsafe/lodestar:latest + # - nimbus: statusim/nimbus-validator-client:multiarch-latest + # - prysm: gcr.io/prysmaticlabs/prysm/validator:latest + # - teku: consensys/teku:latest + validator_client_image: "" # Persistent storage size for the CL client container (in MB) # Defaults to 0, which means that the default size for the client will be used diff --git a/main.star b/main.star index b1ebc4dbf..68bac3681 100644 --- a/main.star +++ b/main.star @@ -109,11 +109,13 @@ def run(plan, args={}): all_el_client_contexts = [] all_cl_client_contexts = [] + all_validator_client_contexts = [] all_ethereum_metrics_exporter_contexts = [] all_xatu_sentry_contexts = [] for participant in all_participants: all_el_client_contexts.append(participant.el_client_context) all_cl_client_contexts.append(participant.cl_client_context) + all_validator_client_contexts.append(participant.validator_client_context) all_ethereum_metrics_exporter_contexts.append( participant.ethereum_metrics_exporter_context ) @@ -426,6 +428,7 @@ def run(plan, args={}): plan, all_el_client_contexts, all_cl_client_contexts, + all_validator_client_contexts, prometheus_additional_metrics_jobs, all_ethereum_metrics_exporter_contexts, all_xatu_sentry_contexts, diff --git a/src/cl/cl_client_context.star b/src/cl/cl_client_context.star index a0e198014..0aeb9f35c 100644 --- a/src/cl/cl_client_context.star +++ b/src/cl/cl_client_context.star @@ -5,7 +5,6 @@ def 
new_cl_client_context( http_port_num, cl_nodes_metrics_info, beacon_service_name, - validator_service_name="", multiaddr="", peer_id="", snooper_enabled=False, @@ -19,7 +18,6 @@ def new_cl_client_context( http_port_num=http_port_num, cl_nodes_metrics_info=cl_nodes_metrics_info, beacon_service_name=beacon_service_name, - validator_service_name=validator_service_name, multiaddr=multiaddr, peer_id=peer_id, snooper_enabled=snooper_enabled, diff --git a/src/cl/lighthouse/lighthouse_launcher.star b/src/cl/lighthouse/lighthouse_launcher.star index c22694847..f0cead5ff 100644 --- a/src/cl/lighthouse/lighthouse_launcher.star +++ b/src/cl/lighthouse/lighthouse_launcher.star @@ -30,22 +30,7 @@ BEACON_METRICS_PORT_NUM = 5054 BEACON_MIN_CPU = 50 BEACON_MIN_MEMORY = 256 -# ---------------------------------- Validator client ------------------------------------- -VALIDATOR_KEYS_MOUNTPOINT_ON_CLIENTS = "/data/lighthouse/validator-keys" -VALIDATOR_HTTP_PORT_ID = "http" -VALIDATOR_METRICS_PORT_ID = "metrics" -VALIDATOR_HTTP_PORT_NUM = 5042 -VALIDATOR_METRICS_PORT_NUM = 5064 -VALIDATOR_HTTP_PORT_WAIT_DISABLED = None - METRICS_PATH = "/metrics" -VALIDATOR_SUFFIX_SERVICE_NAME = "validator" - -# The min/max CPU/memory that the validator node can use -VALIDATOR_MIN_CPU = 50 -VALIDATOR_MAX_CPU = 300 -VALIDATOR_MIN_MEMORY = 128 -VALIDATOR_MAX_MEMORY = 512 PRIVATE_IP_ADDRESS_PLACEHOLDER = "KURTOSIS_IP_ADDR_PLACEHOLDER" @@ -68,20 +53,6 @@ BEACON_USED_PORTS = { ), } -VALIDATOR_USED_PORTS = { - VALIDATOR_HTTP_PORT_ID: shared_utils.new_port_spec( - VALIDATOR_HTTP_PORT_NUM, - shared_utils.TCP_PROTOCOL, - shared_utils.NOT_PROVIDED_APPLICATION_PROTOCOL, - VALIDATOR_HTTP_PORT_WAIT_DISABLED, - ), - VALIDATOR_METRICS_PORT_ID: shared_utils.new_port_spec( - VALIDATOR_METRICS_PORT_NUM, - shared_utils.TCP_PROTOCOL, - shared_utils.HTTP_APPLICATION_PROTOCOL, - ), -} - VERBOSITY_LEVELS = { constants.GLOBAL_CLIENT_LOG_LEVEL.error: "error", constants.GLOBAL_CLIENT_LOG_LEVEL.warn: "warn", @@ -105,31 +76,21 @@ 
def launch( bn_max_cpu, bn_min_mem, bn_max_mem, - v_min_cpu, - v_max_cpu, - v_min_mem, - v_max_mem, snooper_enabled, snooper_engine_context, blobber_enabled, blobber_extra_params, extra_beacon_params, - extra_validator_params, extra_beacon_labels, - extra_validator_labels, persistent, cl_volume_size, cl_tolerations, - validator_tolerations, participant_tolerations, global_tolerations, node_selectors, - split_mode_enabled=False, + use_separate_validator_client=True, ): beacon_service_name = "{0}".format(service_name) - validator_service_name = "{0}-{1}".format( - service_name, VALIDATOR_SUFFIX_SERVICE_NAME - ) log_level = input_parser.get_client_log_level_or_default( participant_log_level, global_log_level, VERBOSITY_LEVELS @@ -211,37 +172,6 @@ def launch( ) beacon_http_url = blobber_http_url - # Launch validator node if we have a keystore - validator_service = None - if node_keystore_files != None: - v_min_cpu = int(v_min_cpu) if int(v_min_cpu) > 0 else VALIDATOR_MIN_CPU - v_max_cpu = int(v_max_cpu) if int(v_max_cpu) > 0 else VALIDATOR_MAX_CPU - v_min_mem = int(v_min_mem) if int(v_min_mem) > 0 else VALIDATOR_MIN_MEMORY - v_max_mem = int(v_max_mem) if int(v_max_mem) > 0 else VALIDATOR_MAX_MEMORY - tolerations = input_parser.get_client_tolerations( - validator_tolerations, participant_tolerations, global_tolerations - ) - validator_config = get_validator_config( - launcher.el_cl_genesis_data, - image, - validator_service_name, - log_level, - beacon_http_url, - el_client_context, - node_keystore_files, - v_min_cpu, - v_max_cpu, - v_min_mem, - v_max_mem, - extra_validator_params, - extra_validator_labels, - persistent, - tolerations, - node_selectors, - ) - - validator_service = plan.add_service(validator_service_name, validator_config) - # TODO(old) add validator availability using the validator API: https://ethereum.github.io/beacon-APIs/?urls.primaryName=v1#/ValidatorRequiredApi | from eth2-merge-kurtosis-module beacon_node_identity_recipe = GetHttpRequestRecipe( 
endpoint="/eth/v1/node/identity", @@ -268,16 +198,6 @@ def launch( ) nodes_metrics_info = [beacon_node_metrics_info] - if validator_service: - validator_metrics_port = validator_service.ports[VALIDATOR_METRICS_PORT_ID] - validator_metrics_url = "{0}:{1}".format( - validator_service.ip_address, validator_metrics_port.number - ) - validator_node_metrics_info = node_metrics.new_node_metrics_info( - validator_service_name, METRICS_PATH, validator_metrics_url - ) - nodes_metrics_info.append(validator_node_metrics_info) - return cl_client_context.new_cl_client_context( "lighthouse", beacon_node_enr, @@ -285,7 +205,6 @@ def launch( BEACON_HTTP_PORT_NUM, nodes_metrics_info, beacon_service_name, - validator_service_name, beacon_multiaddr, beacon_peer_id, snooper_enabled, @@ -474,93 +393,6 @@ def get_beacon_config( ) -def get_validator_config( - el_cl_genesis_data, - image, - service_name, - log_level, - beacon_client_http_url, - el_client_context, - node_keystore_files, - v_min_cpu, - v_max_cpu, - v_min_mem, - v_max_mem, - extra_params, - extra_labels, - persistent, - tolerations, - node_selectors, -): - validator_keys_dirpath = shared_utils.path_join( - VALIDATOR_KEYS_MOUNTPOINT_ON_CLIENTS, - node_keystore_files.raw_keys_relative_dirpath, - ) - validator_secrets_dirpath = shared_utils.path_join( - VALIDATOR_KEYS_MOUNTPOINT_ON_CLIENTS, - node_keystore_files.raw_secrets_relative_dirpath, - ) - - cmd = [ - "lighthouse", - "validator_client", - "--debug-level=" + log_level, - "--testnet-dir=" + constants.GENESIS_CONFIG_MOUNT_PATH_ON_CONTAINER, - "--validators-dir=" + validator_keys_dirpath, - # NOTE: When secrets-dir is specified, we can't add the --data-dir flag - "--secrets-dir=" + validator_secrets_dirpath, - # The node won't have a slashing protection database and will fail to start otherwise - "--init-slashing-protection", - "--http", - "--unencrypted-http-transport", - "--http-address=0.0.0.0", - "--http-port={0}".format(VALIDATOR_HTTP_PORT_NUM), - "--beacon-nodes=" + 
beacon_client_http_url, - # "--enable-doppelganger-protection", // Disabled to not have to wait 2 epochs before validator can start - # burn address - If unset, the validator will scream in its logs - "--suggested-fee-recipient=" + constants.VALIDATING_REWARDS_ACCOUNT, - # vvvvvvvvvvvvvvvvvvv PROMETHEUS CONFIG vvvvvvvvvvvvvvvvvvvvv - "--metrics", - "--metrics-address=0.0.0.0", - "--metrics-allow-origin=*", - "--metrics-port={0}".format(VALIDATOR_METRICS_PORT_NUM), - # ^^^^^^^^^^^^^^^^^^^ PROMETHEUS CONFIG ^^^^^^^^^^^^^^^^^^^^^ - "--graffiti=" - + constants.CL_CLIENT_TYPE.lighthouse - + "-" - + el_client_context.client_name, - ] - - if len(extra_params): - cmd.extend([param for param in extra_params]) - - files = { - constants.GENESIS_DATA_MOUNTPOINT_ON_CLIENTS: el_cl_genesis_data.files_artifact_uuid, - VALIDATOR_KEYS_MOUNTPOINT_ON_CLIENTS: node_keystore_files.files_artifact_uuid, - } - - return ServiceConfig( - image=image, - ports=VALIDATOR_USED_PORTS, - cmd=cmd, - files=files, - env_vars={RUST_BACKTRACE_ENVVAR_NAME: RUST_FULL_BACKTRACE_KEYWORD}, - min_cpu=v_min_cpu, - max_cpu=v_max_cpu, - min_memory=v_min_mem, - max_memory=v_max_mem, - labels=shared_utils.label_maker( - constants.CL_CLIENT_TYPE.lighthouse, - constants.CLIENT_TYPES.validator, - image, - el_client_context.client_name, - extra_labels, - ), - tolerations=tolerations, - node_selectors=node_selectors, - ) - - def new_lighthouse_launcher(el_cl_genesis_data, jwt_file, network): return struct( el_cl_genesis_data=el_cl_genesis_data, diff --git a/src/cl/lodestar/lodestar_launcher.star b/src/cl/lodestar/lodestar_launcher.star index 4ca3abd5b..dea87e67d 100644 --- a/src/cl/lodestar/lodestar_launcher.star +++ b/src/cl/lodestar/lodestar_launcher.star @@ -13,7 +13,6 @@ TCP_DISCOVERY_PORT_ID = "tcp-discovery" UDP_DISCOVERY_PORT_ID = "udp-discovery" BEACON_HTTP_PORT_ID = "http" METRICS_PORT_ID = "metrics" -VALIDATOR_METRICS_PORT_ID = "validator-metrics" # Port nums DISCOVERY_PORT_NUM = 9000 @@ -24,17 +23,6 @@ 
METRICS_PORT_NUM = 8008 BEACON_MIN_CPU = 50 BEACON_MIN_MEMORY = 256 -# ---------------------------------- Validator client ------------------------------------- -VALIDATOR_KEYS_MOUNT_DIRPATH_ON_SERVICE_CONTAINER = "/validator-keys" -VALIDATOR_DATA_DIRPATH_ON_SERVICE_CONTAINER = "/data/lodestar/validator-data" -# The min/max CPU/memory that the validator node can use -VALIDATOR_MIN_CPU = 50 -VALIDATOR_MAX_CPU = 300 -VALIDATOR_MIN_MEMORY = 128 -VALIDATOR_MAX_MEMORY = 512 - -VALIDATOR_SUFFIX_SERVICE_NAME = "validator" - METRICS_PATH = "/metrics" PRIVATE_IP_ADDRESS_PLACEHOLDER = "KURTOSIS_IP_ADDR_PLACEHOLDER" @@ -54,13 +42,6 @@ BEACON_USED_PORTS = { ), } -VALIDATOR_USED_PORTS = { - METRICS_PORT_ID: shared_utils.new_port_spec( - METRICS_PORT_NUM, shared_utils.TCP_PROTOCOL - ), -} - - VERBOSITY_LEVELS = { constants.GLOBAL_CLIENT_LOG_LEVEL.error: "error", constants.GLOBAL_CLIENT_LOG_LEVEL.warn: "warn", @@ -84,31 +65,21 @@ def launch( bn_max_cpu, bn_min_mem, bn_max_mem, - v_min_cpu, - v_max_cpu, - v_min_mem, - v_max_mem, snooper_enabled, snooper_engine_context, blobber_enabled, blobber_extra_params, extra_beacon_params, - extra_validator_params, extra_beacon_labels, - extra_validator_labels, persistent, cl_volume_size, cl_tolerations, - validator_tolerations, participant_tolerations, global_tolerations, node_selectors, - split_mode_enabled=False, + use_separate_validator_client=True, ): beacon_service_name = "{0}".format(service_name) - validator_service_name = "{0}-{1}".format( - service_name, VALIDATOR_SUFFIX_SERVICE_NAME - ) log_level = input_parser.get_client_log_level_or_default( participant_log_level, global_log_level, VERBOSITY_LEVELS ) @@ -190,36 +161,6 @@ def launch( ) beacon_http_url = blobber_http_url - # Launch validator node if we have a keystore - if node_keystore_files != None: - v_min_cpu = int(v_min_cpu) if int(v_min_cpu) > 0 else VALIDATOR_MIN_CPU - v_max_cpu = int(v_max_cpu) if int(v_max_cpu) > 0 else VALIDATOR_MAX_CPU - v_min_mem = int(v_min_mem) if 
int(v_min_mem) > 0 else VALIDATOR_MIN_MEMORY - v_max_mem = int(v_max_mem) if int(v_max_mem) > 0 else VALIDATOR_MAX_MEMORY - tolerations = input_parser.get_client_tolerations( - validator_tolerations, participant_tolerations, global_tolerations - ) - validator_config = get_validator_config( - launcher.el_cl_genesis_data, - image, - validator_service_name, - log_level, - beacon_http_url, - el_client_context, - node_keystore_files, - v_min_cpu, - v_max_cpu, - v_min_mem, - v_max_mem, - extra_validator_params, - extra_validator_labels, - persistent, - tolerations, - node_selectors, - ) - - plan.add_service(validator_service_name, validator_config) - # TODO(old) add validator availability using the validator API: https://ethereum.github.io/beacon-APIs/?urls.primaryName=v1#/ValidatorRequiredApi | from eth2-merge-kurtosis-module beacon_node_identity_recipe = GetHttpRequestRecipe( @@ -255,7 +196,6 @@ def launch( HTTP_PORT_NUM, nodes_metrics_info, beacon_service_name, - validator_service_name, beacon_multiaddr, beacon_peer_id, snooper_enabled, @@ -425,91 +365,6 @@ def get_beacon_config( ) -def get_validator_config( - el_cl_genesis_data, - image, - service_name, - log_level, - beacon_client_http_url, - el_client_context, - node_keystore_files, - v_min_cpu, - v_max_cpu, - v_min_mem, - v_max_mem, - extra_params, - extra_labels, - persistent, - tolerations, - node_selectors, -): - root_dirpath = shared_utils.path_join( - VALIDATOR_DATA_DIRPATH_ON_SERVICE_CONTAINER, service_name - ) - - validator_keys_dirpath = shared_utils.path_join( - VALIDATOR_KEYS_MOUNT_DIRPATH_ON_SERVICE_CONTAINER, - node_keystore_files.raw_keys_relative_dirpath, - ) - - validator_secrets_dirpath = shared_utils.path_join( - VALIDATOR_KEYS_MOUNT_DIRPATH_ON_SERVICE_CONTAINER, - node_keystore_files.raw_secrets_relative_dirpath, - ) - - cmd = [ - "validator", - "--logLevel=" + log_level, - # "--dataDir=" + root_dirpath, - "--paramsFile=" - + constants.GENESIS_CONFIG_MOUNT_PATH_ON_CONTAINER - + "/config.yaml", - 
"--beaconNodes=" + beacon_client_http_url, - "--keystoresDir=" + validator_keys_dirpath, - "--secretsDir=" + validator_secrets_dirpath, - "--suggestedFeeRecipient=" + constants.VALIDATING_REWARDS_ACCOUNT, - # vvvvvvvvvvvvvvvvvvv PROMETHEUS CONFIG vvvvvvvvvvvvvvvvvvvvv - "--metrics", - "--metrics.address=0.0.0.0", - "--metrics.port={0}".format(METRICS_PORT_NUM), - # ^^^^^^^^^^^^^^^^^^^ PROMETHEUS CONFIG ^^^^^^^^^^^^^^^^^^^^^ - "--graffiti=" - + constants.CL_CLIENT_TYPE.lodestar - + "-" - + el_client_context.client_name, - ] - - if len(extra_params) > 0: - # this is a repeated, we convert it into Starlark - cmd.extend([param for param in extra_params]) - - files = { - constants.GENESIS_DATA_MOUNTPOINT_ON_CLIENTS: el_cl_genesis_data.files_artifact_uuid, - VALIDATOR_KEYS_MOUNT_DIRPATH_ON_SERVICE_CONTAINER: node_keystore_files.files_artifact_uuid, - } - - return ServiceConfig( - image=image, - ports=VALIDATOR_USED_PORTS, - cmd=cmd, - files=files, - private_ip_address_placeholder=PRIVATE_IP_ADDRESS_PLACEHOLDER, - min_cpu=v_min_cpu, - max_cpu=v_max_cpu, - min_memory=v_min_mem, - max_memory=v_max_mem, - labels=shared_utils.label_maker( - constants.CL_CLIENT_TYPE.lodestar, - constants.CLIENT_TYPES.validator, - image, - el_client_context.client_name, - extra_labels, - ), - tolerations=tolerations, - node_selectors=node_selectors, - ) - - def new_lodestar_launcher(el_cl_genesis_data, jwt_file, network): return struct( el_cl_genesis_data=el_cl_genesis_data, diff --git a/src/cl/nimbus/nimbus_launcher.star b/src/cl/nimbus/nimbus_launcher.star index 71a286c56..bd6a2b0df 100644 --- a/src/cl/nimbus/nimbus_launcher.star +++ b/src/cl/nimbus/nimbus_launcher.star @@ -29,25 +29,7 @@ DEFAULT_BEACON_IMAGE_ENTRYPOINT = ["nimbus_beacon_node"] BEACON_METRICS_PATH = "/metrics" -# ---------------------------------- Validator client ------------------------------------- VALIDATOR_KEYS_MOUNTPOINT_ON_CLIENTS = "/data/nimbus/validator-keys" -VALIDATOR_HTTP_PORT_ID = "http" 
-VALIDATOR_METRICS_PORT_ID = "metrics" -VALIDATOR_HTTP_PORT_NUM = 5042 -VALIDATOR_METRICS_PORT_NUM = 5064 -VALIDATOR_HTTP_PORT_WAIT_DISABLED = None - -VALIDATOR_SUFFIX_SERVICE_NAME = "validator" - -# The min/max CPU/memory that the validator node can use -VALIDATOR_MIN_CPU = 50 -VALIDATOR_MAX_CPU = 300 -VALIDATOR_MIN_MEMORY = 128 -VALIDATOR_MAX_MEMORY = 512 - -DEFAULT_VALIDATOR_IMAGE_ENTRYPOINT = ["nimbus_validator_client"] - -VALIDATOR_METRICS_PATH = "/metrics" # ---------------------------------- Genesis Files ---------------------------------- # Nimbus needs write access to the validator keys/secrets directories, and b/c the module container runs as root @@ -79,21 +61,6 @@ BEACON_USED_PORTS = { ), } - -VALIDATOR_USED_PORTS = { - VALIDATOR_HTTP_PORT_ID: shared_utils.new_port_spec( - VALIDATOR_HTTP_PORT_NUM, - shared_utils.TCP_PROTOCOL, - shared_utils.NOT_PROVIDED_APPLICATION_PROTOCOL, - VALIDATOR_HTTP_PORT_WAIT_DISABLED, - ), - VALIDATOR_METRICS_PORT_ID: shared_utils.new_port_spec( - VALIDATOR_METRICS_PORT_NUM, - shared_utils.TCP_PROTOCOL, - shared_utils.HTTP_APPLICATION_PROTOCOL, - ), -} - VERBOSITY_LEVELS = { constants.GLOBAL_CLIENT_LOG_LEVEL.error: "ERROR", constants.GLOBAL_CLIENT_LOG_LEVEL.warn: "WARN", @@ -119,31 +86,21 @@ def launch( bn_max_cpu, bn_min_mem, bn_max_mem, - v_min_cpu, - v_max_cpu, - v_min_mem, - v_max_mem, snooper_enabled, snooper_engine_context, blobber_enabled, blobber_extra_params, extra_beacon_params, - extra_validator_params, extra_beacon_labels, - extra_validator_labels, persistent, cl_volume_size, cl_tolerations, - validator_tolerations, participant_tolerations, global_tolerations, node_selectors, - split_mode_enabled, + use_separate_validator_client, ): beacon_service_name = "{0}".format(service_name) - validator_service_name = "{0}-{1}".format( - service_name, VALIDATOR_SUFFIX_SERVICE_NAME - ) log_level = input_parser.get_client_log_level_or_default( participant_log_level, global_log_level, VERBOSITY_LEVELS @@ -193,7 +150,7 @@ def 
launch( snooper_engine_context, extra_beacon_params, extra_beacon_labels, - split_mode_enabled, + use_separate_validator_client, persistent, cl_volume_size, tolerations, @@ -231,47 +188,6 @@ def launch( ) nodes_metrics_info = [nimbus_node_metrics_info] - # Launch validator node if we have a keystore - validator_service = None - if node_keystore_files != None and split_mode_enabled: - v_min_cpu = int(v_min_cpu) if int(v_min_cpu) > 0 else VALIDATOR_MIN_CPU - v_max_cpu = int(v_max_cpu) if int(v_max_cpu) > 0 else VALIDATOR_MAX_CPU - v_min_mem = int(v_min_mem) if int(v_min_mem) > 0 else VALIDATOR_MIN_MEMORY - v_max_mem = int(v_max_mem) if int(v_max_mem) > 0 else VALIDATOR_MAX_MEMORY - tolerations = input_parser.get_client_tolerations( - validator_tolerations, participant_tolerations, global_tolerations - ) - validator_config = get_validator_config( - launcher.el_cl_genesis_data, - image, - validator_service_name, - log_level, - beacon_http_url, - el_client_context, - node_keystore_files, - v_min_cpu, - v_max_cpu, - v_min_mem, - v_max_mem, - extra_validator_params, - extra_validator_labels, - persistent, - tolerations, - node_selectors, - ) - - validator_service = plan.add_service(validator_service_name, validator_config) - - if validator_service: - validator_metrics_port = validator_service.ports[VALIDATOR_METRICS_PORT_ID] - validator_metrics_url = "{0}:{1}".format( - validator_service.ip_address, validator_metrics_port.number - ) - validator_node_metrics_info = node_metrics.new_node_metrics_info( - validator_service_name, VALIDATOR_METRICS_PATH, validator_metrics_url - ) - nodes_metrics_info.append(validator_node_metrics_info) - return cl_client_context.new_cl_client_context( "nimbus", beacon_node_enr, @@ -279,7 +195,6 @@ def launch( BEACON_HTTP_PORT_NUM, nodes_metrics_info, beacon_service_name, - validator_service_name, beacon_multiaddr, beacon_peer_id, snooper_enabled, @@ -309,7 +224,7 @@ def get_beacon_config( snooper_engine_context, extra_params, extra_labels, - 
split_mode_enabled, + use_separate_validator_client, persistent, cl_volume_size, tolerations, @@ -383,7 +298,7 @@ def get_beacon_config( + el_client_context.client_name, ] - if node_keystore_files != None and not split_mode_enabled: + if node_keystore_files != None and not use_separate_validator_client: cmd.extend(validator_flags) if network not in constants.PUBLIC_NETWORKS: @@ -410,7 +325,7 @@ def get_beacon_config( constants.GENESIS_DATA_MOUNTPOINT_ON_CLIENTS: el_cl_genesis_data.files_artifact_uuid, constants.JWT_MOUNTPOINT_ON_CLIENTS: jwt_file, } - if node_keystore_files != None and not split_mode_enabled: + if node_keystore_files != None and not use_separate_validator_client: files[ VALIDATOR_KEYS_MOUNTPOINT_ON_CLIENTS ] = node_keystore_files.files_artifact_uuid @@ -447,81 +362,6 @@ def get_beacon_config( ) -def get_validator_config( - el_cl_genesis_data, - image, - service_name, - log_level, - beacon_http_url, - el_client_context, - node_keystore_files, - v_min_cpu, - v_max_cpu, - v_min_mem, - v_max_mem, - extra_params, - extra_labels, - persistent, - tolerations, - node_selectors, -): - validator_keys_dirpath = "" - validator_secrets_dirpath = "" - if node_keystore_files != None: - validator_keys_dirpath = shared_utils.path_join( - VALIDATOR_KEYS_MOUNTPOINT_ON_CLIENTS, - node_keystore_files.nimbus_keys_relative_dirpath, - ) - validator_secrets_dirpath = shared_utils.path_join( - VALIDATOR_KEYS_MOUNTPOINT_ON_CLIENTS, - node_keystore_files.raw_secrets_relative_dirpath, - ) - - cmd = [ - "--beacon-node=" + beacon_http_url, - "--validators-dir=" + validator_keys_dirpath, - "--secrets-dir=" + validator_secrets_dirpath, - "--suggested-fee-recipient=" + constants.VALIDATING_REWARDS_ACCOUNT, - # vvvvvvvvvvvvvvvvvvv METRICS CONFIG vvvvvvvvvvvvvvvvvvvvv - "--metrics", - "--metrics-address=0.0.0.0", - "--metrics-port={0}".format(VALIDATOR_METRICS_PORT_NUM), - "--graffiti=" - + constants.CL_CLIENT_TYPE.nimbus - + "-" - + el_client_context.client_name, - ] - - if 
len(extra_params) > 0: - cmd.extend([param for param in extra_params if param != "--split=true"]) - - files = { - VALIDATOR_KEYS_MOUNTPOINT_ON_CLIENTS: node_keystore_files.files_artifact_uuid, - } - - return ServiceConfig( - image=image, - ports=VALIDATOR_USED_PORTS, - cmd=cmd, - entrypoint=DEFAULT_VALIDATOR_IMAGE_ENTRYPOINT, - files=files, - private_ip_address_placeholder=PRIVATE_IP_ADDRESS_PLACEHOLDER, - min_cpu=v_min_cpu, - max_cpu=v_max_cpu, - min_memory=v_min_mem, - max_memory=v_max_mem, - labels=shared_utils.label_maker( - constants.CL_CLIENT_TYPE.nimbus, - constants.CLIENT_TYPES.validator, - image, - el_client_context.client_name, - extra_labels, - ), - tolerations=tolerations, - node_selectors=node_selectors, - ) - - def new_nimbus_launcher(el_cl_genesis_data, jwt_file, network): return struct( el_cl_genesis_data=el_cl_genesis_data, diff --git a/src/cl/prysm/prysm_launcher.star b/src/cl/prysm/prysm_launcher.star index 697694d6f..189f6c3ab 100644 --- a/src/cl/prysm/prysm_launcher.star +++ b/src/cl/prysm/prysm_launcher.star @@ -4,8 +4,6 @@ cl_client_context = import_module("../../cl/cl_client_context.star") node_metrics = import_module("../../node_metrics_info.star") cl_node_ready_conditions = import_module("../../cl/cl_node_ready_conditions.star") constants = import_module("../../package_io/constants.star") -IMAGE_SEPARATOR_DELIMITER = "," -EXPECTED_NUM_IMAGES = 2 # ---------------------------------- Beacon client ------------------------------------- BEACON_DATA_DIRPATH_ON_SERVICE_CONTAINER = "/data/prysm/beacon-data/" @@ -28,23 +26,7 @@ BEACON_MONITORING_PORT_NUM = 8080 BEACON_MIN_CPU = 100 BEACON_MIN_MEMORY = 256 -# ---------------------------------- Validator client ------------------------------------- -VALIDATOR_DATA_DIRPATH_ON_SERVICE_CONTAINER = "/data/prysm/validator-data/" -VALIDATOR_KEYS_MOUNT_DIRPATH_ON_SERVICE_CONTAINER = "/validator-keys" -PRYSM_PASSWORD_MOUNT_DIRPATH_ON_SERVICE_CONTAINER = "/prysm-password" - -# Port IDs 
-VALIDATOR_MONITORING_PORT_NUM = 8081 -VALIDATOR_MONITORING_PORT_ID = "monitoring" - METRICS_PATH = "/metrics" -VALIDATOR_SUFFIX_SERVICE_NAME = "validator" - -# The min/max CPU/memory that the validator node can use -VALIDATOR_MIN_CPU = 50 -VALIDATOR_MAX_CPU = 300 -VALIDATOR_MIN_MEMORY = 64 -VALIDATOR_MAX_MEMORY = 256 MIN_PEERS = 1 @@ -67,12 +49,6 @@ BEACON_NODE_USED_PORTS = { ), } -VALIDATOR_NODE_USED_PORTS = { - VALIDATOR_MONITORING_PORT_ID: shared_utils.new_port_spec( - VALIDATOR_MONITORING_PORT_NUM, shared_utils.TCP_PROTOCOL - ), -} - VERBOSITY_LEVELS = { constants.GLOBAL_CLIENT_LOG_LEVEL.error: "error", constants.GLOBAL_CLIENT_LOG_LEVEL.warn: "warn", @@ -86,7 +62,7 @@ def launch( plan, launcher, service_name, - images, + image, participant_log_level, global_log_level, bootnode_contexts, @@ -96,46 +72,21 @@ def launch( bn_max_cpu, bn_min_mem, bn_max_mem, - v_min_cpu, - v_max_cpu, - v_min_mem, - v_max_mem, snooper_enabled, snooper_engine_context, blobber_enabled, blobber_extra_params, extra_beacon_params, - extra_validator_params, extra_beacon_labels, - extra_validator_labels, persistent, cl_volume_size, cl_tolerations, - validator_tolerations, participant_tolerations, global_tolerations, node_selectors, - split_mode_enabled=False, + use_separate_validator_client=True, ): - split_images = images.split(IMAGE_SEPARATOR_DELIMITER) - if len(split_images) != EXPECTED_NUM_IMAGES: - fail( - "Expected {0} images but got {1}".format( - EXPECTED_NUM_IMAGES, len(split_images) - ) - ) - beacon_image, validator_image = split_images - - if beacon_image.strip() == "": - fail("An empty beacon image was provided") - - if validator_image.strip() == "": - fail("An empty validator image was provided") - beacon_service_name = "{0}".format(service_name) - validator_service_name = "{0}-{1}".format( - service_name, VALIDATOR_SUFFIX_SERVICE_NAME - ) log_level = input_parser.get_client_log_level_or_default( participant_log_level, global_log_level, VERBOSITY_LEVELS ) @@ -170,7 +121,7 @@ 
def launch( launcher.el_cl_genesis_data, launcher.jwt_file, launcher.network, - beacon_image, + image, beacon_service_name, bootnode_contexts, el_client_context, @@ -196,40 +147,6 @@ def launch( beacon_http_endpoint = "{0}:{1}".format(beacon_service.ip_address, HTTP_PORT_NUM) beacon_rpc_endpoint = "{0}:{1}".format(beacon_service.ip_address, RPC_PORT_NUM) - # Launch validator node if we have a keystore file - validator_service = None - if node_keystore_files != None: - v_min_cpu = int(v_min_cpu) if int(v_min_cpu) > 0 else VALIDATOR_MIN_CPU - v_max_cpu = int(v_max_cpu) if int(v_max_cpu) > 0 else VALIDATOR_MAX_CPU - v_min_mem = int(v_min_mem) if int(v_min_mem) > 0 else VALIDATOR_MIN_MEMORY - v_max_mem = int(v_max_mem) if int(v_max_mem) > 0 else VALIDATOR_MAX_MEMORY - tolerations = input_parser.get_client_tolerations( - validator_tolerations, participant_tolerations, global_tolerations - ) - validator_config = get_validator_config( - launcher.el_cl_genesis_data, - validator_image, - validator_service_name, - log_level, - beacon_rpc_endpoint, - beacon_http_endpoint, - el_client_context, - node_keystore_files, - v_min_cpu, - v_max_cpu, - v_min_mem, - v_max_mem, - extra_validator_params, - extra_validator_labels, - launcher.prysm_password_relative_filepath, - launcher.prysm_password_artifact_uuid, - persistent, - tolerations, - node_selectors, - ) - - validator_service = plan.add_service(validator_service_name, validator_config) - # TODO(old) add validator availability using the validator API: https://ethereum.github.io/beacon-APIs/?urls.primaryName=v1#/ValidatorRequiredApi | from eth2-merge-kurtosis-module beacon_node_identity_recipe = GetHttpRequestRecipe( endpoint="/eth/v1/node/identity", @@ -256,16 +173,6 @@ def launch( ) nodes_metrics_info = [beacon_node_metrics_info] - if validator_service: - validator_metrics_port = validator_service.ports[VALIDATOR_MONITORING_PORT_ID] - validator_metrics_url = "{0}:{1}".format( - validator_service.ip_address, 
validator_metrics_port.number - ) - validator_node_metrics_info = node_metrics.new_node_metrics_info( - validator_service_name, METRICS_PATH, validator_metrics_url - ) - nodes_metrics_info.append(validator_node_metrics_info) - return cl_client_context.new_cl_client_context( "prysm", beacon_node_enr, @@ -273,7 +180,6 @@ def launch( HTTP_PORT_NUM, nodes_metrics_info, beacon_service_name, - validator_service_name, beacon_multiaddr, beacon_peer_id, snooper_enabled, @@ -441,95 +347,6 @@ def get_beacon_config( ) -def get_validator_config( - el_cl_genesis_data, - validator_image, - service_name, - log_level, - beacon_rpc_endpoint, - beacon_http_endpoint, - el_client_context, - node_keystore_files, - v_min_cpu, - v_max_cpu, - v_min_mem, - v_max_mem, - extra_params, - extra_labels, - prysm_password_relative_filepath, - prysm_password_artifact_uuid, - persistent, - tolerations, - node_selectors, -): - validator_keys_dirpath = shared_utils.path_join( - VALIDATOR_KEYS_MOUNT_DIRPATH_ON_SERVICE_CONTAINER, - node_keystore_files.prysm_relative_dirpath, - ) - validator_secrets_dirpath = shared_utils.path_join( - PRYSM_PASSWORD_MOUNT_DIRPATH_ON_SERVICE_CONTAINER, - prysm_password_relative_filepath, - ) - - cmd = [ - "--accept-terms-of-use=true", # it's mandatory in order to run the node - "--chain-config-file=" - + constants.GENESIS_CONFIG_MOUNT_PATH_ON_CONTAINER - + "/config.yaml", - "--beacon-rpc-gateway-provider=" + beacon_http_endpoint, - "--beacon-rpc-provider=" + beacon_rpc_endpoint, - "--wallet-dir=" + validator_keys_dirpath, - "--wallet-password-file=" + validator_secrets_dirpath, - # "--datadir=" + VALIDATOR_DATA_DIRPATH_ON_SERVICE_CONTAINER, - "--monitoring-port={0}".format(VALIDATOR_MONITORING_PORT_NUM), - "--verbosity=" + log_level, - "--suggested-fee-recipient=" + constants.VALIDATING_REWARDS_ACCOUNT, - # vvvvvvvvvvvvvvvvvvv METRICS CONFIG vvvvvvvvvvvvvvvvvvvvv - "--disable-monitoring=false", - "--monitoring-host=0.0.0.0", - 
"--monitoring-port={0}".format(VALIDATOR_MONITORING_PORT_NUM), - # ^^^^^^^^^^^^^^^^^^^ METRICS CONFIG ^^^^^^^^^^^^^^^^^^^^^ - "--graffiti=" - + constants.CL_CLIENT_TYPE.prysm - + "-" - + el_client_context.client_name, - ] - - if len(extra_params) > 0: - # we do the for loop as otherwise its a proto repeated array - cmd.extend([param for param in extra_params]) - files = { - constants.GENESIS_DATA_MOUNTPOINT_ON_CLIENTS: el_cl_genesis_data.files_artifact_uuid, - VALIDATOR_KEYS_MOUNT_DIRPATH_ON_SERVICE_CONTAINER: node_keystore_files.files_artifact_uuid, - PRYSM_PASSWORD_MOUNT_DIRPATH_ON_SERVICE_CONTAINER: prysm_password_artifact_uuid, - } - if persistent: - files[VALIDATOR_DATA_DIRPATH_ON_SERVICE_CONTAINER] = Directory( - persistent_key="data-{0}".format(service_name) - ) - - return ServiceConfig( - image=validator_image, - ports=VALIDATOR_NODE_USED_PORTS, - cmd=cmd, - files=files, - private_ip_address_placeholder=PRIVATE_IP_ADDRESS_PLACEHOLDER, - min_cpu=v_min_cpu, - max_cpu=v_max_cpu, - min_memory=v_min_mem, - max_memory=v_max_mem, - labels=shared_utils.label_maker( - constants.CL_CLIENT_TYPE.prysm, - constants.CLIENT_TYPES.validator, - validator_image, - el_client_context.client_name, - extra_labels, - ), - tolerations=tolerations, - node_selectors=node_selectors, - ) - - def new_prysm_launcher( el_cl_genesis_data, jwt_file, diff --git a/src/cl/teku/teku_launcher.star b/src/cl/teku/teku_launcher.star index 80aae97b8..abc0a87f0 100644 --- a/src/cl/teku/teku_launcher.star +++ b/src/cl/teku/teku_launcher.star @@ -26,30 +26,9 @@ BEACON_MIN_CPU = 50 BEACON_MIN_MEMORY = 1024 BEACON_METRICS_PATH = "/metrics" -# ---------------------------------- Validator client ------------------------------------- -# These will get mounted as root and Teku needs directory write permissions, so we'll copy this -# into the Teku user's home directory to get around it -VALIDATOR_DATA_DIRPATH_ON_SERVICE_CONTAINER = "/data/teku/teku-validator-data" VALIDATOR_KEYS_DIRPATH_ON_SERVICE_CONTAINER 
= "/validator-keys" -VALIDATOR_KEYS_MOUNTPOINT_ON_CLIENTS = "/validator-keys" -VALIDATOR_HTTP_PORT_ID = "http" -VALIDATOR_METRICS_PORT_ID = "metrics" -VALIDATOR_HTTP_PORT_NUM = 5042 -VALIDATOR_METRICS_PORT_NUM = 5064 -VALIDATOR_HTTP_PORT_WAIT_DISABLED = None - -VALIDATOR_SUFFIX_SERVICE_NAME = "validator" - -# The min/max CPU/memory that the validator node can use -VALIDATOR_MIN_CPU = 50 -VALIDATOR_MAX_CPU = 300 -VALIDATOR_MIN_MEMORY = 128 -VALIDATOR_MAX_MEMORY = 512 - -VALIDATOR_METRICS_PATH = "/metrics" - MIN_PEERS = 1 PRIVATE_IP_ADDRESS_PLACEHOLDER = "KURTOSIS_IP_ADDR_PLACEHOLDER" @@ -69,20 +48,6 @@ BEACON_USED_PORTS = { ), } -VALIDATOR_USED_PORTS = { - VALIDATOR_HTTP_PORT_ID: shared_utils.new_port_spec( - VALIDATOR_HTTP_PORT_NUM, - shared_utils.TCP_PROTOCOL, - shared_utils.NOT_PROVIDED_APPLICATION_PROTOCOL, - VALIDATOR_HTTP_PORT_WAIT_DISABLED, - ), - VALIDATOR_METRICS_PORT_ID: shared_utils.new_port_spec( - VALIDATOR_METRICS_PORT_NUM, - shared_utils.TCP_PROTOCOL, - shared_utils.HTTP_APPLICATION_PROTOCOL, - ), -} - ENTRYPOINT_ARGS = ["sh", "-c"] @@ -109,31 +74,21 @@ def launch( bn_max_cpu, bn_min_mem, bn_max_mem, - v_min_cpu, - v_max_cpu, - v_min_mem, - v_max_mem, snooper_enabled, snooper_engine_context, blobber_enabled, blobber_extra_params, extra_beacon_params, - extra_validator_params, extra_beacon_labels, - extra_validator_labels, persistent, cl_volume_size, cl_tolerations, - validator_tolerations, participant_tolerations, global_tolerations, node_selectors, - split_mode_enabled, + use_separate_validator_client, ): beacon_service_name = "{0}".format(service_name) - validator_service_name = "{0}-{1}".format( - service_name, VALIDATOR_SUFFIX_SERVICE_NAME - ) log_level = input_parser.get_client_log_level_or_default( participant_log_level, global_log_level, VERBOSITY_LEVELS ) @@ -142,9 +97,7 @@ def launch( cl_tolerations, participant_tolerations, global_tolerations ) - extra_params = [param for param in extra_beacon_params] + [ - param for param in 
extra_validator_params - ] + extra_params = [param for param in extra_beacon_params] network_name = shared_utils.get_network_name(launcher.network) @@ -186,7 +139,7 @@ def launch( snooper_engine_context, extra_beacon_params, extra_beacon_labels, - split_mode_enabled, + use_separate_validator_client, persistent, cl_volume_size, tolerations, @@ -226,48 +179,6 @@ def launch( ) nodes_metrics_info = [beacon_node_metrics_info] - # Launch validator node if we have a keystore - validator_service = None - if node_keystore_files != None and split_mode_enabled: - v_min_cpu = int(v_min_cpu) if int(v_min_cpu) > 0 else VALIDATOR_MIN_CPU - v_max_cpu = int(v_max_cpu) if int(v_max_cpu) > 0 else VALIDATOR_MAX_CPU - v_min_mem = int(v_min_mem) if int(v_min_mem) > 0 else VALIDATOR_MIN_MEMORY - v_max_mem = int(v_max_mem) if int(v_max_mem) > 0 else VALIDATOR_MAX_MEMORY - tolerations = input_parser.get_client_tolerations( - validator_tolerations, participant_tolerations, global_tolerations - ) - validator_config = get_validator_config( - launcher.el_cl_genesis_data, - image, - validator_service_name, - log_level, - beacon_http_url, - el_client_context, - node_keystore_files, - v_min_cpu, - v_max_cpu, - v_min_mem, - v_max_mem, - validator_service_name, - extra_validator_params, - extra_validator_labels, - persistent, - tolerations, - node_selectors, - ) - - validator_service = plan.add_service(validator_service_name, validator_config) - - if validator_service: - validator_metrics_port = validator_service.ports[VALIDATOR_METRICS_PORT_ID] - validator_metrics_url = "{0}:{1}".format( - validator_service.ip_address, validator_metrics_port.number - ) - validator_node_metrics_info = node_metrics.new_node_metrics_info( - validator_service_name, VALIDATOR_METRICS_PATH, validator_metrics_url - ) - nodes_metrics_info.append(validator_node_metrics_info) - return cl_client_context.new_cl_client_context( "teku", beacon_node_enr, @@ -275,7 +186,6 @@ def launch( BEACON_HTTP_PORT_NUM, nodes_metrics_info, 
beacon_service_name, - validator_service_name, multiaddr=beacon_multiaddr, peer_id=beacon_peer_id, snooper_enabled=snooper_enabled, @@ -305,7 +215,7 @@ def get_beacon_config( snooper_engine_context, extra_params, extra_labels, - split_mode_enabled, + use_separate_validator_client, persistent, cl_volume_size, tolerations, @@ -382,7 +292,7 @@ def get_beacon_config( + el_client_context.client_name, ] - if node_keystore_files != None and not split_mode_enabled: + if node_keystore_files != None and not use_separate_validator_client: cmd.extend(validator_flags) if network not in constants.PUBLIC_NETWORKS: @@ -456,7 +366,7 @@ def get_beacon_config( constants.GENESIS_DATA_MOUNTPOINT_ON_CLIENTS: el_cl_genesis_data.files_artifact_uuid, constants.JWT_MOUNTPOINT_ON_CLIENTS: jwt_file, } - if node_keystore_files != None and not split_mode_enabled: + if node_keystore_files != None and not use_separate_validator_client: files[ VALIDATOR_KEYS_DIRPATH_ON_SERVICE_CONTAINER ] = node_keystore_files.files_artifact_uuid @@ -493,93 +403,6 @@ def get_beacon_config( ) -def get_validator_config( - el_cl_genesis_data, - image, - service_name, - log_level, - beacon_http_url, - el_client_context, - node_keystore_files, - v_min_cpu, - v_max_cpu, - v_min_mem, - v_max_mem, - validator_service_name, - extra_params, - extra_labels, - persistent, - tolerations, - node_selectors, -): - validator_keys_dirpath = "" - validator_secrets_dirpath = "" - if node_keystore_files != None: - validator_keys_dirpath = shared_utils.path_join( - VALIDATOR_KEYS_MOUNTPOINT_ON_CLIENTS, - node_keystore_files.teku_keys_relative_dirpath, - ) - validator_secrets_dirpath = shared_utils.path_join( - VALIDATOR_KEYS_MOUNTPOINT_ON_CLIENTS, - node_keystore_files.teku_secrets_relative_dirpath, - ) - - cmd = [ - "validator-client", - "--logging=" + log_level, - "--network=" - + constants.GENESIS_CONFIG_MOUNT_PATH_ON_CONTAINER - + "/config.yaml", - # "--data-path=" + VALIDATOR_DATA_DIRPATH_ON_SERVICE_CONTAINER, - # 
"--data-validator-path=" + VALIDATOR_DATA_DIRPATH_ON_SERVICE_CONTAINER, - "--beacon-node-api-endpoint=" + beacon_http_url, - "--validator-keys={0}:{1}".format( - validator_keys_dirpath, - validator_secrets_dirpath, - ), - "--validators-proposer-default-fee-recipient=" - + constants.VALIDATING_REWARDS_ACCOUNT, - "--validators-graffiti=" - + constants.CL_CLIENT_TYPE.teku - + "-" - + el_client_context.client_name, - # vvvvvvvvvvvvvvvvvvv METRICS CONFIG vvvvvvvvvvvvvvvvvvvvv - "--metrics-enabled=true", - "--metrics-host-allowlist=*", - "--metrics-interface=0.0.0.0", - "--metrics-port={0}".format(VALIDATOR_METRICS_PORT_NUM), - ] - - if len(extra_params) > 0: - cmd.extend([param for param in extra_params if param != "--split=true"]) - - files = { - constants.GENESIS_DATA_MOUNTPOINT_ON_CLIENTS: el_cl_genesis_data.files_artifact_uuid, - VALIDATOR_KEYS_MOUNTPOINT_ON_CLIENTS: node_keystore_files.files_artifact_uuid, - } - - return ServiceConfig( - image=image, - ports=VALIDATOR_USED_PORTS, - cmd=cmd, - files=files, - private_ip_address_placeholder=PRIVATE_IP_ADDRESS_PLACEHOLDER, - min_cpu=v_min_cpu, - max_cpu=v_max_cpu, - min_memory=v_min_mem, - max_memory=v_max_mem, - labels=shared_utils.label_maker( - constants.CL_CLIENT_TYPE.teku, - constants.CLIENT_TYPES.validator, - image, - el_client_context.client_name, - extra_labels, - ), - tolerations=tolerations, - node_selectors=node_selectors, - ) - - def new_teku_launcher(el_cl_genesis_data, jwt_file, network): return struct( el_cl_genesis_data=el_cl_genesis_data, jwt_file=jwt_file, network=network diff --git a/src/package_io/constants.star b/src/package_io/constants.star index 2e9f1c06e..9d277958f 100644 --- a/src/package_io/constants.star +++ b/src/package_io/constants.star @@ -16,6 +16,14 @@ CL_CLIENT_TYPE = struct( lodestar="lodestar", ) +VC_CLIENT_TYPE = struct( + lighthouse="lighthouse", + lodestar="lodestar", + nimbus="nimbus", + prysm="prysm", + teku="teku", +) + GLOBAL_CLIENT_LOG_LEVEL = struct( info="info", 
error="error", diff --git a/src/package_io/input_parser.star b/src/package_io/input_parser.star index 4c79f41e4..5b4a9bb5a 100644 --- a/src/package_io/input_parser.star +++ b/src/package_io/input_parser.star @@ -17,10 +17,18 @@ DEFAULT_CL_IMAGES = { "lighthouse": "sigp/lighthouse:latest", "teku": "consensys/teku:latest", "nimbus": "statusim/nimbus-eth2:multiarch-latest", - "prysm": "gcr.io/prysmaticlabs/prysm/beacon-chain:latest,gcr.io/prysmaticlabs/prysm/validator:latest", + "prysm": "gcr.io/prysmaticlabs/prysm/beacon-chain:latest", "lodestar": "chainsafe/lodestar:latest", } +DEFAULT_VC_IMAGES = { + "lighthouse": "sigp/lighthouse:latest", + "lodestar": "chainsafe/lodestar:latest", + "nimbus": "statusim/nimbus-validator-client:multiarch-latest", + "prysm": "gcr.io/prysmaticlabs/prysm/validator:latest", + "teku": "consensys/teku:latest", +} + MEV_BOOST_RELAY_DEFAULT_IMAGE = "flashbots/mev-boost-relay:0.27" MEV_BOOST_RELAY_IMAGE_NON_ZERO_CAPELLA = "flashbots/mev-boost-relay:0.26" @@ -166,10 +174,15 @@ def input_parser(plan, input_args): cl_client_image=participant["cl_client_image"], cl_client_log_level=participant["cl_client_log_level"], cl_client_volume_size=participant["cl_client_volume_size"], - cl_split_mode_enabled=participant["cl_split_mode_enabled"], cl_tolerations=participant["cl_tolerations"], - tolerations=participant["tolerations"], + use_separate_validator_client=participant[ + "use_separate_validator_client" + ], + validator_client_type=participant["validator_client_type"], + validator_client_image=participant["validator_client_image"], + validator_client_log_level=participant["validator_client_log_level"], validator_tolerations=participant["validator_tolerations"], + tolerations=participant["tolerations"], node_selectors=participant["node_selectors"], beacon_extra_params=participant["beacon_extra_params"], beacon_extra_labels=participant["beacon_extra_labels"], @@ -331,22 +344,13 @@ def parse_network_params(input_args): for index, participant in 
enumerate(result["participants"]): el_client_type = participant["el_client_type"] cl_client_type = participant["cl_client_type"] + validator_client_type = participant["validator_client_type"] if cl_client_type in (NIMBUS_NODE_NAME) and ( result["network_params"]["seconds_per_slot"] < 12 ): fail("nimbus can't be run with slot times below 12 seconds") - if participant["cl_split_mode_enabled"] and cl_client_type not in ( - "nimbus", - "teku", - ): - fail( - "split mode is only supported for nimbus and teku clients, but you specified {0}".format( - cl_client_type - ) - ) - el_image = participant["el_client_image"] if el_image == "": default_image = DEFAULT_EL_IMAGES.get(el_client_type, "") @@ -369,6 +373,33 @@ def parse_network_params(input_args): ) participant["cl_client_image"] = default_image + if participant["use_separate_validator_client"] == None: + # Default to false for CL clients that can run validator clients + # in the same process. + if cl_client_type in ( + constants.CL_CLIENT_TYPE.nimbus, + constants.CL_CLIENT_TYPE.teku, + ): + participant["use_separate_validator_client"] = False + else: + participant["use_separate_validator_client"] = True + + if validator_client_type == "": + # Defaults to matching the chosen CL client + validator_client_type = cl_client_type + participant["validator_client_type"] = validator_client_type + + validator_client_image = participant["validator_client_image"] + if validator_client_image == "": + default_image = DEFAULT_VC_IMAGES.get(validator_client_type, "") + if default_image == "": + fail( + "{0} received an empty image name and we don't have a default for it".format( + validator_client_type + ) + ) + participant["validator_client_image"] = default_image + snooper_enabled = participant["snooper_enabled"] if snooper_enabled == False: default_snooper_enabled = result["snooper_enabled"] @@ -591,7 +622,10 @@ def default_participant(): "cl_client_image": "", "cl_client_log_level": "", "cl_client_volume_size": 0, - 
"cl_split_mode_enabled": False, + "use_separate_validator_client": None, + "validator_client_type": "", + "validator_client_log_level": "", + "validator_client_image": "", "cl_tolerations": [], "validator_tolerations": [], "tolerations": [], diff --git a/src/participant.star b/src/participant.star index 315a4a51c..58ed4f4b5 100644 --- a/src/participant.star +++ b/src/participant.star @@ -1,8 +1,10 @@ def new_participant( el_client_type, cl_client_type, + validator_client_type, el_client_context, cl_client_context, + validator_client_context, snooper_engine_context, ethereum_metrics_exporter_context, xatu_sentry_context, @@ -10,8 +12,10 @@ def new_participant( return struct( el_client_type=el_client_type, cl_client_type=cl_client_type, + validator_client_type=validator_client_type, el_client_context=el_client_context, cl_client_context=cl_client_context, + validator_client_context=validator_client_context, snooper_engine_context=snooper_engine_context, ethereum_metrics_exporter_context=ethereum_metrics_exporter_context, xatu_sentry_context=xatu_sentry_context, diff --git a/src/participant_network.star b/src/participant_network.star index 13bb1e97a..aeea46048 100644 --- a/src/participant_network.star +++ b/src/participant_network.star @@ -28,6 +28,8 @@ nimbus = import_module("./cl/nimbus/nimbus_launcher.star") prysm = import_module("./cl/prysm/prysm_launcher.star") teku = import_module("./cl/teku/teku_launcher.star") +validator_client = import_module("./validator_client/validator_client_launcher.star") + snooper = import_module("./snooper/snooper_engine_launcher.star") ethereum_metrics_exporter = import_module( @@ -608,26 +610,19 @@ def launch_participant_network( participant.bn_max_cpu, participant.bn_min_mem, participant.bn_max_mem, - participant.v_min_cpu, - participant.v_max_cpu, - participant.v_min_mem, - participant.v_max_mem, participant.snooper_enabled, snooper_engine_context, participant.blobber_enabled, participant.blobber_extra_params, 
participant.beacon_extra_params, - participant.validator_extra_params, participant.beacon_extra_labels, - participant.validator_extra_labels, persistent, participant.cl_client_volume_size, participant.cl_tolerations, - participant.validator_tolerations, participant.tolerations, global_tolerations, node_selectors, - participant.cl_split_mode_enabled, + participant.use_separate_validator_client, ) else: boot_cl_client_ctx = all_cl_client_contexts @@ -645,26 +640,19 @@ def launch_participant_network( participant.bn_max_cpu, participant.bn_min_mem, participant.bn_max_mem, - participant.v_min_cpu, - participant.v_max_cpu, - participant.v_min_mem, - participant.v_max_mem, participant.snooper_enabled, snooper_engine_context, participant.blobber_enabled, participant.blobber_extra_params, participant.beacon_extra_params, - participant.validator_extra_params, participant.beacon_extra_labels, - participant.validator_extra_labels, persistent, participant.cl_client_volume_size, participant.cl_tolerations, - participant.validator_tolerations, participant.tolerations, global_tolerations, node_selectors, - participant.cl_split_mode_enabled, + participant.use_separate_validator_client, ) # Add participant cl additional prometheus labels @@ -725,14 +713,93 @@ def launch_participant_network( plan.print("Successfully added {0} CL participants".format(num_participants)) + all_validator_client_contexts = [] + # Some CL clients cannot run validator clients in the same process and need + # a separate validator client + _cls_that_need_separate_vc = [ + constants.CL_CLIENT_TYPE.prysm, + constants.CL_CLIENT_TYPE.lodestar, + constants.CL_CLIENT_TYPE.lighthouse, + ] + for index, participant in enumerate(participants): + cl_client_type = participant.cl_client_type + validator_client_type = participant.validator_client_type + + if participant.use_separate_validator_client == None: + # This should only be the case for the MEV participant, + # the regular participants default to False/True + 
all_validator_client_contexts.append(None) + continue + + if ( + cl_client_type in _cls_that_need_separate_vc + and not participant.use_separate_validator_client + ): + fail("{0} needs a separate validator client!".format(cl_client_type)) + + if not participant.use_separate_validator_client: + all_validator_client_contexts.append(None) + continue + + el_client_context = all_el_client_contexts[index] + cl_client_context = all_cl_client_contexts[index] + + # Zero-pad the index using the calculated zfill value + index_str = shared_utils.zfill_custom(index + 1, len(str(len(participants)))) + + plan.print( + "Using separate validator client for participant #{0}".format(index_str) + ) + + vc_keystores = None + if participant.validator_count != 0: + vc_keystores = preregistered_validator_keys_for_nodes[index] + + validator_client_context = validator_client.launch( + plan=plan, + launcher=validator_client.new_validator_client_launcher( + el_cl_genesis_data=el_cl_data + ), + service_name="validator-client-{0}-{1}".format( + index_str, validator_client_type + ), + validator_client_type=validator_client_type, + image=participant.validator_client_image, + participant_log_level=participant.validator_client_log_level, + global_log_level=global_log_level, + cl_client_context=cl_client_context, + el_client_context=el_client_context, + node_keystore_files=vc_keystores, + v_min_cpu=participant.v_min_cpu, + v_max_cpu=participant.v_max_cpu, + v_min_mem=participant.v_min_mem, + v_max_mem=participant.v_max_mem, + extra_params=participant.validator_extra_params, + extra_labels=participant.validator_extra_labels, + prysm_password_relative_filepath=prysm_password_relative_filepath, + prysm_password_artifact_uuid=prysm_password_artifact_uuid, + validator_tolerations=participant.validator_tolerations, + participant_tolerations=participant.tolerations, + global_tolerations=global_tolerations, + node_selectors=node_selectors, + ) + all_validator_client_contexts.append(validator_client_context) 
+ + if validator_client_context and validator_client_context.metrics_info: + validator_client_context.metrics_info[ + "config" + ] = participant.prometheus_config + all_participants = [] for index, participant in enumerate(participants): el_client_type = participant.el_client_type cl_client_type = participant.cl_client_type + validator_client_type = participant.validator_client_type el_client_context = all_el_client_contexts[index] cl_client_context = all_cl_client_contexts[index] + validator_client_context = all_validator_client_contexts[index] if participant.snooper_enabled: snooper_engine_context = all_snooper_engine_contexts[index] @@ -751,8 +818,10 @@ def launch_participant_network( participant_entry = participant_module.new_participant( el_client_type, cl_client_type, + validator_client_type, el_client_context, cl_client_context, + validator_client_context, snooper_engine_context, ethereum_metrics_exporter_context, xatu_sentry_context, diff --git a/src/prometheus/prometheus_launcher.star b/src/prometheus/prometheus_launcher.star index 4e66e8505..acb83a557 100644 --- a/src/prometheus/prometheus_launcher.star +++ b/src/prometheus/prometheus_launcher.star @@ -23,6 +23,7 @@ def launch_prometheus( plan, el_client_contexts, cl_client_contexts, + validator_client_contexts, additional_metrics_jobs, ethereum_metrics_exporter_contexts, xatu_sentry_contexts, @@ -31,6 +32,7 @@ def launch_prometheus( metrics_jobs = get_metrics_jobs( el_client_contexts, cl_client_contexts, + validator_client_contexts, additional_metrics_jobs, ethereum_metrics_exporter_contexts, xatu_sentry_contexts, @@ -51,6 +53,7 @@ def launch_prometheus( def get_metrics_jobs( el_client_contexts, cl_client_contexts, + validator_client_contexts, additional_metrics_jobs, ethereum_metrics_exporter_contexts, xatu_sentry_contexts, @@ -118,38 +121,29 @@ def get_metrics_jobs( scrape_interval=scrape_interval, ) ) - if ( - len(context.cl_nodes_metrics_info) >= 2 - and context.cl_nodes_metrics_info[1] != None - ): 
- # Adding validator node metrics - validator_metrics_info = context.cl_nodes_metrics_info[1] - scrape_interval = PROMETHEUS_DEFAULT_SCRAPE_INTERVAL - labels = { - "service": context.validator_service_name, - "client_type": VALIDATOR_CLIENT_TYPE, - "client_name": context.client_name, - } - additional_config = validator_metrics_info[ - METRICS_INFO_ADDITIONAL_CONFIG_KEY - ] - if additional_config != None: - if additional_config.labels != None: - labels.update(additional_config.labels) - if ( - additional_config.scrape_interval != None - and additional_config.scrape_interval != "" - ): - scrape_interval = additional_config.scrape_interval - metrics_jobs.append( - new_metrics_job( - job_name=validator_metrics_info[METRICS_INFO_NAME_KEY], - endpoint=validator_metrics_info[METRICS_INFO_URL_KEY], - metrics_path=validator_metrics_info[METRICS_INFO_PATH_KEY], - labels=labels, - scrape_interval=scrape_interval, - ) + + # Adding validator clients metrics jobs + for context in validator_client_contexts: + if context == None: + continue + metrics_info = context.metrics_info + + scrape_interval = PROMETHEUS_DEFAULT_SCRAPE_INTERVAL + labels = { + "service": context.service_name, + "client_type": VALIDATOR_CLIENT_TYPE, + "client_name": context.client_name, + } + + metrics_jobs.append( + new_metrics_job( + job_name=metrics_info[METRICS_INFO_NAME_KEY], + endpoint=metrics_info[METRICS_INFO_URL_KEY], + metrics_path=metrics_info[METRICS_INFO_PATH_KEY], + labels=labels, + scrape_interval=scrape_interval, ) + ) # Adding ethereum-metrics-exporter metrics jobs for context in ethereum_metrics_exporter_contexts: diff --git a/src/validator_client/lighthouse.star b/src/validator_client/lighthouse.star new file mode 100644 index 000000000..2fcc833ce --- /dev/null +++ b/src/validator_client/lighthouse.star @@ -0,0 +1,104 @@ +constants = import_module("../package_io/constants.star") +input_parser = import_module("../package_io/input_parser.star") +shared_utils = 
import_module("../shared_utils/shared_utils.star") +validator_client_shared = import_module("./shared.star") + +RUST_BACKTRACE_ENVVAR_NAME = "RUST_BACKTRACE" +RUST_FULL_BACKTRACE_KEYWORD = "full" + +VERBOSITY_LEVELS = { + constants.GLOBAL_CLIENT_LOG_LEVEL.error: "error", + constants.GLOBAL_CLIENT_LOG_LEVEL.warn: "warn", + constants.GLOBAL_CLIENT_LOG_LEVEL.info: "info", + constants.GLOBAL_CLIENT_LOG_LEVEL.debug: "debug", + constants.GLOBAL_CLIENT_LOG_LEVEL.trace: "trace", +} + + +def get_config( + el_cl_genesis_data, + image, + participant_log_level, + global_log_level, + beacon_http_url, + cl_client_context, + el_client_context, + node_keystore_files, + v_min_cpu, + v_max_cpu, + v_min_mem, + v_max_mem, + extra_params, + extra_labels, + tolerations, + node_selectors, +): + log_level = input_parser.get_client_log_level_or_default( + participant_log_level, global_log_level, VERBOSITY_LEVELS + ) + + validator_keys_dirpath = shared_utils.path_join( + validator_client_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT, + node_keystore_files.raw_keys_relative_dirpath, + ) + validator_secrets_dirpath = shared_utils.path_join( + validator_client_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT, + node_keystore_files.raw_secrets_relative_dirpath, + ) + + cmd = [ + "lighthouse", + "validator_client", + "--debug-level=" + log_level, + "--testnet-dir=" + constants.GENESIS_CONFIG_MOUNT_PATH_ON_CONTAINER, + "--validators-dir=" + validator_keys_dirpath, + # NOTE: When secrets-dir is specified, we can't add the --data-dir flag + "--secrets-dir=" + validator_secrets_dirpath, + # The node won't have a slashing protection database and will fail to start otherwise + "--init-slashing-protection", + "--beacon-nodes=" + beacon_http_url, + # "--enable-doppelganger-protection", // Disabled to not have to wait 2 epochs before validator can start + # burn address - If unset, the validator will scream in its logs + "--suggested-fee-recipient=" + constants.VALIDATING_REWARDS_ACCOUNT, + # vvvvvvvvvvvvvvvvvvv 
PROMETHEUS CONFIG vvvvvvvvvvvvvvvvvvvvv + "--metrics", + "--metrics-address=0.0.0.0", + "--metrics-allow-origin=*", + "--metrics-port={0}".format( + validator_client_shared.VALIDATOR_CLIENT_METRICS_PORT_NUM + ), + # ^^^^^^^^^^^^^^^^^^^ PROMETHEUS CONFIG ^^^^^^^^^^^^^^^^^^^^^ + "--graffiti=" + + cl_client_context.client_name + + "-" + + el_client_context.client_name, + ] + + if len(extra_params): + cmd.extend([param for param in extra_params]) + + files = { + constants.GENESIS_DATA_MOUNTPOINT_ON_CLIENTS: el_cl_genesis_data.files_artifact_uuid, + validator_client_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT: node_keystore_files.files_artifact_uuid, + } + + return ServiceConfig( + image=image, + ports=validator_client_shared.VALIDATOR_CLIENT_USED_PORTS, + cmd=cmd, + files=files, + env_vars={RUST_BACKTRACE_ENVVAR_NAME: RUST_FULL_BACKTRACE_KEYWORD}, + min_cpu=v_min_cpu, + max_cpu=v_max_cpu, + min_memory=v_min_mem, + max_memory=v_max_mem, + labels=shared_utils.label_maker( + constants.VC_CLIENT_TYPE.lighthouse, + constants.CLIENT_TYPES.validator, + image, + cl_client_context.client_name, + extra_labels, + ), + tolerations=tolerations, + node_selectors=node_selectors, + ) diff --git a/src/validator_client/lodestar.star b/src/validator_client/lodestar.star new file mode 100644 index 000000000..c60c1b025 --- /dev/null +++ b/src/validator_client/lodestar.star @@ -0,0 +1,98 @@ +constants = import_module("../package_io/constants.star") +input_parser = import_module("../package_io/input_parser.star") +shared_utils = import_module("../shared_utils/shared_utils.star") +validator_client_shared = import_module("./shared.star") + +VERBOSITY_LEVELS = { + constants.GLOBAL_CLIENT_LOG_LEVEL.error: "error", + constants.GLOBAL_CLIENT_LOG_LEVEL.warn: "warn", + constants.GLOBAL_CLIENT_LOG_LEVEL.info: "info", + constants.GLOBAL_CLIENT_LOG_LEVEL.debug: "debug", + constants.GLOBAL_CLIENT_LOG_LEVEL.trace: "trace", +} + + +def get_config( + el_cl_genesis_data, + image, + participant_log_level, + 
global_log_level, + beacon_http_url, + cl_client_context, + el_client_context, + node_keystore_files, + v_min_cpu, + v_max_cpu, + v_min_mem, + v_max_mem, + extra_params, + extra_labels, + tolerations, + node_selectors, +): + log_level = input_parser.get_client_log_level_or_default( + participant_log_level, global_log_level, VERBOSITY_LEVELS + ) + + validator_keys_dirpath = shared_utils.path_join( + validator_client_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT, + node_keystore_files.raw_keys_relative_dirpath, + ) + + validator_secrets_dirpath = shared_utils.path_join( + validator_client_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT, + node_keystore_files.raw_secrets_relative_dirpath, + ) + + cmd = [ + "validator", + "--logLevel=" + log_level, + "--paramsFile=" + + constants.GENESIS_CONFIG_MOUNT_PATH_ON_CONTAINER + + "/config.yaml", + "--beaconNodes=" + beacon_http_url, + "--keystoresDir=" + validator_keys_dirpath, + "--secretsDir=" + validator_secrets_dirpath, + "--suggestedFeeRecipient=" + constants.VALIDATING_REWARDS_ACCOUNT, + # vvvvvvvvvvvvvvvvvvv PROMETHEUS CONFIG vvvvvvvvvvvvvvvvvvvvv + "--metrics", + "--metrics.address=0.0.0.0", + "--metrics.port={0}".format( + validator_client_shared.VALIDATOR_CLIENT_METRICS_PORT_NUM + ), + # ^^^^^^^^^^^^^^^^^^^ PROMETHEUS CONFIG ^^^^^^^^^^^^^^^^^^^^^ + "--graffiti=" + + cl_client_context.client_name + + "-" + + el_client_context.client_name, + ] + + if len(extra_params) > 0: + # this is a repeated, we convert it into Starlark + cmd.extend([param for param in extra_params]) + + files = { + constants.GENESIS_DATA_MOUNTPOINT_ON_CLIENTS: el_cl_genesis_data.files_artifact_uuid, + validator_client_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT: node_keystore_files.files_artifact_uuid, + } + + return ServiceConfig( + image=image, + ports=validator_client_shared.VALIDATOR_CLIENT_USED_PORTS, + cmd=cmd, + files=files, + private_ip_address_placeholder=validator_client_shared.PRIVATE_IP_ADDRESS_PLACEHOLDER, + min_cpu=v_min_cpu, + max_cpu=v_max_cpu, + 
min_memory=v_min_mem, + max_memory=v_max_mem, + labels=shared_utils.label_maker( + constants.VC_CLIENT_TYPE.lodestar, + constants.CLIENT_TYPES.validator, + image, + cl_client_context.client_name, + extra_labels, + ), + tolerations=tolerations, + node_selectors=node_selectors, + ) diff --git a/src/validator_client/nimbus.star b/src/validator_client/nimbus.star new file mode 100644 index 000000000..7a6ecaed5 --- /dev/null +++ b/src/validator_client/nimbus.star @@ -0,0 +1,79 @@ +constants = import_module("../package_io/constants.star") +shared_utils = import_module("../shared_utils/shared_utils.star") +validator_client_shared = import_module("./shared.star") + + +def get_config( + el_cl_genesis_data, + image, + beacon_http_url, + cl_client_context, + el_client_context, + node_keystore_files, + v_min_cpu, + v_max_cpu, + v_min_mem, + v_max_mem, + extra_params, + extra_labels, + tolerations, + node_selectors, +): + validator_keys_dirpath = "" + validator_secrets_dirpath = "" + if node_keystore_files != None: + validator_keys_dirpath = shared_utils.path_join( + validator_client_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT, + node_keystore_files.nimbus_keys_relative_dirpath, + ) + validator_secrets_dirpath = shared_utils.path_join( + validator_client_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT, + node_keystore_files.raw_secrets_relative_dirpath, + ) + + cmd = [ + "--beacon-node=" + beacon_http_url, + "--validators-dir=" + validator_keys_dirpath, + "--secrets-dir=" + validator_secrets_dirpath, + "--suggested-fee-recipient=" + constants.VALIDATING_REWARDS_ACCOUNT, + # vvvvvvvvvvvvvvvvvvv METRICS CONFIG vvvvvvvvvvvvvvvvvvvvv + "--metrics", + "--metrics-address=0.0.0.0", + "--metrics-port={0}".format( + validator_client_shared.VALIDATOR_CLIENT_METRICS_PORT_NUM + ), + "--graffiti=" + + cl_client_context.client_name + + "-" + + el_client_context.client_name, + ] + + if len(extra_params) > 0: + # this is a repeated, we convert it into Starlark + cmd.extend([param for param in 
extra_params]) + + files = { + validator_client_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT: node_keystore_files.files_artifact_uuid, + } + + return ServiceConfig( + image=image, + ports=validator_client_shared.VALIDATOR_CLIENT_USED_PORTS, + cmd=cmd, + files=files, + private_ip_address_placeholder=validator_client_shared.PRIVATE_IP_ADDRESS_PLACEHOLDER, + min_cpu=v_min_cpu, + max_cpu=v_max_cpu, + min_memory=v_min_mem, + max_memory=v_max_mem, + labels=shared_utils.label_maker( + constants.VC_CLIENT_TYPE.nimbus, + constants.CLIENT_TYPES.validator, + image, + cl_client_context.client_name, + extra_labels, + ), + user=User(uid=0, gid=0), + tolerations=tolerations, + node_selectors=node_selectors, + ) diff --git a/src/validator_client/prysm.star b/src/validator_client/prysm.star new file mode 100644 index 000000000..5c05ac9c1 --- /dev/null +++ b/src/validator_client/prysm.star @@ -0,0 +1,92 @@ +constants = import_module("../package_io/constants.star") +shared_utils = import_module("../shared_utils/shared_utils.star") +validator_client_shared = import_module("./shared.star") + +PRYSM_PASSWORD_MOUNT_DIRPATH_ON_SERVICE_CONTAINER = "/prysm-password" +PRYSM_BEACON_RPC_PORT = 4000 + + +def get_config( + el_cl_genesis_data, + image, + beacon_http_url, + cl_client_context, + el_client_context, + node_keystore_files, + v_min_cpu, + v_max_cpu, + v_min_mem, + v_max_mem, + extra_params, + extra_labels, + prysm_password_relative_filepath, + prysm_password_artifact_uuid, + tolerations, + node_selectors, +): + validator_keys_dirpath = shared_utils.path_join( + validator_client_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT, + node_keystore_files.prysm_relative_dirpath, + ) + validator_secrets_dirpath = shared_utils.path_join( + PRYSM_PASSWORD_MOUNT_DIRPATH_ON_SERVICE_CONTAINER, + prysm_password_relative_filepath, + ) + + cmd = [ + "--accept-terms-of-use=true", # it's mandatory in order to run the node + "--chain-config-file=" + + constants.GENESIS_CONFIG_MOUNT_PATH_ON_CONTAINER + + 
"/config.yaml", + "--beacon-rpc-provider=" + + "{}:{}".format( + cl_client_context.ip_addr, + PRYSM_BEACON_RPC_PORT, + ), + "--beacon-rest-api-provider=" + beacon_http_url, + "--wallet-dir=" + validator_keys_dirpath, + "--wallet-password-file=" + validator_secrets_dirpath, + "--suggested-fee-recipient=" + constants.VALIDATING_REWARDS_ACCOUNT, + # vvvvvvvvvvvvvvvvvvv METRICS CONFIG vvvvvvvvvvvvvvvvvvvvv + "--disable-monitoring=false", + "--monitoring-host=0.0.0.0", + "--monitoring-port={0}".format( + validator_client_shared.VALIDATOR_CLIENT_METRICS_PORT_NUM + ), + # ^^^^^^^^^^^^^^^^^^^ METRICS CONFIG ^^^^^^^^^^^^^^^^^^^^^ + "--graffiti=" + + cl_client_context.client_name + + "-" + + el_client_context.client_name, + ] + + if len(extra_params) > 0: + # this is a repeated, we convert it into Starlark + cmd.extend([param for param in extra_params]) + + files = { + constants.GENESIS_DATA_MOUNTPOINT_ON_CLIENTS: el_cl_genesis_data.files_artifact_uuid, + validator_client_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT: node_keystore_files.files_artifact_uuid, + PRYSM_PASSWORD_MOUNT_DIRPATH_ON_SERVICE_CONTAINER: prysm_password_artifact_uuid, + } + + return ServiceConfig( + image=image, + ports=validator_client_shared.VALIDATOR_CLIENT_USED_PORTS, + cmd=cmd, + files=files, + private_ip_address_placeholder=validator_client_shared.PRIVATE_IP_ADDRESS_PLACEHOLDER, + min_cpu=v_min_cpu, + max_cpu=v_max_cpu, + min_memory=v_min_mem, + max_memory=v_max_mem, + labels=shared_utils.label_maker( + constants.VC_CLIENT_TYPE.prysm, + constants.CLIENT_TYPES.validator, + image, + cl_client_context.client_name, + extra_labels, + ), + tolerations=tolerations, + node_selectors=node_selectors, + ) diff --git a/src/validator_client/shared.star b/src/validator_client/shared.star new file mode 100644 index 000000000..fbbf27108 --- /dev/null +++ b/src/validator_client/shared.star @@ -0,0 +1,16 @@ +shared_utils = import_module("../shared_utils/shared_utils.star") + +PRIVATE_IP_ADDRESS_PLACEHOLDER = 
"KURTOSIS_IP_ADDR_PLACEHOLDER" +VALIDATOR_CLIENT_KEYS_MOUNTPOINT = "/keystores" + +VALIDATOR_CLIENT_METRICS_PORT_NUM = 8080 +VALIDATOR_CLIENT_METRICS_PORT_ID = "metrics" +METRICS_PATH = "/metrics" + +VALIDATOR_CLIENT_USED_PORTS = { + VALIDATOR_CLIENT_METRICS_PORT_ID: shared_utils.new_port_spec( + VALIDATOR_CLIENT_METRICS_PORT_NUM, + shared_utils.TCP_PROTOCOL, + shared_utils.HTTP_APPLICATION_PROTOCOL, + ), +} diff --git a/src/validator_client/teku.star b/src/validator_client/teku.star new file mode 100644 index 000000000..f644babf3 --- /dev/null +++ b/src/validator_client/teku.star @@ -0,0 +1,87 @@ +constants = import_module("../package_io/constants.star") +shared_utils = import_module("../shared_utils/shared_utils.star") +validator_client_shared = import_module("./shared.star") + + +def get_config( + el_cl_genesis_data, + image, + beacon_http_url, + cl_client_context, + el_client_context, + node_keystore_files, + v_min_cpu, + v_max_cpu, + v_min_mem, + v_max_mem, + extra_params, + extra_labels, + tolerations, + node_selectors, +): + validator_keys_dirpath = "" + validator_secrets_dirpath = "" + if node_keystore_files != None: + validator_keys_dirpath = shared_utils.path_join( + validator_client_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT, + node_keystore_files.teku_keys_relative_dirpath, + ) + validator_secrets_dirpath = shared_utils.path_join( + validator_client_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT, + node_keystore_files.teku_secrets_relative_dirpath, + ) + + cmd = [ + "validator-client", + "--network=" + + constants.GENESIS_CONFIG_MOUNT_PATH_ON_CONTAINER + + "/config.yaml", + "--beacon-node-api-endpoint=" + beacon_http_url, + "--validator-keys={0}:{1}".format( + validator_keys_dirpath, + validator_secrets_dirpath, + ), + "--validators-proposer-default-fee-recipient=" + + constants.VALIDATING_REWARDS_ACCOUNT, + "--validators-graffiti=" + + cl_client_context.client_name + + "-" + + el_client_context.client_name, + # vvvvvvvvvvvvvvvvvvv METRICS CONFIG 
vvvvvvvvvvvvvvvvvvvvv + "--metrics-enabled=true", + "--metrics-host-allowlist=*", + "--metrics-interface=0.0.0.0", + "--metrics-port={0}".format( + validator_client_shared.VALIDATOR_CLIENT_METRICS_PORT_NUM + ), + ] + + if len(extra_params) > 0: + # this is a repeated, we convert it into Starlark + cmd.extend([param for param in extra_params]) + + files = { + constants.GENESIS_DATA_MOUNTPOINT_ON_CLIENTS: el_cl_genesis_data.files_artifact_uuid, + validator_client_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT: node_keystore_files.files_artifact_uuid, + } + + return ServiceConfig( + image=image, + ports=validator_client_shared.VALIDATOR_CLIENT_USED_PORTS, + cmd=cmd, + files=files, + private_ip_address_placeholder=validator_client_shared.PRIVATE_IP_ADDRESS_PLACEHOLDER, + min_cpu=v_min_cpu, + max_cpu=v_max_cpu, + min_memory=v_min_mem, + max_memory=v_max_mem, + labels=shared_utils.label_maker( + constants.VC_CLIENT_TYPE.teku, + constants.CLIENT_TYPES.validator, + image, + cl_client_context.client_name, + extra_labels, + ), + tolerations=tolerations, + node_selectors=node_selectors, + ) diff --git a/src/validator_client/validator_client_context.star b/src/validator_client/validator_client_context.star new file mode 100644 index 000000000..07939582c --- /dev/null +++ b/src/validator_client/validator_client_context.star @@ -0,0 +1,10 @@ +def new_validator_client_context( + service_name, + client_name, + metrics_info, +): + return struct( + service_name=service_name, + client_name=client_name, + metrics_info=metrics_info, + ) diff --git a/src/validator_client/validator_client_launcher.star b/src/validator_client/validator_client_launcher.star new file mode 100644 index 000000000..2d0fbcc09 --- /dev/null +++ b/src/validator_client/validator_client_launcher.star @@ -0,0 +1,179 @@ +input_parser = import_module("../package_io/input_parser.star") +constants = import_module("../package_io/constants.star") +node_metrics = import_module("../node_metrics_info.star") 
+validator_client_context = import_module("./validator_client_context.star") + +lighthouse = import_module("./lighthouse.star") +lodestar = import_module("./lodestar.star") +nimbus = import_module("./nimbus.star") +prysm = import_module("./prysm.star") +teku = import_module("./teku.star") +validator_client_shared = import_module("./shared.star") + +# The defaults for min/max CPU/memory that the validator client can use +MIN_CPU = 50 +MAX_CPU = 300 +MIN_MEMORY = 128 +MAX_MEMORY = 512 + + +def launch( + plan, + launcher, + service_name, + validator_client_type, + image, + participant_log_level, + global_log_level, + cl_client_context, + el_client_context, + node_keystore_files, + v_min_cpu, + v_max_cpu, + v_min_mem, + v_max_mem, + extra_params, + extra_labels, + prysm_password_relative_filepath, + prysm_password_artifact_uuid, + validator_tolerations, + participant_tolerations, + global_tolerations, + node_selectors, +): + if node_keystore_files == None: + return None + + tolerations = input_parser.get_client_tolerations( + validator_tolerations, participant_tolerations, global_tolerations + ) + + beacon_http_url = "http://{}:{}".format( + cl_client_context.ip_addr, + cl_client_context.http_port_num, + ) + + v_min_cpu = int(v_min_cpu) if int(v_min_cpu) > 0 else MIN_CPU + v_max_cpu = int(v_max_cpu) if int(v_max_cpu) > 0 else MAX_CPU + v_min_mem = int(v_min_mem) if int(v_min_mem) > 0 else MIN_MEMORY + v_max_mem = int(v_max_mem) if int(v_max_mem) > 0 else MAX_MEMORY + + if validator_client_type == constants.VC_CLIENT_TYPE.lighthouse: + config = lighthouse.get_config( + el_cl_genesis_data=launcher.el_cl_genesis_data, + image=image, + participant_log_level=participant_log_level, + global_log_level=global_log_level, + beacon_http_url=beacon_http_url, + cl_client_context=cl_client_context, + el_client_context=el_client_context, + node_keystore_files=node_keystore_files, + v_min_cpu=v_min_cpu, + v_max_cpu=v_max_cpu, + v_min_mem=v_min_mem, + v_max_mem=v_max_mem, + 
extra_params=extra_params, + extra_labels=extra_labels, + tolerations=tolerations, + node_selectors=node_selectors, + ) + elif validator_client_type == constants.VC_CLIENT_TYPE.lodestar: + config = lodestar.get_config( + el_cl_genesis_data=launcher.el_cl_genesis_data, + image=image, + participant_log_level=participant_log_level, + global_log_level=global_log_level, + beacon_http_url=beacon_http_url, + cl_client_context=cl_client_context, + el_client_context=el_client_context, + node_keystore_files=node_keystore_files, + v_min_cpu=v_min_cpu, + v_max_cpu=v_max_cpu, + v_min_mem=v_min_mem, + v_max_mem=v_max_mem, + extra_params=extra_params, + extra_labels=extra_labels, + tolerations=tolerations, + node_selectors=node_selectors, + ) + elif validator_client_type == constants.VC_CLIENT_TYPE.teku: + config = teku.get_config( + el_cl_genesis_data=launcher.el_cl_genesis_data, + image=image, + beacon_http_url=beacon_http_url, + cl_client_context=cl_client_context, + el_client_context=el_client_context, + node_keystore_files=node_keystore_files, + v_min_cpu=v_min_cpu, + v_max_cpu=v_max_cpu, + v_min_mem=v_min_mem, + v_max_mem=v_max_mem, + extra_params=extra_params, + extra_labels=extra_labels, + tolerations=tolerations, + node_selectors=node_selectors, + ) + elif validator_client_type == constants.VC_CLIENT_TYPE.nimbus: + config = nimbus.get_config( + el_cl_genesis_data=launcher.el_cl_genesis_data, + image=image, + beacon_http_url=beacon_http_url, + cl_client_context=cl_client_context, + el_client_context=el_client_context, + node_keystore_files=node_keystore_files, + v_min_cpu=v_min_cpu, + v_max_cpu=v_max_cpu, + v_min_mem=v_min_mem, + v_max_mem=v_max_mem, + extra_params=extra_params, + extra_labels=extra_labels, + tolerations=tolerations, + node_selectors=node_selectors, + ) + elif validator_client_type == constants.VC_CLIENT_TYPE.prysm: + # Prysm VC only works with Prysm beacon node right now + if cl_client_context.client_name != constants.CL_CLIENT_TYPE.prysm: + fail("Prysm 
VC is only compatible with Prysm beacon node") + + config = prysm.get_config( + el_cl_genesis_data=launcher.el_cl_genesis_data, + image=image, + beacon_http_url=beacon_http_url, + cl_client_context=cl_client_context, + el_client_context=el_client_context, + node_keystore_files=node_keystore_files, + v_min_cpu=v_min_cpu, + v_max_cpu=v_max_cpu, + v_min_mem=v_min_mem, + v_max_mem=v_max_mem, + extra_params=extra_params, + extra_labels=extra_labels, + prysm_password_relative_filepath=prysm_password_relative_filepath, + prysm_password_artifact_uuid=prysm_password_artifact_uuid, + tolerations=tolerations, + node_selectors=node_selectors, + ) + else: + fail("Unsupported validator_client_type: {0}".format(validator_client_type)) + + validator_service = plan.add_service(service_name, config) + + validator_metrics_port = validator_service.ports[ + validator_client_shared.VALIDATOR_CLIENT_METRICS_PORT_ID + ] + validator_metrics_url = "{0}:{1}".format( + validator_service.ip_address, validator_metrics_port.number + ) + validator_node_metrics_info = node_metrics.new_node_metrics_info( + service_name, validator_client_shared.METRICS_PATH, validator_metrics_url + ) + + return validator_client_context.new_validator_client_context( + service_name=service_name, + client_name=validator_client_type, + metrics_info=validator_node_metrics_info, + ) + + +def new_validator_client_launcher(el_cl_genesis_data): + return struct(el_cl_genesis_data=el_cl_genesis_data) From d599729295aa3274d23e4e8e99b56288cde3fc04 Mon Sep 17 00:00:00 2001 From: Barnabas Busa Date: Tue, 27 Feb 2024 11:05:16 +0100 Subject: [PATCH 23/33] feat: add nimbus-eth1 (#496) --- .github/tests/nimbus-eth1-all.yaml | 14 ++ README.md | 3 +- src/el/nimbus-eth1/nimbus_launcher.star | 270 ++++++++++++++++++++++++ src/package_io/constants.star | 22 ++ src/package_io/input_parser.star | 1 + src/participant_network.star | 9 + 6 files changed, 318 insertions(+), 1 deletion(-) create mode 100644 .github/tests/nimbus-eth1-all.yaml 
create mode 100644 src/el/nimbus-eth1/nimbus_launcher.star diff --git a/.github/tests/nimbus-eth1-all.yaml b/.github/tests/nimbus-eth1-all.yaml new file mode 100644 index 000000000..fe0ee47dc --- /dev/null +++ b/.github/tests/nimbus-eth1-all.yaml @@ -0,0 +1,14 @@ +participants: + - el_client_type: geth + cl_client_type: teku + - el_client_type: nimbus + cl_client_type: teku + - el_client_type: nimbus + cl_client_type: prysm + - el_client_type: nimbus + cl_client_type: nimbus + - el_client_type: nimbus + cl_client_type: lighthouse + - el_client_type: nimbus + cl_client_type: lodestar +additional_services: [] diff --git a/README.md b/README.md index 199a7803d..c04ac0db4 100644 --- a/README.md +++ b/README.md @@ -153,7 +153,7 @@ To configure the package behaviour, you can modify your `network_params.yaml` fi # Specification of the participants in the network participants: # The type of EL client that should be started - # Valid values are geth, nethermind, erigon, besu, ethereumjs, reth + # Valid values are geth, nethermind, erigon, besu, ethereumjs, reth, nimbus-eth1 - el_client_type: geth # The Docker image that should be used for the EL client; leave blank to use the default for the client type @@ -164,6 +164,7 @@ participants: # - besu: hyperledger/besu:develop # - reth: ghcr.io/paradigmxyz/reth # - ethereumjs: ethpandaops/ethereumjs:master + # - nimbus-eth1: ethpandaops/nimbus-eth1:master el_client_image: "" # The log level string that this participant's EL client should log at diff --git a/src/el/nimbus-eth1/nimbus_launcher.star b/src/el/nimbus-eth1/nimbus_launcher.star new file mode 100644 index 000000000..bed447c00 --- /dev/null +++ b/src/el/nimbus-eth1/nimbus_launcher.star @@ -0,0 +1,270 @@ +shared_utils = import_module("../../shared_utils/shared_utils.star") +input_parser = import_module("../../package_io/input_parser.star") +el_client_context = import_module("../../el/el_client_context.star") +el_admin_node_info = 
import_module("../../el/el_admin_node_info.star") +node_metrics = import_module("../../node_metrics_info.star") +constants = import_module("../../package_io/constants.star") + +WS_RPC_PORT_NUM = 8545 +DISCOVERY_PORT_NUM = 30303 +ENGINE_RPC_PORT_NUM = 8551 +METRICS_PORT_NUM = 9001 + +# The min/max CPU/memory that the execution node can use +EXECUTION_MIN_CPU = 100 +EXECUTION_MIN_MEMORY = 256 + +# Port IDs +WS_RPC_PORT_ID = "ws-rpc" +TCP_DISCOVERY_PORT_ID = "tcp-discovery" +UDP_DISCOVERY_PORT_ID = "udp-discovery" +ENGINE_RPC_PORT_ID = "engine-rpc" +METRICS_PORT_ID = "metrics" + +# Paths +METRICS_PATH = "/metrics" + +# The dirpath of the execution data directory on the client container +EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER = "/data/nimbus/execution-data" + +PRIVATE_IP_ADDRESS_PLACEHOLDER = "KURTOSIS_IP_ADDR_PLACEHOLDER" + +USED_PORTS = { + WS_RPC_PORT_ID: shared_utils.new_port_spec( + WS_RPC_PORT_NUM, + shared_utils.TCP_PROTOCOL, + ), + TCP_DISCOVERY_PORT_ID: shared_utils.new_port_spec( + DISCOVERY_PORT_NUM, + shared_utils.TCP_PROTOCOL, + ), + UDP_DISCOVERY_PORT_ID: shared_utils.new_port_spec( + DISCOVERY_PORT_NUM, + shared_utils.UDP_PROTOCOL, + ), + ENGINE_RPC_PORT_ID: shared_utils.new_port_spec( + ENGINE_RPC_PORT_NUM, + shared_utils.TCP_PROTOCOL, + ), + METRICS_PORT_ID: shared_utils.new_port_spec( + METRICS_PORT_NUM, + shared_utils.TCP_PROTOCOL, + ), +} + +VERBOSITY_LEVELS = { + constants.GLOBAL_CLIENT_LOG_LEVEL.error: "ERROR", + constants.GLOBAL_CLIENT_LOG_LEVEL.warn: "WARN", + constants.GLOBAL_CLIENT_LOG_LEVEL.info: "INFO", + constants.GLOBAL_CLIENT_LOG_LEVEL.debug: "DEBUG", + constants.GLOBAL_CLIENT_LOG_LEVEL.trace: "TRACE", +} + + +def launch( + plan, + launcher, + service_name, + image, + participant_log_level, + global_log_level, + # If empty then the node will be launched as a bootnode + existing_el_clients, + el_min_cpu, + el_max_cpu, + el_min_mem, + el_max_mem, + extra_params, + extra_env_vars, + extra_labels, + persistent, + el_volume_size, + 
tolerations, + node_selectors, +): + log_level = input_parser.get_client_log_level_or_default( + participant_log_level, global_log_level, VERBOSITY_LEVELS + ) + + network_name = shared_utils.get_network_name(launcher.network) + + el_min_cpu = int(el_min_cpu) if int(el_min_cpu) > 0 else EXECUTION_MIN_CPU + el_max_cpu = ( + int(el_max_cpu) + if int(el_max_cpu) > 0 + else constants.RAM_CPU_OVERRIDES[network_name]["nimbus_eth1_max_cpu"] + ) + el_min_mem = int(el_min_mem) if int(el_min_mem) > 0 else EXECUTION_MIN_MEMORY + el_max_mem = ( + int(el_max_mem) + if int(el_max_mem) > 0 + else constants.RAM_CPU_OVERRIDES[network_name]["nimbus_eth1_max_mem"] + ) + + el_volume_size = ( + el_volume_size + if int(el_volume_size) > 0 + else constants.VOLUME_SIZE[network_name]["nimbus_eth1_volume_size"] + ) + + cl_client_name = service_name.split("-")[3] + + config = get_config( + plan, + launcher.el_cl_genesis_data, + launcher.jwt_file, + launcher.network, + image, + service_name, + existing_el_clients, + cl_client_name, + log_level, + el_min_cpu, + el_max_cpu, + el_min_mem, + el_max_mem, + extra_params, + extra_env_vars, + extra_labels, + persistent, + el_volume_size, + tolerations, + node_selectors, + ) + + service = plan.add_service(service_name, config) + + enode = el_admin_node_info.get_enode_for_node(plan, service_name, WS_RPC_PORT_ID) + + metric_url = "{0}:{1}".format(service.ip_address, METRICS_PORT_NUM) + nimbus_metrics_info = node_metrics.new_node_metrics_info( + service_name, METRICS_PATH, metric_url + ) + + return el_client_context.new_el_client_context( + "nimbus", + "", # nimbus has no enr + enode, + service.ip_address, + WS_RPC_PORT_NUM, + WS_RPC_PORT_NUM, + ENGINE_RPC_PORT_NUM, + service_name, + [nimbus_metrics_info], + ) + + +def get_config( + plan, + el_cl_genesis_data, + jwt_file, + network, + image, + service_name, + existing_el_clients, + cl_client_name, + verbosity_level, + el_min_cpu, + el_max_cpu, + el_min_mem, + el_max_mem, + extra_params, + extra_env_vars, 
+ extra_labels, + persistent, + el_volume_size, + tolerations, + node_selectors, +): + cmd = [ + "--log-level={0}".format(verbosity_level), + "--data-dir=" + EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER, + "--http-port={0}".format(WS_RPC_PORT_NUM), + "--http-address=0.0.0.0", + "--rpc", + "--rpc-api=eth,debug,exp", + "--ws", + "--ws-api=eth,debug,exp", + "--engine-api", + "--engine-api-address=0.0.0.0", + "--engine-api-port={0}".format(ENGINE_RPC_PORT_NUM), + "--jwt-secret=" + constants.JWT_MOUNT_PATH_ON_CONTAINER, + "--metrics", + "--metrics-address=0.0.0.0", + "--metrics-port={0}".format(METRICS_PORT_NUM), + ] + if ( + network not in constants.PUBLIC_NETWORKS + or constants.NETWORK_NAME.shadowfork in network + ): + cmd.append( + "--custom-network=" + + constants.GENESIS_CONFIG_MOUNT_PATH_ON_CONTAINER + + "/genesis.json" + ) + else: + cmd.append("--network=" + network) + + if network == constants.NETWORK_NAME.kurtosis: + if len(existing_el_clients) > 0: + cmd.append( + "--bootstrap-node=" + + ",".join( + [ + ctx.enode + for ctx in existing_el_clients[: constants.MAX_ENODE_ENTRIES] + ] + ) + ) + elif network not in constants.PUBLIC_NETWORKS: + cmd.append( + "--bootstrap-node=" + + shared_utils.get_devnet_enodes( + plan, el_cl_genesis_data.files_artifact_uuid + ) + ) + + if len(extra_params) > 0: + # this is a repeated, we convert it into Starlark + cmd.extend([param for param in extra_params]) + + files = { + constants.GENESIS_DATA_MOUNTPOINT_ON_CLIENTS: el_cl_genesis_data.files_artifact_uuid, + constants.JWT_MOUNTPOINT_ON_CLIENTS: jwt_file, + } + + if persistent: + files[EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER] = Directory( + persistent_key="data-{0}".format(service_name), + size=el_volume_size, + ) + + return ServiceConfig( + image=image, + ports=USED_PORTS, + cmd=cmd, + files=files, + private_ip_address_placeholder=PRIVATE_IP_ADDRESS_PLACEHOLDER, + min_cpu=el_min_cpu, + max_cpu=el_max_cpu, + min_memory=el_min_mem, + max_memory=el_max_mem, + 
env_vars=extra_env_vars, + labels=shared_utils.label_maker( + constants.EL_CLIENT_TYPE.nimbus, + constants.CLIENT_TYPES.el, + image, + cl_client_name, + extra_labels, + ), + tolerations=tolerations, + node_selectors=node_selectors, + ) + + +def new_nimbus_launcher(el_cl_genesis_data, jwt_file, network): + return struct( + el_cl_genesis_data=el_cl_genesis_data, + jwt_file=jwt_file, + network=network, + ) diff --git a/src/package_io/constants.star b/src/package_io/constants.star index 9d277958f..4c54c7501 100644 --- a/src/package_io/constants.star +++ b/src/package_io/constants.star @@ -6,6 +6,7 @@ EL_CLIENT_TYPE = struct( besu="besu", reth="reth", ethereumjs="ethereumjs", + nimbus="nimbus", ) CL_CLIENT_TYPE = struct( @@ -131,6 +132,7 @@ VOLUME_SIZE = { "besu_volume_size": 1000000, # 1TB "reth_volume_size": 3000000, # 3TB "ethereumjs_volume_size": 1000000, # 1TB + "nimbus_eth1_volume_size": 1000000, # 1TB "prysm_volume_size": 500000, # 500GB "lighthouse_volume_size": 500000, # 500GB "teku_volume_size": 500000, # 500GB @@ -144,6 +146,7 @@ VOLUME_SIZE = { "besu_volume_size": 800000, # 800GB "reth_volume_size": 1200000, # 1200GB "ethereumjs_volume_size": 800000, # 800GB + "nimbus_eth1_volume_size": 800000, # 800GB "prysm_volume_size": 300000, # 300GB "lighthouse_volume_size": 300000, # 300GB "teku_volume_size": 300000, # 300GB @@ -157,6 +160,7 @@ VOLUME_SIZE = { "besu_volume_size": 300000, # 300GB "reth_volume_size": 500000, # 500GB "ethereumjs_volume_size": 300000, # 300GB + "nimbus_eth1_volume_size": 300000, # 300GB "prysm_volume_size": 150000, # 150GB "lighthouse_volume_size": 150000, # 150GB "teku_volume_size": 150000, # 150GB @@ -170,6 +174,7 @@ VOLUME_SIZE = { "besu_volume_size": 100000, # 100GB "reth_volume_size": 200000, # 200GB "ethereumjs_volume_size": 100000, # 100GB + "nimbus_eth1_volume_size": 100000, # 100GB "prysm_volume_size": 100000, # 100GB "lighthouse_volume_size": 100000, # 100GB "teku_volume_size": 100000, # 100GB @@ -183,6 +188,7 @@ VOLUME_SIZE = { 
"besu_volume_size": 100000, # 100GB "reth_volume_size": 200000, # 200GB "ethereumjs_volume_size": 100000, # 100GB + "nimbus_eth1_volume_size": 100000, # 100GB "prysm_volume_size": 100000, # 100GB "lighthouse_volume_size": 100000, # 100GB "teku_volume_size": 100000, # 100GB @@ -196,6 +202,7 @@ VOLUME_SIZE = { "besu_volume_size": 3000, # 3GB "reth_volume_size": 3000, # 3GB "ethereumjs_volume_size": 3000, # 3GB + "nimbus_eth1_volume_size": 3000, # 3GB "prysm_volume_size": 1000, # 1GB "lighthouse_volume_size": 1000, # 1GB "teku_volume_size": 1000, # 1GB @@ -209,6 +216,7 @@ VOLUME_SIZE = { "besu_volume_size": 3000, # 3GB "reth_volume_size": 3000, # 3GB "ethereumjs_volume_size": 3000, # 3GB + "nimbus_eth1_volume_size": 3000, # 3GB "prysm_volume_size": 1000, # 1GB "lighthouse_volume_size": 1000, # 1GB "teku_volume_size": 1000, # 1GB @@ -231,6 +239,8 @@ RAM_CPU_OVERRIDES = { "reth_max_cpu": 4000, # 4 cores "ethereumjs_max_mem": 16384, # 16GB "ethereumjs_max_cpu": 4000, # 4 cores + "nimbus_eth1_max_mem": 16384, # 16GB + "nimbus_eth1_max_cpu": 4000, # 4 cores "prysm_max_mem": 16384, # 16GB "prysm_max_cpu": 4000, # 4 cores "lighthouse_max_mem": 16384, # 16GB @@ -255,6 +265,8 @@ RAM_CPU_OVERRIDES = { "reth_max_cpu": 2000, # 2 cores "ethereumjs_max_mem": 8192, # 8GB "ethereumjs_max_cpu": 2000, # 2 cores + "nimbus_eth1_max_mem": 8192, # 8GB + "nimbus_eth1_max_cpu": 2000, # 2 cores "prysm_max_mem": 8192, # 8GB "prysm_max_cpu": 2000, # 2 cores "lighthouse_max_mem": 8192, # 8GB @@ -279,6 +291,8 @@ RAM_CPU_OVERRIDES = { "reth_max_cpu": 1000, # 1 core "ethereumjs_max_mem": 4096, # 4GB "ethereumjs_max_cpu": 1000, # 1 core + "nimbus_eth1_max_mem": 4096, # 4GB + "nimbus_eth1_max_cpu": 1000, # 1 core "prysm_max_mem": 4096, # 4GB "prysm_max_cpu": 1000, # 1 core "lighthouse_max_mem": 4096, # 4GB @@ -303,6 +317,8 @@ RAM_CPU_OVERRIDES = { "reth_max_cpu": 2000, # 2 cores "ethereumjs_max_mem": 8192, # 8GB "ethereumjs_max_cpu": 2000, # 2 cores + "nimbus_eth1_max_mem": 8192, # 8GB + 
"nimbus_eth1_max_cpu": 2000, # 2 cores "prysm_max_mem": 8192, # 8GB "prysm_max_cpu": 2000, # 2 cores "lighthouse_max_mem": 8192, # 8GB @@ -327,6 +343,8 @@ RAM_CPU_OVERRIDES = { "reth_max_cpu": 1000, # 1 core "ethereumjs_max_mem": 4096, # 4GB "ethereumjs_max_cpu": 1000, # 1 core + "nimbus_eth1_max_mem": 4096, # 4GB + "nimbus_eth1_max_cpu": 1000, # 1 core "prysm_max_mem": 4096, # 4GB "prysm_max_cpu": 1000, # 1 core "lighthouse_max_mem": 4096, # 4GB @@ -351,6 +369,8 @@ RAM_CPU_OVERRIDES = { "reth_max_cpu": 1000, # 1 core "ethereumjs_max_mem": 1024, # 1GB "ethereumjs_max_cpu": 1000, # 1 core + "nimbus_eth1_max_mem": 1024, # 1GB + "nimbus_eth1_max_cpu": 1000, # 1 core "prysm_max_mem": 1024, # 1GB "prysm_max_cpu": 1000, # 1 core "lighthouse_max_mem": 1024, # 1GB @@ -375,6 +395,8 @@ RAM_CPU_OVERRIDES = { "reth_max_cpu": 1000, # 1 core "ethereumjs_max_mem": 1024, # 1GB "ethereumjs_max_cpu": 1000, # 1 core + "nimbus_eth1_max_mem": 1024, # 1GB + "nimbus_eth1_max_cpu": 1000, # 1 core "prysm_max_mem": 1024, # 1GB "prysm_max_cpu": 1000, # 1 core "lighthouse_max_mem": 1024, # 1GB diff --git a/src/package_io/input_parser.star b/src/package_io/input_parser.star index 5b4a9bb5a..0121de343 100644 --- a/src/package_io/input_parser.star +++ b/src/package_io/input_parser.star @@ -11,6 +11,7 @@ DEFAULT_EL_IMAGES = { "besu": "hyperledger/besu:latest", "reth": "ghcr.io/paradigmxyz/reth", "ethereumjs": "ethpandaops/ethereumjs:master", + "nimbus": "ethpandaops/nimbus-eth1:master", } DEFAULT_CL_IMAGES = { diff --git a/src/participant_network.star b/src/participant_network.star index aeea46048..9d93ea061 100644 --- a/src/participant_network.star +++ b/src/participant_network.star @@ -21,6 +21,7 @@ erigon = import_module("./el/erigon/erigon_launcher.star") nethermind = import_module("./el/nethermind/nethermind_launcher.star") reth = import_module("./el/reth/reth_launcher.star") ethereumjs = import_module("./el/ethereumjs/ethereumjs_launcher.star") +nimbus_eth1 = 
import_module("./el/nimbus-eth1/nimbus_launcher.star") lighthouse = import_module("./cl/lighthouse/lighthouse_launcher.star") lodestar = import_module("./cl/lodestar/lodestar_launcher.star") @@ -419,6 +420,14 @@ def launch_participant_network( ), "launch_method": ethereumjs.launch, }, + constants.EL_CLIENT_TYPE.nimbus: { + "launcher": nimbus_eth1.new_nimbus_launcher( + el_cl_data, + jwt_file, + network_params.network, + ), + "launch_method": nimbus_eth1.launch, + }, } all_el_client_contexts = [] From 1d5a7792c8175d1fc85e424b5ddf60baec551821 Mon Sep 17 00:00:00 2001 From: Barnabas Busa Date: Tue, 27 Feb 2024 13:07:04 +0100 Subject: [PATCH 24/33] fix: nimbus-eth1 advertise proper extip (#501) --- src/el/nimbus-eth1/nimbus_launcher.star | 1 + 1 file changed, 1 insertion(+) diff --git a/src/el/nimbus-eth1/nimbus_launcher.star b/src/el/nimbus-eth1/nimbus_launcher.star index bed447c00..4b701e0fb 100644 --- a/src/el/nimbus-eth1/nimbus_launcher.star +++ b/src/el/nimbus-eth1/nimbus_launcher.star @@ -192,6 +192,7 @@ def get_config( "--metrics", "--metrics-address=0.0.0.0", "--metrics-port={0}".format(METRICS_PORT_NUM), + "--nat=extip:{0}".format(PRIVATE_IP_ADDRESS_PLACEHOLDER), ] if ( network not in constants.PUBLIC_NETWORKS From beb764fb9a18fcb09cb7d3d9ee48e4826595512d Mon Sep 17 00:00:00 2001 From: Barnabas Busa Date: Tue, 27 Feb 2024 15:58:25 +0100 Subject: [PATCH 25/33] feat: enable dencun-genesis (#500) Co-authored-by: eth2353 <70237279+eth2353@users.noreply.github.com> --- .circleci/config.yml | 4 +-- .github/tests/dencun-genesis.yaml | 28 +++++++++++++++++++++ .github/tests/holesky-shadowfork.yaml_norun | 6 ++--- .github/workflows/nightly.yml | 2 +- README.md | 2 +- src/package_io/constants.star | 1 + src/package_io/input_parser.star | 3 --- src/participant_network.star | 6 +++++ 8 files changed, 42 insertions(+), 10 deletions(-) create mode 100644 .github/tests/dencun-genesis.yaml diff --git a/.circleci/config.yml b/.circleci/config.yml index 3c0057d13..7161fef3a 
100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -100,14 +100,14 @@ jobs: executor: ubuntu_vm steps: - checkout - - run: kurtosis run ${PWD} "$(cat ./.github/tests/mev.yaml)" + - run: kurtosis run ${PWD} --verbosity detailed --args-file=./.github/tests/mev.yaml mix_with_tools_k8s: resource_class: xlarge executor: ubuntu_vm steps: - checkout - - run: kurtosis run ${PWD} "$(cat ./.github/tests/mix-with-tools-mev.yaml)" + - run: kurtosis run ${PWD} --verbosity detailed --args-file=./.github/tests/mev.yaml mix_persistence_k8s: resource_class: xlarge diff --git a/.github/tests/dencun-genesis.yaml b/.github/tests/dencun-genesis.yaml new file mode 100644 index 000000000..1c38e6597 --- /dev/null +++ b/.github/tests/dencun-genesis.yaml @@ -0,0 +1,28 @@ +participants: + - el_client_type: geth + el_client_image: ethpandaops/geth:master + cl_client_type: teku + cl_client_image: ethpandaops/teku:master + - el_client_type: nethermind + el_client_image: ethpandaops/nethermind:master + cl_client_type: prysm + cl_client_image: gcr.io/prysmaticlabs/prysm/beacon-chain:latest + - el_client_type: erigon + el_client_image: ethpandaops/erigon:devel + cl_client_type: nimbus + cl_client_image: ethpandaops/nimbus:unstable + - el_client_type: besu + el_client_image: ethpandaops/besu:main + cl_client_type: lighthouse + cl_client_image: ethpandaops/lighthouse:unstable + - el_client_type: reth + el_client_image: ethpandaops/reth:main + cl_client_type: lodestar + cl_client_image: ethpandaops/lodestar:unstable + - el_client_type: ethereumjs + el_client_image: ethpandaops/ethereumjs:master + cl_client_type: teku + cl_client_image: ethpandaops/teku:master +network_params: + deneb_fork_epoch: 0 +additional_services: [] diff --git a/.github/tests/holesky-shadowfork.yaml_norun b/.github/tests/holesky-shadowfork.yaml_norun index f26bd9c8c..ca2634a03 100644 --- a/.github/tests/holesky-shadowfork.yaml_norun +++ b/.github/tests/holesky-shadowfork.yaml_norun @@ -1,10 +1,10 @@ participants: - 
el_client_type: geth - el_client_image: ethereum/client-go:v1.13.11 + el_client_image: ethereum/client-go:v1.13.14 cl_client_type: teku - cl_client_image: consensys/teku:24.1.1 + cl_client_image: consensys/teku:24.2.0 network_params: - dencun_fork_epoch: 1 + dencun_fork_epoch: 0 network: holesky-shadowfork additional_services: - dora diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index 75b9106ae..ffab067da 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -36,7 +36,7 @@ jobs: - name: Run Starlark run: | if [ "${{ matrix.file_name }}" != "./.github/tests/mix-with-tools-mev.yaml" ]; then - kurtosis run ${{ github.workspace }} --args-file ${{ matrix.file_name }} + kurtosis run ${{ github.workspace }} --verbosity detailed --args-file ${{ matrix.file_name }} else echo "Skipping ./.github/tests/mix-with-tools-mev.yaml" fi diff --git a/README.md b/README.md index c04ac0db4..b4eff39c2 100644 --- a/README.md +++ b/README.md @@ -222,7 +222,7 @@ participants: # Whether to use a separate validator client attached to the CL client. 
# Defaults to false for clients that can run both in one process (Teku, Nimbus) - use_separate_validator_client: true/false + use_separate_validator_client: false # The type of validator client that should be used # Valid values are nimbus, lighthouse, lodestar, teku, and prysm diff --git a/src/package_io/constants.star b/src/package_io/constants.star index 4c54c7501..8c45120b2 100644 --- a/src/package_io/constants.star +++ b/src/package_io/constants.star @@ -65,6 +65,7 @@ ELECTRA_FORK_VERSION = "0x60000038" ETHEREUM_GENESIS_GENERATOR = struct( bellatrix_genesis="ethpandaops/ethereum-genesis-generator:1.3.15", # EOL capella_genesis="ethpandaops/ethereum-genesis-generator:2.0.12", # Default + deneb_genesis="ethpandaops/ethereum-genesis-generator:default-deneb-genesis", # Soon to become default verkle_support_genesis="ethpandaops/ethereum-genesis-generator:3.0.0-rc.19", # soon to be deneb genesis verkle_genesis="ethpandaops/ethereum-genesis-generator:4.0.0-rc.6", ) diff --git a/src/package_io/input_parser.star b/src/package_io/input_parser.star index 0121de343..e29b91028 100644 --- a/src/package_io/input_parser.star +++ b/src/package_io/input_parser.star @@ -477,9 +477,6 @@ def parse_network_params(input_args): if result["network_params"]["seconds_per_slot"] == 0: fail("seconds_per_slot is 0 needs to be > 0 ") - if result["network_params"]["deneb_fork_epoch"] == 0: - fail("deneb_fork_epoch is 0 needs to be > 0 ") - if result["network_params"]["electra_fork_epoch"] != None: # if electra is defined, then deneb needs to be set very high result["network_params"]["deneb_fork_epoch"] = HIGH_DENEB_VALUE_FORK_VERKLE diff --git a/src/participant_network.star b/src/participant_network.star index 9d93ea061..2364a46d6 100644 --- a/src/participant_network.star +++ b/src/participant_network.star @@ -241,10 +241,16 @@ def launch_participant_network( elif ( network_params.capella_fork_epoch == 0 and network_params.electra_fork_epoch == None + and network_params.deneb_fork_epoch > 0 
): ethereum_genesis_generator_image = ( constants.ETHEREUM_GENESIS_GENERATOR.capella_genesis ) + # we are running deneb genesis - experimental, soon to become default + elif network_params.deneb_fork_epoch == 0: + ethereum_genesis_generator_image = ( + constants.ETHEREUM_GENESIS_GENERATOR.deneb_genesis + ) # we are running electra - experimental elif network_params.electra_fork_epoch != None: if network_params.electra_fork_epoch == 0: From 181dd04c2db17c58cb9370b0d24e12e4c191a13d Mon Sep 17 00:00:00 2001 From: Barnabas Busa Date: Thu, 29 Feb 2024 18:21:51 +0100 Subject: [PATCH 26/33] fix: use the cl as the default validator image if none are defined (#503) This PR ensures that the validator image will be the same as CL image (if one is defined) and only fallback to the default validator image if there is no validator image nor beacon image is defined. --- .github/tests/dencun-devnet-12.yaml | 12 ------------ .github/tests/dencun-genesis.yaml | 12 ------------ .github/tests/ephemery.yaml | 12 ------------ .github/tests/mev.yaml | 2 +- .github/tests/mix-persistence-k8s.yaml | 1 - .github/tests/mix-persistence.yaml | 1 - .github/tests/mix-with-tools-mev.yaml | 2 +- .github/tests/sepolia-mix.yaml | 2 +- .github/tests/split-nimbus.yaml | 6 ------ src/package_io/input_parser.star | 13 ++++++++++++- src/participant_network.star | 4 ++-- 11 files changed, 17 insertions(+), 50 deletions(-) diff --git a/.github/tests/dencun-devnet-12.yaml b/.github/tests/dencun-devnet-12.yaml index 755ae74c6..7d63d6233 100644 --- a/.github/tests/dencun-devnet-12.yaml +++ b/.github/tests/dencun-devnet-12.yaml @@ -1,28 +1,16 @@ participants: - el_client_type: geth - el_client_image: ethpandaops/geth:master cl_client_type: teku - cl_client_image: ethpandaops/teku:master - el_client_type: nethermind - el_client_image: ethpandaops/nethermind:master cl_client_type: prysm - cl_client_image: gcr.io/prysmaticlabs/prysm/beacon-chain:latest - el_client_type: erigon - el_client_image: 
ethpandaops/erigon:devel cl_client_type: nimbus - cl_client_image: ethpandaops/nimbus:unstable - el_client_type: besu - el_client_image: ethpandaops/besu:main cl_client_type: lighthouse - cl_client_image: ethpandaops/lighthouse:unstable - el_client_type: reth - el_client_image: ethpandaops/reth:main cl_client_type: lodestar - cl_client_image: ethpandaops/lodestar:unstable - el_client_type: ethereumjs - el_client_image: ethpandaops/ethereumjs:master cl_client_type: teku - cl_client_image: ethpandaops/teku:master network_params: network: "dencun-devnet-12" additional_services: [] diff --git a/.github/tests/dencun-genesis.yaml b/.github/tests/dencun-genesis.yaml index 1c38e6597..6c7709760 100644 --- a/.github/tests/dencun-genesis.yaml +++ b/.github/tests/dencun-genesis.yaml @@ -1,28 +1,16 @@ participants: - el_client_type: geth - el_client_image: ethpandaops/geth:master cl_client_type: teku - cl_client_image: ethpandaops/teku:master - el_client_type: nethermind - el_client_image: ethpandaops/nethermind:master cl_client_type: prysm - cl_client_image: gcr.io/prysmaticlabs/prysm/beacon-chain:latest - el_client_type: erigon - el_client_image: ethpandaops/erigon:devel cl_client_type: nimbus - cl_client_image: ethpandaops/nimbus:unstable - el_client_type: besu - el_client_image: ethpandaops/besu:main cl_client_type: lighthouse - cl_client_image: ethpandaops/lighthouse:unstable - el_client_type: reth - el_client_image: ethpandaops/reth:main cl_client_type: lodestar - cl_client_image: ethpandaops/lodestar:unstable - el_client_type: ethereumjs - el_client_image: ethpandaops/ethereumjs:master cl_client_type: teku - cl_client_image: ethpandaops/teku:master network_params: deneb_fork_epoch: 0 additional_services: [] diff --git a/.github/tests/ephemery.yaml b/.github/tests/ephemery.yaml index ac54f2ed9..e9b93177f 100644 --- a/.github/tests/ephemery.yaml +++ b/.github/tests/ephemery.yaml @@ -1,28 +1,16 @@ participants: - el_client_type: geth - el_client_image: 
ethpandaops/geth:master cl_client_type: teku - cl_client_image: ethpandaops/teku:master - el_client_type: nethermind - el_client_image: ethpandaops/nethermind:master cl_client_type: prysm - cl_client_image: gcr.io/prysmaticlabs/prysm/beacon-chain:latest - el_client_type: erigon - el_client_image: ethpandaops/erigon:devel cl_client_type: nimbus - cl_client_image: ethpandaops/nimbus:unstable - el_client_type: besu - el_client_image: ethpandaops/besu:main cl_client_type: lighthouse - cl_client_image: ethpandaops/lighthouse:unstable - el_client_type: reth - el_client_image: ethpandaops/reth:main cl_client_type: lodestar - cl_client_image: ethpandaops/lodestar:unstable - el_client_type: ethereumjs - el_client_image: ethpandaops/ethereumjs:master cl_client_type: teku - cl_client_image: ethpandaops/teku:master network_params: network: "ephemery" additional_services: [] diff --git a/.github/tests/mev.yaml b/.github/tests/mev.yaml index 966cbc1ac..f1d515051 100644 --- a/.github/tests/mev.yaml +++ b/.github/tests/mev.yaml @@ -9,6 +9,6 @@ additional_services: - prometheus_grafana mev_params: launch_custom_flood: true - mev_relay_image: flashbots/mev-boost-relay:0.28.0a2 + mev_relay_image: flashbots/mev-boost-relay:latest network_params: seconds_per_slot: 3 diff --git a/.github/tests/mix-persistence-k8s.yaml b/.github/tests/mix-persistence-k8s.yaml index 4c6d8443e..6b83832a9 100644 --- a/.github/tests/mix-persistence-k8s.yaml +++ b/.github/tests/mix-persistence-k8s.yaml @@ -6,7 +6,6 @@ participants: cl_client_type: prysm - el_client_type: erigon cl_client_type: nimbus - cl_client_image: ethpandaops/nimbus:unstable use_separate_validator_client: true - el_client_type: besu cl_client_type: lighthouse diff --git a/.github/tests/mix-persistence.yaml b/.github/tests/mix-persistence.yaml index 4c6d8443e..6b83832a9 100644 --- a/.github/tests/mix-persistence.yaml +++ b/.github/tests/mix-persistence.yaml @@ -6,7 +6,6 @@ participants: cl_client_type: prysm - el_client_type: erigon 
cl_client_type: nimbus - cl_client_image: ethpandaops/nimbus:unstable use_separate_validator_client: true - el_client_type: besu cl_client_type: lighthouse diff --git a/.github/tests/mix-with-tools-mev.yaml b/.github/tests/mix-with-tools-mev.yaml index 51aaccc45..2c287081f 100644 --- a/.github/tests/mix-with-tools-mev.yaml +++ b/.github/tests/mix-with-tools-mev.yaml @@ -26,5 +26,5 @@ ethereum_metrics_exporter_enabled: true snooper_enabled: true mev_type: full mev_params: - mev_relay_image: flashbots/mev-boost-relay:0.27 + mev_relay_image: flashbots/mev-boost-relay:latest persistent: True diff --git a/.github/tests/sepolia-mix.yaml b/.github/tests/sepolia-mix.yaml index 768ee7e57..9376be804 100644 --- a/.github/tests/sepolia-mix.yaml +++ b/.github/tests/sepolia-mix.yaml @@ -12,5 +12,5 @@ participants: - el_client_type: ethereumjs cl_client_type: nimbus network_params: - network: "sepolia" + network: sepolia additional_services: [] diff --git a/.github/tests/split-nimbus.yaml b/.github/tests/split-nimbus.yaml index 21720cf85..d41d27d0b 100644 --- a/.github/tests/split-nimbus.yaml +++ b/.github/tests/split-nimbus.yaml @@ -1,27 +1,21 @@ participants: - el_client_type: geth cl_client_type: nimbus - cl_client_image: ethpandaops/nimbus:unstable use_separate_validator_client: true validator_count: 0 - el_client_type: nethermind cl_client_type: nimbus use_separate_validator_client: true - cl_client_image: ethpandaops/nimbus:unstable - el_client_type: erigon cl_client_type: nimbus use_separate_validator_client: true - cl_client_image: ethpandaops/nimbus:unstable - el_client_type: besu cl_client_type: nimbus use_separate_validator_client: true - cl_client_image: ethpandaops/nimbus:unstable - el_client_type: reth cl_client_type: nimbus use_separate_validator_client: true - cl_client_image: ethpandaops/nimbus:unstable - el_client_type: ethereumjs cl_client_type: nimbus use_separate_validator_client: true - cl_client_image: ethpandaops/nimbus:unstable additional_services: [] 
diff --git a/src/package_io/input_parser.star b/src/package_io/input_parser.star index e29b91028..7b4e050dd 100644 --- a/src/package_io/input_parser.star +++ b/src/package_io/input_parser.star @@ -392,7 +392,18 @@ def parse_network_params(input_args): validator_client_image = participant["validator_client_image"] if validator_client_image == "": - default_image = DEFAULT_VC_IMAGES.get(validator_client_type, "") + if cl_image == "": + # If the validator client image is also empty, default to the image for the chosen CL client + default_image = DEFAULT_VC_IMAGES.get(validator_client_type, "") + else: + if cl_client_type == "prysm": + default_image = cl_image.replace("beacon-chain", "validator") + elif cl_client_type == "nimbus": + default_image = cl_image.replace( + "nimbus-eth2", "nimbus-validator-client" + ) + else: + default_image = cl_image if default_image == "": fail( "{0} received an empty image name and we don't have a default for it".format( diff --git a/src/participant_network.star b/src/participant_network.star index 2364a46d6..696ee53a5 100644 --- a/src/participant_network.star +++ b/src/participant_network.star @@ -775,8 +775,8 @@ def launch_participant_network( launcher=validator_client.new_validator_client_launcher( el_cl_genesis_data=el_cl_data ), - service_name="validator-client-{0}-{1}".format( - index_str, validator_client_type + service_name="vc-{0}-{1}-{2}".format( + index_str, validator_client_type, el_client_type ), validator_client_type=validator_client_type, image=participant.validator_client_image, From f9343a2914456196e1209336c426b6ad44958428 Mon Sep 17 00:00:00 2001 From: Barnabas Busa Date: Fri, 1 Mar 2024 17:31:57 +0100 Subject: [PATCH 27/33] fix: README global node selector (#504) --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index b4eff39c2..c4e1a3256 100644 --- a/README.md +++ b/README.md @@ -634,9 +634,9 @@ global_tolerations: [] # Global node selector that will be passed to 
all containers (unless overridden by a more specific node selector) # Only works with Kubernetes -# Example: node_selectors: { "disktype": "ssd" } +# Example: global_node_selectors: { "disktype": "ssd" } # Defaults to empty -node_selectors: {} +global_node_selectors: {} ``` #### Example configurations From 836eda4eed3776dd406d354343655c0ff8b9d2b6 Mon Sep 17 00:00:00 2001 From: Barnabas Busa Date: Mon, 4 Mar 2024 16:29:37 +0100 Subject: [PATCH 28/33] feat: add keymanager to all validator processes (#502) --- .circleci/config.yml | 2 +- .github/tests/blobber.yaml | 2 - .github/tests/split-teku.yaml | 2 +- main.star | 10 ++++ src/cl/nimbus/nimbus_launcher.star | 30 ++++++++--- src/cl/teku/teku_launcher.star | 47 +++++++++++++++--- src/package_io/constants.star | 8 +++ src/participant_network.star | 10 +++- src/static_files/static_files.star | 4 ++ src/validator_client/lighthouse.star | 10 ++++ src/validator_client/lodestar.star | 6 +++ src/validator_client/nimbus.star | 7 +++ src/validator_client/prysm.star | 3 ++ src/validator_client/shared.star | 8 +++ src/validator_client/teku.star | 14 ++++++ .../validator_client_launcher.star | 13 +++++ static_files/keymanager/generate_certs.sh | 7 +++ static_files/keymanager/keymanager.txt | 1 + static_files/keymanager/openssl.cnf | 23 +++++++++ .../keymanager/validator_keystore.p12 | Bin 0 -> 2707 bytes 20 files changed, 189 insertions(+), 18 deletions(-) create mode 100644 static_files/keymanager/generate_certs.sh create mode 100644 static_files/keymanager/keymanager.txt create mode 100644 static_files/keymanager/openssl.cnf create mode 100644 static_files/keymanager/validator_keystore.p12 diff --git a/.circleci/config.yml b/.circleci/config.yml index 7161fef3a..e0cdfe8f6 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -6,7 +6,7 @@ orbs: executors: ubuntu_vm: machine: - image: ubuntu-2204:2023.07.2 + image: ubuntu-2204:current parameters: should-enable-check-latest-version-workflow: diff --git 
a/.github/tests/blobber.yaml b/.github/tests/blobber.yaml index 9f72054b1..75480a1a4 100644 --- a/.github/tests/blobber.yaml +++ b/.github/tests/blobber.yaml @@ -2,7 +2,6 @@ participants: - el_client_type: geth el_client_image: ethpandaops/geth:master cl_client_type: lighthouse - cl_client_image: ethpandaops/lighthouse:sidecar-inclusion-proof-c6be31c blobber_enabled: true blobber_extra_params: - --proposal-action-frequency=1 @@ -11,7 +10,6 @@ participants: - el_client_type: geth el_client_image: ethpandaops/geth:master cl_client_type: lodestar - cl_client_image: ethpandaops/lodestar:blobs-inclproof-d5a5a47 count: 1 network_params: deneb_fork_epoch: 1 diff --git a/.github/tests/split-teku.yaml b/.github/tests/split-teku.yaml index c5dbe4bae..e2dfb6328 100644 --- a/.github/tests/split-teku.yaml +++ b/.github/tests/split-teku.yaml @@ -1,8 +1,8 @@ participants: - el_client_type: geth cl_client_type: teku - use_separate_validator_client: true validator_count: 0 + use_separate_validator_client: true - el_client_type: nethermind cl_client_type: teku use_separate_validator_client: true diff --git a/main.star b/main.star index 68bac3681..a52722e28 100644 --- a/main.star +++ b/main.star @@ -75,6 +75,14 @@ def run(plan, args={}): src=static_files.JWT_PATH_FILEPATH, name="jwt_file", ) + keymanager_file = plan.upload_files( + src=static_files.KEYMANAGER_PATH_FILEPATH, + name="keymanager_file", + ) + keymanager_p12_file = plan.upload_files( + src=static_files.KEYMANAGER_P12_PATH_FILEPATH, + name="keymanager_p12_file", + ) plan.print("Read the prometheus, grafana templates") plan.print( @@ -93,6 +101,8 @@ def run(plan, args={}): network_params, args_with_right_defaults.global_client_log_level, jwt_file, + keymanager_file, + keymanager_p12_file, persistent, xatu_sentry_params, global_tolerations, diff --git a/src/cl/nimbus/nimbus_launcher.star b/src/cl/nimbus/nimbus_launcher.star index bd6a2b0df..2995c88a6 100644 --- a/src/cl/nimbus/nimbus_launcher.star +++ 
b/src/cl/nimbus/nimbus_launcher.star @@ -5,7 +5,7 @@ cl_client_context = import_module("../../cl/cl_client_context.star") cl_node_ready_conditions = import_module("../../cl/cl_node_ready_conditions.star") node_metrics = import_module("../../node_metrics_info.star") constants = import_module("../../package_io/constants.star") - +validator_client_shared = import_module("../../validator_client/shared.star") # ---------------------------------- Beacon client ------------------------------------- # Nimbus requires that its data directory already exists (because it expects you to bind-mount it), so we # have to to create it @@ -15,6 +15,7 @@ BEACON_TCP_DISCOVERY_PORT_ID = "tcp-discovery" BEACON_UDP_DISCOVERY_PORT_ID = "udp-discovery" BEACON_HTTP_PORT_ID = "http" BEACON_METRICS_PORT_ID = "metrics" +VALIDATOR_HTTP_PORT_ID = "http-validator" # Port nums BEACON_DISCOVERY_PORT_NUM = 9000 @@ -135,6 +136,7 @@ def launch( plan, launcher.el_cl_genesis_data, launcher.jwt_file, + launcher.keymanager_file, launcher.network, image, beacon_service_name, @@ -209,6 +211,7 @@ def get_beacon_config( plan, el_cl_genesis_data, jwt_file, + keymanager_file, network, image, service_name, @@ -296,11 +299,13 @@ def get_beacon_config( + constants.CL_CLIENT_TYPE.nimbus + "-" + el_client_context.client_name, + "--keymanager", + "--keymanager-port={0}".format(validator_client_shared.VALIDATOR_HTTP_PORT_NUM), + "--keymanager-address=0.0.0.0", + "--keymanager-allow-origin=*", + "--keymanager-token-file=" + constants.KEYMANAGER_MOUNT_PATH_ON_CONTAINER, ] - if node_keystore_files != None and not use_separate_validator_client: - cmd.extend(validator_flags) - if network not in constants.PUBLIC_NETWORKS: cmd.append( "--bootstrap-file=" @@ -325,10 +330,22 @@ def get_beacon_config( constants.GENESIS_DATA_MOUNTPOINT_ON_CLIENTS: el_cl_genesis_data.files_artifact_uuid, constants.JWT_MOUNTPOINT_ON_CLIENTS: jwt_file, } + beacon_validator_used_ports = {} + beacon_validator_used_ports.update(BEACON_USED_PORTS) if 
node_keystore_files != None and not use_separate_validator_client: + validator_http_port_id_spec = shared_utils.new_port_spec( + validator_client_shared.VALIDATOR_HTTP_PORT_NUM, + shared_utils.TCP_PROTOCOL, + shared_utils.HTTP_APPLICATION_PROTOCOL, + ) + beacon_validator_used_ports.update( + {VALIDATOR_HTTP_PORT_ID: validator_http_port_id_spec} + ) + cmd.extend(validator_flags) files[ VALIDATOR_KEYS_MOUNTPOINT_ON_CLIENTS ] = node_keystore_files.files_artifact_uuid + files[constants.KEYMANAGER_MOUNT_PATH_ON_CLIENTS] = keymanager_file if persistent: files[BEACON_DATA_DIRPATH_ON_SERVICE_CONTAINER] = Directory( @@ -338,7 +355,7 @@ def get_beacon_config( return ServiceConfig( image=image, - ports=BEACON_USED_PORTS, + ports=beacon_validator_used_ports, cmd=cmd, files=files, private_ip_address_placeholder=PRIVATE_IP_ADDRESS_PLACEHOLDER, @@ -362,9 +379,10 @@ def get_beacon_config( ) -def new_nimbus_launcher(el_cl_genesis_data, jwt_file, network): +def new_nimbus_launcher(el_cl_genesis_data, jwt_file, network, keymanager_file): return struct( el_cl_genesis_data=el_cl_genesis_data, jwt_file=jwt_file, network=network, + keymanager_file=keymanager_file, ) diff --git a/src/cl/teku/teku_launcher.star b/src/cl/teku/teku_launcher.star index abc0a87f0..b5aecdc3e 100644 --- a/src/cl/teku/teku_launcher.star +++ b/src/cl/teku/teku_launcher.star @@ -4,9 +4,10 @@ cl_client_context = import_module("../../cl/cl_client_context.star") node_metrics = import_module("../../node_metrics_info.star") cl_node_ready_conditions = import_module("../../cl/cl_node_ready_conditions.star") constants = import_module("../../package_io/constants.star") +validator_client_shared = import_module("../../validator_client/shared.star") +# ---------------------------------- Beacon client ------------------------------------- TEKU_BINARY_FILEPATH_IN_IMAGE = "/opt/teku/bin/teku" -# ---------------------------------- Beacon client ------------------------------------- # The Docker container runs as the "teku" user so 
we can't write to root BEACON_DATA_DIRPATH_ON_SERVICE_CONTAINER = "/data/teku/teku-beacon-data" @@ -15,6 +16,7 @@ BEACON_TCP_DISCOVERY_PORT_ID = "tcp-discovery" BEACON_UDP_DISCOVERY_PORT_ID = "udp-discovery" BEACON_HTTP_PORT_ID = "http" BEACON_METRICS_PORT_ID = "metrics" +VALIDATOR_HTTP_PORT_ID = "http-validator" # Port nums BEACON_DISCOVERY_PORT_NUM = 9000 @@ -124,6 +126,8 @@ def launch( plan, launcher.el_cl_genesis_data, launcher.jwt_file, + launcher.keymanager_file, + launcher.keymanager_p12_file, launcher.network, image, beacon_service_name, @@ -200,6 +204,8 @@ def get_beacon_config( plan, el_cl_genesis_data, jwt_file, + keymanager_file, + keymanager_p12_file, network, image, service_name, @@ -290,11 +296,19 @@ def get_beacon_config( + constants.CL_CLIENT_TYPE.teku + "-" + el_client_context.client_name, + "--validator-api-enabled=true", + "--validator-api-host-allowlist=*", + "--validator-api-port={0}".format( + validator_client_shared.VALIDATOR_HTTP_PORT_NUM + ), + "--validator-api-interface=0.0.0.0", + "--validator-api-keystore-file=" + + constants.KEYMANAGER_P12_MOUNT_PATH_ON_CONTAINER, + "--validator-api-keystore-password-file=" + + constants.KEYMANAGER_MOUNT_PATH_ON_CONTAINER, + "--validator-api-docs-enabled=true", ] - if node_keystore_files != None and not use_separate_validator_client: - cmd.extend(validator_flags) - if network not in constants.PUBLIC_NETWORKS: cmd.append( "--initial-state=" @@ -366,10 +380,23 @@ def get_beacon_config( constants.GENESIS_DATA_MOUNTPOINT_ON_CLIENTS: el_cl_genesis_data.files_artifact_uuid, constants.JWT_MOUNTPOINT_ON_CLIENTS: jwt_file, } + beacon_validator_used_ports = {} + beacon_validator_used_ports.update(BEACON_USED_PORTS) if node_keystore_files != None and not use_separate_validator_client: + validator_http_port_id_spec = shared_utils.new_port_spec( + validator_client_shared.VALIDATOR_HTTP_PORT_NUM, + shared_utils.TCP_PROTOCOL, + shared_utils.HTTP_APPLICATION_PROTOCOL, + ) + beacon_validator_used_ports.update( + 
{VALIDATOR_HTTP_PORT_ID: validator_http_port_id_spec} + ) + cmd.extend(validator_flags) files[ VALIDATOR_KEYS_DIRPATH_ON_SERVICE_CONTAINER ] = node_keystore_files.files_artifact_uuid + files[constants.KEYMANAGER_MOUNT_PATH_ON_CLIENTS] = keymanager_file + files[constants.KEYMANAGER_P12_MOUNT_PATH_ON_CLIENTS] = keymanager_p12_file if persistent: files[BEACON_DATA_DIRPATH_ON_SERVICE_CONTAINER] = Directory( @@ -378,7 +405,7 @@ def get_beacon_config( ) return ServiceConfig( image=image, - ports=BEACON_USED_PORTS, + ports=beacon_validator_used_ports, cmd=cmd, # entrypoint=ENTRYPOINT_ARGS, files=files, @@ -403,7 +430,13 @@ def get_beacon_config( ) -def new_teku_launcher(el_cl_genesis_data, jwt_file, network): +def new_teku_launcher( + el_cl_genesis_data, jwt_file, network, keymanager_file, keymanager_p12_file +): return struct( - el_cl_genesis_data=el_cl_genesis_data, jwt_file=jwt_file, network=network + el_cl_genesis_data=el_cl_genesis_data, + jwt_file=jwt_file, + network=network, + keymanager_file=keymanager_file, + keymanager_p12_file=keymanager_p12_file, ) diff --git a/src/package_io/constants.star b/src/package_io/constants.star index 8c45120b2..0368abe4d 100644 --- a/src/package_io/constants.star +++ b/src/package_io/constants.star @@ -55,6 +55,14 @@ GENESIS_CONFIG_MOUNT_PATH_ON_CONTAINER = ( JWT_MOUNTPOINT_ON_CLIENTS = "/jwt" JWT_MOUNT_PATH_ON_CONTAINER = JWT_MOUNTPOINT_ON_CLIENTS + "/jwtsecret" +KEYMANAGER_MOUNT_PATH_ON_CLIENTS = "/keymanager" +KEYMANAGER_MOUNT_PATH_ON_CONTAINER = ( + KEYMANAGER_MOUNT_PATH_ON_CLIENTS + "/keymanager.txt" +) +KEYMANAGER_P12_MOUNT_PATH_ON_CLIENTS = "/keymanager-p12" +KEYMANAGER_P12_MOUNT_PATH_ON_CONTAINER = ( + KEYMANAGER_P12_MOUNT_PATH_ON_CLIENTS + "/validator_keystore.p12" +) GENESIS_FORK_VERSION = "0x10000038" BELLATRIX_FORK_VERSION = "0x30000038" diff --git a/src/participant_network.star b/src/participant_network.star index 696ee53a5..78e81972c 100644 --- a/src/participant_network.star +++ b/src/participant_network.star @@ -68,6 
+68,8 @@ def launch_participant_network( network_params, global_log_level, jwt_file, + keymanager_file, + keymanager_p12_file, persistent, xatu_sentry_params, global_tolerations, @@ -523,7 +525,7 @@ def launch_participant_network( }, constants.CL_CLIENT_TYPE.nimbus: { "launcher": nimbus.new_nimbus_launcher( - el_cl_data, jwt_file, network_params.network + el_cl_data, jwt_file, network_params.network, keymanager_file ), "launch_method": nimbus.launch, }, @@ -542,6 +544,8 @@ def launch_participant_network( el_cl_data, jwt_file, network_params.network, + keymanager_file, + keymanager_p12_file, ), "launch_method": teku.launch, }, @@ -775,6 +779,8 @@ def launch_participant_network( launcher=validator_client.new_validator_client_launcher( el_cl_genesis_data=el_cl_data ), + keymanager_file=keymanager_file, + keymanager_p12_file=keymanager_p12_file, service_name="vc-{0}-{1}-{2}".format( index_str, validator_client_type, el_client_type ), @@ -797,6 +803,8 @@ def launch_participant_network( participant_tolerations=participant.tolerations, global_tolerations=global_tolerations, node_selectors=node_selectors, + network=network_params.network, # TODO: remove when deneb rebase is done + electra_fork_epoch=network_params.electra_fork_epoch, # TODO: remove when deneb rebase is done ) all_validator_client_contexts.append(validator_client_context) diff --git a/src/static_files/static_files.star b/src/static_files/static_files.star index 40eb2254e..98d2c838e 100644 --- a/src/static_files/static_files.star +++ b/src/static_files/static_files.star @@ -68,5 +68,9 @@ CL_GENESIS_GENERATION_MNEMONICS_TEMPLATE_FILEPATH = ( ) JWT_PATH_FILEPATH = STATIC_FILES_DIRPATH + "/jwt/jwtsecret" +KEYMANAGER_PATH_FILEPATH = STATIC_FILES_DIRPATH + "/keymanager/keymanager.txt" +KEYMANAGER_P12_PATH_FILEPATH = ( + STATIC_FILES_DIRPATH + "/keymanager/validator_keystore.p12" +) SHADOWFORK_FILEPATH = "/network-configs/latest_block.json" diff --git a/src/validator_client/lighthouse.star 
b/src/validator_client/lighthouse.star index 2fcc833ce..9de7f6110 100644 --- a/src/validator_client/lighthouse.star +++ b/src/validator_client/lighthouse.star @@ -32,6 +32,8 @@ def get_config( extra_labels, tolerations, node_selectors, + network, + electra_fork_epoch, ): log_level = input_parser.get_client_log_level_or_default( participant_log_level, global_log_level, VERBOSITY_LEVELS @@ -60,6 +62,11 @@ def get_config( # "--enable-doppelganger-protection", // Disabled to not have to wait 2 epochs before validator can start # burn address - If unset, the validator will scream in its logs "--suggested-fee-recipient=" + constants.VALIDATING_REWARDS_ACCOUNT, + "--http", + "--http-port={0}".format(validator_client_shared.VALIDATOR_HTTP_PORT_NUM), + "--http-address=0.0.0.0", + "--http-allow-origin=*", + "--unencrypted-http-transport", # vvvvvvvvvvvvvvvvvvv PROMETHEUS CONFIG vvvvvvvvvvvvvvvvvvvvv "--metrics", "--metrics-address=0.0.0.0", @@ -74,6 +81,9 @@ def get_config( + el_client_context.client_name, ] + if not (constants.NETWORK_NAME.verkle in network and electra_fork_epoch == None): + cmd.append("--produce-block-v3") + if len(extra_params): cmd.extend([param for param in extra_params]) diff --git a/src/validator_client/lodestar.star b/src/validator_client/lodestar.star index c60c1b025..23e02e044 100644 --- a/src/validator_client/lodestar.star +++ b/src/validator_client/lodestar.star @@ -54,6 +54,11 @@ def get_config( "--keystoresDir=" + validator_keys_dirpath, "--secretsDir=" + validator_secrets_dirpath, "--suggestedFeeRecipient=" + constants.VALIDATING_REWARDS_ACCOUNT, + "--keymanager", + "--keymanager.authEnabled=true", + "--keymanager.port={0}".format(validator_client_shared.VALIDATOR_HTTP_PORT_NUM), + "--keymanager.address=0.0.0.0", + "--keymanager.cors=*", # vvvvvvvvvvvvvvvvvvv PROMETHEUS CONFIG vvvvvvvvvvvvvvvvvvvvv "--metrics", "--metrics.address=0.0.0.0", @@ -65,6 +70,7 @@ def get_config( + cl_client_context.client_name + "-" + el_client_context.client_name, 
+ "--useProduceBlockV3", ] if len(extra_params) > 0: diff --git a/src/validator_client/nimbus.star b/src/validator_client/nimbus.star index 7a6ecaed5..164b35ac6 100644 --- a/src/validator_client/nimbus.star +++ b/src/validator_client/nimbus.star @@ -6,6 +6,7 @@ validator_client_shared = import_module("./shared.star") def get_config( el_cl_genesis_data, image, + keymanager_file, beacon_http_url, cl_client_context, el_client_context, @@ -36,6 +37,11 @@ def get_config( "--validators-dir=" + validator_keys_dirpath, "--secrets-dir=" + validator_secrets_dirpath, "--suggested-fee-recipient=" + constants.VALIDATING_REWARDS_ACCOUNT, + "--keymanager", + "--keymanager-port={0}".format(validator_client_shared.VALIDATOR_HTTP_PORT_NUM), + "--keymanager-address=0.0.0.0", + "--keymanager-allow-origin=*", + "--keymanager-token-file=" + constants.KEYMANAGER_MOUNT_PATH_ON_CONTAINER, # vvvvvvvvvvvvvvvvvvv METRICS CONFIG vvvvvvvvvvvvvvvvvvvvv "--metrics", "--metrics-address=0.0.0.0", @@ -54,6 +60,7 @@ def get_config( files = { validator_client_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT: node_keystore_files.files_artifact_uuid, + constants.KEYMANAGER_MOUNT_PATH_ON_CLIENTS: keymanager_file, } return ServiceConfig( diff --git a/src/validator_client/prysm.star b/src/validator_client/prysm.star index 5c05ac9c1..e5ed58e4e 100644 --- a/src/validator_client/prysm.star +++ b/src/validator_client/prysm.star @@ -47,6 +47,9 @@ def get_config( "--wallet-dir=" + validator_keys_dirpath, "--wallet-password-file=" + validator_secrets_dirpath, "--suggested-fee-recipient=" + constants.VALIDATING_REWARDS_ACCOUNT, + "--rpc", + "--rpc-port={0}".format(validator_client_shared.VALIDATOR_HTTP_PORT_NUM), + "--rpc-host=0.0.0.0", # vvvvvvvvvvvvvvvvvvv METRICS CONFIG vvvvvvvvvvvvvvvvvvvvv "--disable-monitoring=false", "--monitoring-host=0.0.0.0", diff --git a/src/validator_client/shared.star b/src/validator_client/shared.star index fbbf27108..1eb7cdbd3 100644 --- a/src/validator_client/shared.star +++ 
b/src/validator_client/shared.star @@ -3,11 +3,19 @@ shared_utils = import_module("../shared_utils/shared_utils.star") PRIVATE_IP_ADDRESS_PLACEHOLDER = "KURTOSIS_IP_ADDR_PLACEHOLDER" VALIDATOR_CLIENT_KEYS_MOUNTPOINT = "/keystores" +VALIDATOR_HTTP_PORT_NUM = 5056 +VALIDATOR_HTTP_PORT_ID = "http" + VALIDATOR_CLIENT_METRICS_PORT_NUM = 8080 VALIDATOR_CLIENT_METRICS_PORT_ID = "metrics" METRICS_PATH = "/metrics" VALIDATOR_CLIENT_USED_PORTS = { + VALIDATOR_HTTP_PORT_ID: shared_utils.new_port_spec( + VALIDATOR_HTTP_PORT_NUM, + shared_utils.TCP_PROTOCOL, + shared_utils.HTTP_APPLICATION_PROTOCOL, + ), VALIDATOR_CLIENT_METRICS_PORT_ID: shared_utils.new_port_spec( VALIDATOR_CLIENT_METRICS_PORT_NUM, shared_utils.TCP_PROTOCOL, diff --git a/src/validator_client/teku.star b/src/validator_client/teku.star index f644babf3..2eb69134a 100644 --- a/src/validator_client/teku.star +++ b/src/validator_client/teku.star @@ -5,6 +5,8 @@ validator_client_shared = import_module("./shared.star") def get_config( el_cl_genesis_data, + keymanager_file, + keymanager_p12_file, image, beacon_http_url, cl_client_context, @@ -47,6 +49,16 @@ def get_config( + cl_client_context.client_name + "-" + el_client_context.client_name, + "--validator-api-enabled=true", + "--validator-api-host-allowlist=*", + "--validator-api-port={0}".format( + validator_client_shared.VALIDATOR_HTTP_PORT_NUM + ), + "--validator-api-interface=0.0.0.0", + "--validator-api-keystore-file=" + + constants.KEYMANAGER_P12_MOUNT_PATH_ON_CONTAINER, + "--validator-api-keystore-password-file=" + + constants.KEYMANAGER_MOUNT_PATH_ON_CONTAINER, # vvvvvvvvvvvvvvvvvvv METRICS CONFIG vvvvvvvvvvvvvvvvvvvvv "--metrics-enabled=true", "--metrics-host-allowlist=*", @@ -63,6 +75,8 @@ def get_config( files = { constants.GENESIS_DATA_MOUNTPOINT_ON_CLIENTS: el_cl_genesis_data.files_artifact_uuid, validator_client_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT: node_keystore_files.files_artifact_uuid, + constants.KEYMANAGER_MOUNT_PATH_ON_CLIENTS: 
keymanager_file, + constants.KEYMANAGER_P12_MOUNT_PATH_ON_CLIENTS: keymanager_p12_file, } return ServiceConfig( diff --git a/src/validator_client/validator_client_launcher.star b/src/validator_client/validator_client_launcher.star index 2d0fbcc09..0e9ab69cb 100644 --- a/src/validator_client/validator_client_launcher.star +++ b/src/validator_client/validator_client_launcher.star @@ -20,6 +20,8 @@ MAX_MEMORY = 512 def launch( plan, launcher, + keymanager_file, + keymanager_p12_file, service_name, validator_client_type, image, @@ -40,6 +42,8 @@ def launch( participant_tolerations, global_tolerations, node_selectors, + network, # TODO: remove when deneb rebase is done + electra_fork_epoch, # TODO: remove when deneb rebase is done ): if node_keystore_files == None: return None @@ -76,6 +80,8 @@ def launch( extra_labels=extra_labels, tolerations=tolerations, node_selectors=node_selectors, + network=network, # TODO: remove when deneb rebase is done + electra_fork_epoch=electra_fork_epoch, # TODO: remove when deneb rebase is done ) elif validator_client_type == constants.VC_CLIENT_TYPE.lodestar: config = lodestar.get_config( @@ -99,6 +105,8 @@ def launch( elif validator_client_type == constants.VC_CLIENT_TYPE.teku: config = teku.get_config( el_cl_genesis_data=launcher.el_cl_genesis_data, + keymanager_file=keymanager_file, + keymanager_p12_file=keymanager_p12_file, image=image, beacon_http_url=beacon_http_url, cl_client_context=cl_client_context, @@ -116,6 +124,7 @@ def launch( elif validator_client_type == constants.VC_CLIENT_TYPE.nimbus: config = nimbus.get_config( el_cl_genesis_data=launcher.el_cl_genesis_data, + keymanager_file=keymanager_file, image=image, beacon_http_url=beacon_http_url, cl_client_context=cl_client_context, @@ -168,6 +177,10 @@ def launch( service_name, validator_client_shared.METRICS_PATH, validator_metrics_url ) + validator_http_port = validator_service.ports[ + validator_client_shared.VALIDATOR_HTTP_PORT_ID + ] + return 
validator_client_context.new_validator_client_context( service_name=service_name, client_name=validator_client_type, diff --git a/static_files/keymanager/generate_certs.sh b/static_files/keymanager/generate_certs.sh new file mode 100644 index 000000000..b323df85c --- /dev/null +++ b/static_files/keymanager/generate_certs.sh @@ -0,0 +1,7 @@ +# To run this script, you need to have openssl installed on your machine +# This script generates a self-signed certificate and a private key, and then exports them to a PKCS12 keystore +# The keystore is encrypted with a password that is stored in a file called keymanager.txt +# The keystore is then saved to a file called validator_keystore.p12 +# https://docs.teku.consensys.io/23.12.0/how-to/use-external-signer/manage-keys#support-multiple-domains-and-ips + +openssl req -x509 -nodes -days 3650 -newkey rsa:2048 -config openssl.cnf | openssl pkcs12 -export -out validator_keystore.p12 -passout file:keymanager.txt diff --git a/static_files/keymanager/keymanager.txt b/static_files/keymanager/keymanager.txt new file mode 100644 index 000000000..8afbcde70 --- /dev/null +++ b/static_files/keymanager/keymanager.txt @@ -0,0 +1 @@ +api-token-0x7443c65f8cb0eb4ef6ab78c173d085f28b349f40dda27c74604439e07848a6d4 \ No newline at end of file diff --git a/static_files/keymanager/openssl.cnf b/static_files/keymanager/openssl.cnf new file mode 100644 index 000000000..0afbc9731 --- /dev/null +++ b/static_files/keymanager/openssl.cnf @@ -0,0 +1,23 @@ +[req] +distinguished_name = Kurtosis +x509_extensions = v3_req +prompt = no + +[Kurtosis] +countryName = EU +stateOrProvinceName = CA +localityName = San Francisco +organizationName = Kurtosis +organizationalUnitName = ethereum-package + +[v3_req] +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid,issuer +basicConstraints = CA:TRUE +subjectAltName = @alt_names + +[alt_names] +DNS.1 = mydomain.com +DNS.2 = localhost +IP.1 = 127.0.0.1 +IP.2 = 10.0.0.6 diff --git 
a/static_files/keymanager/validator_keystore.p12 b/static_files/keymanager/validator_keystore.p12 new file mode 100644 index 0000000000000000000000000000000000000000..c8267284102b2b247f968e0d0c5b68bf03345dd2 GIT binary patch literal 2707 zcmai$X*AS}8^_J~8I8zD_T5;^1gV^bH1PNi|5_vJSYOw12BjlMPSl{K#}o=@jqEWjGzJn zlL|m!QaHhKC<2iCZv`nJ0Ldrl9vDP-vZDTNKq#l*fq?~OiIV>}GNZ%**inbhY~frP z+LyUED_lh-0}MVzhhm1D`rlPBlz|Q<0D(Hk8-i}q10W;-`?jK)3c7o%I5;A#u9CIa zE{XuaTCd!~q|`2C^agdE3$1NVupKiI3@-xP!1-8ZCQ(18Vb)^ zKeaGhS`l_+*J;gvO_S7nx_>G3W=(ITbM3H=F4y25^MVNyI5n#j_-B&y_n^sUq3i4D z@6Q+c^ao}mrG0qZ9OM+gMNRP98oo$GDgZo!^I6%;Bh}(|HSglA4h3wo56}&8nCJ!H zYhH#Uow=hjD!G#bUPPSdVTij-z~#WYR!ww#`~yewS!vGN@JPQ2?7@Ik5Me*Ap7v30 zHu)Ek`82}uY27Hd4mmWgmaI2Z=}@rqnkT%msgP8jq^)Lk)`5{I?+|;9LCS`rE#mv8 zGHjAjOZx+rB8a_b(WPg3wZ%1TyN@WaO*4`hF-k7G*QFJIfa}b|aP6BHki%LZwg4Te zdp_5m9!P+Wfi+ToFmA}QIe`t#QdNNzH5CtB>Ncwf_9%`H^7r#6-dWAic<%%kawX45yBa3eE&+x5&pF$)k&X{2aedPk0;e$W@ox==Z;# z@V+HXzsM5e4-r)H|5_shIc4rQ=VV{qm_AqJ@r|*~tW82+&8$VUdP!@>9KG~dSffqN zD9-w(-N7=|oZAU7zoz!$t5kz zI<8o>|0^z+`+R=OCW^46&Y=41O)q0%n|_L^a)-?^4O31ceYFop3o;%_>Bha_?%X=` zW!;#g37k>fn`h(n>#=aDKx$iVA=4AS$jrMHebjL0X%>tNSLb zA7^^mlSQH&q28%~h2tJzyP=muXKd-pvkMy)0bvPY6L`ox_N~Xh$0Icu^unLv1CZpG z{!i{48@=&ex&3+i8=cRajlzt3L1cR&;<=LH7wlWll)P%HSl5O+o-#Y{go;yOGi3_V zM#=ejZkvvAtMt4hh`jT@7;6%f8wJvyYLw9ZC1KsG9CaO|D=M_>t3KtzCM)K%SzX~6 zU8|r0szebO?*I18KjLBt2M7#zPH^DK+=iU~ztfnRK;V;7^#m&am#8G7togcv$1`nr zriV0!+hPAHDv?3;&!l!bUPkQKhChHPSAYNvBbf~18x^`Gv`;@SM=;sFec&9cFe~Yz zH7)5V^)4yKmt9u$Pm+ zweB<*m$>+Vmel;%Mc?p|wb?YG`JU;?;SE{Rmwot0N{(A7%eVVsFFe_cwCPVngVmz~ssmUo?@*Z8+$ zP7g#$Z;t+B%t7S$i!^ck9n3;SY*4q=8@_z<0I542DKr5`u!L(R4S4O8WOe6V7bAV~ zlj9%NUtz~zBAJ9BNMPcHaefM?A)1lna)0@=eolANhTTOO6%agPKaGsC*j?66SP#o( z8{0?j>ssn$H`kBXHTcdJ z^Vg+|tp_wYZPKtE?n&?F&{=&VTo!N~IYOl;UP4nvB}P$E#w|Ed{a`iOs!cvnF(e5& z?3;WC0=97+QK2{tcP~;O9{TNVa#TI+P-8FM`PHu$6dA7Lyz!2@#&K&1Hq_CIxC5)~ zqk|azv7@M3{wLPx(KI*%4lg47#F5sMhSNCie5%w>wNi?lg;?>?SG9+LinoYN-Dt>hT-A;+Qm>~(8jb>OVE{ z6*7yEy#@?ij#Tx+t%&#l4YmsDgsp_& 
z;aVcqBaMq3)-L#Xe?T-U?n$Ap%q{Hv6)D;G+B|+H>$M+fv71AuM;I%^CAFU}2io0k zBK>$yvb>gyvrW7ZSGkVloa{F>Po}mFtB+t_EeW)kqnCmGs+cwX!ZW{$KmL^S#jZDZ z_N8<3%)v!_DpuJ;a$KArw74U#Ry|(ilr=OW>rR?}@S@aq25c-ubrmX-9F={lwuTPm zjJ*#NuB4Wf9z3R+Gw~_@G9+d8Bz~m?F@6+dDJaPk4Mh^( z$3*)X&*;H;?#)fsW(iCCE@v{u4r!K(EyC4vs$#k@*LrrWw*ig(^@uU4uZlxfMB5n0 zkmTm-hRizI?5I8kQPnI@G5k6_18vD?gU=wa%@qYmRkzW62Q+xut)=4x6NQUQ&5g>^ zBYkTqKIQ^LusphUz8N3QCpG&*-hEJ_dzq{muZat{{e{^5Q7?Z_z?IK$;q6;RTJj=w#eq{R+#Ufn2^9E0s{I=`LflUP literal 0 HcmV?d00001 From bc5e725edf8c917d409e6de6ce838797ad166173 Mon Sep 17 00:00:00 2001 From: Barnabas Busa Date: Tue, 5 Mar 2024 13:51:11 +0100 Subject: [PATCH 29/33] fix: lh vc flag logic (#506) --- src/validator_client/lighthouse.star | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/validator_client/lighthouse.star b/src/validator_client/lighthouse.star index 9de7f6110..9846691e9 100644 --- a/src/validator_client/lighthouse.star +++ b/src/validator_client/lighthouse.star @@ -81,7 +81,7 @@ def get_config( + el_client_context.client_name, ] - if not (constants.NETWORK_NAME.verkle in network and electra_fork_epoch == None): + if not (constants.NETWORK_NAME.verkle in network or electra_fork_epoch != None): cmd.append("--produce-block-v3") if len(extra_params): From 6fa04751cd1277a4870dc45144e15ffa5d637b93 Mon Sep 17 00:00:00 2001 From: Barnabas Busa Date: Tue, 5 Mar 2024 14:06:36 +0100 Subject: [PATCH 30/33] feat: make snapshot url configurable (#507) --- .github/tests/holesky-shadowfork-verkle.yaml_norun | 1 - README.md | 7 +++++++ network_params.yaml | 1 + src/package_io/input_parser.star | 2 ++ src/participant_network.star | 6 ++++-- 5 files changed, 14 insertions(+), 3 deletions(-) diff --git a/.github/tests/holesky-shadowfork-verkle.yaml_norun b/.github/tests/holesky-shadowfork-verkle.yaml_norun index 84c79cad6..52b3741d5 100644 --- a/.github/tests/holesky-shadowfork-verkle.yaml_norun +++ b/.github/tests/holesky-shadowfork-verkle.yaml_norun @@ -10,7 +10,6 @@ 
participants: network_params: electra_fork_epoch: 1 network: holesky-shadowfork-verkle - genesis_delay: 300 additional_services: - dora snooper_enabled: true diff --git a/README.md b/README.md index c4e1a3256..bbae4e3bf 100644 --- a/README.md +++ b/README.md @@ -422,6 +422,13 @@ network_params: # Defaults to 256 epoch ~27 hours shard_committee_period: 256 + # Network sync base url for syncing public networks from a custom snapshot (mostly useful for shadowforks) + # Defaults to "https://ethpandaops-ethereum-node-snapshots.ams3.digitaloceanspaces.com/ + # If you have a local snapshot, you can set this to the local url: + # network_snapshot_url_base = "http://10.10.101.21:10000/snapshots/" + # The snapshots are taken with https://github.com/ethpandaops/snapshotter + network_sync_base_url: https://ethpandaops-ethereum-node-snapshots.ams3.digitaloceanspaces.com/ + # Configuration place for transaction spammer - https:#github.com/MariusVanDerWijden/tx-fuzz tx_spammer_params: # A list of optional extra params that will be passed to the TX Spammer container for modifying its behaviour diff --git a/network_params.yaml b/network_params.yaml index 35d2078d1..bbe181b0b 100644 --- a/network_params.yaml +++ b/network_params.yaml @@ -58,6 +58,7 @@ network_params: network: kurtosis min_validator_withdrawability_delay: 256 shard_committee_period: 256 + network_sync_base_url: https://ethpandaops-ethereum-node-snapshots.ams3.digitaloceanspaces.com/ additional_services: - tx_spammer diff --git a/src/package_io/input_parser.star b/src/package_io/input_parser.star index 7b4e050dd..585fc5db9 100644 --- a/src/package_io/input_parser.star +++ b/src/package_io/input_parser.star @@ -245,6 +245,7 @@ def input_parser(plan, input_args): "min_validator_withdrawability_delay" ], shard_committee_period=result["network_params"]["shard_committee_period"], + network_sync_base_url=result["network_params"]["network_sync_base_url"], ), mev_params=struct( 
mev_relay_image=result["mev_params"]["mev_relay_image"], @@ -614,6 +615,7 @@ def default_network_params(): "network": "kurtosis", "min_validator_withdrawability_delay": 256, "shard_committee_period": 256, + "network_sync_base_url": "https://ethpandaops-ethereum-node-snapshots.ams3.digitaloceanspaces.com/", } diff --git a/src/participant_network.star b/src/participant_network.star index 78e81972c..367ac2954 100644 --- a/src/participant_network.star +++ b/src/participant_network.star @@ -110,7 +110,8 @@ def launch_participant_network( ] # overload the network id to match the network name latest_block = plan.run_sh( # fetch the latest block run="mkdir -p /shadowfork && \ - curl -o /shadowfork/latest_block.json https://ethpandaops-ethereum-node-snapshots.ams3.digitaloceanspaces.com/" + curl -o /shadowfork/latest_block.json " + + network_params.network_sync_base_url + base_network + "/geth/" + shadowfork_block @@ -146,7 +147,8 @@ def launch_participant_network( config=ServiceConfig( image="alpine:3.19.1", cmd=[ - "apk add --no-cache curl tar zstd && curl -s -L https://ethpandaops-ethereum-node-snapshots.ams3.digitaloceanspaces.com/" + "apk add --no-cache curl tar zstd && curl -s -L " + + network_params.network_sync_base_url + base_network + "/" + el_client_type From da55be84861e93ce777076e545abee35ff2d51ce Mon Sep 17 00:00:00 2001 From: pk910 Date: Wed, 6 Mar 2024 18:00:06 +0100 Subject: [PATCH 31/33] fix: fix end index in validator ranges file (#509) The ranges.yaml currently looks like this: ``` 0-250: cl-1-lighthouse-geth 250-500: cl-2-prysm-reth 500-750: cl-3-lodestar-besu ``` but the end index should be inclusive: ``` 0-249: cl-1-lighthouse-geth 250-499: cl-2-prysm-reth 500-749: cl-3-lodestar-besu ``` --- .../validator_keystores/validator_ranges_generator.star | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/prelaunch_data_generator/validator_keystores/validator_ranges_generator.star 
b/src/prelaunch_data_generator/validator_keystores/validator_ranges_generator.star index f6489198c..943bcdd84 100644 --- a/src/prelaunch_data_generator/validator_keystores/validator_ranges_generator.star +++ b/src/prelaunch_data_generator/validator_keystores/validator_ranges_generator.star @@ -16,7 +16,7 @@ def generate_validator_ranges( continue start_index = running_total_validator_count running_total_validator_count += participant.validator_count - end_index = start_index + participant.validator_count + end_index = start_index + participant.validator_count - 1 service_name = client.beacon_service_name data.append( { From fab341b158329b9e8c2b590dc63127dfd1d2495f Mon Sep 17 00:00:00 2001 From: Barnabas Busa Date: Fri, 8 Mar 2024 12:20:40 +0100 Subject: [PATCH 32/33] refactor!: participant_network & rename participant fields. (#508) # Important! There are many participant fields that have been renamed to be more consistent with the rest of the package. The following fields have been renamed: ### EL Flags ``` el_client_type -> el_type el_client_image -> el_image el_client_log_level -> el_log_level el_client_volume_size -> el_volume_size ``` ### CL Flags ``` cl_client_type -> cl_type cl_client_image -> cl_image cl_client_volume_size -> cl_volume_size cl_client_log_level -> cl_log_level beacon_extra_params -> cl_extra_params beacon_extra_labels -> cl_extra_labels bn_min_cpu -> cl_min_cpu bn_max_cpu -> cl_max_cpu bn_min_mem -> cl_min_mem bn_max_mem -> cl_max_mem use_separate_validator_client -> use_separate_vc ``` ### Validator flags ``` validator_client_type -> vc_type validator_tolerations -> vc_tolerations validator_client_image -> vc_image validator_extra_params -> vc_extra_params validator_extra_labels -> vc_extra_labels v_min_cpu -> vc_min_cpu v_max_cpu -> vc_max_cpu v_min_mem -> vc_min_mem v_max_mem -> vc_max_mem ``` ### Global flags ``` global_client_log_level -> global_log_level ``` Once this PR is merged, the old names will no longer work, and you will have 
to bulk rename all your yaml files. A rename.sh bash script is added, which can be used to do bulk `find and replace` operations. ```bash rename.sh yourFile.yaml ``` --------- Co-authored-by: Gyanendra Mishra --- .github/tests/assertoor.yaml | 8 +- .github/tests/besu-all.yaml | 24 +- .github/tests/blobber.yaml | 12 +- .github/tests/dencun-devnet-12.yaml | 24 +- .github/tests/dencun-genesis.yaml | 24 +- .github/tests/disable-peer-scoring.yaml | 24 +- .github/tests/ephemery.yaml | 24 +- .github/tests/erigon-all.yaml | 20 +- .github/tests/ethereumjs-all.yaml | 20 +- .github/tests/geth-all.yaml | 20 +- .../holesky-shadowfork-verkle.yaml_norun | 16 +- .github/tests/holesky-shadowfork.yaml_norun | 8 +- .github/tests/lighthouse-all.yaml | 24 +- .github/tests/lodestar-all.yaml | 24 +- .github/tests/mix-persistence-k8s.yaml | 28 +- .github/tests/mix-persistence.yaml | 28 +- .github/tests/mix-with-capella.yaml | 24 +- .github/tests/mix-with-tools-mev.yaml | 24 +- .github/tests/mix-with-tools.yaml | 24 +- .github/tests/mix.yaml | 24 +- .github/tests/mixed-cl-vc.yml | 16 +- .github/tests/nethermind-all.yaml | 20 +- .github/tests/nimbus-all.yaml | 24 +- .github/tests/nimbus-eth1-all.yaml | 24 +- .github/tests/nimbus-mev.yaml | 4 +- .github/tests/node-selectors.yaml | 12 +- .github/tests/parallel-keystores-1.yaml | 8 +- .github/tests/parallel-keystores-2.yaml | 8 +- .github/tests/parallel-keystores-3.yaml | 12 +- .../tests/preregistered_validator_count.yaml | 20 +- .github/tests/prysm-all.yaml | 24 +- .github/tests/reth-all.yaml | 20 +- .github/tests/sepolia-mix.yaml | 25 +- .github/tests/split-nimbus.yaml | 36 +- .github/tests/split-teku.yaml | 36 +- .github/tests/teku-all.yaml | 24 +- .github/tests/tolerations.yaml | 20 +- .github/tests/verkle-gen-devnet-4.yaml | 16 +- .github/tests/verkle-gen.yaml | 16 +- .github/tests/verkle.yaml | 16 +- README.md | 424 +++++---- docs/architecture.md | 6 +- main.star | 74 +- network_params.yaml | 119 ++- rename.sh | 82 ++ 
src/assertoor/assertoor_launcher.star | 16 +- .../beacon_metrics_gazer_launcher.star | 6 +- src/blob_spammer/blob_spammer.star | 12 +- src/blobber/blobber_launcher.star | 2 +- src/blobscan/blobscan_launcher.star | 8 +- src/blockscout/blockscout_launcher.star | 8 +- src/broadcaster/broadcaster.star | 8 +- ...cl_client_context.star => cl_context.star} | 2 +- src/cl/cl_launcher.star | 212 +++++ src/cl/lighthouse/lighthouse_launcher.star | 93 +- src/cl/lodestar/lodestar_launcher.star | 94 +- src/cl/nimbus/nimbus_launcher.star | 107 +-- src/cl/prysm/prysm_launcher.star | 90 +- src/cl/teku/teku_launcher.star | 111 +-- src/dora/dora_launcher.star | 4 +- src/el/besu/besu_launcher.star | 16 +- ...el_client_context.star => el_context.star} | 2 +- src/el/el_launcher.star | 163 ++++ src/el/erigon/erigon_launcher.star | 16 +- src/el/ethereumjs/ethereumjs_launcher.star | 16 +- src/el/geth/geth_launcher.star | 16 +- src/el/nethermind/nethermind_launcher.star | 16 +- src/el/nimbus-eth1/nimbus_launcher.star | 16 +- src/el/reth/reth_launcher.star | 16 +- src/el_forkmon/el_forkmon_launcher.star | 4 +- .../ethereum_metrics_exporter_launcher.star | 16 +- .../full_beaconchain_launcher.star | 8 +- src/goomy_blob/goomy_blob.star | 20 +- src/mev/mock_mev/mock_mev_launcher.star | 4 +- src/network_launcher/devnet.star | 34 + src/network_launcher/ephemery.star | 30 + src/network_launcher/kurtosis.star | 95 ++ src/network_launcher/public_network.star | 23 + src/network_launcher/shadowfork.star | 107 +++ src/package_io/constants.star | 12 +- src/package_io/input_parser.star | 285 +++--- src/participant.star | 24 +- src/participant_network.star | 851 +++--------------- .../validator_keystore_generator.star | 8 +- .../validator_ranges_generator.star | 4 +- src/prometheus/prometheus_launcher.star | 28 +- src/shared_utils/shared_utils.star | 41 + src/snooper/snooper_engine_launcher.star | 12 +- src/{validator_client => vc}/lighthouse.star | 63 +- src/{validator_client => vc}/lodestar.star | 59 +- 
src/{validator_client => vc}/nimbus.star | 49 +- src/{validator_client => vc}/prysm.star | 49 +- src/{validator_client => vc}/shared.star | 0 src/{validator_client => vc}/teku.star | 50 +- .../vc_context.star} | 2 +- .../vc_launcher.star} | 133 +-- src/xatu_sentry/xatu_sentry_launcher.star | 6 +- 97 files changed, 2469 insertions(+), 2108 deletions(-) create mode 100755 rename.sh rename src/cl/{cl_client_context.star => cl_context.star} (96%) create mode 100644 src/cl/cl_launcher.star rename src/el/{el_client_context.star => el_context.star} (94%) create mode 100644 src/el/el_launcher.star create mode 100644 src/network_launcher/devnet.star create mode 100644 src/network_launcher/ephemery.star create mode 100644 src/network_launcher/kurtosis.star create mode 100644 src/network_launcher/public_network.star create mode 100644 src/network_launcher/shadowfork.star rename src/{validator_client => vc}/lighthouse.star (66%) rename src/{validator_client => vc}/lodestar.star (61%) rename src/{validator_client => vc}/nimbus.star (62%) rename src/{validator_client => vc}/prysm.star (67%) rename src/{validator_client => vc}/shared.star (100%) rename src/{validator_client => vc}/teku.star (69%) rename src/{validator_client/validator_client_context.star => vc/vc_context.star} (84%) rename src/{validator_client/validator_client_launcher.star => vc/vc_launcher.star} (60%) diff --git a/.github/tests/assertoor.yaml b/.github/tests/assertoor.yaml index b56580c47..c5be467ef 100644 --- a/.github/tests/assertoor.yaml +++ b/.github/tests/assertoor.yaml @@ -1,9 +1,9 @@ participants: - - el_client_type: geth - cl_client_type: lighthouse + - el_type: geth + cl_type: lighthouse count: 1 - - el_client_type: geth - cl_client_type: lodestar + - el_type: geth + cl_type: lodestar count: 1 additional_services: - assertoor diff --git a/.github/tests/besu-all.yaml b/.github/tests/besu-all.yaml index f1d6d9603..8e5e8b98e 100644 --- a/.github/tests/besu-all.yaml +++ b/.github/tests/besu-all.yaml @@ 
-1,14 +1,14 @@ participants: - - el_client_type: geth - cl_client_type: teku - - el_client_type: geth - cl_client_type: teku - - el_client_type: besu - cl_client_type: prysm - - el_client_type: besu - cl_client_type: nimbus - - el_client_type: besu - cl_client_type: lighthouse - - el_client_type: besu - cl_client_type: lodestar + - el_type: geth + cl_type: teku + - el_type: geth + cl_type: teku + - el_type: besu + cl_type: prysm + - el_type: besu + cl_type: nimbus + - el_type: besu + cl_type: lighthouse + - el_type: besu + cl_type: lodestar additional_services: [] diff --git a/.github/tests/blobber.yaml b/.github/tests/blobber.yaml index 75480a1a4..3022d2cba 100644 --- a/.github/tests/blobber.yaml +++ b/.github/tests/blobber.yaml @@ -1,15 +1,15 @@ participants: - - el_client_type: geth - el_client_image: ethpandaops/geth:master - cl_client_type: lighthouse + - el_type: geth + el_image: ethpandaops/geth:master + cl_type: lighthouse blobber_enabled: true blobber_extra_params: - --proposal-action-frequency=1 - "--proposal-action={\"name\": \"blob_gossip_delay\", \"delay_milliseconds\": 1500}" count: 1 - - el_client_type: geth - el_client_image: ethpandaops/geth:master - cl_client_type: lodestar + - el_type: geth + el_image: ethpandaops/geth:master + cl_type: lodestar count: 1 network_params: deneb_fork_epoch: 1 diff --git a/.github/tests/dencun-devnet-12.yaml b/.github/tests/dencun-devnet-12.yaml index 7d63d6233..bd4dde136 100644 --- a/.github/tests/dencun-devnet-12.yaml +++ b/.github/tests/dencun-devnet-12.yaml @@ -1,16 +1,16 @@ participants: - - el_client_type: geth - cl_client_type: teku - - el_client_type: nethermind - cl_client_type: prysm - - el_client_type: erigon - cl_client_type: nimbus - - el_client_type: besu - cl_client_type: lighthouse - - el_client_type: reth - cl_client_type: lodestar - - el_client_type: ethereumjs - cl_client_type: teku + - el_type: geth + cl_type: teku + - el_type: nethermind + cl_type: prysm + - el_type: erigon + cl_type: nimbus + - 
el_type: besu + cl_type: lighthouse + - el_type: reth + cl_type: lodestar + - el_type: ethereumjs + cl_type: teku network_params: network: "dencun-devnet-12" additional_services: [] diff --git a/.github/tests/dencun-genesis.yaml b/.github/tests/dencun-genesis.yaml index 6c7709760..1b7c1e0c9 100644 --- a/.github/tests/dencun-genesis.yaml +++ b/.github/tests/dencun-genesis.yaml @@ -1,16 +1,16 @@ participants: - - el_client_type: geth - cl_client_type: teku - - el_client_type: nethermind - cl_client_type: prysm - - el_client_type: erigon - cl_client_type: nimbus - - el_client_type: besu - cl_client_type: lighthouse - - el_client_type: reth - cl_client_type: lodestar - - el_client_type: ethereumjs - cl_client_type: teku + - el_type: geth + cl_type: teku + - el_type: nethermind + cl_type: prysm + - el_type: erigon + cl_type: nimbus + - el_type: besu + cl_type: lighthouse + - el_type: reth + cl_type: lodestar + - el_type: ethereumjs + cl_type: teku network_params: deneb_fork_epoch: 0 additional_services: [] diff --git a/.github/tests/disable-peer-scoring.yaml b/.github/tests/disable-peer-scoring.yaml index 01834c423..2f86fb3fc 100644 --- a/.github/tests/disable-peer-scoring.yaml +++ b/.github/tests/disable-peer-scoring.yaml @@ -1,15 +1,15 @@ participants: - - el_client_type: geth - cl_client_type: teku - - el_client_type: besu - cl_client_type: lighthouse - - el_client_type: reth - cl_client_type: lodestar - - el_client_type: erigon - cl_client_type: nimbus - - el_client_type: nethermind - cl_client_type: prysm - - el_client_type: ethereumjs - cl_client_type: teku + - el_type: geth + cl_type: teku + - el_type: besu + cl_type: lighthouse + - el_type: reth + cl_type: lodestar + - el_type: erigon + cl_type: nimbus + - el_type: nethermind + cl_type: prysm + - el_type: ethereumjs + cl_type: teku additional_services: [] disable_peer_scoring: true diff --git a/.github/tests/ephemery.yaml b/.github/tests/ephemery.yaml index e9b93177f..b494b818f 100644 --- 
a/.github/tests/ephemery.yaml +++ b/.github/tests/ephemery.yaml @@ -1,16 +1,16 @@ participants: - - el_client_type: geth - cl_client_type: teku - - el_client_type: nethermind - cl_client_type: prysm - - el_client_type: erigon - cl_client_type: nimbus - - el_client_type: besu - cl_client_type: lighthouse - - el_client_type: reth - cl_client_type: lodestar - - el_client_type: ethereumjs - cl_client_type: teku + - el_type: geth + cl_type: teku + - el_type: nethermind + cl_type: prysm + - el_type: erigon + cl_type: nimbus + - el_type: besu + cl_type: lighthouse + - el_type: reth + cl_type: lodestar + - el_type: ethereumjs + cl_type: teku network_params: network: "ephemery" additional_services: [] diff --git a/.github/tests/erigon-all.yaml b/.github/tests/erigon-all.yaml index 4185d2d6e..78ba81ccb 100644 --- a/.github/tests/erigon-all.yaml +++ b/.github/tests/erigon-all.yaml @@ -1,12 +1,12 @@ participants: - - el_client_type: erigon - cl_client_type: teku - - el_client_type: erigon - cl_client_type: prysm - - el_client_type: erigon - cl_client_type: nimbus - - el_client_type: erigon - cl_client_type: lighthouse - - el_client_type: erigon - cl_client_type: lodestar + - el_type: erigon + cl_type: teku + - el_type: erigon + cl_type: prysm + - el_type: erigon + cl_type: nimbus + - el_type: erigon + cl_type: lighthouse + - el_type: erigon + cl_type: lodestar additional_services: [] diff --git a/.github/tests/ethereumjs-all.yaml b/.github/tests/ethereumjs-all.yaml index 9ef5a95e3..bc73b6c08 100644 --- a/.github/tests/ethereumjs-all.yaml +++ b/.github/tests/ethereumjs-all.yaml @@ -1,12 +1,12 @@ participants: - - el_client_type: ethereumjs - cl_client_type: teku - - el_client_type: ethereumjs - cl_client_type: prysm - - el_client_type: ethereumjs - cl_client_type: nimbus - - el_client_type: ethereumjs - cl_client_type: lighthouse - - el_client_type: ethereumjs - cl_client_type: lodestar + - el_type: ethereumjs + cl_type: teku + - el_type: ethereumjs + cl_type: prysm + - 
el_type: ethereumjs + cl_type: nimbus + - el_type: ethereumjs + cl_type: lighthouse + - el_type: ethereumjs + cl_type: lodestar additional_services: [] diff --git a/.github/tests/geth-all.yaml b/.github/tests/geth-all.yaml index 4ddd5eb0a..28c834872 100644 --- a/.github/tests/geth-all.yaml +++ b/.github/tests/geth-all.yaml @@ -1,12 +1,12 @@ participants: - - el_client_type: geth - cl_client_type: teku - - el_client_type: geth - cl_client_type: prysm - - el_client_type: geth - cl_client_type: nimbus - - el_client_type: geth - cl_client_type: lighthouse - - el_client_type: geth - cl_client_type: lodestar + - el_type: geth + cl_type: teku + - el_type: geth + cl_type: prysm + - el_type: geth + cl_type: nimbus + - el_type: geth + cl_type: lighthouse + - el_type: geth + cl_type: lodestar additional_services: [] diff --git a/.github/tests/holesky-shadowfork-verkle.yaml_norun b/.github/tests/holesky-shadowfork-verkle.yaml_norun index 52b3741d5..73cb68a28 100644 --- a/.github/tests/holesky-shadowfork-verkle.yaml_norun +++ b/.github/tests/holesky-shadowfork-verkle.yaml_norun @@ -1,12 +1,12 @@ participants: - - el_client_type: geth - el_client_image: ethpandaops/geth:transition-post-genesis-04b0304 - cl_client_type: lighthouse - cl_client_image: ethpandaops/lighthouse:verkle-trees-capella-2ffb8a9 - - el_client_type: geth - el_client_image: ethpandaops/geth:transition-post-genesis-04b0304 - cl_client_type: lodestar - cl_client_image: ethpandaops/lodestar:g11tech-verge-815364b + - el_type: geth + el_image: ethpandaops/geth:transition-post-genesis-04b0304 + cl_type: lighthouse + cl_image: ethpandaops/lighthouse:verkle-trees-capella-2ffb8a9 + - el_type: geth + el_image: ethpandaops/geth:transition-post-genesis-04b0304 + cl_type: lodestar + cl_image: ethpandaops/lodestar:g11tech-verge-815364b network_params: electra_fork_epoch: 1 network: holesky-shadowfork-verkle diff --git a/.github/tests/holesky-shadowfork.yaml_norun b/.github/tests/holesky-shadowfork.yaml_norun index 
ca2634a03..6e6b3da74 100644 --- a/.github/tests/holesky-shadowfork.yaml_norun +++ b/.github/tests/holesky-shadowfork.yaml_norun @@ -1,8 +1,8 @@ participants: - - el_client_type: geth - el_client_image: ethereum/client-go:v1.13.14 - cl_client_type: teku - cl_client_image: consensys/teku:24.2.0 + - el_type: geth + el_image: ethereum/client-go:v1.13.14 + cl_type: teku + cl_image: consensys/teku:24.2.0 network_params: dencun_fork_epoch: 0 network: holesky-shadowfork diff --git a/.github/tests/lighthouse-all.yaml b/.github/tests/lighthouse-all.yaml index 9c6d7b78f..72ea6a317 100644 --- a/.github/tests/lighthouse-all.yaml +++ b/.github/tests/lighthouse-all.yaml @@ -1,14 +1,14 @@ participants: - - el_client_type: geth - cl_client_type: lighthouse - - el_client_type: nethermind - cl_client_type: lighthouse - - el_client_type: erigon - cl_client_type: lighthouse - - el_client_type: besu - cl_client_type: lighthouse - - el_client_type: reth - cl_client_type: lighthouse - - el_client_type: ethereumjs - cl_client_type: lighthouse + - el_type: geth + cl_type: lighthouse + - el_type: nethermind + cl_type: lighthouse + - el_type: erigon + cl_type: lighthouse + - el_type: besu + cl_type: lighthouse + - el_type: reth + cl_type: lighthouse + - el_type: ethereumjs + cl_type: lighthouse additional_services: [] diff --git a/.github/tests/lodestar-all.yaml b/.github/tests/lodestar-all.yaml index 7067646d0..f83389374 100644 --- a/.github/tests/lodestar-all.yaml +++ b/.github/tests/lodestar-all.yaml @@ -1,14 +1,14 @@ participants: - - el_client_type: geth - cl_client_type: lodestar - - el_client_type: nethermind - cl_client_type: lodestar - - el_client_type: erigon - cl_client_type: lodestar - - el_client_type: besu - cl_client_type: lodestar - - el_client_type: reth - cl_client_type: lodestar - - el_client_type: ethereumjs - cl_client_type: lodestar + - el_type: geth + cl_type: lodestar + - el_type: nethermind + cl_type: lodestar + - el_type: erigon + cl_type: lodestar + - el_type: besu 
+ cl_type: lodestar + - el_type: reth + cl_type: lodestar + - el_type: ethereumjs + cl_type: lodestar additional_services: [] diff --git a/.github/tests/mix-persistence-k8s.yaml b/.github/tests/mix-persistence-k8s.yaml index 6b83832a9..3a8921cbd 100644 --- a/.github/tests/mix-persistence-k8s.yaml +++ b/.github/tests/mix-persistence-k8s.yaml @@ -1,17 +1,17 @@ participants: - - el_client_type: geth - cl_client_type: teku - use_separate_validator_client: true - - el_client_type: nethermind - cl_client_type: prysm - - el_client_type: erigon - cl_client_type: nimbus - use_separate_validator_client: true - - el_client_type: besu - cl_client_type: lighthouse - - el_client_type: reth - cl_client_type: lodestar - - el_client_type: ethereumjs - cl_client_type: nimbus + - el_type: geth + cl_type: teku + use_separate_vc: true + - el_type: nethermind + cl_type: prysm + - el_type: erigon + cl_type: nimbus + use_separate_vc: true + - el_type: besu + cl_type: lighthouse + - el_type: reth + cl_type: lodestar + - el_type: ethereumjs + cl_type: nimbus additional_services: [] persistent: true diff --git a/.github/tests/mix-persistence.yaml b/.github/tests/mix-persistence.yaml index 6b83832a9..3a8921cbd 100644 --- a/.github/tests/mix-persistence.yaml +++ b/.github/tests/mix-persistence.yaml @@ -1,17 +1,17 @@ participants: - - el_client_type: geth - cl_client_type: teku - use_separate_validator_client: true - - el_client_type: nethermind - cl_client_type: prysm - - el_client_type: erigon - cl_client_type: nimbus - use_separate_validator_client: true - - el_client_type: besu - cl_client_type: lighthouse - - el_client_type: reth - cl_client_type: lodestar - - el_client_type: ethereumjs - cl_client_type: nimbus + - el_type: geth + cl_type: teku + use_separate_vc: true + - el_type: nethermind + cl_type: prysm + - el_type: erigon + cl_type: nimbus + use_separate_vc: true + - el_type: besu + cl_type: lighthouse + - el_type: reth + cl_type: lodestar + - el_type: ethereumjs + cl_type: nimbus 
additional_services: [] persistent: true diff --git a/.github/tests/mix-with-capella.yaml b/.github/tests/mix-with-capella.yaml index 175f99b8d..eb73c0f6f 100644 --- a/.github/tests/mix-with-capella.yaml +++ b/.github/tests/mix-with-capella.yaml @@ -1,16 +1,16 @@ participants: - - el_client_type: geth - cl_client_type: teku - - el_client_type: nethermind - cl_client_type: prysm - - el_client_type: erigon - cl_client_type: nimbus - - el_client_type: besu - cl_client_type: lighthouse - - el_client_type: reth - cl_client_type: lodestar - - el_client_type: ethereumjs - cl_client_type: teku + - el_type: geth + cl_type: teku + - el_type: nethermind + cl_type: prysm + - el_type: erigon + cl_type: nimbus + - el_type: besu + cl_type: lighthouse + - el_type: reth + cl_type: lodestar + - el_type: ethereumjs + cl_type: teku network_params: capella_fork_epoch: 1 additional_services: [] diff --git a/.github/tests/mix-with-tools-mev.yaml b/.github/tests/mix-with-tools-mev.yaml index 2c287081f..829d88381 100644 --- a/.github/tests/mix-with-tools-mev.yaml +++ b/.github/tests/mix-with-tools-mev.yaml @@ -1,16 +1,16 @@ participants: - - el_client_type: geth - cl_client_type: teku - - el_client_type: nethermind - cl_client_type: prysm - - el_client_type: erigon - cl_client_type: nimbus - - el_client_type: besu - cl_client_type: lighthouse - - el_client_type: reth - cl_client_type: lodestar - - el_client_type: ethereumjs - cl_client_type: teku + - el_type: geth + cl_type: teku + - el_type: nethermind + cl_type: prysm + - el_type: erigon + cl_type: nimbus + - el_type: besu + cl_type: lighthouse + - el_type: reth + cl_type: lodestar + - el_type: ethereumjs + cl_type: teku additional_services: - tx_spammer - blob_spammer diff --git a/.github/tests/mix-with-tools.yaml b/.github/tests/mix-with-tools.yaml index 52afabd67..2a07debee 100644 --- a/.github/tests/mix-with-tools.yaml +++ b/.github/tests/mix-with-tools.yaml @@ -1,16 +1,16 @@ participants: - - el_client_type: geth - cl_client_type: 
teku - - el_client_type: nethermind - cl_client_type: prysm - - el_client_type: erigon - cl_client_type: nimbus - - el_client_type: besu - cl_client_type: lighthouse - - el_client_type: reth - cl_client_type: lodestar - - el_client_type: ethereumjs - cl_client_type: teku + - el_type: geth + cl_type: teku + - el_type: nethermind + cl_type: prysm + - el_type: erigon + cl_type: nimbus + - el_type: besu + cl_type: lighthouse + - el_type: reth + cl_type: lodestar + - el_type: ethereumjs + cl_type: teku additional_services: - tx_spammer - blob_spammer diff --git a/.github/tests/mix.yaml b/.github/tests/mix.yaml index 20d655062..78a86f68c 100644 --- a/.github/tests/mix.yaml +++ b/.github/tests/mix.yaml @@ -1,14 +1,14 @@ participants: - - el_client_type: geth - cl_client_type: teku - - el_client_type: nethermind - cl_client_type: prysm - - el_client_type: erigon - cl_client_type: nimbus - - el_client_type: besu - cl_client_type: lighthouse - - el_client_type: reth - cl_client_type: lodestar - - el_client_type: ethereumjs - cl_client_type: teku + - el_type: geth + cl_type: teku + - el_type: nethermind + cl_type: prysm + - el_type: erigon + cl_type: nimbus + - el_type: besu + cl_type: lighthouse + - el_type: reth + cl_type: lodestar + - el_type: ethereumjs + cl_type: teku additional_services: [] diff --git a/.github/tests/mixed-cl-vc.yml b/.github/tests/mixed-cl-vc.yml index bf20528b6..9a0d43ea5 100644 --- a/.github/tests/mixed-cl-vc.yml +++ b/.github/tests/mixed-cl-vc.yml @@ -1,10 +1,10 @@ participants: - - el_client_type: geth - cl_client_type: teku - use_separate_validator_client: true - validator_client_type: lodestar - - el_client_type: besu - cl_client_type: nimbus - use_separate_validator_client: true - validator_client_type: lighthouse + - el_type: geth + cl_type: teku + use_separate_vc: true + vc_type: lodestar + - el_type: besu + cl_type: nimbus + use_separate_vc: true + vc_type: lighthouse additional_services: [] diff --git a/.github/tests/nethermind-all.yaml 
b/.github/tests/nethermind-all.yaml index 2acaab984..f84af46a2 100644 --- a/.github/tests/nethermind-all.yaml +++ b/.github/tests/nethermind-all.yaml @@ -1,12 +1,12 @@ participants: - - el_client_type: nethermind - cl_client_type: teku - - el_client_type: nethermind - cl_client_type: prysm - - el_client_type: nethermind - cl_client_type: nimbus - - el_client_type: nethermind - cl_client_type: lighthouse - - el_client_type: nethermind - cl_client_type: lodestar + - el_type: nethermind + cl_type: teku + - el_type: nethermind + cl_type: prysm + - el_type: nethermind + cl_type: nimbus + - el_type: nethermind + cl_type: lighthouse + - el_type: nethermind + cl_type: lodestar additional_services: [] diff --git a/.github/tests/nimbus-all.yaml b/.github/tests/nimbus-all.yaml index 73b1a6d7f..8d1b29124 100644 --- a/.github/tests/nimbus-all.yaml +++ b/.github/tests/nimbus-all.yaml @@ -1,14 +1,14 @@ participants: - - el_client_type: geth - cl_client_type: nimbus - - el_client_type: nethermind - cl_client_type: nimbus - - el_client_type: erigon - cl_client_type: nimbus - - el_client_type: besu - cl_client_type: nimbus - - el_client_type: reth - cl_client_type: nimbus - - el_client_type: ethereumjs - cl_client_type: nimbus + - el_type: geth + cl_type: nimbus + - el_type: nethermind + cl_type: nimbus + - el_type: erigon + cl_type: nimbus + - el_type: besu + cl_type: nimbus + - el_type: reth + cl_type: nimbus + - el_type: ethereumjs + cl_type: nimbus additional_services: [] diff --git a/.github/tests/nimbus-eth1-all.yaml b/.github/tests/nimbus-eth1-all.yaml index fe0ee47dc..0b3017cc8 100644 --- a/.github/tests/nimbus-eth1-all.yaml +++ b/.github/tests/nimbus-eth1-all.yaml @@ -1,14 +1,14 @@ participants: - - el_client_type: geth - cl_client_type: teku - - el_client_type: nimbus - cl_client_type: teku - - el_client_type: nimbus - cl_client_type: prysm - - el_client_type: nimbus - cl_client_type: nimbus - - el_client_type: nimbus - cl_client_type: lighthouse - - el_client_type: nimbus 
- cl_client_type: lodestar + - el_type: geth + cl_type: teku + - el_type: nimbus + cl_type: teku + - el_type: nimbus + cl_type: prysm + - el_type: nimbus + cl_type: nimbus + - el_type: nimbus + cl_type: lighthouse + - el_type: nimbus + cl_type: lodestar additional_services: [] diff --git a/.github/tests/nimbus-mev.yaml b/.github/tests/nimbus-mev.yaml index e00be406a..b6d1e42ff 100644 --- a/.github/tests/nimbus-mev.yaml +++ b/.github/tests/nimbus-mev.yaml @@ -1,4 +1,4 @@ participants: - - el_client_type: geth - cl_client_type: nimbus + - el_type: geth + cl_type: nimbus mev_type: full diff --git a/.github/tests/node-selectors.yaml b/.github/tests/node-selectors.yaml index fdd34e48e..c685a341c 100644 --- a/.github/tests/node-selectors.yaml +++ b/.github/tests/node-selectors.yaml @@ -1,13 +1,13 @@ participants: - - el_client_type: reth - cl_client_type: teku - use_separate_validator_client: true + - el_type: reth + cl_type: teku + use_separate_vc: true node_selectors: { "kubernetes.io/hostname": testing-1, } - - el_client_type: reth - cl_client_type: teku - use_separate_validator_client: true + - el_type: reth + cl_type: teku + use_separate_vc: true global_node_selectors: { "kubernetes.io/hostname": testing-2, } diff --git a/.github/tests/parallel-keystores-1.yaml b/.github/tests/parallel-keystores-1.yaml index c5295c443..b6585b08f 100644 --- a/.github/tests/parallel-keystores-1.yaml +++ b/.github/tests/parallel-keystores-1.yaml @@ -1,7 +1,7 @@ participants: - - el_client_type: geth - cl_client_type: teku - - el_client_type: geth - cl_client_type: teku + - el_type: geth + cl_type: teku + - el_type: geth + cl_type: teku validator_count: 0 parallel_keystore_generation: true diff --git a/.github/tests/parallel-keystores-2.yaml b/.github/tests/parallel-keystores-2.yaml index 874323a06..9ac882dd7 100644 --- a/.github/tests/parallel-keystores-2.yaml +++ b/.github/tests/parallel-keystores-2.yaml @@ -1,7 +1,7 @@ participants: - - el_client_type: geth - cl_client_type: teku + - 
el_type: geth + cl_type: teku validator_count: 0 - - el_client_type: geth - cl_client_type: teku + - el_type: geth + cl_type: teku parallel_keystore_generation: true diff --git a/.github/tests/parallel-keystores-3.yaml b/.github/tests/parallel-keystores-3.yaml index 2e34d8c66..2e1735c4a 100644 --- a/.github/tests/parallel-keystores-3.yaml +++ b/.github/tests/parallel-keystores-3.yaml @@ -1,9 +1,9 @@ participants: - - el_client_type: geth - cl_client_type: teku - - el_client_type: geth - cl_client_type: teku + - el_type: geth + cl_type: teku + - el_type: geth + cl_type: teku validator_count: 0 - - el_client_type: geth - cl_client_type: teku + - el_type: geth + cl_type: teku parallel_keystore_generation: true diff --git a/.github/tests/preregistered_validator_count.yaml b/.github/tests/preregistered_validator_count.yaml index ad1626f14..74db62f39 100644 --- a/.github/tests/preregistered_validator_count.yaml +++ b/.github/tests/preregistered_validator_count.yaml @@ -1,14 +1,14 @@ participants: - - el_client_type: geth - cl_client_type: teku - - el_client_type: geth - cl_client_type: prysm - - el_client_type: geth - cl_client_type: nimbus - - el_client_type: geth - cl_client_type: lighthouse - - el_client_type: geth - cl_client_type: lodestar + - el_type: geth + cl_type: teku + - el_type: geth + cl_type: prysm + - el_type: geth + cl_type: nimbus + - el_type: geth + cl_type: lighthouse + - el_type: geth + cl_type: lodestar additional_services: [] network_params: preregistered_validator_count: 400 diff --git a/.github/tests/prysm-all.yaml b/.github/tests/prysm-all.yaml index 9ac03f587..1b36d6250 100644 --- a/.github/tests/prysm-all.yaml +++ b/.github/tests/prysm-all.yaml @@ -1,14 +1,14 @@ participants: - - el_client_type: geth - cl_client_type: prysm - - el_client_type: nethermind - cl_client_type: prysm - - el_client_type: erigon - cl_client_type: prysm - - el_client_type: besu - cl_client_type: prysm - - el_client_type: reth - cl_client_type: prysm - - el_client_type: 
ethereumjs - cl_client_type: prysm + - el_type: geth + cl_type: prysm + - el_type: nethermind + cl_type: prysm + - el_type: erigon + cl_type: prysm + - el_type: besu + cl_type: prysm + - el_type: reth + cl_type: prysm + - el_type: ethereumjs + cl_type: prysm additional_services: [] diff --git a/.github/tests/reth-all.yaml b/.github/tests/reth-all.yaml index f6c9be4e9..35fe37762 100644 --- a/.github/tests/reth-all.yaml +++ b/.github/tests/reth-all.yaml @@ -1,12 +1,12 @@ participants: - - el_client_type: reth - cl_client_type: teku - - el_client_type: reth - cl_client_type: prysm - - el_client_type: reth - cl_client_type: nimbus - - el_client_type: reth - cl_client_type: lighthouse - - el_client_type: reth - cl_client_type: lodestar + - el_type: reth + cl_type: teku + - el_type: reth + cl_type: prysm + - el_type: reth + cl_type: nimbus + - el_type: reth + cl_type: lighthouse + - el_type: reth + cl_type: lodestar additional_services: [] diff --git a/.github/tests/sepolia-mix.yaml b/.github/tests/sepolia-mix.yaml index 9376be804..51a4db021 100644 --- a/.github/tests/sepolia-mix.yaml +++ b/.github/tests/sepolia-mix.yaml @@ -1,16 +1,17 @@ participants: - - el_client_type: geth - cl_client_type: teku - - el_client_type: nethermind - cl_client_type: prysm - - el_client_type: erigon - cl_client_type: lighthouse - - el_client_type: besu - cl_client_type: lighthouse - - el_client_type: reth - cl_client_type: lodestar - - el_client_type: ethereumjs - cl_client_type: nimbus + - el_type: geth + cl_type: teku + - el_type: nethermind + cl_type: prysm + - el_type: erigon + el_image: ethpandaops/erigon:devel-d754b29 # this is a temp fix, till upstream is fixed + cl_type: lighthouse + - el_type: besu + cl_type: lighthouse + - el_type: reth + cl_type: lodestar + - el_type: ethereumjs + cl_type: nimbus network_params: network: sepolia additional_services: [] diff --git a/.github/tests/split-nimbus.yaml b/.github/tests/split-nimbus.yaml index d41d27d0b..fca61a8e3 100644 --- 
a/.github/tests/split-nimbus.yaml +++ b/.github/tests/split-nimbus.yaml @@ -1,21 +1,21 @@ participants: - - el_client_type: geth - cl_client_type: nimbus - use_separate_validator_client: true + - el_type: geth + cl_type: nimbus + use_separate_vc: true validator_count: 0 - - el_client_type: nethermind - cl_client_type: nimbus - use_separate_validator_client: true - - el_client_type: erigon - cl_client_type: nimbus - use_separate_validator_client: true - - el_client_type: besu - cl_client_type: nimbus - use_separate_validator_client: true - - el_client_type: reth - cl_client_type: nimbus - use_separate_validator_client: true - - el_client_type: ethereumjs - cl_client_type: nimbus - use_separate_validator_client: true + - el_type: nethermind + cl_type: nimbus + use_separate_vc: true + - el_type: erigon + cl_type: nimbus + use_separate_vc: true + - el_type: besu + cl_type: nimbus + use_separate_vc: true + - el_type: reth + cl_type: nimbus + use_separate_vc: true + - el_type: ethereumjs + cl_type: nimbus + use_separate_vc: true additional_services: [] diff --git a/.github/tests/split-teku.yaml b/.github/tests/split-teku.yaml index e2dfb6328..fc26dc780 100644 --- a/.github/tests/split-teku.yaml +++ b/.github/tests/split-teku.yaml @@ -1,21 +1,21 @@ participants: - - el_client_type: geth - cl_client_type: teku + - el_type: geth + cl_type: teku validator_count: 0 - use_separate_validator_client: true - - el_client_type: nethermind - cl_client_type: teku - use_separate_validator_client: true - - el_client_type: erigon - cl_client_type: teku - use_separate_validator_client: true - - el_client_type: besu - cl_client_type: teku - use_separate_validator_client: true - - el_client_type: reth - cl_client_type: teku - use_separate_validator_client: true - - el_client_type: ethereumjs - cl_client_type: teku - use_separate_validator_client: true + use_separate_vc: true + - el_type: nethermind + cl_type: teku + use_separate_vc: true + - el_type: erigon + cl_type: teku + 
use_separate_vc: true + - el_type: besu + cl_type: teku + use_separate_vc: true + - el_type: reth + cl_type: teku + use_separate_vc: true + - el_type: ethereumjs + cl_type: teku + use_separate_vc: true additional_services: [] diff --git a/.github/tests/teku-all.yaml b/.github/tests/teku-all.yaml index 61e653cb8..a358a1e9e 100644 --- a/.github/tests/teku-all.yaml +++ b/.github/tests/teku-all.yaml @@ -1,14 +1,14 @@ participants: - - el_client_type: geth - cl_client_type: teku - - el_client_type: nethermind - cl_client_type: teku - - el_client_type: erigon - cl_client_type: teku - - el_client_type: besu - cl_client_type: teku - - el_client_type: reth - cl_client_type: teku - - el_client_type: ethereumjs - cl_client_type: teku + - el_type: geth + cl_type: teku + - el_type: nethermind + cl_type: teku + - el_type: erigon + cl_type: teku + - el_type: besu + cl_type: teku + - el_type: reth + cl_type: teku + - el_type: ethereumjs + cl_type: teku additional_services: [] diff --git a/.github/tests/tolerations.yaml b/.github/tests/tolerations.yaml index 5635d2f6f..b318a6493 100644 --- a/.github/tests/tolerations.yaml +++ b/.github/tests/tolerations.yaml @@ -1,7 +1,7 @@ participants: - - el_client_type: reth - cl_client_type: teku - use_separate_validator_client: true + - el_type: reth + cl_type: teku + use_separate_vc: true cl_tolerations: - key: "node-role.kubernetes.io/master1" operator: "Exists" @@ -13,20 +13,20 @@ participants: - key: "node-role.kubernetes.io/master3" operator: "Exists" effect: "NoSchedule" - validator_tolerations: + vc_tolerations: - key: "node-role.kubernetes.io/master4" operator: "Exists" effect: "NoSchedule" - - el_client_type: reth - cl_client_type: teku - use_separate_validator_client: true + - el_type: reth + cl_type: teku + use_separate_vc: true tolerations: - key: "node-role.kubernetes.io/master5" operator: "Exists" effect: "NoSchedule" - - el_client_type: reth - cl_client_type: teku - use_separate_validator_client: true + - el_type: reth + 
cl_type: teku + use_separate_vc: true additional_services: - dora global_tolerations: diff --git a/.github/tests/verkle-gen-devnet-4.yaml b/.github/tests/verkle-gen-devnet-4.yaml index 2416a7a92..8b56647c9 100644 --- a/.github/tests/verkle-gen-devnet-4.yaml +++ b/.github/tests/verkle-gen-devnet-4.yaml @@ -1,13 +1,13 @@ participants: - - el_client_type: geth - el_client_image: ethpandaops/geth:kaustinen-with-shapella-0b110bd - cl_client_type: lighthouse - cl_client_image: ethpandaops/lighthouse:verkle-trees-capella-2ffb8a9 + - el_type: geth + el_image: ethpandaops/geth:kaustinen-with-shapella-0b110bd + cl_type: lighthouse + cl_image: ethpandaops/lighthouse:verkle-trees-capella-2ffb8a9 count: 2 - - el_client_type: geth - el_client_image: ethpandaops/geth:kaustinen-with-shapella-0b110bd - cl_client_type: lodestar - cl_client_image: ethpandaops/lodestar:g11tech-verge-815364b + - el_type: geth + el_image: ethpandaops/geth:kaustinen-with-shapella-0b110bd + cl_type: lodestar + cl_image: ethpandaops/lodestar:g11tech-verge-815364b network_params: network: verkle-gen-devnet-4 diff --git a/.github/tests/verkle-gen.yaml b/.github/tests/verkle-gen.yaml index bc50e1f07..0dda15c0a 100644 --- a/.github/tests/verkle-gen.yaml +++ b/.github/tests/verkle-gen.yaml @@ -1,13 +1,13 @@ participants: - - el_client_type: geth - el_client_image: ethpandaops/geth:kaustinen-with-shapella-0b110bd - cl_client_type: lighthouse - cl_client_image: ethpandaops/lighthouse:verkle-trees-capella-2ffb8a9 + - el_type: geth + el_image: ethpandaops/geth:kaustinen-with-shapella-0b110bd + cl_type: lighthouse + cl_image: ethpandaops/lighthouse:verkle-trees-capella-2ffb8a9 count: 2 - - el_client_type: geth - el_client_image: ethpandaops/geth:kaustinen-with-shapella-0b110bd - cl_client_type: lodestar - cl_client_image: ethpandaops/lodestar:g11tech-verge-815364b + - el_type: geth + el_image: ethpandaops/geth:kaustinen-with-shapella-0b110bd + cl_type: lodestar + cl_image: ethpandaops/lodestar:g11tech-verge-815364b 
count: 2 network_params: electra_fork_epoch: 0 diff --git a/.github/tests/verkle.yaml b/.github/tests/verkle.yaml index fbcfe87c0..df6a39d2a 100644 --- a/.github/tests/verkle.yaml +++ b/.github/tests/verkle.yaml @@ -1,13 +1,13 @@ participants: - - el_client_type: geth - el_client_image: ethpandaops/geth:transition-post-genesis-04b0304 - cl_client_type: lighthouse - cl_client_image: ethpandaops/lighthouse:verkle-trees-capella-2ffb8a9 + - el_type: geth + el_image: ethpandaops/geth:transition-post-genesis-04b0304 + cl_type: lighthouse + cl_image: ethpandaops/lighthouse:verkle-trees-capella-2ffb8a9 count: 2 - - el_client_type: geth - el_client_image: ethpandaops/geth:transition-post-genesis-04b0304 - cl_client_type: lodestar - cl_client_image: ethpandaops/lodestar:g11tech-verge-815364b + - el_type: geth + el_image: ethpandaops/geth:transition-post-genesis-04b0304 + cl_type: lodestar + cl_image: ethpandaops/lodestar:g11tech-verge-815364b network_params: electra_fork_epoch: 1 additional_services: diff --git a/README.md b/README.md index bbae4e3bf..5a4e9c66d 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,49 @@ +# Important recent update notes - temporary note +There are many participant fields that have been renamed to be more consistent with the rest of the package. 
The following fields have been renamed: +### EL Flags +``` +el_client_type -> el_type +el_client_image -> el_image +el_client_log_level -> el_log_level +el_client_volume_size -> el_volume_size +``` +### CL Flags +``` +cl_client_type -> cl_type +cl_client_image -> cl_image +cl_client_volume_size -> cl_volume_size +cl_client_log_level -> cl_log_level +beacon_extra_params -> cl_extra_params +beacon_extra_labels -> cl_extra_labels +bn_min_cpu -> cl_min_cpu +bn_max_cpu -> cl_max_cpu +bn_min_mem -> cl_min_mem +bn_max_mem -> cl_max_mem +use_separate_validator_client -> use_separate_vc +``` +### Validator flags +``` +validator_client_type -> vc_type +validator_tolerations -> vc_tolerations +validator_client_image -> vc_image +validator_extra_params -> vc_extra_params +validator_extra_labels -> vc_extra_labels +v_min_cpu -> vc_min_cpu +v_max_cpu -> vc_max_cpu +v_min_mem -> vc_min_mem +v_max_mem -> vc_max_mem +``` +### Global flags +``` +global_client_log_level -> global_log_level +``` + +To help you with the transition, we have added a script that will automatically update your `yaml` file to the new format. You can run the following command to update your network_params.yaml file: +```bash +./rename.sh example.yaml +``` + + # Ethereum Package ![Run of the Ethereum Network Package](run.gif) @@ -42,7 +88,7 @@ Optional features (enabled via flags or parameter files at runtime): Kurtosis packages are parameterizable, meaning you can customize your network and its behavior to suit your needs by storing parameters in a file that you can pass in at runtime like so: ```bash -kurtosis run --enclave my-testnet github.com/kurtosis-tech/ethereum-package "$(cat ~/network_params.yaml)" +kurtosis run --enclave my-testnet github.com/kurtosis-tech/ethereum-package --args-file network_params.yaml ``` Where `network_params.yaml` contains the parameters for your network in your home directory. 
@@ -60,7 +106,7 @@ When running on a public testnet using a cloud provider's Kubernetes cluster, th 3. Network Syncing: The disk speed provided by cloud providers may not be sufficient to sync with networks that have high demands, such as the mainnet. This could lead to syncing issues and delays. -To mitigate these issues, you can use the `el_client_volume_size` and `cl_client_volume_size` flags to override the default settings locally. This allows you to allocate more storage to the EL and CL clients, which can help accommodate faster state growth and improve syncing performance. However, keep in mind that increasing the volume size may also increase your cloud provider costs. Always monitor your usage and adjust as necessary to balance performance and cost. +To mitigate these issues, you can use the `el_volume_size` and `cl_volume_size` flags to override the default settings locally. This allows you to allocate more storage to the EL and CL clients, which can help accommodate faster state growth and improve syncing performance. However, keep in mind that increasing the volume size may also increase your cloud provider costs. Always monitor your usage and adjust as necessary to balance performance and cost. For optimal performance, we recommend using a cloud provider that allows you to provision Kubernetes clusters with fast persistent storage or self hosting your own Kubernetes cluster with fast persistent storage. @@ -89,8 +135,8 @@ persistent: true It is possible to run the package on a Kubernetes cluster with taints and tolerations. This is done by adding the tolerations to the `tolerations` field in the `network_params.yaml` file. For example: ```yaml participants: - - el_client_type: reth - cl_client_type: teku + - el_type: reth + cl_type: teku global_tolerations: - key: "node-role.kubernetes.io/master6" value: "true" @@ -99,7 +145,7 @@ global_tolerations: ``` It is possible to define toleration globally, per participant or per container. 
The order of precedence is as follows: -1. Container (`el_tolerations`, `cl_tolerations`, `validator_tolerations`) +1. Container (`el_tolerations`, `cl_tolerations`, `vc_tolerations`) 2. Participant (`tolerations`) 3. Global (`global_tolerations`) @@ -152,9 +198,10 @@ To configure the package behaviour, you can modify your `network_params.yaml` fi ```yaml # Specification of the participants in the network participants: +# EL(Execution Layer) Specific flags # The type of EL client that should be started # Valid values are geth, nethermind, erigon, besu, ethereumjs, reth, nimbus-eth1 -- el_client_type: geth +- el_type: geth # The Docker image that should be used for the EL client; leave blank to use the default for the client type # Defaults by client: @@ -165,30 +212,25 @@ participants: # - reth: ghcr.io/paradigmxyz/reth # - ethereumjs: ethpandaops/ethereumjs:master # - nimbus-eth1: ethpandaops/nimbus-eth1:master - el_client_image: "" + el_image: "" # The log level string that this participant's EL client should log at # If this is emptystring then the global `logLevel` parameter's value will be translated into a string appropriate for the client (e.g. if # global `logLevel` = `info` then Geth would receive `3`, Besu would receive `INFO`, etc.) 
# If this is not emptystring, then this value will override the global `logLevel` setting to allow for fine-grained control # over a specific participant's logging - el_client_log_level: "" - - # A list of optional extra params that will be passed to the EL client container for modifying its behaviour - el_extra_params: [] + el_log_level: "" # A list of optional extra env_vars the el container should spin up with el_extra_env_vars: {} - # Persistent storage size for the EL client container (in MB) - # Defaults to 0, which means that the default size for the client will be used - # Default values can be found in /src/package_io/constants.star VOLUME_SIZE - el_client_volume_size: 0 - # A list of optional extra labels the el container should spin up with # Example; el_extra_labels: {"ethereum-package.partition": "1"} el_extra_labels: {} + # A list of optional extra params that will be passed to the EL client container for modifying its behaviour + el_extra_params: [] + # A list of tolerations that will be passed to the EL client container # Only works with Kubernetes # Example: el_tolerations: @@ -200,9 +242,24 @@ participants: # Defaults to empty el_tolerations: [] + # Persistent storage size for the EL client container (in MB) + # Defaults to 0, which means that the default size for the client will be used + # Default values can be found in /src/package_io/constants.star VOLUME_SIZE + el_volume_size: 0 + + # Resource management for el containers + # CPU is milicores + # RAM is in MB + # Defaults are set per client + el_min_cpu: 0 + el_max_cpu: 0 + el_min_mem: 0 + el_max_mem: 0 + +# CL(Consensus Layer) Specific flags # The type of CL client that should be started # Valid values are nimbus, lighthouse, lodestar, teku, and prysm - cl_client_type: lighthouse + cl_type: lighthouse # The Docker image that should be used for the CL client; leave blank to use the default for the client type # Defaults by client: @@ -211,24 +268,61 @@ participants: # - nimbus: 
statusim/nimbus-eth2:multiarch-latest # - prysm: gcr.io/prysmaticlabs/prysm/beacon-chain:latest # - lodestar: chainsafe/lodestar:next - cl_client_image: "" + cl_image: "" # The log level string that this participant's CL client should log at # If this is emptystring then the global `logLevel` parameter's value will be translated into a string appropriate for the client (e.g. if # global `logLevel` = `info` then Teku would receive `INFO`, Prysm would receive `info`, etc.) # If this is not emptystring, then this value will override the global `logLevel` setting to allow for fine-grained control # over a specific participant's logging - cl_client_log_level: "" + cl_log_level: "" + + # A list of optional extra env_vars the cl container should spin up with + cl_extra_env_vars: {} + + # A list of optional extra labels that will be passed to the CL client Beacon container. + # Example; cl_extra_labels: {"ethereum-package.partition": "1"} + cl_extra_labels: {} + + # A list of optional extra params that will be passed to the CL client Beacon container for modifying its behaviour + # If the client combines the Beacon & validator nodes (e.g. 
Teku, Nimbus), then this list will be passed to the combined Beacon-validator node + cl_extra_params: [] + + # A list of tolerations that will be passed to the CL client container + # Only works with Kubernetes + # Example: el_tolerations: + # - key: "key" + # operator: "Equal" + # value: "value" + # effect: "NoSchedule" + # toleration_seconds: 3600 + # Defaults to empty + cl_tolerations: [] + + # Persistent storage size for the CL client container (in MB) + # Defaults to 0, which means that the default size for the client will be used + # Default values can be found in /src/package_io/constants.star VOLUME_SIZE + cl_volume_size: 0 + + # Resource management for cl containers + # CPU is milicores + # RAM is in MB + # Defaults are set per client + cl_min_cpu: 0 + cl_max_cpu: 0 + cl_min_mem: 0 + cl_max_mem: 0 # Whether to use a separate validator client attached to the CL client. # Defaults to false for clients that can run both in one process (Teku, Nimbus) - use_separate_validator_client: false + use_separate_vc: false +# VC (Validator Client) Specific flags # The type of validator client that should be used # Valid values are nimbus, lighthouse, lodestar, teku, and prysm # ( The prysm validator only works with a prysm CL client ) - # Defaults to matching the chosen CL client (cl_client_type) - validator_client_type: "" + # Defaults to matching the chosen CL client (cl_type) + vc_type: "" # The Docker image that should be used for the separate validator client # Defaults by client: @@ -237,14 +331,27 @@ participants: # - nimbus: statusim/nimbus-validator-client:multiarch-latest # - prysm: gcr.io/prysmaticlabs/prysm/validator:latest # - teku: consensys/teku:latest - validator_client_image: "" + vc_image: "" - # Persistent storage size for the CL client container (in MB) - # Defaults to 0, which means that the default size for the client will be used - # Default values can be found in /src/package_io/constants.star VOLUME_SIZE - cl_client_volume_size: 0 + # The log 
level string that this participant's CL client should log at + # If this is emptystring then the global `logLevel` parameter's value will be translated into a string appropriate for the client (e.g. if + # global `logLevel` = `info` then Teku would receive `INFO`, Prysm would receive `info`, etc.) + # If this is not emptystring, then this value will override the global `logLevel` setting to allow for fine-grained control + # over a specific participant's logging + vc_log_level: "" - # A list of tolerations that will be passed to the CL client container + # A list of optional extra env_vars the vc container should spin up with + vc_extra_env_vars: {} + + # A list of optional extra labels that will be passed to the CL client validator container. + # Example; vc_extra_labels: {"ethereum-package.partition": "1"} + vc_extra_labels: {} + + # A list of optional extra params that will be passed to the CL client validator container for modifying its behaviour + # If the client combines the Beacon & validator nodes (e.g. 
Teku, Nimbus), then this list will also be passed to the combined Beacon-validator node + vc_extra_params: [] + + # A list of tolerations that will be passed to the validator container # Only works with Kubernetes # Example: el_tolerations: # - key: "key" @@ -253,18 +360,28 @@ participants: # effect: "NoSchedule" # toleration_seconds: 3600 # Defaults to empty - cl_tolerations: [] + vc_tolerations: [] - # A list of tolerations that will be passed to the validator container + # Resource management for vc containers + # CPU is milicores + # RAM is in MB + # Defaults are set per client + vc_min_cpu: 0 + vc_max_cpu: 0 + vc_min_mem: 0 + vc_max_mem: 0 + + # Count of the number of validators you want to run for a given participant + # Default to null, which means that the number of validators will be using the + # network parameter num_validator_keys_per_node + validator_count: null + +#Participant specific flags + # Node selector # Only works with Kubernetes - # Example: el_tolerations: - # - key: "key" - # operator: "Equal" - # value: "value" - # effect: "NoSchedule" - # toleration_seconds: 3600 + # Example: node_selectors: { "disktype": "ssd" } # Defaults to empty - validator_tolerations: [] + node_selectors: {} # A list of tolerations that will be passed to the EL/CL/validator containers # This is to be used when you don't want to specify the tolerations for each container separately @@ -278,56 +395,9 @@ participants: # Defaults to empty tolerations: [] - # Node selector - # Only works with Kubernetes - # Example: node_selectors: { "disktype": "ssd" } - # Defaults to empty - node_selectors: {} - - # A list of optional extra params that will be passed to the CL client Beacon container for modifying its behaviour - # If the client combines the Beacon & validator nodes (e.g. 
Teku, Nimbus), then this list will be passed to the combined Beacon-validator node - beacon_extra_params: [] - - # A list of optional extra labels that will be passed to the CL client Beacon container. - # Example; beacon_extra_labels: {"ethereum-package.partition": "1"} - beacon_extra_labels: {} - - # A list of optional extra params that will be passed to the CL client validator container for modifying its behaviour - # If the client combines the Beacon & validator nodes (e.g. Teku, Nimbus), then this list will also be passed to the combined Beacon-validator node - validator_extra_params: [] - - # A list of optional extra labels that will be passed to the CL client validator container. - # Example; validator_extra_labels: {"ethereum-package.partition": "1"} - validator_extra_labels: {} - - # A set of parameters the node needs to reach an external block building network - # If `null` then the builder infrastructure will not be instantiated - # Example: - # - # "relay_endpoints": [ - # "https:#0xdeadbeefcafa@relay.example.com", - # "https:#0xdeadbeefcafb@relay.example.com", - # "https:#0xdeadbeefcafc@relay.example.com", - # "https:#0xdeadbeefcafd@relay.example.com" - # ] - builder_network_params: null - - # Resource management for el/beacon/validator containers - # CPU is milicores - # RAM is in MB - # Defaults are set per client - el_min_cpu: 0 - el_max_cpu: 0 - el_min_mem: 0 - el_max_mem: 0 - bn_min_cpu: 0 - bn_max_cpu: 0 - bn_min_mem: 0 - bn_max_mem: 0 - v_min_cpu: 0 - v_max_cpu: 0 - v_min_mem: 0 - v_max_mem: 0 + # Count of nodes to spin up for this participant + # Default to 1 + count: 1 # Snooper can be enabled with the `snooper_enabled` flag per client or globally # Defaults to false @@ -341,15 +411,6 @@ participants: # Defaults to false xatu_sentry_enabled: false - # Count of nodes to spin up for this participant - # Default to 1 - count: 1 - - # Count of the number of validators you want to run for a given participant - # Default to null, which means that 
the number of validators will be using the - # network parameter num_validator_keys_per_node - validator_count: null - # Prometheus additional configuration for a given participant prometheus target. # Execution, beacon and validator client targets on prometheus will include this # configuration. @@ -367,8 +428,26 @@ participants: # Defaults to empty blobber_extra_params: [] -# Default configuration parameters for the Eth network + # A set of parameters the node needs to reach an external block building network + # If `null` then the builder infrastructure will not be instantiated + # Example: + # + # "relay_endpoints": [ + # "https:#0xdeadbeefcafa@relay.example.com", + # "https:#0xdeadbeefcafb@relay.example.com", + # "https:#0xdeadbeefcafc@relay.example.com", + # "https:#0xdeadbeefcafd@relay.example.com" + # ] + builder_network_params: null + +# Default configuration parameters for the network network_params: + # Network name, used to enable syncing of alternative networks + # Defaults to "kurtosis" + # You can sync any public network by setting this to the network name (e.g. "mainnet", "goerli", "sepolia", "holesky") + # You can sync any devnet by setting this to the network name (e.g. "dencun-devnet-12", "verkle-gen-devnet-2") + network: "kurtosis" + # The network ID of the network. network_id: 3151908 @@ -384,8 +463,10 @@ network_params: # This mnemonic will a) be used to create keystores for all the types of validators that we have and b) be used to generate a CL genesis.ssz that has the children # validator keys already preregistered as validators preregistered_validator_keys_mnemonic: "giant issue aisle success illegal bike spike question tent bar rely arctic volcano long crawl hungry vocal artwork sniff fantasy very lucky have athlete" + # The number of pre-registered validators for genesis. 
If 0 or not specified then the value will be calculated from the participants preregistered_validator_count: 0 + # How long you want the network to wait before starting up genesis_delay: 20 @@ -403,17 +484,6 @@ network_params: # Defaults to 2048 eth1_follow_distance: 2048 - # The epoch at which the capella/deneb/electra forks are set to occur. - capella_fork_epoch: 0 - deneb_fork_epoch: 500 - electra_fork_epoch: null - - # Network name, used to enable syncing of alternative networks - # Defaults to "kurtosis" - # You can sync any public network by setting this to the network name (e.g. "mainnet", "goerli", "sepolia", "holesky") - # You can sync any devnet by setting this to the network name (e.g. "dencun-devnet-12", "verkle-gen-devnet-2") - network: "kurtosis" - # The number of epochs to wait validators to be able to withdraw # Defaults to 256 epochs ~27 hours min_validator_withdrawability_delay: 256 @@ -422,6 +492,11 @@ network_params: # Defaults to 256 epoch ~27 hours shard_committee_period: 256 + # The epoch at which the capella/deneb/electra forks are set to occur. 
+ capella_fork_epoch: 0 + deneb_fork_epoch: 4 + electra_fork_epoch: null + # Network sync base url for syncing public networks from a custom snapshot (mostly useful for shadowforks) # Defaults to "https://ethpandaops-ethereum-node-snapshots.ams3.digitaloceanspaces.com/ # If you have a local snapshot, you can set this to the local url: @@ -429,6 +504,31 @@ network_params: # The snapshots are taken with https://github.com/ethpandaops/snapshotter network_sync_base_url: https://ethpandaops-ethereum-node-snapshots.ams3.digitaloceanspaces.com/ +# Global parameters for the network + +# By default includes +# - A transaction spammer & blob spammer is launched to fake transactions sent to the network +# - Forkmon for EL will be launched +# - A prometheus will be started, coupled with grafana +# - A beacon metrics gazer will be launched +# - A light beacon chain explorer will be launched +# - Default: ["tx_spammer", "blob_spammer", "el_forkmon", "beacon_metrics_gazer", "dora"," "prometheus_grafana"] +additional_services: + - assertoor + - broadcaster + - tx_spammer + - blob_spammer + - custom_flood + - goomy_blob + - el_forkmon + - blockscout + - beacon_metrics_gazer + - dora + - full_beaconchain_explorer + - prometheus_grafana + - blobscan + + # Configuration place for transaction spammer - https:#github.com/MariusVanDerWijden/tx-fuzz tx_spammer_params: # A list of optional extra params that will be passed to the TX Spammer container for modifying its behaviour @@ -503,35 +603,13 @@ assertoor_params: tests: [] -# By default includes -# - A transaction spammer & blob spammer is launched to fake transactions sent to the network -# - Forkmon for EL will be launched -# - A prometheus will be started, coupled with grafana -# - A beacon metrics gazer will be launched -# - A light beacon chain explorer will be launched -# - Default: ["tx_spammer", "blob_spammer", "el_forkmon", "beacon_metrics_gazer", "dora"," "prometheus_grafana"] -additional_services: - - assertoor - - 
broadcaster - - tx_spammer - - blob_spammer - - custom_flood - - goomy_blob - - el_forkmon - - blockscout - - beacon_metrics_gazer - - dora - - full_beaconchain_explorer - - prometheus_grafana - - blobscan - # If set, the package will block until a finalized epoch has occurred. wait_for_finalization: false # The global log level that all clients should log at # Valid values are "error", "warn", "info", "debug", and "trace" # This value will be overridden by participant-specific values -global_client_log_level: "info" +global_log_level: "info" # EngineAPI Snooper global flags for all participants # Default to false @@ -619,14 +697,14 @@ xatu_sentry_params: xatu_server_headers: {} # Beacon event stream topics to subscribe to beacon_subscriptions: - - attestation - - block - - chain_reorg - - finalized_checkpoint - - head - - voluntary_exit - - contribution_and_proof - - blob_sidecar + - attestation + - block + - chain_reorg + - finalized_checkpoint + - head + - voluntary_exit + - contribution_and_proof + - blob_sidecar # Global tolerations that will be passed to all containers (unless overridden by a more specific toleration) # Only works with Kubernetes @@ -653,31 +731,31 @@ global_node_selectors: {} ```yaml participants: - - el_client_type: geth - el_client_image: ethpandaops/geth: + - el_type: geth + el_image: ethpandaops/geth: elExtraParams: - "--override.verkle=" - cl_client_type: lighthouse - cl_client_image: sigp/lighthouse:latest - - el_client_type: geth - el_client_image: ethpandaops/geth: + cl_type: lighthouse + cl_image: sigp/lighthouse:latest + - el_type: geth + el_image: ethpandaops/geth: elExtraParams: - "--override.verkle=" - cl_client_type: lighthouse - cl_client_image: sigp/lighthouse:latest - - el_client_type: geth - el_client_image: ethpandaops/geth: + cl_type: lighthouse + cl_image: sigp/lighthouse:latest + - el_type: geth + el_image: ethpandaops/geth: elExtraParams: - "--override.verkle=" - cl_client_type: lighthouse - cl_client_image: 
sigp/lighthouse:latest + cl_type: lighthouse + cl_image: sigp/lighthouse:latest network_params: capella_fork_epoch: 2 - deneb_fork_epoch: 5 + deneb_fork_epoch: 4 additional_services: [] wait_for_finalization: false wait_for_verifications: false -global_client_log_level: info +global_log_level: info ``` @@ -689,20 +767,20 @@ global_client_log_level: info ```yaml participants: - - el_client_type: geth - el_client_image: '' - cl_client_type: lighthouse - cl_client_image: '' + - el_type: geth + el_image: '' + cl_type: lighthouse + cl_image: '' count: 2 - - el_client_type: nethermind - el_client_image: '' - cl_client_type: teku - cl_client_image: '' + - el_type: nethermind + el_image: '' + cl_type: teku + cl_image: '' count: 1 - - el_client_type: besu - el_client_image: '' - cl_client_type: prysm - cl_client_image: '' + - el_type: besu + el_image: '' + cl_type: prysm + cl_image: '' count: 2 mev_type: mock additional_services: [] @@ -715,13 +793,13 @@ additional_services: [] ```yaml participants: - - el_client_type: geth - cl_client_type: lighthouse + - el_type: geth + cl_type: lighthouse count: 2 - - el_client_type: nethermind - cl_client_type: teku - - el_client_type: besu - cl_client_type: prysm + - el_type: nethermind + cl_type: teku + - el_type: besu + cl_type: prysm count: 2 mev_type: full network_params: @@ -737,8 +815,8 @@ additional_services: [] ```yaml participants: - - el_client_type: geth - cl_client_type: lighthouse + - el_type: geth + cl_type: lighthouse count: 2 snooper_enabled: true ``` diff --git a/docs/architecture.md b/docs/architecture.md index 83c25a915..96469883d 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -63,7 +63,7 @@ Then the validator keys are generated. A tool called [eth2-val-tools](https://gi ### Starting EL clients -Next, we plug the generated genesis data [into EL client "launchers"](https://github.com/kurtosis-tech/ethereum-package/tree/main/src/participant_network/el) to start a mining network of EL nodes. 
The launchers come with a `launch` function that consumes EL genesis data and produces information about the running EL client node. Running EL node information is represented by [an `el_client_context` struct](https://github.com/kurtosis-tech/ethereum-package/blob/main/src/participant_network/el/el_client_context.star). Each EL client type has its own launcher (e.g. [Geth](https://github.com/kurtosis-tech/ethereum-package/tree/main/src/participant_network/el/geth), [Besu](https://github.com/kurtosis-tech/ethereum-package/tree/main/src/participant_network/el/besu)) because each EL client will require different environment variables and flags to be set when launching the client's container. +Next, we plug the generated genesis data [into EL client "launchers"](https://github.com/kurtosis-tech/ethereum-package/tree/main/src/participant_network/el) to start a mining network of EL nodes. The launchers come with a `launch` function that consumes EL genesis data and produces information about the running EL client node. Running EL node information is represented by [an `el_context` struct](https://github.com/kurtosis-tech/ethereum-package/blob/main/src/participant_network/el/el_context.star). Each EL client type has its own launcher (e.g. [Geth](https://github.com/kurtosis-tech/ethereum-package/tree/main/src/participant_network/el/geth), [Besu](https://github.com/kurtosis-tech/ethereum-package/tree/main/src/participant_network/el/besu)) because each EL client will require different environment variables and flags to be set when launching the client's container. ### Starting CL clients @@ -71,9 +71,9 @@ Once CL genesis data and keys have been created, the CL client nodes are started - CL client launchers implement come with a `launch` method - One CL client launcher exists per client type (e.g. 
[Nimbus](https://github.com/kurtosis-tech/ethereum-package/tree/main/src/participant_network/cl/nimbus), [Lighthouse](https://github.com/kurtosis-tech/ethereum-package/tree/main/src/participant_network/cl/lighthouse)) -- Launched CL node information is tracked in [a `cl_client_context` struct](https://github.com/kurtosis-tech/ethereum-package/blob/main/src/participant_network/cl/cl_client_context.star) +- Launched CL node information is tracked in [a `cl_context` struct](https://github.com/kurtosis-tech/ethereum-package/blob/main/src/participant_network/cl/cl_context.star) -There are only two major difference between CL client and EL client launchers. First, the `cl_client_launcher.launch` method also consumes an `el_client_context`, because each CL client is connected in a 1:1 relationship with an EL client. Second, because CL clients have keys, the keystore files are passed in to the `launch` function as well. +There are only two major difference between CL client and EL client launchers. First, the `cl_client_launcher.launch` method also consumes an `el_context`, because each CL client is connected in a 1:1 relationship with an EL client. Second, because CL clients have keys, the keystore files are passed in to the `launch` function as well. 
## Auxiliary Services diff --git a/main.star b/main.star index a52722e28..02e42aba7 100644 --- a/main.star +++ b/main.star @@ -99,7 +99,7 @@ def run(plan, args={}): plan, args_with_right_defaults.participants, network_params, - args_with_right_defaults.global_client_log_level, + args_with_right_defaults.global_log_level, jwt_file, keymanager_file, keymanager_p12_file, @@ -112,20 +112,20 @@ def run(plan, args={}): plan.print( "NODE JSON RPC URI: '{0}:{1}'".format( - all_participants[0].el_client_context.ip_addr, - all_participants[0].el_client_context.rpc_port_num, + all_participants[0].el_context.ip_addr, + all_participants[0].el_context.rpc_port_num, ) ) - all_el_client_contexts = [] - all_cl_client_contexts = [] - all_validator_client_contexts = [] + all_el_contexts = [] + all_cl_contexts = [] + all_vc_contexts = [] all_ethereum_metrics_exporter_contexts = [] all_xatu_sentry_contexts = [] for participant in all_participants: - all_el_client_contexts.append(participant.el_client_context) - all_cl_client_contexts.append(participant.cl_client_context) - all_validator_client_contexts.append(participant.validator_client_context) + all_el_contexts.append(participant.el_context) + all_cl_contexts.append(participant.cl_context) + all_vc_contexts.append(participant.vc_context) all_ethereum_metrics_exporter_contexts.append( participant.ethereum_metrics_exporter_context ) @@ -138,13 +138,13 @@ def run(plan, args={}): ranges = validator_ranges.generate_validator_ranges( plan, validator_ranges_config_template, - all_cl_client_contexts, + all_cl_contexts, args_with_right_defaults.participants, ) fuzz_target = "http://{0}:{1}".format( - all_el_client_contexts[0].ip_addr, - all_el_client_contexts[0].rpc_port_num, + all_el_contexts[0].ip_addr, + all_el_contexts[0].rpc_port_num, ) # Broadcaster forwards requests, sent to it, to all nodes in parallel @@ -152,7 +152,7 @@ def run(plan, args={}): args_with_right_defaults.additional_services.remove("broadcaster") broadcaster_service = 
broadcaster.launch_broadcaster( plan, - all_el_client_contexts, + all_el_contexts, global_node_selectors, ) fuzz_target = "http://{0}:{1}".format( @@ -174,18 +174,18 @@ def run(plan, args={}): and args_with_right_defaults.mev_type == MOCK_MEV_TYPE ): el_uri = "{0}:{1}".format( - all_el_client_contexts[0].ip_addr, - all_el_client_contexts[0].engine_rpc_port_num, + all_el_contexts[0].ip_addr, + all_el_contexts[0].engine_rpc_port_num, ) beacon_uri = "{0}:{1}".format( - all_cl_client_contexts[0].ip_addr, all_cl_client_contexts[0].http_port_num + all_cl_contexts[0].ip_addr, all_cl_contexts[0].http_port_num ) endpoint = mock_mev.launch_mock_mev( plan, el_uri, beacon_uri, raw_jwt_secret, - args_with_right_defaults.global_client_log_level, + args_with_right_defaults.global_log_level, global_node_selectors, ) mev_endpoints.append(endpoint) @@ -194,16 +194,16 @@ def run(plan, args={}): and args_with_right_defaults.mev_type == FULL_MEV_TYPE ): builder_uri = "http://{0}:{1}".format( - all_el_client_contexts[-1].ip_addr, all_el_client_contexts[-1].rpc_port_num + all_el_contexts[-1].ip_addr, all_el_contexts[-1].rpc_port_num ) beacon_uris = ",".join( [ "http://{0}:{1}".format(context.ip_addr, context.http_port_num) - for context in all_cl_client_contexts + for context in all_cl_contexts ] ) - first_cl_client = all_cl_client_contexts[0] + first_cl_client = all_cl_contexts[0] first_client_beacon_name = first_cl_client.beacon_service_name contract_owner, normal_user = genesis_constants.PRE_FUNDED_ACCOUNTS[6:8] mev_flood.launch_mev_flood( @@ -263,8 +263,8 @@ def run(plan, args={}): mev_boost_service_name = "{0}-{1}-{2}-{3}".format( input_parser.MEV_BOOST_SERVICE_NAME_PREFIX, index_str, - participant.cl_client_type, - participant.el_client_type, + participant.cl_type, + participant.el_type, ) mev_boost_context = mev_boost.launch( plan, @@ -306,7 +306,7 @@ def run(plan, args={}): plan, genesis_constants.PRE_FUNDED_ACCOUNTS, fuzz_target, - all_cl_client_contexts[0], + 
all_cl_contexts[0], network_params.deneb_fork_epoch, network_params.seconds_per_slot, network_params.genesis_delay, @@ -319,8 +319,8 @@ def run(plan, args={}): goomy_blob.launch_goomy_blob( plan, genesis_constants.PRE_FUNDED_ACCOUNTS, - all_el_client_contexts, - all_cl_client_contexts[0], + all_el_contexts, + all_cl_contexts[0], network_params.seconds_per_slot, goomy_blob_params, global_node_selectors, @@ -336,7 +336,7 @@ def run(plan, args={}): el_forkmon.launch_el_forkmon( plan, el_forkmon_config_template, - all_el_client_contexts, + all_el_contexts, global_node_selectors, ) plan.print("Successfully launched execution layer forkmon") @@ -345,7 +345,7 @@ def run(plan, args={}): beacon_metrics_gazer_prometheus_metrics_job = ( beacon_metrics_gazer.launch_beacon_metrics_gazer( plan, - all_cl_client_contexts, + all_cl_contexts, network_params, global_node_selectors, ) @@ -359,7 +359,7 @@ def run(plan, args={}): plan.print("Launching blockscout") blockscout_sc_verif_url = blockscout.launch_blockscout( plan, - all_el_client_contexts, + all_el_contexts, persistent, global_node_selectors, ) @@ -370,7 +370,7 @@ def run(plan, args={}): dora.launch_dora( plan, dora_config_template, - all_cl_client_contexts, + all_cl_contexts, el_cl_data_files_artifact_uuid, network_params.electra_fork_epoch, network_params.network, @@ -381,8 +381,8 @@ def run(plan, args={}): plan.print("Launching blobscan") blobscan.launch_blobscan( plan, - all_cl_client_contexts, - all_el_client_contexts, + all_cl_contexts, + all_el_contexts, network_params.network_id, persistent, global_node_selectors, @@ -396,8 +396,8 @@ def run(plan, args={}): full_beaconchain_explorer.launch_full_beacon( plan, full_beaconchain_explorer_config_template, - all_cl_client_contexts, - all_el_client_contexts, + all_cl_contexts, + all_el_contexts, persistent, global_node_selectors, ) @@ -436,9 +436,9 @@ def run(plan, args={}): plan.print("Launching prometheus...") prometheus_private_url = prometheus.launch_prometheus( plan, - 
all_el_client_contexts, - all_cl_client_contexts, - all_validator_client_contexts, + all_el_contexts, + all_cl_contexts, + all_vc_contexts, prometheus_additional_metrics_jobs, all_ethereum_metrics_exporter_contexts, all_xatu_sentry_contexts, @@ -458,7 +458,7 @@ def run(plan, args={}): if args_with_right_defaults.wait_for_finalization: plan.print("Waiting for the first finalized epoch") - first_cl_client = all_cl_client_contexts[0] + first_cl_client = all_cl_contexts[0] first_client_beacon_name = first_cl_client.beacon_service_name epoch_recipe = GetHttpRequestRecipe( endpoint="/eth/v1/beacon/states/head/finality_checkpoints", diff --git a/network_params.yaml b/network_params.yaml index bbe181b0b..cc06ddfca 100644 --- a/network_params.yaml +++ b/network_params.yaml @@ -1,45 +1,59 @@ participants: - - el_client_type: geth - el_client_image: ethereum/client-go:latest - el_client_log_level: "" - el_extra_params: [] +# EL + - el_type: geth + el_image: ethereum/client-go:latest + el_log_level: "" + el_extra_env_vars: {} el_extra_labels: {} + el_extra_params: [] el_tolerations: [] - cl_client_type: lighthouse - cl_client_image: sigp/lighthouse:latest - cl_client_log_level: "" - cl_tolerations: [] - validator_tolerations: [] - tolerations: [] - node_selectors: {} - beacon_extra_params: [] - beacon_extra_labels: {} - validator_extra_params: [] - validator_extra_labels: {} - builder_network_params: null - validator_count: null - snooper_enabled: false - ethereum_metrics_exporter_enabled: false - xatu_sentry_enabled: false + el_volume_size: 0 el_min_cpu: 0 el_max_cpu: 0 el_min_mem: 0 el_max_mem: 0 - bn_min_cpu: 0 - bn_max_cpu: 0 - bn_min_mem: 0 - bn_max_mem: 0 - v_min_cpu: 0 - v_max_cpu: 0 - v_min_mem: 0 - v_max_mem: 0 +# CL + cl_type: lighthouse + cl_image: sigp/lighthouse:latest + cl_log_level: "" + cl_extra_env_vars: {} + cl_extra_labels: {} + cl_extra_params: [] + cl_tolerations: [] + cl_volume_size: 0 + cl_min_cpu: 0 + cl_max_cpu: 0 + cl_min_mem: 0 + cl_max_mem: 0 + 
use_separate_vc: true +# Validator + vc_type: lighthouse + vc_image: sigp/lighthouse:latest + vc_log_level: "" + vc_extra_env_vars: {} + vc_extra_labels: {} + vc_extra_params: [] + vc_tolerations: [] + vc_min_cpu: 0 + vc_max_cpu: 0 + vc_min_mem: 0 + vc_max_mem: 0 + validator_count: null +# participant specific + node_selectors: {} + tolerations: [] count: 2 + snooper_enabled: false + ethereum_metrics_exporter_enabled: false + xatu_sentry_enabled: false prometheus_config: scrape_interval: 15s labels: {} blobber_enabled: false blobber_extra_params: [] + builder_network_params: null network_params: + network: kurtosis network_id: "3151908" deposit_contract_address: "0x4242424242424242424242424242424242424242" seconds_per_slot: 12 @@ -52,14 +66,13 @@ network_params: genesis_delay: 20 max_churn: 8 ejection_balance: 16000000000 + eth1_follow_distance: 2048 + min_validator_withdrawability_delay: 256 + shard_committee_period: 256 capella_fork_epoch: 0 deneb_fork_epoch: 4 electra_fork_epoch: null - network: kurtosis - min_validator_withdrawability_delay: 256 - shard_committee_period: 256 network_sync_base_url: https://ethpandaops-ethereum-node-snapshots.ams3.digitaloceanspaces.com/ - additional_services: - tx_spammer - blob_spammer @@ -67,14 +80,34 @@ additional_services: - beacon_metrics_gazer - dora - prometheus_grafana +tx_spammer_params: + tx_spammer_extra_args: [] +goomy_blob_params: + goomy_blob_args: [] +assertoor_params: + image: "" + run_stability_check: true + run_block_proposal_check: true + run_transaction_test: false + run_blob_transaction_test: false + run_opcodes_transaction_test: false + run_lifecycle_test: false + tests: [] wait_for_finalization: false -global_client_log_level: info +global_log_level: info snooper_enabled: false ethereum_metrics_exporter_enabled: false parallel_keystore_generation: false +disable_peer_scoring: false +grafana_additional_dashboards: [] +persistent: false mev_type: null mev_params: mev_relay_image: flashbots/mev-boost-relay + 
mev_builder_image: ethpandaops/flashbots-builder:main + mev_builder_cl_image: sigp/lighthouse:latest + mev_boost_image: flashbots/mev-boost + mev_boost_args: ["mev-boost", "--relay-check"] mev_relay_api_extra_args: [] mev_relay_housekeeper_extra_args: [] mev_relay_website_extra_args: [] @@ -85,10 +118,22 @@ mev_params: mev_flood_image: flashbots/mev-flood mev_flood_extra_args: [] mev_flood_seconds_per_bundle: 15 - mev_boost_image: flashbots/mev-boost - mev_boost_args: ["mev-boost", "--relay-check"] -grafana_additional_dashboards: [] -persistent: false + custom_flood_params: + interval_between_transactions: 1 xatu_sentry_enabled: false +xatu_sentry_params: + xatu_sentry_image: ethpandaops/xatu-sentry + xatu_server_addr: localhost:8000 + xatu_server_tls: false + xatu_server_headers: {} + beacon_subscriptions: + - attestation + - block + - chain_reorg + - finalized_checkpoint + - head + - voluntary_exit + - contribution_and_proof + - blob_sidecar global_tolerations: [] global_node_selectors: {} diff --git a/rename.sh b/rename.sh new file mode 100755 index 000000000..8308683e8 --- /dev/null +++ b/rename.sh @@ -0,0 +1,82 @@ +#!/bin/bash + +# Helper function to perform replacements +perform_replacements() { + local input_file="$1" + shift + local replacements=("$@") + + for ((i = 0; i < ${#replacements[@]}; i+=2)); do + original="${replacements[$i]}" + replacement="${replacements[$i+1]}" + sed -i -- "s/$original/$replacement/g" "$input_file" + done +} + +# Check if an input file is provided +if [ $# -eq 0 ]; then + echo "Usage: $0 " + exit 1 +fi + +# Define the input YAML file +input_file="$1" + +# Define the replacement pairs as a list +replacements=( + el_client_type + el_type + el_client_image + el_image + el_client_log_level + el_log_level + el_client_volume_size + el_volume_size + cl_client_type + cl_type + cl_client_image + cl_image + cl_client_volume_size + cl_volume_size + cl_client_log_level + cl_log_level + beacon_extra_params + cl_extra_params + 
beacon_extra_labels + cl_extra_labels + bn_min_cpu + cl_min_cpu + bn_max_cpu + cl_max_cpu + bn_min_mem + cl_min_mem + bn_max_mem + cl_max_mem + use_separate_validator_client + use_separate_vc + validator_client_type + vc_type + validator_tolerations + vc_tolerations + validator_client_image + vc_image + validator_extra_params + vc_extra_params + validator_extra_labels + vc_extra_labels + v_min_cpu + vc_min_cpu + v_max_cpu + vc_max_cpu + v_min_mem + vc_min_mem + v_max_mem + vc_max_mem + global_client_log_level + global_log_level +) + +# Perform replacements +perform_replacements "$input_file" "${replacements[@]}" + +echo "Replacements completed." diff --git a/src/assertoor/assertoor_launcher.star b/src/assertoor/assertoor_launcher.star index 35c206412..76afa3d87 100644 --- a/src/assertoor/assertoor_launcher.star +++ b/src/assertoor/assertoor_launcher.star @@ -39,12 +39,12 @@ def launch_assertoor( global_node_selectors, ): all_client_info = [] - validator_client_info = [] + vc_info = [] for index, participant in enumerate(participant_contexts): participant_config = participant_configs[index] - cl_client = participant.cl_client_context - el_client = participant.el_client_context + cl_client = participant.cl_context + el_client = participant.el_context all_client_info.append( new_client_info( @@ -57,7 +57,7 @@ def launch_assertoor( ) if participant_config.validator_count != 0: - validator_client_info.append( + vc_info.append( new_client_info( cl_client.ip_addr, cl_client.http_port_num, @@ -68,7 +68,7 @@ def launch_assertoor( ) template_data = new_config_template_data( - HTTP_PORT_NUMBER, all_client_info, validator_client_info, assertoor_params + HTTP_PORT_NUMBER, all_client_info, vc_info, assertoor_params ) template_and_data = shared_utils.new_template_and_data( @@ -134,9 +134,7 @@ def get_config( ) -def new_config_template_data( - listen_port_num, client_info, validator_client_info, assertoor_params -): +def new_config_template_data(listen_port_num, client_info, 
vc_info, assertoor_params): additional_tests = [] for index, testcfg in enumerate(assertoor_params.tests): if type(testcfg) == "dict": @@ -153,7 +151,7 @@ def new_config_template_data( return { "ListenPortNum": listen_port_num, "ClientInfo": client_info, - "ValidatorClientInfo": validator_client_info, + "ValidatorClientInfo": vc_info, "RunStabilityCheck": assertoor_params.run_stability_check, "RunBlockProposalCheck": assertoor_params.run_block_proposal_check, "RunLifecycleTest": assertoor_params.run_lifecycle_test, diff --git a/src/beacon_metrics_gazer/beacon_metrics_gazer_launcher.star b/src/beacon_metrics_gazer/beacon_metrics_gazer_launcher.star index 2d0b6739c..35b04f4f8 100644 --- a/src/beacon_metrics_gazer/beacon_metrics_gazer_launcher.star +++ b/src/beacon_metrics_gazer/beacon_metrics_gazer_launcher.star @@ -33,13 +33,13 @@ MAX_MEMORY = 300 def launch_beacon_metrics_gazer( plan, - cl_client_contexts, + cl_contexts, network_params, global_node_selectors, ): config = get_config( - cl_client_contexts[0].ip_addr, - cl_client_contexts[0].http_port_num, + cl_contexts[0].ip_addr, + cl_contexts[0].http_port_num, global_node_selectors, ) diff --git a/src/blob_spammer/blob_spammer.star b/src/blob_spammer/blob_spammer.star index c294e60ff..6ee934158 100644 --- a/src/blob_spammer/blob_spammer.star +++ b/src/blob_spammer/blob_spammer.star @@ -14,7 +14,7 @@ def launch_blob_spammer( plan, prefunded_addresses, el_uri, - cl_client_context, + cl_context, deneb_fork_epoch, seconds_per_slot, genesis_delay, @@ -23,7 +23,7 @@ def launch_blob_spammer( config = get_config( prefunded_addresses, el_uri, - cl_client_context, + cl_context, deneb_fork_epoch, seconds_per_slot, genesis_delay, @@ -35,7 +35,7 @@ def launch_blob_spammer( def get_config( prefunded_addresses, el_uri, - cl_client_context, + cl_context, deneb_fork_epoch, seconds_per_slot, genesis_delay, @@ -51,12 +51,12 @@ def get_config( "apk update", "apk add curl jq", 'current_epoch=$(curl -s 
http://{0}:{1}/eth/v2/beacon/blocks/head | jq -r ".version")'.format( - cl_client_context.ip_addr, cl_client_context.http_port_num + cl_context.ip_addr, cl_context.http_port_num ), "echo $current_epoch", 'while [ $current_epoch != "deneb" ]; do echo "waiting for deneb, current epoch is $current_epoch"; current_epoch=$(curl -s http://{0}:{1}/eth/v2/beacon/blocks/head | jq -r ".version"); sleep {2}; done'.format( - cl_client_context.ip_addr, - cl_client_context.http_port_num, + cl_context.ip_addr, + cl_context.http_port_num, seconds_per_slot, ), 'echo "sleep is over, starting to send blob transactions"', diff --git a/src/blobber/blobber_launcher.star b/src/blobber/blobber_launcher.star index 8d9438f97..0927a1190 100644 --- a/src/blobber/blobber_launcher.star +++ b/src/blobber/blobber_launcher.star @@ -1,6 +1,6 @@ shared_utils = import_module("../shared_utils/shared_utils.star") input_parser = import_module("../package_io/input_parser.star") -cl_client_context = import_module("../cl/cl_client_context.star") +cl_context = import_module("../cl/cl_context.star") blobber_context = import_module("../blobber/blobber_context.star") diff --git a/src/blobscan/blobscan_launcher.star b/src/blobscan/blobscan_launcher.star index 2a5c90591..528d285a8 100644 --- a/src/blobscan/blobscan_launcher.star +++ b/src/blobscan/blobscan_launcher.star @@ -55,18 +55,18 @@ POSTGRES_MAX_MEMORY = 1024 def launch_blobscan( plan, - cl_client_contexts, - el_client_contexts, + cl_contexts, + el_contexts, chain_id, persistent, global_node_selectors, ): node_selectors = global_node_selectors beacon_node_rpc_uri = "http://{0}:{1}".format( - cl_client_contexts[0].ip_addr, cl_client_contexts[0].http_port_num + cl_contexts[0].ip_addr, cl_contexts[0].http_port_num ) execution_node_rpc_uri = "http://{0}:{1}".format( - el_client_contexts[0].ip_addr, el_client_contexts[0].rpc_port_num + el_contexts[0].ip_addr, el_contexts[0].rpc_port_num ) postgres_output = postgres.run( diff --git 
a/src/blockscout/blockscout_launcher.star b/src/blockscout/blockscout_launcher.star index 5c515d9d7..8b6e3e94c 100644 --- a/src/blockscout/blockscout_launcher.star +++ b/src/blockscout/blockscout_launcher.star @@ -40,7 +40,7 @@ VERIF_USED_PORTS = { def launch_blockscout( plan, - el_client_contexts, + el_contexts, persistent, global_node_selectors, ): @@ -53,11 +53,11 @@ def launch_blockscout( node_selectors=global_node_selectors, ) - el_client_context = el_client_contexts[0] + el_context = el_contexts[0] el_client_rpc_url = "http://{}:{}/".format( - el_client_context.ip_addr, el_client_context.rpc_port_num + el_context.ip_addr, el_context.rpc_port_num ) - el_client_name = el_client_context.client_name + el_client_name = el_context.client_name config_verif = get_config_verif(global_node_selectors) verif_service_name = "{}-verif".format(SERVICE_NAME_BLOCKSCOUT) diff --git a/src/broadcaster/broadcaster.star b/src/broadcaster/broadcaster.star index 1cb671598..7e7d0fca9 100644 --- a/src/broadcaster/broadcaster.star +++ b/src/broadcaster/broadcaster.star @@ -9,20 +9,20 @@ MIN_MEMORY = 128 MAX_MEMORY = 2048 -def launch_broadcaster(plan, all_el_client_contexts, global_node_selectors): - config = get_config(all_el_client_contexts, global_node_selectors) +def launch_broadcaster(plan, all_el_contexts, global_node_selectors): + config = get_config(all_el_contexts, global_node_selectors) return plan.add_service(SERVICE_NAME, config) def get_config( - all_el_client_contexts, + all_el_contexts, node_selectors, ): return ServiceConfig( image=IMAGE_NAME, cmd=[ "http://{0}:{1}".format(context.ip_addr, context.rpc_port_num) - for context in all_el_client_contexts + for context in all_el_contexts ], min_cpu=MIN_CPU, max_cpu=MAX_CPU, diff --git a/src/cl/cl_client_context.star b/src/cl/cl_context.star similarity index 96% rename from src/cl/cl_client_context.star rename to src/cl/cl_context.star index 0aeb9f35c..b89b79f74 100644 --- a/src/cl/cl_client_context.star +++ 
b/src/cl/cl_context.star @@ -1,4 +1,4 @@ -def new_cl_client_context( +def new_cl_context( client_name, enr, ip_addr, diff --git a/src/cl/cl_launcher.star b/src/cl/cl_launcher.star new file mode 100644 index 000000000..f5c1d4afb --- /dev/null +++ b/src/cl/cl_launcher.star @@ -0,0 +1,212 @@ +lighthouse = import_module("./lighthouse/lighthouse_launcher.star") +lodestar = import_module("./lodestar/lodestar_launcher.star") +nimbus = import_module("./nimbus/nimbus_launcher.star") +prysm = import_module("./prysm/prysm_launcher.star") +teku = import_module("./teku/teku_launcher.star") + +constants = import_module("../package_io/constants.star") +input_parser = import_module("../package_io/input_parser.star") +shared_utils = import_module("../shared_utils/shared_utils.star") + +snooper = import_module("../snooper/snooper_engine_launcher.star") + +cl_context_BOOTNODE = None + + +def launch( + plan, + network_params, + el_cl_data, + jwt_file, + keymanager_file, + keymanager_p12_file, + participants, + all_el_contexts, + global_log_level, + global_node_selectors, + global_tolerations, + persistent, + network_id, + num_participants, + validator_data, + prysm_password_relative_filepath, + prysm_password_artifact_uuid, +): + plan.print("Launching CL network") + + cl_launchers = { + constants.CL_TYPE.lighthouse: { + "launcher": lighthouse.new_lighthouse_launcher( + el_cl_data, jwt_file, network_params.network + ), + "launch_method": lighthouse.launch, + }, + constants.CL_TYPE.lodestar: { + "launcher": lodestar.new_lodestar_launcher( + el_cl_data, jwt_file, network_params.network + ), + "launch_method": lodestar.launch, + }, + constants.CL_TYPE.nimbus: { + "launcher": nimbus.new_nimbus_launcher( + el_cl_data, + jwt_file, + network_params.network, + keymanager_file, + ), + "launch_method": nimbus.launch, + }, + constants.CL_TYPE.prysm: { + "launcher": prysm.new_prysm_launcher( + el_cl_data, + jwt_file, + network_params.network, + prysm_password_relative_filepath, + 
prysm_password_artifact_uuid, + ), + "launch_method": prysm.launch, + }, + constants.CL_TYPE.teku: { + "launcher": teku.new_teku_launcher( + el_cl_data, + jwt_file, + network_params.network, + keymanager_file, + keymanager_p12_file, + ), + "launch_method": teku.launch, + }, + } + + all_snooper_engine_contexts = [] + all_cl_contexts = [] + preregistered_validator_keys_for_nodes = ( + validator_data.per_node_keystores + if network_params.network == constants.NETWORK_NAME.kurtosis + or constants.NETWORK_NAME.shadowfork in network_params.network + else None + ) + + for index, participant in enumerate(participants): + cl_type = participant.cl_type + el_type = participant.el_type + node_selectors = input_parser.get_client_node_selectors( + participant.node_selectors, + global_node_selectors, + ) + + if cl_type not in cl_launchers: + fail( + "Unsupported launcher '{0}', need one of '{1}'".format( + cl_type, ",".join([cl.name for cl in cl_launchers.keys()]) + ) + ) + + cl_launcher, launch_method = ( + cl_launchers[cl_type]["launcher"], + cl_launchers[cl_type]["launch_method"], + ) + + index_str = shared_utils.zfill_custom(index + 1, len(str(len(participants)))) + + cl_service_name = "cl-{0}-{1}-{2}".format(index_str, cl_type, el_type) + new_cl_node_validator_keystores = None + if participant.validator_count != 0: + new_cl_node_validator_keystores = preregistered_validator_keys_for_nodes[ + index + ] + + el_context = all_el_contexts[index] + + cl_context = None + snooper_engine_context = None + if participant.snooper_enabled: + snooper_service_name = "snooper-{0}-{1}-{2}".format( + index_str, cl_type, el_type + ) + snooper_engine_context = snooper.launch( + plan, + snooper_service_name, + el_context, + node_selectors, + ) + plan.print( + "Successfully added {0} snooper participants".format( + snooper_engine_context + ) + ) + all_snooper_engine_contexts.append(snooper_engine_context) + + if index == 0: + cl_context = launch_method( + plan, + cl_launcher, + cl_service_name, + 
participant.cl_image, + participant.cl_log_level, + global_log_level, + cl_context_BOOTNODE, + el_context, + new_cl_node_validator_keystores, + participant.cl_min_cpu, + participant.cl_max_cpu, + participant.cl_min_mem, + participant.cl_max_mem, + participant.snooper_enabled, + snooper_engine_context, + participant.blobber_enabled, + participant.blobber_extra_params, + participant.cl_extra_params, + participant.cl_extra_env_vars, + participant.cl_extra_labels, + persistent, + participant.cl_volume_size, + participant.cl_tolerations, + participant.tolerations, + global_tolerations, + node_selectors, + participant.use_separate_vc, + ) + else: + boot_cl_client_ctx = all_cl_contexts + cl_context = launch_method( + plan, + cl_launcher, + cl_service_name, + participant.cl_image, + participant.cl_log_level, + global_log_level, + boot_cl_client_ctx, + el_context, + new_cl_node_validator_keystores, + participant.cl_min_cpu, + participant.cl_max_cpu, + participant.cl_min_mem, + participant.cl_max_mem, + participant.snooper_enabled, + snooper_engine_context, + participant.blobber_enabled, + participant.blobber_extra_params, + participant.cl_extra_params, + participant.cl_extra_env_vars, + participant.cl_extra_labels, + persistent, + participant.cl_volume_size, + participant.cl_tolerations, + participant.tolerations, + global_tolerations, + node_selectors, + participant.use_separate_vc, + ) + + # Add participant cl additional prometheus labels + for metrics_info in cl_context.cl_nodes_metrics_info: + if metrics_info != None: + metrics_info["config"] = participant.prometheus_config + + all_cl_contexts.append(cl_context) + return ( + all_cl_contexts, + all_snooper_engine_contexts, + preregistered_validator_keys_for_nodes, + ) diff --git a/src/cl/lighthouse/lighthouse_launcher.star b/src/cl/lighthouse/lighthouse_launcher.star index f0cead5ff..1e7686f92 100644 --- a/src/cl/lighthouse/lighthouse_launcher.star +++ b/src/cl/lighthouse/lighthouse_launcher.star @@ -1,6 +1,6 @@ 
shared_utils = import_module("../../shared_utils/shared_utils.star") input_parser = import_module("../../package_io/input_parser.star") -cl_client_context = import_module("../../cl/cl_client_context.star") +cl_context = import_module("../../cl/cl_context.star") node_metrics = import_module("../../node_metrics_info.star") cl_node_ready_conditions = import_module("../../cl/cl_node_ready_conditions.star") constants = import_module("../../package_io/constants.star") @@ -54,11 +54,11 @@ BEACON_USED_PORTS = { } VERBOSITY_LEVELS = { - constants.GLOBAL_CLIENT_LOG_LEVEL.error: "error", - constants.GLOBAL_CLIENT_LOG_LEVEL.warn: "warn", - constants.GLOBAL_CLIENT_LOG_LEVEL.info: "info", - constants.GLOBAL_CLIENT_LOG_LEVEL.debug: "debug", - constants.GLOBAL_CLIENT_LOG_LEVEL.trace: "trace", + constants.GLOBAL_LOG_LEVEL.error: "error", + constants.GLOBAL_LOG_LEVEL.warn: "warn", + constants.GLOBAL_LOG_LEVEL.info: "info", + constants.GLOBAL_LOG_LEVEL.debug: "debug", + constants.GLOBAL_LOG_LEVEL.trace: "trace", } @@ -70,25 +70,26 @@ def launch( participant_log_level, global_log_level, bootnode_contexts, - el_client_context, + el_context, node_keystore_files, - bn_min_cpu, - bn_max_cpu, - bn_min_mem, - bn_max_mem, + cl_min_cpu, + cl_max_cpu, + cl_min_mem, + cl_max_mem, snooper_enabled, snooper_engine_context, blobber_enabled, blobber_extra_params, - extra_beacon_params, - extra_beacon_labels, + extra_params, + extra_env_vars, + extra_labels, persistent, cl_volume_size, cl_tolerations, participant_tolerations, global_tolerations, node_selectors, - use_separate_validator_client=True, + use_separate_vc=True, ): beacon_service_name = "{0}".format(service_name) @@ -102,16 +103,16 @@ def launch( network_name = shared_utils.get_network_name(launcher.network) - bn_min_cpu = int(bn_min_cpu) if int(bn_min_cpu) > 0 else BEACON_MIN_CPU - bn_max_cpu = ( - int(bn_max_cpu) - if int(bn_max_cpu) > 0 + cl_min_cpu = int(cl_min_cpu) if int(cl_min_cpu) > 0 else BEACON_MIN_CPU + cl_max_cpu = ( + 
int(cl_max_cpu) + if int(cl_max_cpu) > 0 else constants.RAM_CPU_OVERRIDES[network_name]["lighthouse_max_cpu"] ) - bn_min_mem = int(bn_min_mem) if int(bn_min_mem) > 0 else BEACON_MIN_MEMORY - bn_max_mem = ( - int(bn_max_mem) - if int(bn_max_mem) > 0 + cl_min_mem = int(cl_min_mem) if int(cl_min_mem) > 0 else BEACON_MIN_MEMORY + cl_max_mem = ( + int(cl_max_mem) + if int(cl_max_mem) > 0 else constants.RAM_CPU_OVERRIDES[network_name]["lighthouse_max_mem"] ) @@ -130,16 +131,17 @@ def launch( image, beacon_service_name, bootnode_contexts, - el_client_context, + el_context, log_level, - bn_min_cpu, - bn_max_cpu, - bn_min_mem, - bn_max_mem, + cl_min_cpu, + cl_max_cpu, + cl_min_mem, + cl_max_mem, snooper_enabled, snooper_engine_context, - extra_beacon_params, - extra_beacon_labels, + extra_params, + extra_env_vars, + extra_labels, persistent, cl_volume_size, tolerations, @@ -198,7 +200,7 @@ def launch( ) nodes_metrics_info = [beacon_node_metrics_info] - return cl_client_context.new_cl_client_context( + return cl_context.new_cl_context( "lighthouse", beacon_node_enr, beacon_service.ip_address, @@ -223,15 +225,16 @@ def get_beacon_config( image, service_name, boot_cl_client_ctxs, - el_client_context, + el_context, log_level, - bn_min_cpu, - bn_max_cpu, - bn_min_mem, - bn_max_mem, + cl_min_cpu, + cl_max_cpu, + cl_min_mem, + cl_max_mem, snooper_enabled, snooper_engine_context, extra_params, + extra_env_vars, extra_labels, persistent, cl_volume_size, @@ -246,8 +249,8 @@ def get_beacon_config( ) else: EXECUTION_ENGINE_ENDPOINT = "http://{0}:{1}".format( - el_client_context.ip_addr, - el_client_context.engine_rpc_port_num, + el_context.ip_addr, + el_context.engine_rpc_port_num, ) # NOTE: If connecting to the merge devnet remotely we DON'T want the following flags; when they're not set, the node's external IP address is auto-detected @@ -367,25 +370,27 @@ def get_beacon_config( persistent_key="data-{0}".format(service_name), size=cl_volume_size, ) + env = 
{RUST_BACKTRACE_ENVVAR_NAME: RUST_FULL_BACKTRACE_KEYWORD} + env.update(extra_env_vars) return ServiceConfig( image=image, ports=BEACON_USED_PORTS, cmd=cmd, files=files, - env_vars={RUST_BACKTRACE_ENVVAR_NAME: RUST_FULL_BACKTRACE_KEYWORD}, + env_vars=env, private_ip_address_placeholder=PRIVATE_IP_ADDRESS_PLACEHOLDER, ready_conditions=cl_node_ready_conditions.get_ready_conditions( BEACON_HTTP_PORT_ID ), - min_cpu=bn_min_cpu, - max_cpu=bn_max_cpu, - min_memory=bn_min_mem, - max_memory=bn_max_mem, + min_cpu=cl_min_cpu, + max_cpu=cl_max_cpu, + min_memory=cl_min_mem, + max_memory=cl_max_mem, labels=shared_utils.label_maker( - constants.CL_CLIENT_TYPE.lighthouse, + constants.CL_TYPE.lighthouse, constants.CLIENT_TYPES.cl, image, - el_client_context.client_name, + el_context.client_name, extra_labels, ), tolerations=tolerations, diff --git a/src/cl/lodestar/lodestar_launcher.star b/src/cl/lodestar/lodestar_launcher.star index dea87e67d..8cd4c2c2a 100644 --- a/src/cl/lodestar/lodestar_launcher.star +++ b/src/cl/lodestar/lodestar_launcher.star @@ -1,6 +1,6 @@ shared_utils = import_module("../../shared_utils/shared_utils.star") input_parser = import_module("../../package_io/input_parser.star") -cl_client_context = import_module("../../cl/cl_client_context.star") +cl_context = import_module("../../cl/cl_context.star") node_metrics = import_module("../../node_metrics_info.star") cl_node_ready_conditions = import_module("../../cl/cl_node_ready_conditions.star") blobber_launcher = import_module("../../blobber/blobber_launcher.star") @@ -43,11 +43,11 @@ BEACON_USED_PORTS = { } VERBOSITY_LEVELS = { - constants.GLOBAL_CLIENT_LOG_LEVEL.error: "error", - constants.GLOBAL_CLIENT_LOG_LEVEL.warn: "warn", - constants.GLOBAL_CLIENT_LOG_LEVEL.info: "info", - constants.GLOBAL_CLIENT_LOG_LEVEL.debug: "debug", - constants.GLOBAL_CLIENT_LOG_LEVEL.trace: "trace", + constants.GLOBAL_LOG_LEVEL.error: "error", + constants.GLOBAL_LOG_LEVEL.warn: "warn", + constants.GLOBAL_LOG_LEVEL.info: "info", + 
constants.GLOBAL_LOG_LEVEL.debug: "debug", + constants.GLOBAL_LOG_LEVEL.trace: "trace", } @@ -59,25 +59,26 @@ def launch( participant_log_level, global_log_level, bootnode_contexts, - el_client_context, + el_context, node_keystore_files, - bn_min_cpu, - bn_max_cpu, - bn_min_mem, - bn_max_mem, + cl_min_cpu, + cl_max_cpu, + cl_min_mem, + cl_max_mem, snooper_enabled, snooper_engine_context, blobber_enabled, blobber_extra_params, - extra_beacon_params, - extra_beacon_labels, + extra_params, + extra_env_vars, + extra_labels, persistent, cl_volume_size, cl_tolerations, participant_tolerations, global_tolerations, node_selectors, - use_separate_validator_client=True, + use_separate_vc=True, ): beacon_service_name = "{0}".format(service_name) log_level = input_parser.get_client_log_level_or_default( @@ -90,16 +91,16 @@ def launch( network_name = shared_utils.get_network_name(launcher.network) - bn_min_cpu = int(bn_min_cpu) if int(bn_min_cpu) > 0 else BEACON_MIN_CPU - bn_max_cpu = ( - int(bn_max_cpu) - if int(bn_max_cpu) > 0 + cl_min_cpu = int(cl_min_cpu) if int(cl_min_cpu) > 0 else BEACON_MIN_CPU + cl_max_cpu = ( + int(cl_max_cpu) + if int(cl_max_cpu) > 0 else constants.RAM_CPU_OVERRIDES[network_name]["lodestar_max_cpu"] ) - bn_min_mem = int(bn_min_mem) if int(bn_min_mem) > 0 else BEACON_MIN_MEMORY - bn_max_mem = ( - int(bn_max_mem) - if int(bn_max_mem) > 0 + cl_min_mem = int(cl_min_mem) if int(cl_min_mem) > 0 else BEACON_MIN_MEMORY + cl_max_mem = ( + int(cl_max_mem) + if int(cl_max_mem) > 0 else constants.RAM_CPU_OVERRIDES[network_name]["lodestar_max_mem"] ) @@ -118,16 +119,17 @@ def launch( image, beacon_service_name, bootnode_contexts, - el_client_context, + el_context, log_level, - bn_min_cpu, - bn_max_cpu, - bn_min_mem, - bn_max_mem, + cl_min_cpu, + cl_max_cpu, + cl_min_mem, + cl_max_mem, snooper_enabled, snooper_engine_context, - extra_beacon_params, - extra_beacon_labels, + extra_params, + extra_env_vars, + extra_labels, persistent, cl_volume_size, tolerations, @@ 
-189,7 +191,7 @@ def launch( ) nodes_metrics_info = [beacon_node_metrics_info] - return cl_client_context.new_cl_client_context( + return cl_context.new_cl_context( "lodestar", beacon_node_enr, beacon_service.ip_address, @@ -214,15 +216,16 @@ def get_beacon_config( image, service_name, bootnode_contexts, - el_client_context, + el_context, log_level, - bn_min_cpu, - bn_max_cpu, - bn_min_mem, - bn_max_mem, + cl_min_cpu, + cl_max_cpu, + cl_min_mem, + cl_max_mem, snooper_enabled, snooper_engine_context, extra_params, + extra_env_vars, extra_labels, persistent, cl_volume_size, @@ -230,8 +233,8 @@ def get_beacon_config( node_selectors, ): el_client_rpc_url_str = "http://{0}:{1}".format( - el_client_context.ip_addr, - el_client_context.rpc_port_num, + el_context.ip_addr, + el_context.rpc_port_num, ) # If snooper is enabled use the snooper engine context, otherwise use the execution client context @@ -242,8 +245,8 @@ def get_beacon_config( ) else: EXECUTION_ENGINE_ENDPOINT = "http://{0}:{1}".format( - el_client_context.ip_addr, - el_client_context.engine_rpc_port_num, + el_context.ip_addr, + el_context.engine_rpc_port_num, ) cmd = [ @@ -344,20 +347,21 @@ def get_beacon_config( image=image, ports=BEACON_USED_PORTS, cmd=cmd, + env_vars=extra_env_vars, files=files, private_ip_address_placeholder=PRIVATE_IP_ADDRESS_PLACEHOLDER, ready_conditions=cl_node_ready_conditions.get_ready_conditions( BEACON_HTTP_PORT_ID ), - min_cpu=bn_min_cpu, - max_cpu=bn_max_cpu, - min_memory=bn_min_mem, - max_memory=bn_max_mem, + min_cpu=cl_min_cpu, + max_cpu=cl_max_cpu, + min_memory=cl_min_mem, + max_memory=cl_max_mem, labels=shared_utils.label_maker( - constants.CL_CLIENT_TYPE.lodestar, + constants.CL_TYPE.lodestar, constants.CLIENT_TYPES.cl, image, - el_client_context.client_name, + el_context.client_name, extra_labels, ), tolerations=tolerations, diff --git a/src/cl/nimbus/nimbus_launcher.star b/src/cl/nimbus/nimbus_launcher.star index 2995c88a6..14731c33f 100644 --- 
a/src/cl/nimbus/nimbus_launcher.star +++ b/src/cl/nimbus/nimbus_launcher.star @@ -1,11 +1,11 @@ # ---------------------------------- Library Imports ---------------------------------- shared_utils = import_module("../../shared_utils/shared_utils.star") input_parser = import_module("../../package_io/input_parser.star") -cl_client_context = import_module("../../cl/cl_client_context.star") +cl_context = import_module("../../cl/cl_context.star") cl_node_ready_conditions = import_module("../../cl/cl_node_ready_conditions.star") node_metrics = import_module("../../node_metrics_info.star") constants = import_module("../../package_io/constants.star") -validator_client_shared = import_module("../../validator_client/shared.star") +vc_shared = import_module("../../vc/shared.star") # ---------------------------------- Beacon client ------------------------------------- # Nimbus requires that its data directory already exists (because it expects you to bind-mount it), so we # have to to create it @@ -63,11 +63,11 @@ BEACON_USED_PORTS = { } VERBOSITY_LEVELS = { - constants.GLOBAL_CLIENT_LOG_LEVEL.error: "ERROR", - constants.GLOBAL_CLIENT_LOG_LEVEL.warn: "WARN", - constants.GLOBAL_CLIENT_LOG_LEVEL.info: "INFO", - constants.GLOBAL_CLIENT_LOG_LEVEL.debug: "DEBUG", - constants.GLOBAL_CLIENT_LOG_LEVEL.trace: "TRACE", + constants.GLOBAL_LOG_LEVEL.error: "ERROR", + constants.GLOBAL_LOG_LEVEL.warn: "WARN", + constants.GLOBAL_LOG_LEVEL.info: "INFO", + constants.GLOBAL_LOG_LEVEL.debug: "DEBUG", + constants.GLOBAL_LOG_LEVEL.trace: "TRACE", } ENTRYPOINT_ARGS = ["sh", "-c"] @@ -81,25 +81,26 @@ def launch( participant_log_level, global_log_level, bootnode_contexts, - el_client_context, + el_context, node_keystore_files, - bn_min_cpu, - bn_max_cpu, - bn_min_mem, - bn_max_mem, + cl_min_cpu, + cl_max_cpu, + cl_min_mem, + cl_max_mem, snooper_enabled, snooper_engine_context, blobber_enabled, blobber_extra_params, - extra_beacon_params, - extra_beacon_labels, + extra_params, + extra_env_vars, + 
extra_labels, persistent, cl_volume_size, cl_tolerations, participant_tolerations, global_tolerations, node_selectors, - use_separate_validator_client, + use_separate_vc, ): beacon_service_name = "{0}".format(service_name) @@ -113,16 +114,16 @@ def launch( network_name = shared_utils.get_network_name(launcher.network) - bn_min_cpu = int(bn_min_cpu) if int(bn_min_cpu) > 0 else BEACON_MIN_CPU - bn_max_cpu = ( - int(bn_max_cpu) - if int(bn_max_cpu) > 0 + cl_min_cpu = int(cl_min_cpu) if int(cl_min_cpu) > 0 else BEACON_MIN_CPU + cl_max_cpu = ( + int(cl_max_cpu) + if int(cl_max_cpu) > 0 else constants.RAM_CPU_OVERRIDES[network_name]["nimbus_max_cpu"] ) - bn_min_mem = int(bn_min_mem) if int(bn_min_mem) > 0 else BEACON_MIN_MEMORY - bn_max_mem = ( - int(bn_max_mem) - if int(bn_max_mem) > 0 + cl_min_mem = int(cl_min_mem) if int(cl_min_mem) > 0 else BEACON_MIN_MEMORY + cl_max_mem = ( + int(cl_max_mem) + if int(cl_max_mem) > 0 else constants.RAM_CPU_OVERRIDES[network_name]["nimbus_max_mem"] ) @@ -141,18 +142,19 @@ def launch( image, beacon_service_name, bootnode_contexts, - el_client_context, + el_context, log_level, node_keystore_files, - bn_min_cpu, - bn_max_cpu, - bn_min_mem, - bn_max_mem, + cl_min_cpu, + cl_max_cpu, + cl_min_mem, + cl_max_mem, snooper_enabled, snooper_engine_context, - extra_beacon_params, - extra_beacon_labels, - use_separate_validator_client, + extra_params, + extra_env_vars, + extra_labels, + use_separate_vc, persistent, cl_volume_size, tolerations, @@ -190,7 +192,7 @@ def launch( ) nodes_metrics_info = [nimbus_node_metrics_info] - return cl_client_context.new_cl_client_context( + return cl_context.new_cl_context( "nimbus", beacon_node_enr, beacon_service.ip_address, @@ -216,18 +218,19 @@ def get_beacon_config( image, service_name, bootnode_contexts, - el_client_context, + el_context, log_level, node_keystore_files, - bn_min_cpu, - bn_max_cpu, - bn_min_mem, - bn_max_mem, + cl_min_cpu, + cl_max_cpu, + cl_min_mem, + cl_max_mem, snooper_enabled, 
snooper_engine_context, extra_params, + extra_env_vars, extra_labels, - use_separate_validator_client, + use_separate_vc, persistent, cl_volume_size, tolerations, @@ -252,8 +255,8 @@ def get_beacon_config( ) else: EXECUTION_ENGINE_ENDPOINT = "http://{0}:{1}".format( - el_client_context.ip_addr, - el_client_context.engine_rpc_port_num, + el_context.ip_addr, + el_context.engine_rpc_port_num, ) cmd = [ @@ -295,12 +298,9 @@ def get_beacon_config( "--validators-dir=" + validator_keys_dirpath, "--secrets-dir=" + validator_secrets_dirpath, "--suggested-fee-recipient=" + constants.VALIDATING_REWARDS_ACCOUNT, - "--graffiti=" - + constants.CL_CLIENT_TYPE.nimbus - + "-" - + el_client_context.client_name, + "--graffiti=" + constants.CL_TYPE.nimbus + "-" + el_context.client_name, "--keymanager", - "--keymanager-port={0}".format(validator_client_shared.VALIDATOR_HTTP_PORT_NUM), + "--keymanager-port={0}".format(vc_shared.VALIDATOR_HTTP_PORT_NUM), "--keymanager-address=0.0.0.0", "--keymanager-allow-origin=*", "--keymanager-token-file=" + constants.KEYMANAGER_MOUNT_PATH_ON_CONTAINER, @@ -332,9 +332,9 @@ def get_beacon_config( } beacon_validator_used_ports = {} beacon_validator_used_ports.update(BEACON_USED_PORTS) - if node_keystore_files != None and not use_separate_validator_client: + if node_keystore_files != None and not use_separate_vc: validator_http_port_id_spec = shared_utils.new_port_spec( - validator_client_shared.VALIDATOR_HTTP_PORT_NUM, + vc_shared.VALIDATOR_HTTP_PORT_NUM, shared_utils.TCP_PROTOCOL, shared_utils.HTTP_APPLICATION_PROTOCOL, ) @@ -357,20 +357,21 @@ def get_beacon_config( image=image, ports=beacon_validator_used_ports, cmd=cmd, + env_vars=extra_env_vars, files=files, private_ip_address_placeholder=PRIVATE_IP_ADDRESS_PLACEHOLDER, ready_conditions=cl_node_ready_conditions.get_ready_conditions( BEACON_HTTP_PORT_ID ), - min_cpu=bn_min_cpu, - max_cpu=bn_max_cpu, - min_memory=bn_min_mem, - max_memory=bn_max_mem, + min_cpu=cl_min_cpu, + max_cpu=cl_max_cpu, + 
min_memory=cl_min_mem, + max_memory=cl_max_mem, labels=shared_utils.label_maker( - constants.CL_CLIENT_TYPE.nimbus, + constants.CL_TYPE.nimbus, constants.CLIENT_TYPES.cl, image, - el_client_context.client_name, + el_context.client_name, extra_labels, ), user=User(uid=0, gid=0), diff --git a/src/cl/prysm/prysm_launcher.star b/src/cl/prysm/prysm_launcher.star index 189f6c3ab..669193b74 100644 --- a/src/cl/prysm/prysm_launcher.star +++ b/src/cl/prysm/prysm_launcher.star @@ -1,6 +1,6 @@ shared_utils = import_module("../../shared_utils/shared_utils.star") input_parser = import_module("../../package_io/input_parser.star") -cl_client_context = import_module("../../cl/cl_client_context.star") +cl_context = import_module("../../cl/cl_context.star") node_metrics = import_module("../../node_metrics_info.star") cl_node_ready_conditions = import_module("../../cl/cl_node_ready_conditions.star") constants = import_module("../../package_io/constants.star") @@ -50,11 +50,11 @@ BEACON_NODE_USED_PORTS = { } VERBOSITY_LEVELS = { - constants.GLOBAL_CLIENT_LOG_LEVEL.error: "error", - constants.GLOBAL_CLIENT_LOG_LEVEL.warn: "warn", - constants.GLOBAL_CLIENT_LOG_LEVEL.info: "info", - constants.GLOBAL_CLIENT_LOG_LEVEL.debug: "debug", - constants.GLOBAL_CLIENT_LOG_LEVEL.trace: "trace", + constants.GLOBAL_LOG_LEVEL.error: "error", + constants.GLOBAL_LOG_LEVEL.warn: "warn", + constants.GLOBAL_LOG_LEVEL.info: "info", + constants.GLOBAL_LOG_LEVEL.debug: "debug", + constants.GLOBAL_LOG_LEVEL.trace: "trace", } @@ -66,25 +66,26 @@ def launch( participant_log_level, global_log_level, bootnode_contexts, - el_client_context, + el_context, node_keystore_files, - bn_min_cpu, - bn_max_cpu, - bn_min_mem, - bn_max_mem, + cl_min_cpu, + cl_max_cpu, + cl_min_mem, + cl_max_mem, snooper_enabled, snooper_engine_context, blobber_enabled, blobber_extra_params, - extra_beacon_params, - extra_beacon_labels, + extra_params, + extra_env_vars, + extra_labels, persistent, cl_volume_size, cl_tolerations, 
participant_tolerations, global_tolerations, node_selectors, - use_separate_validator_client=True, + use_separate_vc=True, ): beacon_service_name = "{0}".format(service_name) log_level = input_parser.get_client_log_level_or_default( @@ -97,16 +98,16 @@ def launch( network_name = shared_utils.get_network_name(launcher.network) - bn_min_cpu = int(bn_min_cpu) if int(bn_min_cpu) > 0 else BEACON_MIN_CPU - bn_max_cpu = ( - int(bn_max_cpu) - if int(bn_max_cpu) > 0 + cl_min_cpu = int(cl_min_cpu) if int(cl_min_cpu) > 0 else BEACON_MIN_CPU + cl_max_cpu = ( + int(cl_max_cpu) + if int(cl_max_cpu) > 0 else constants.RAM_CPU_OVERRIDES[network_name]["prysm_max_cpu"] ) - bn_min_mem = int(bn_min_mem) if int(bn_min_mem) > 0 else BEACON_MIN_MEMORY - bn_max_mem = ( - int(bn_max_mem) - if int(bn_max_mem) > 0 + cl_min_mem = int(cl_min_mem) if int(cl_min_mem) > 0 else BEACON_MIN_MEMORY + cl_max_mem = ( + int(cl_max_mem) + if int(cl_max_mem) > 0 else constants.RAM_CPU_OVERRIDES[network_name]["prysm_max_mem"] ) @@ -124,16 +125,17 @@ def launch( image, beacon_service_name, bootnode_contexts, - el_client_context, + el_context, log_level, - bn_min_cpu, - bn_max_cpu, - bn_min_mem, - bn_max_mem, + cl_min_cpu, + cl_max_cpu, + cl_min_mem, + cl_max_mem, snooper_enabled, snooper_engine_context, - extra_beacon_params, - extra_beacon_labels, + extra_params, + extra_env_vars, + extra_labels, persistent, cl_volume_size, tolerations, @@ -173,7 +175,7 @@ def launch( ) nodes_metrics_info = [beacon_node_metrics_info] - return cl_client_context.new_cl_client_context( + return cl_context.new_cl_context( "prysm", beacon_node_enr, beacon_service.ip_address, @@ -198,15 +200,16 @@ def get_beacon_config( beacon_image, service_name, bootnode_contexts, - el_client_context, + el_context, log_level, - bn_min_cpu, - bn_max_cpu, - bn_min_mem, - bn_max_mem, + cl_min_cpu, + cl_max_cpu, + cl_min_mem, + cl_max_mem, snooper_enabled, snooper_engine_context, extra_params, + extra_env_vars, extra_labels, persistent, 
cl_volume_size, @@ -221,8 +224,8 @@ def get_beacon_config( ) else: EXECUTION_ENGINE_ENDPOINT = "http://{0}:{1}".format( - el_client_context.ip_addr, - el_client_context.engine_rpc_port_num, + el_context.ip_addr, + el_context.engine_rpc_port_num, ) cmd = [ @@ -326,20 +329,21 @@ def get_beacon_config( image=beacon_image, ports=BEACON_NODE_USED_PORTS, cmd=cmd, + env_vars=extra_env_vars, files=files, private_ip_address_placeholder=PRIVATE_IP_ADDRESS_PLACEHOLDER, ready_conditions=cl_node_ready_conditions.get_ready_conditions( BEACON_HTTP_PORT_ID ), - min_cpu=bn_min_cpu, - max_cpu=bn_max_cpu, - min_memory=bn_min_mem, - max_memory=bn_max_mem, + min_cpu=cl_min_cpu, + max_cpu=cl_max_cpu, + min_memory=cl_min_mem, + max_memory=cl_max_mem, labels=shared_utils.label_maker( - constants.CL_CLIENT_TYPE.prysm, + constants.CL_TYPE.prysm, constants.CLIENT_TYPES.cl, beacon_image, - el_client_context.client_name, + el_context.client_name, extra_labels, ), tolerations=tolerations, diff --git a/src/cl/teku/teku_launcher.star b/src/cl/teku/teku_launcher.star index b5aecdc3e..e49082d98 100644 --- a/src/cl/teku/teku_launcher.star +++ b/src/cl/teku/teku_launcher.star @@ -1,10 +1,10 @@ shared_utils = import_module("../../shared_utils/shared_utils.star") input_parser = import_module("../../package_io/input_parser.star") -cl_client_context = import_module("../../cl/cl_client_context.star") +cl_context = import_module("../../cl/cl_context.star") node_metrics = import_module("../../node_metrics_info.star") cl_node_ready_conditions = import_module("../../cl/cl_node_ready_conditions.star") constants = import_module("../../package_io/constants.star") -validator_client_shared = import_module("../../validator_client/shared.star") +vc_shared = import_module("../../vc/shared.star") # ---------------------------------- Beacon client ------------------------------------- TEKU_BINARY_FILEPATH_IN_IMAGE = "/opt/teku/bin/teku" @@ -54,11 +54,11 @@ BEACON_USED_PORTS = { ENTRYPOINT_ARGS = ["sh", "-c"] 
VERBOSITY_LEVELS = { - constants.GLOBAL_CLIENT_LOG_LEVEL.error: "ERROR", - constants.GLOBAL_CLIENT_LOG_LEVEL.warn: "WARN", - constants.GLOBAL_CLIENT_LOG_LEVEL.info: "INFO", - constants.GLOBAL_CLIENT_LOG_LEVEL.debug: "DEBUG", - constants.GLOBAL_CLIENT_LOG_LEVEL.trace: "TRACE", + constants.GLOBAL_LOG_LEVEL.error: "ERROR", + constants.GLOBAL_LOG_LEVEL.warn: "WARN", + constants.GLOBAL_LOG_LEVEL.info: "INFO", + constants.GLOBAL_LOG_LEVEL.debug: "DEBUG", + constants.GLOBAL_LOG_LEVEL.trace: "TRACE", } @@ -70,25 +70,26 @@ def launch( participant_log_level, global_log_level, bootnode_context, - el_client_context, + el_context, node_keystore_files, - bn_min_cpu, - bn_max_cpu, - bn_min_mem, - bn_max_mem, + cl_min_cpu, + cl_max_cpu, + cl_min_mem, + cl_max_mem, snooper_enabled, snooper_engine_context, blobber_enabled, blobber_extra_params, - extra_beacon_params, - extra_beacon_labels, + extra_params, + extra_env_vars, + extra_labels, persistent, cl_volume_size, cl_tolerations, participant_tolerations, global_tolerations, node_selectors, - use_separate_validator_client, + use_separate_vc, ): beacon_service_name = "{0}".format(service_name) log_level = input_parser.get_client_log_level_or_default( @@ -99,20 +100,20 @@ def launch( cl_tolerations, participant_tolerations, global_tolerations ) - extra_params = [param for param in extra_beacon_params] + extra_params = [param for param in extra_params] network_name = shared_utils.get_network_name(launcher.network) - bn_min_cpu = int(bn_min_cpu) if int(bn_min_cpu) > 0 else BEACON_MIN_CPU - bn_max_cpu = ( - int(bn_max_cpu) - if int(bn_max_cpu) > 0 + cl_min_cpu = int(cl_min_cpu) if int(cl_min_cpu) > 0 else BEACON_MIN_CPU + cl_max_cpu = ( + int(cl_max_cpu) + if int(cl_max_cpu) > 0 else constants.RAM_CPU_OVERRIDES[network_name]["teku_max_cpu"] ) - bn_min_mem = int(bn_min_mem) if int(bn_min_mem) > 0 else BEACON_MIN_MEMORY - bn_max_mem = ( - int(bn_max_mem) - if int(bn_max_mem) > 0 + cl_min_mem = int(cl_min_mem) if int(cl_min_mem) > 0 else 
BEACON_MIN_MEMORY + cl_max_mem = ( + int(cl_max_mem) + if int(cl_max_mem) > 0 else constants.RAM_CPU_OVERRIDES[network_name]["teku_max_mem"] ) @@ -132,18 +133,19 @@ def launch( image, beacon_service_name, bootnode_context, - el_client_context, + el_context, log_level, node_keystore_files, - bn_min_cpu, - bn_max_cpu, - bn_min_mem, - bn_max_mem, + cl_min_cpu, + cl_max_cpu, + cl_min_mem, + cl_max_mem, snooper_enabled, snooper_engine_context, - extra_beacon_params, - extra_beacon_labels, - use_separate_validator_client, + extra_params, + extra_env_vars, + extra_labels, + use_separate_vc, persistent, cl_volume_size, tolerations, @@ -183,7 +185,7 @@ def launch( ) nodes_metrics_info = [beacon_node_metrics_info] - return cl_client_context.new_cl_client_context( + return cl_context.new_cl_context( "teku", beacon_node_enr, beacon_service.ip_address, @@ -210,18 +212,19 @@ def get_beacon_config( image, service_name, bootnode_contexts, - el_client_context, + el_context, log_level, node_keystore_files, - bn_min_cpu, - bn_max_cpu, - bn_min_mem, - bn_max_mem, + cl_min_cpu, + cl_max_cpu, + cl_min_mem, + cl_max_mem, snooper_enabled, snooper_engine_context, extra_params, + extra_env_vars, extra_labels, - use_separate_validator_client, + use_separate_vc, persistent, cl_volume_size, tolerations, @@ -246,8 +249,8 @@ def get_beacon_config( ) else: EXECUTION_ENGINE_ENDPOINT = "http://{0}:{1}".format( - el_client_context.ip_addr, - el_client_context.engine_rpc_port_num, + el_context.ip_addr, + el_context.engine_rpc_port_num, ) cmd = [ "--logging=" + log_level, @@ -293,14 +296,12 @@ def get_beacon_config( "--validators-proposer-default-fee-recipient=" + constants.VALIDATING_REWARDS_ACCOUNT, "--validators-graffiti=" - + constants.CL_CLIENT_TYPE.teku + + constants.CL_TYPE.teku + "-" - + el_client_context.client_name, + + el_context.client_name, "--validator-api-enabled=true", "--validator-api-host-allowlist=*", - "--validator-api-port={0}".format( - 
validator_client_shared.VALIDATOR_HTTP_PORT_NUM - ), + "--validator-api-port={0}".format(vc_shared.VALIDATOR_HTTP_PORT_NUM), "--validator-api-interface=0.0.0.0", "--validator-api-keystore-file=" + constants.KEYMANAGER_P12_MOUNT_PATH_ON_CONTAINER, @@ -382,9 +383,9 @@ def get_beacon_config( } beacon_validator_used_ports = {} beacon_validator_used_ports.update(BEACON_USED_PORTS) - if node_keystore_files != None and not use_separate_validator_client: + if node_keystore_files != None and not use_separate_vc: validator_http_port_id_spec = shared_utils.new_port_spec( - validator_client_shared.VALIDATOR_HTTP_PORT_NUM, + vc_shared.VALIDATOR_HTTP_PORT_NUM, shared_utils.TCP_PROTOCOL, shared_utils.HTTP_APPLICATION_PROTOCOL, ) @@ -407,21 +408,21 @@ def get_beacon_config( image=image, ports=beacon_validator_used_ports, cmd=cmd, - # entrypoint=ENTRYPOINT_ARGS, + env_vars=extra_env_vars, files=files, private_ip_address_placeholder=PRIVATE_IP_ADDRESS_PLACEHOLDER, ready_conditions=cl_node_ready_conditions.get_ready_conditions( BEACON_HTTP_PORT_ID ), - min_cpu=bn_min_cpu, - max_cpu=bn_max_cpu, - min_memory=bn_min_mem, - max_memory=bn_max_mem, + min_cpu=cl_min_cpu, + max_cpu=cl_max_cpu, + min_memory=cl_min_mem, + max_memory=cl_max_mem, labels=shared_utils.label_maker( - constants.CL_CLIENT_TYPE.teku, + constants.CL_TYPE.teku, constants.CLIENT_TYPES.cl, image, - el_client_context.client_name, + el_context.client_name, extra_labels, ), user=User(uid=0, gid=0), diff --git a/src/dora/dora_launcher.star b/src/dora/dora_launcher.star index 568fd230d..5e0a38b55 100644 --- a/src/dora/dora_launcher.star +++ b/src/dora/dora_launcher.star @@ -30,14 +30,14 @@ USED_PORTS = { def launch_dora( plan, config_template, - cl_client_contexts, + cl_contexts, el_cl_data_files_artifact_uuid, electra_fork_epoch, network, global_node_selectors, ): all_cl_client_info = [] - for index, client in enumerate(cl_client_contexts): + for index, client in enumerate(cl_contexts): all_cl_client_info.append( 
new_cl_client_info( client.ip_addr, client.http_port_num, client.beacon_service_name diff --git a/src/el/besu/besu_launcher.star b/src/el/besu/besu_launcher.star index 7b6900719..c0e1f4263 100644 --- a/src/el/besu/besu_launcher.star +++ b/src/el/besu/besu_launcher.star @@ -1,6 +1,6 @@ shared_utils = import_module("../../shared_utils/shared_utils.star") input_parser = import_module("../../package_io/input_parser.star") -el_client_context = import_module("../../el/el_client_context.star") +el_context = import_module("../../el/el_context.star") el_admin_node_info = import_module("../../el/el_admin_node_info.star") node_metrics = import_module("../../node_metrics_info.star") constants = import_module("../../package_io/constants.star") @@ -51,11 +51,11 @@ USED_PORTS = { ENTRYPOINT_ARGS = ["sh", "-c"] VERBOSITY_LEVELS = { - constants.GLOBAL_CLIENT_LOG_LEVEL.error: "ERROR", - constants.GLOBAL_CLIENT_LOG_LEVEL.warn: "WARN", - constants.GLOBAL_CLIENT_LOG_LEVEL.info: "INFO", - constants.GLOBAL_CLIENT_LOG_LEVEL.debug: "DEBUG", - constants.GLOBAL_CLIENT_LOG_LEVEL.trace: "TRACE", + constants.GLOBAL_LOG_LEVEL.error: "ERROR", + constants.GLOBAL_LOG_LEVEL.warn: "WARN", + constants.GLOBAL_LOG_LEVEL.info: "INFO", + constants.GLOBAL_LOG_LEVEL.debug: "DEBUG", + constants.GLOBAL_LOG_LEVEL.trace: "TRACE", } @@ -138,7 +138,7 @@ def launch( service_name, METRICS_PATH, metrics_url ) - return el_client_context.new_el_client_context( + return el_context.new_el_context( "besu", "", # besu has no ENR enode, @@ -262,7 +262,7 @@ def get_config( min_memory=el_min_mem, max_memory=el_max_mem, labels=shared_utils.label_maker( - constants.EL_CLIENT_TYPE.besu, + constants.EL_TYPE.besu, constants.CLIENT_TYPES.el, image, cl_client_name, diff --git a/src/el/el_client_context.star b/src/el/el_context.star similarity index 94% rename from src/el/el_client_context.star rename to src/el/el_context.star index c77e238af..a2dd8d77e 100644 --- a/src/el/el_client_context.star +++ b/src/el/el_context.star @@ -1,4 
+1,4 @@ -def new_el_client_context( +def new_el_context( client_name, enr, enode, diff --git a/src/el/el_launcher.star b/src/el/el_launcher.star new file mode 100644 index 000000000..ef38e83e9 --- /dev/null +++ b/src/el/el_launcher.star @@ -0,0 +1,163 @@ +constants = import_module("../package_io/constants.star") +input_parser = import_module("../package_io/input_parser.star") +shared_utils = import_module("../shared_utils/shared_utils.star") + +geth = import_module("./geth/geth_launcher.star") +besu = import_module("./besu/besu_launcher.star") +erigon = import_module("./erigon/erigon_launcher.star") +nethermind = import_module("./nethermind/nethermind_launcher.star") +reth = import_module("./reth/reth_launcher.star") +ethereumjs = import_module("./ethereumjs/ethereumjs_launcher.star") +nimbus_eth1 = import_module("./nimbus-eth1/nimbus_launcher.star") + + +def launch( + plan, + network_params, + el_cl_data, + jwt_file, + participants, + global_log_level, + global_node_selectors, + global_tolerations, + persistent, + network_id, + num_participants, +): + el_launchers = { + constants.EL_TYPE.geth: { + "launcher": geth.new_geth_launcher( + el_cl_data, + jwt_file, + network_params.network, + network_id, + network_params.capella_fork_epoch, + el_cl_data.cancun_time, + el_cl_data.prague_time, + network_params.electra_fork_epoch, + ), + "launch_method": geth.launch, + }, + constants.EL_TYPE.gethbuilder: { + "launcher": geth.new_geth_launcher( + el_cl_data, + jwt_file, + network_params.network, + network_id, + network_params.capella_fork_epoch, + el_cl_data.cancun_time, + el_cl_data.prague_time, + network_params.electra_fork_epoch, + ), + "launch_method": geth.launch, + }, + constants.EL_TYPE.besu: { + "launcher": besu.new_besu_launcher( + el_cl_data, + jwt_file, + network_params.network, + ), + "launch_method": besu.launch, + }, + constants.EL_TYPE.erigon: { + "launcher": erigon.new_erigon_launcher( + el_cl_data, + jwt_file, + network_params.network, + network_id, + 
el_cl_data.cancun_time, + ), + "launch_method": erigon.launch, + }, + constants.EL_TYPE.nethermind: { + "launcher": nethermind.new_nethermind_launcher( + el_cl_data, + jwt_file, + network_params.network, + ), + "launch_method": nethermind.launch, + }, + constants.EL_TYPE.reth: { + "launcher": reth.new_reth_launcher( + el_cl_data, + jwt_file, + network_params.network, + ), + "launch_method": reth.launch, + }, + constants.EL_TYPE.ethereumjs: { + "launcher": ethereumjs.new_ethereumjs_launcher( + el_cl_data, + jwt_file, + network_params.network, + ), + "launch_method": ethereumjs.launch, + }, + constants.EL_TYPE.nimbus: { + "launcher": nimbus_eth1.new_nimbus_launcher( + el_cl_data, + jwt_file, + network_params.network, + ), + "launch_method": nimbus_eth1.launch, + }, + } + + all_el_contexts = [] + + for index, participant in enumerate(participants): + cl_type = participant.cl_type + el_type = participant.el_type + node_selectors = input_parser.get_client_node_selectors( + participant.node_selectors, + global_node_selectors, + ) + tolerations = input_parser.get_client_tolerations( + participant.el_tolerations, participant.tolerations, global_tolerations + ) + if el_type not in el_launchers: + fail( + "Unsupported launcher '{0}', need one of '{1}'".format( + el_type, ",".join([el.name for el in el_launchers.keys()]) + ) + ) + + el_launcher, launch_method = ( + el_launchers[el_type]["launcher"], + el_launchers[el_type]["launch_method"], + ) + + # Zero-pad the index using the calculated zfill value + index_str = shared_utils.zfill_custom(index + 1, len(str(len(participants)))) + + el_service_name = "el-{0}-{1}-{2}".format(index_str, el_type, cl_type) + + el_context = launch_method( + plan, + el_launcher, + el_service_name, + participant.el_image, + participant.el_log_level, + global_log_level, + all_el_contexts, + participant.el_min_cpu, + participant.el_max_cpu, + participant.el_min_mem, + participant.el_max_mem, + participant.el_extra_params, + 
participant.el_extra_env_vars, + participant.el_extra_labels, + persistent, + participant.el_volume_size, + tolerations, + node_selectors, + ) + # Add participant el additional prometheus metrics + for metrics_info in el_context.el_metrics_info: + if metrics_info != None: + metrics_info["config"] = participant.prometheus_config + + all_el_contexts.append(el_context) + + plan.print("Successfully added {0} EL participants".format(num_participants)) + return all_el_contexts diff --git a/src/el/erigon/erigon_launcher.star b/src/el/erigon/erigon_launcher.star index 02539e8d0..3c5e45fee 100644 --- a/src/el/erigon/erigon_launcher.star +++ b/src/el/erigon/erigon_launcher.star @@ -1,7 +1,7 @@ shared_utils = import_module("../../shared_utils/shared_utils.star") input_parser = import_module("../../package_io/input_parser.star") el_admin_node_info = import_module("../../el/el_admin_node_info.star") -el_client_context = import_module("../../el/el_client_context.star") +el_context = import_module("../../el/el_context.star") node_metrics = import_module("../../node_metrics_info.star") constants = import_module("../../package_io/constants.star") @@ -51,11 +51,11 @@ USED_PORTS = { ENTRYPOINT_ARGS = ["sh", "-c"] VERBOSITY_LEVELS = { - constants.GLOBAL_CLIENT_LOG_LEVEL.error: "1", - constants.GLOBAL_CLIENT_LOG_LEVEL.warn: "2", - constants.GLOBAL_CLIENT_LOG_LEVEL.info: "3", - constants.GLOBAL_CLIENT_LOG_LEVEL.debug: "4", - constants.GLOBAL_CLIENT_LOG_LEVEL.trace: "5", + constants.GLOBAL_LOG_LEVEL.error: "1", + constants.GLOBAL_LOG_LEVEL.warn: "2", + constants.GLOBAL_LOG_LEVEL.info: "3", + constants.GLOBAL_LOG_LEVEL.debug: "4", + constants.GLOBAL_LOG_LEVEL.trace: "5", } @@ -142,7 +142,7 @@ def launch( service_name, METRICS_PATH, metrics_url ) - return el_client_context.new_el_client_context( + return el_context.new_el_context( "erigon", enr, enode, @@ -284,7 +284,7 @@ def get_config( max_memory=el_max_mem, env_vars=extra_env_vars, labels=shared_utils.label_maker( - 
constants.EL_CLIENT_TYPE.erigon, + constants.EL_TYPE.erigon, constants.CLIENT_TYPES.el, image, cl_client_name, diff --git a/src/el/ethereumjs/ethereumjs_launcher.star b/src/el/ethereumjs/ethereumjs_launcher.star index d15e17717..be0ae66da 100644 --- a/src/el/ethereumjs/ethereumjs_launcher.star +++ b/src/el/ethereumjs/ethereumjs_launcher.star @@ -1,6 +1,6 @@ shared_utils = import_module("../../shared_utils/shared_utils.star") input_parser = import_module("../..//package_io/input_parser.star") -el_client_context = import_module("../../el/el_client_context.star") +el_context = import_module("../../el/el_context.star") el_admin_node_info = import_module("../../el/el_admin_node_info.star") node_metrics = import_module("../../node_metrics_info.star") @@ -53,11 +53,11 @@ USED_PORTS = { ENTRYPOINT_ARGS = [] VERBOSITY_LEVELS = { - constants.GLOBAL_CLIENT_LOG_LEVEL.error: "error", - constants.GLOBAL_CLIENT_LOG_LEVEL.warn: "warn", - constants.GLOBAL_CLIENT_LOG_LEVEL.info: "info", - constants.GLOBAL_CLIENT_LOG_LEVEL.debug: "debug", - constants.GLOBAL_CLIENT_LOG_LEVEL.trace: "trace", + constants.GLOBAL_LOG_LEVEL.error: "error", + constants.GLOBAL_LOG_LEVEL.warn: "warn", + constants.GLOBAL_LOG_LEVEL.info: "info", + constants.GLOBAL_LOG_LEVEL.debug: "debug", + constants.GLOBAL_LOG_LEVEL.trace: "trace", } @@ -139,7 +139,7 @@ def launch( # metrics_url = "http://{0}:{1}".format(service.ip_address, METRICS_PORT_NUM) ethjs_metrics_info = None - return el_client_context.new_el_client_context( + return el_context.new_el_context( "ethereumjs", "", # ethereumjs has no enr enode, @@ -251,7 +251,7 @@ def get_config( max_memory=el_max_mem, env_vars=extra_env_vars, labels=shared_utils.label_maker( - constants.EL_CLIENT_TYPE.ethereumjs, + constants.EL_TYPE.ethereumjs, constants.CLIENT_TYPES.el, image, cl_client_name, diff --git a/src/el/geth/geth_launcher.star b/src/el/geth/geth_launcher.star index e2a61c044..0973a6cac 100644 --- a/src/el/geth/geth_launcher.star +++ 
b/src/el/geth/geth_launcher.star @@ -1,6 +1,6 @@ shared_utils = import_module("../../shared_utils/shared_utils.star") input_parser = import_module("../../package_io/input_parser.star") -el_client_context = import_module("../../el/el_client_context.star") +el_context = import_module("../../el/el_context.star") el_admin_node_info = import_module("../../el/el_admin_node_info.star") genesis_constants = import_module( "../../prelaunch_data_generator/genesis_constants/genesis_constants.star" @@ -58,11 +58,11 @@ USED_PORTS = { ENTRYPOINT_ARGS = ["sh", "-c"] VERBOSITY_LEVELS = { - constants.GLOBAL_CLIENT_LOG_LEVEL.error: "1", - constants.GLOBAL_CLIENT_LOG_LEVEL.warn: "2", - constants.GLOBAL_CLIENT_LOG_LEVEL.info: "3", - constants.GLOBAL_CLIENT_LOG_LEVEL.debug: "4", - constants.GLOBAL_CLIENT_LOG_LEVEL.trace: "5", + constants.GLOBAL_LOG_LEVEL.error: "1", + constants.GLOBAL_LOG_LEVEL.warn: "2", + constants.GLOBAL_LOG_LEVEL.info: "3", + constants.GLOBAL_LOG_LEVEL.debug: "4", + constants.GLOBAL_LOG_LEVEL.trace: "5", } BUILDER_IMAGE_STR = "builder" @@ -156,7 +156,7 @@ def launch( service_name, METRICS_PATH, metrics_url ) - return el_client_context.new_el_client_context( + return el_context.new_el_context( "geth", enr, enode, @@ -370,7 +370,7 @@ def get_config( max_memory=el_max_mem, env_vars=extra_env_vars, labels=shared_utils.label_maker( - constants.EL_CLIENT_TYPE.geth, + constants.EL_TYPE.geth, constants.CLIENT_TYPES.el, image, cl_client_name, diff --git a/src/el/nethermind/nethermind_launcher.star b/src/el/nethermind/nethermind_launcher.star index 9a90592ea..38d7d48a0 100644 --- a/src/el/nethermind/nethermind_launcher.star +++ b/src/el/nethermind/nethermind_launcher.star @@ -1,6 +1,6 @@ shared_utils = import_module("../../shared_utils/shared_utils.star") input_parser = import_module("../../package_io/input_parser.star") -el_client_context = import_module("../../el/el_client_context.star") +el_context = import_module("../../el/el_context.star") el_admin_node_info = 
import_module("../../el/el_admin_node_info.star") node_metrics = import_module("../../node_metrics_info.star") @@ -49,11 +49,11 @@ USED_PORTS = { } VERBOSITY_LEVELS = { - constants.GLOBAL_CLIENT_LOG_LEVEL.error: "ERROR", - constants.GLOBAL_CLIENT_LOG_LEVEL.warn: "WARN", - constants.GLOBAL_CLIENT_LOG_LEVEL.info: "INFO", - constants.GLOBAL_CLIENT_LOG_LEVEL.debug: "DEBUG", - constants.GLOBAL_CLIENT_LOG_LEVEL.trace: "TRACE", + constants.GLOBAL_LOG_LEVEL.error: "ERROR", + constants.GLOBAL_LOG_LEVEL.warn: "WARN", + constants.GLOBAL_LOG_LEVEL.info: "INFO", + constants.GLOBAL_LOG_LEVEL.debug: "DEBUG", + constants.GLOBAL_LOG_LEVEL.trace: "TRACE", } @@ -136,7 +136,7 @@ def launch( service_name, METRICS_PATH, metrics_url ) - return el_client_context.new_el_client_context( + return el_context.new_el_context( "nethermind", "", # nethermind has no ENR in the eth2-merge-kurtosis-module either # Nethermind node info endpoint doesn't return ENR field https://docs.nethermind.io/nethermind/ethereum-client/json-rpc/admin @@ -259,7 +259,7 @@ def get_config( max_memory=el_max_mem, env_vars=extra_env_vars, labels=shared_utils.label_maker( - constants.EL_CLIENT_TYPE.nethermind, + constants.EL_TYPE.nethermind, constants.CLIENT_TYPES.el, image, cl_client_name, diff --git a/src/el/nimbus-eth1/nimbus_launcher.star b/src/el/nimbus-eth1/nimbus_launcher.star index 4b701e0fb..6652a2a07 100644 --- a/src/el/nimbus-eth1/nimbus_launcher.star +++ b/src/el/nimbus-eth1/nimbus_launcher.star @@ -1,6 +1,6 @@ shared_utils = import_module("../../shared_utils/shared_utils.star") input_parser = import_module("../../package_io/input_parser.star") -el_client_context = import_module("../../el/el_client_context.star") +el_context = import_module("../../el/el_context.star") el_admin_node_info = import_module("../../el/el_admin_node_info.star") node_metrics = import_module("../../node_metrics_info.star") constants = import_module("../../package_io/constants.star") @@ -53,11 +53,11 @@ USED_PORTS = { } 
VERBOSITY_LEVELS = { - constants.GLOBAL_CLIENT_LOG_LEVEL.error: "ERROR", - constants.GLOBAL_CLIENT_LOG_LEVEL.warn: "WARN", - constants.GLOBAL_CLIENT_LOG_LEVEL.info: "INFO", - constants.GLOBAL_CLIENT_LOG_LEVEL.debug: "DEBUG", - constants.GLOBAL_CLIENT_LOG_LEVEL.trace: "TRACE", + constants.GLOBAL_LOG_LEVEL.error: "ERROR", + constants.GLOBAL_LOG_LEVEL.warn: "WARN", + constants.GLOBAL_LOG_LEVEL.info: "INFO", + constants.GLOBAL_LOG_LEVEL.debug: "DEBUG", + constants.GLOBAL_LOG_LEVEL.trace: "TRACE", } @@ -141,7 +141,7 @@ def launch( service_name, METRICS_PATH, metric_url ) - return el_client_context.new_el_client_context( + return el_context.new_el_context( "nimbus", "", # nimbus has no enr enode, @@ -252,7 +252,7 @@ def get_config( max_memory=el_max_mem, env_vars=extra_env_vars, labels=shared_utils.label_maker( - constants.EL_CLIENT_TYPE.nimbus, + constants.EL_TYPE.nimbus, constants.CLIENT_TYPES.el, image, cl_client_name, diff --git a/src/el/reth/reth_launcher.star b/src/el/reth/reth_launcher.star index c391aea86..5bab6ff67 100644 --- a/src/el/reth/reth_launcher.star +++ b/src/el/reth/reth_launcher.star @@ -1,6 +1,6 @@ shared_utils = import_module("../../shared_utils/shared_utils.star") input_parser = import_module("../../package_io/input_parser.star") -el_client_context = import_module("../../el/el_client_context.star") +el_context = import_module("../../el/el_context.star") el_admin_node_info = import_module("../../el/el_admin_node_info.star") node_metrics = import_module("../../node_metrics_info.star") constants = import_module("../../package_io/constants.star") @@ -51,11 +51,11 @@ USED_PORTS = { ENTRYPOINT_ARGS = ["sh", "-c"] VERBOSITY_LEVELS = { - constants.GLOBAL_CLIENT_LOG_LEVEL.error: "v", - constants.GLOBAL_CLIENT_LOG_LEVEL.warn: "vv", - constants.GLOBAL_CLIENT_LOG_LEVEL.info: "vvv", - constants.GLOBAL_CLIENT_LOG_LEVEL.debug: "vvvv", - constants.GLOBAL_CLIENT_LOG_LEVEL.trace: "vvvvv", + constants.GLOBAL_LOG_LEVEL.error: "v", + constants.GLOBAL_LOG_LEVEL.warn: 
"vv", + constants.GLOBAL_LOG_LEVEL.info: "vvv", + constants.GLOBAL_LOG_LEVEL.debug: "vvvv", + constants.GLOBAL_LOG_LEVEL.trace: "vvvvv", } @@ -139,7 +139,7 @@ def launch( service_name, METRICS_PATH, metric_url ) - return el_client_context.new_el_client_context( + return el_context.new_el_context( "reth", "", # reth has no enr enode, @@ -265,7 +265,7 @@ def get_config( max_memory=el_max_mem, env_vars=extra_env_vars, labels=shared_utils.label_maker( - constants.EL_CLIENT_TYPE.reth, + constants.EL_TYPE.reth, constants.CLIENT_TYPES.el, image, cl_client_name, diff --git a/src/el_forkmon/el_forkmon_launcher.star b/src/el_forkmon/el_forkmon_launcher.star index a03df2999..ef0e93dce 100644 --- a/src/el_forkmon/el_forkmon_launcher.star +++ b/src/el_forkmon/el_forkmon_launcher.star @@ -29,11 +29,11 @@ MAX_MEMORY = 256 def launch_el_forkmon( plan, config_template, - el_client_contexts, + el_contexts, global_node_selectors, ): all_el_client_info = [] - for client in el_client_contexts: + for client in el_contexts: client_info = new_el_client_info( client.ip_addr, client.rpc_port_num, client.service_name ) diff --git a/src/ethereum_metrics_exporter/ethereum_metrics_exporter_launcher.star b/src/ethereum_metrics_exporter/ethereum_metrics_exporter_launcher.star index ba5321cc7..1c24162c9 100644 --- a/src/ethereum_metrics_exporter/ethereum_metrics_exporter_launcher.star +++ b/src/ethereum_metrics_exporter/ethereum_metrics_exporter_launcher.star @@ -20,8 +20,8 @@ def launch( plan, pair_name, ethereum_metrics_exporter_service_name, - el_client_context, - cl_client_context, + el_context, + cl_context, node_selectors, ): exporter_service = plan.add_service( @@ -40,13 +40,13 @@ def launch( str(METRICS_PORT_NUMBER), "--consensus-url", "http://{}:{}".format( - cl_client_context.ip_addr, - cl_client_context.http_port_num, + cl_context.ip_addr, + cl_context.http_port_num, ), "--execution-url", "http://{}:{}".format( - el_client_context.ip_addr, - el_client_context.rpc_port_num, + 
el_context.ip_addr, + el_context.rpc_port_num, ), ], min_cpu=MIN_CPU, @@ -61,6 +61,6 @@ def launch( pair_name, exporter_service.ip_address, METRICS_PORT_NUMBER, - cl_client_context.client_name, - el_client_context.client_name, + cl_context.client_name, + el_context.client_name, ) diff --git a/src/full_beaconchain/full_beaconchain_launcher.star b/src/full_beaconchain/full_beaconchain_launcher.star index f0f87d37d..1da4c260a 100644 --- a/src/full_beaconchain/full_beaconchain_launcher.star +++ b/src/full_beaconchain/full_beaconchain_launcher.star @@ -94,8 +94,8 @@ FRONTEND_MAX_MEMORY = 2048 def launch_full_beacon( plan, config_template, - cl_client_contexts, - el_client_contexts, + cl_contexts, + el_contexts, persistent, global_node_selectors, ): @@ -143,12 +143,12 @@ def launch_full_beacon( ) el_uri = "http://{0}:{1}".format( - el_client_contexts[0].ip_addr, el_client_contexts[0].rpc_port_num + el_contexts[0].ip_addr, el_contexts[0].rpc_port_num ) redis_url = "{}:{}".format(redis_output.hostname, redis_output.port_number) template_data = new_config_template_data( - cl_client_contexts[0], + cl_contexts[0], el_uri, little_bigtable.ip_address, LITTLE_BIGTABLE_PORT_NUMBER, diff --git a/src/goomy_blob/goomy_blob.star b/src/goomy_blob/goomy_blob.star index b6f55c6f2..8872d21c8 100644 --- a/src/goomy_blob/goomy_blob.star +++ b/src/goomy_blob/goomy_blob.star @@ -13,16 +13,16 @@ MAX_MEMORY = 300 def launch_goomy_blob( plan, prefunded_addresses, - el_client_contexts, - cl_client_context, + el_contexts, + cl_context, seconds_per_slot, goomy_blob_params, global_node_selectors, ): config = get_config( prefunded_addresses, - el_client_contexts, - cl_client_context, + el_contexts, + cl_context, seconds_per_slot, goomy_blob_params.goomy_blob_args, global_node_selectors, @@ -32,14 +32,14 @@ def launch_goomy_blob( def get_config( prefunded_addresses, - el_client_contexts, - cl_client_context, + el_contexts, + cl_context, seconds_per_slot, goomy_blob_args, node_selectors, ): 
goomy_cli_args = [] - for index, client in enumerate(el_client_contexts): + for index, client in enumerate(el_contexts): goomy_cli_args.append( "-h http://{0}:{1}".format( client.ip_addr, @@ -61,11 +61,11 @@ def get_config( "apt-get update", "apt-get install -y curl jq", 'current_epoch=$(curl -s http://{0}:{1}/eth/v2/beacon/blocks/head | jq -r ".version")'.format( - cl_client_context.ip_addr, cl_client_context.http_port_num + cl_context.ip_addr, cl_context.http_port_num ), 'while [ $current_epoch != "deneb" ]; do echo "waiting for deneb, current epoch is $current_epoch"; current_epoch=$(curl -s http://{0}:{1}/eth/v2/beacon/blocks/head | jq -r ".version"); sleep {2}; done'.format( - cl_client_context.ip_addr, - cl_client_context.http_port_num, + cl_context.ip_addr, + cl_context.http_port_num, seconds_per_slot, ), 'echo "sleep is over, starting to send blob transactions"', diff --git a/src/mev/mock_mev/mock_mev_launcher.star b/src/mev/mock_mev/mock_mev_launcher.star index 226831673..e8c11c43e 100644 --- a/src/mev/mock_mev/mock_mev_launcher.star +++ b/src/mev/mock_mev/mock_mev_launcher.star @@ -15,7 +15,7 @@ def launch_mock_mev( el_uri, beacon_uri, jwt_secret, - global_client_log_level, + global_log_level, global_node_selectors, ): mock_builder = plan.add_service( @@ -32,7 +32,7 @@ def launch_mock_mev( "--el={0}".format(el_uri), "--cl={0}".format(beacon_uri), "--bid-multiplier=5", # TODO: This could be customizable - "--log-level={0}".format(global_client_log_level), + "--log-level={0}".format(global_log_level), ], min_cpu=MIN_CPU, max_cpu=MAX_CPU, diff --git a/src/network_launcher/devnet.star b/src/network_launcher/devnet.star new file mode 100644 index 000000000..e592d6b6a --- /dev/null +++ b/src/network_launcher/devnet.star @@ -0,0 +1,34 @@ +shared_utils = import_module("../shared_utils/shared_utils.star") +el_cl_genesis_data = import_module( + "../prelaunch_data_generator/el_cl_genesis/el_cl_genesis_data.star" +) + + +def launch(plan, network, cancun_time, 
prague_time): + # We are running a devnet + url = shared_utils.calculate_devnet_url(network) + el_cl_genesis_uuid = plan.upload_files( + src=url, + name="el_cl_genesis", + ) + el_cl_genesis_data_uuid = plan.run_sh( + run="mkdir -p /network-configs/ && mv /opt/* /network-configs/", + store=[StoreSpec(src="/network-configs/", name="el_cl_genesis_data")], + files={"/opt": el_cl_genesis_uuid}, + ) + genesis_validators_root = read_file(url + "/genesis_validators_root.txt") + + el_cl_data = el_cl_genesis_data.new_el_cl_genesis_data( + el_cl_genesis_data_uuid.files_artifacts[0], + genesis_validators_root, + cancun_time, + prague_time, + ) + final_genesis_timestamp = shared_utils.read_genesis_timestamp_from_config( + plan, el_cl_genesis_data_uuid.files_artifacts[0] + ) + network_id = shared_utils.read_genesis_network_id_from_config( + plan, el_cl_genesis_data_uuid.files_artifacts[0] + ) + validator_data = None + return el_cl_data, final_genesis_timestamp, network_id, validator_data diff --git a/src/network_launcher/ephemery.star b/src/network_launcher/ephemery.star new file mode 100644 index 000000000..bc94db60f --- /dev/null +++ b/src/network_launcher/ephemery.star @@ -0,0 +1,30 @@ +shared_utils = import_module("../shared_utils/shared_utils.star") +el_cl_genesis_data = import_module( + "../prelaunch_data_generator/el_cl_genesis/el_cl_genesis_data.star" +) + + +def launch(plan, cancun_time, prague_time): + el_cl_genesis_data_uuid = plan.run_sh( + run="mkdir -p /network-configs/ && \ + curl -o latest.tar.gz https://ephemery.dev/latest.tar.gz && \ + tar xvzf latest.tar.gz -C /network-configs && \ + cat /network-configs/genesis_validators_root.txt", + image="badouralix/curl-jq", + store=[StoreSpec(src="/network-configs/", name="el_cl_genesis_data")], + ) + genesis_validators_root = el_cl_genesis_data_uuid.output + el_cl_data = el_cl_genesis_data.new_el_cl_genesis_data( + el_cl_genesis_data_uuid.files_artifacts[0], + genesis_validators_root, + cancun_time, + prague_time, + ) + 
final_genesis_timestamp = shared_utils.read_genesis_timestamp_from_config( + plan, el_cl_genesis_data_uuid.files_artifacts[0] + ) + network_id = shared_utils.read_genesis_network_id_from_config( + plan, el_cl_genesis_data_uuid.files_artifacts[0] + ) + validator_data = None + return el_cl_data, final_genesis_timestamp, network_id, validator_data diff --git a/src/network_launcher/kurtosis.star b/src/network_launcher/kurtosis.star new file mode 100644 index 000000000..94ca27f50 --- /dev/null +++ b/src/network_launcher/kurtosis.star @@ -0,0 +1,95 @@ +shared_utils = import_module("../shared_utils/shared_utils.star") +validator_keystores = import_module( + "../prelaunch_data_generator/validator_keystores/validator_keystore_generator.star" +) + +constants = import_module("../package_io/constants.star") + +# The time that the CL genesis generation step takes to complete, based off what we've seen +# This is in seconds +CL_GENESIS_DATA_GENERATION_TIME = 5 + +# Each CL node takes about this time to start up and start processing blocks, so when we create the CL +# genesis data we need to set the genesis timestamp in the future so that nodes don't miss important slots +# (e.g. 
Altair fork) +# TODO(old) Make this client-specific (currently this is Nimbus) +# This is in seconds +CL_NODE_STARTUP_TIME = 5 + + +def launch(plan, network_params, participants, parallel_keystore_generation): + num_participants = len(participants) + plan.print("Generating cl validator key stores") + validator_data = None + if not parallel_keystore_generation: + validator_data = validator_keystores.generate_validator_keystores( + plan, network_params.preregistered_validator_keys_mnemonic, participants + ) + else: + validator_data = validator_keystores.generate_valdiator_keystores_in_parallel( + plan, + network_params.preregistered_validator_keys_mnemonic, + participants, + ) + + plan.print(json.indent(json.encode(validator_data))) + + # We need to send the same genesis time to both the EL and the CL to ensure that timestamp based forking works as expected + final_genesis_timestamp = shared_utils.get_final_genesis_timestamp( + plan, + network_params.genesis_delay + + CL_GENESIS_DATA_GENERATION_TIME + + num_participants * CL_NODE_STARTUP_TIME, + ) + + # if preregistered validator count is 0 (default) then calculate the total number of validators from the participants + total_number_of_validator_keys = network_params.preregistered_validator_count + + if network_params.preregistered_validator_count == 0: + for participant in participants: + total_number_of_validator_keys += participant.validator_count + + plan.print("Generating EL CL data") + + # we are running bellatrix genesis (deprecated) - will be removed in the future + if ( + network_params.capella_fork_epoch > 0 + and network_params.electra_fork_epoch == None + ): + ethereum_genesis_generator_image = ( + constants.ETHEREUM_GENESIS_GENERATOR.bellatrix_genesis + ) + # we are running capella genesis - default behavior + elif ( + network_params.capella_fork_epoch == 0 + and network_params.electra_fork_epoch == None + and network_params.deneb_fork_epoch > 0 + ): + ethereum_genesis_generator_image = ( + 
constants.ETHEREUM_GENESIS_GENERATOR.capella_genesis + ) + # we are running deneb genesis - experimental, soon to become default + elif network_params.deneb_fork_epoch == 0: + ethereum_genesis_generator_image = ( + constants.ETHEREUM_GENESIS_GENERATOR.deneb_genesis + ) + # we are running electra - experimental + elif network_params.electra_fork_epoch != None: + if network_params.electra_fork_epoch == 0: + ethereum_genesis_generator_image = ( + constants.ETHEREUM_GENESIS_GENERATOR.verkle_genesis + ) + else: + ethereum_genesis_generator_image = ( + constants.ETHEREUM_GENESIS_GENERATOR.verkle_support_genesis + ) + else: + fail( + "Unsupported fork epoch configuration, need to define either capella_fork_epoch, deneb_fork_epoch or electra_fork_epoch" + ) + return ( + total_number_of_validator_keys, + ethereum_genesis_generator_image, + final_genesis_timestamp, + validator_data, + ) diff --git a/src/network_launcher/public_network.star b/src/network_launcher/public_network.star new file mode 100644 index 000000000..6e204cd7a --- /dev/null +++ b/src/network_launcher/public_network.star @@ -0,0 +1,23 @@ +shared_utils = import_module("../shared_utils/shared_utils.star") +el_cl_genesis_data = import_module( + "../prelaunch_data_generator/el_cl_genesis/el_cl_genesis_data.star" +) +constants = import_module("../package_io/constants.star") + + +def launch(plan, network, cancun_time, prague_time): + # We are running a public network + dummy_genesis_data = plan.run_sh( + run="mkdir /network-configs", + store=[StoreSpec(src="/network-configs/", name="el_cl_genesis_data")], + ) + el_cl_data = el_cl_genesis_data.new_el_cl_genesis_data( + dummy_genesis_data.files_artifacts[0], + constants.GENESIS_VALIDATORS_ROOT[network], + cancun_time, + prague_time, + ) + final_genesis_timestamp = constants.GENESIS_TIME[network] + network_id = constants.NETWORK_ID[network] + validator_data = None + return el_cl_data, final_genesis_timestamp, network_id, validator_data diff --git 
a/src/network_launcher/shadowfork.star b/src/network_launcher/shadowfork.star new file mode 100644 index 000000000..bde9c3ace --- /dev/null +++ b/src/network_launcher/shadowfork.star @@ -0,0 +1,107 @@ +shared_utils = import_module("../shared_utils/shared_utils.star") +constants = import_module("../package_io/constants.star") +input_parser = import_module("../package_io/input_parser.star") + + +def shadowfork_prep( + plan, + network_params, + shadowfork_block, + participants, + global_tolerations, + global_node_selectors, +): + base_network = shared_utils.get_network_name(network_params.network) + # overload the network name to remove the shadowfork suffix + if constants.NETWORK_NAME.ephemery in base_network: + chain_id = plan.run_sh( + run="curl -s https://ephemery.dev/latest/config.yaml | yq .DEPOSIT_CHAIN_ID | tr -d '\n'", + image="linuxserver/yq", + ) + network_id = chain_id.output + else: + network_id = constants.NETWORK_ID[ + base_network + ] # overload the network id to match the network name + latest_block = plan.run_sh( # fetch the latest block + run="mkdir -p /shadowfork && \ + curl -o /shadowfork/latest_block.json " + + network_params.network_sync_base_url + + base_network + + "/geth/" + + shadowfork_block + + "/_snapshot_eth_getBlockByNumber.json", + image="badouralix/curl-jq", + store=[StoreSpec(src="/shadowfork", name="latest_blocks")], + ) + + for index, participant in enumerate(participants): + tolerations = input_parser.get_client_tolerations( + participant.el_tolerations, + participant.tolerations, + global_tolerations, + ) + node_selectors = input_parser.get_client_node_selectors( + participant.node_selectors, + global_node_selectors, + ) + + cl_type = participant.cl_type + el_type = participant.el_type + + # Zero-pad the index using the calculated zfill value + index_str = shared_utils.zfill_custom(index + 1, len(str(len(participants)))) + + el_service_name = "el-{0}-{1}-{2}".format(index_str, el_type, cl_type) + shadowfork_data = 
plan.add_service( + name="shadowfork-{0}".format(el_service_name), + config=ServiceConfig( + image="alpine:3.19.1", + cmd=[ + "apk add --no-cache curl tar zstd && curl -s -L " + + network_params.network_sync_base_url + + base_network + + "/" + + el_type + + "/" + + shadowfork_block + + "/snapshot.tar.zst" + + " | tar -I zstd -xvf - -C /data/" + + el_type + + "/execution-data" + + " && touch /tmp/finished" + + " && tail -f /dev/null" + ], + entrypoint=["/bin/sh", "-c"], + files={ + "/data/" + + el_type + + "/execution-data": Directory( + persistent_key="data-{0}".format(el_service_name), + size=constants.VOLUME_SIZE[base_network][ + el_type + "_volume_size" + ], + ), + }, + tolerations=tolerations, + node_selectors=node_selectors, + ), + ) + for index, participant in enumerate(participants): + cl_type = participant.cl_type + el_type = participant.el_type + + # Zero-pad the index using the calculated zfill value + index_str = shared_utils.zfill_custom(index + 1, len(str(len(participants)))) + + el_service_name = "el-{0}-{1}-{2}".format(index_str, el_type, cl_type) + plan.wait( + service_name="shadowfork-{0}".format(el_service_name), + recipe=ExecRecipe(command=["cat", "/tmp/finished"]), + field="code", + assertion="==", + target_value=0, + interval="1s", + timeout="6h", # 6 hours should be enough for the biggest network + ) + return latest_block, network_id diff --git a/src/package_io/constants.star b/src/package_io/constants.star index 0368abe4d..28ccd2f17 100644 --- a/src/package_io/constants.star +++ b/src/package_io/constants.star @@ -1,4 +1,4 @@ -EL_CLIENT_TYPE = struct( +EL_TYPE = struct( gethbuilder="geth-builder", geth="geth", erigon="erigon", @@ -9,7 +9,7 @@ EL_CLIENT_TYPE = struct( nimbus="nimbus", ) -CL_CLIENT_TYPE = struct( +CL_TYPE = struct( lighthouse="lighthouse", teku="teku", nimbus="nimbus", @@ -17,7 +17,7 @@ CL_CLIENT_TYPE = struct( lodestar="lodestar", ) -VC_CLIENT_TYPE = struct( +VC_TYPE = struct( lighthouse="lighthouse", lodestar="lodestar", 
nimbus="nimbus", @@ -25,7 +25,7 @@ VC_CLIENT_TYPE = struct( teku="teku", ) -GLOBAL_CLIENT_LOG_LEVEL = struct( +GLOBAL_LOG_LEVEL = struct( info="info", error="error", warn="warn", @@ -410,11 +410,11 @@ RAM_CPU_OVERRIDES = { "prysm_max_cpu": 1000, # 1 core "lighthouse_max_mem": 1024, # 1GB "lighthouse_max_cpu": 1000, # 1 core - "teku_max_mem": 1024, # 1GB + "teku_max_mem": 2048, # 2GB "teku_max_cpu": 1000, # 1 core "nimbus_max_mem": 1024, # 1GB "nimbus_max_cpu": 1000, # 1 core - "lodestar_max_mem": 1024, # 1GB + "lodestar_max_mem": 2048, # 2GB "lodestar_max_cpu": 1000, # 1 core }, } diff --git a/src/package_io/input_parser.star b/src/package_io/input_parser.star index 585fc5db9..234c953c9 100644 --- a/src/package_io/input_parser.star +++ b/src/package_io/input_parser.star @@ -163,46 +163,46 @@ def input_parser(plan, input_args): return struct( participants=[ struct( - el_client_type=participant["el_client_type"], - el_client_image=participant["el_client_image"], - el_client_log_level=participant["el_client_log_level"], - el_client_volume_size=participant["el_client_volume_size"], + el_type=participant["el_type"], + el_image=participant["el_image"], + el_log_level=participant["el_log_level"], + el_volume_size=participant["el_volume_size"], el_extra_params=participant["el_extra_params"], el_extra_env_vars=participant["el_extra_env_vars"], el_extra_labels=participant["el_extra_labels"], el_tolerations=participant["el_tolerations"], - cl_client_type=participant["cl_client_type"], - cl_client_image=participant["cl_client_image"], - cl_client_log_level=participant["cl_client_log_level"], - cl_client_volume_size=participant["cl_client_volume_size"], + cl_type=participant["cl_type"], + cl_image=participant["cl_image"], + cl_log_level=participant["cl_log_level"], + cl_volume_size=participant["cl_volume_size"], + cl_extra_env_vars=participant["cl_extra_env_vars"], cl_tolerations=participant["cl_tolerations"], - use_separate_validator_client=participant[ - 
"use_separate_validator_client" - ], - validator_client_type=participant["validator_client_type"], - validator_client_image=participant["validator_client_image"], - validator_client_log_level=participant["validator_client_log_level"], - validator_tolerations=participant["validator_tolerations"], - tolerations=participant["tolerations"], - node_selectors=participant["node_selectors"], - beacon_extra_params=participant["beacon_extra_params"], - beacon_extra_labels=participant["beacon_extra_labels"], - validator_extra_params=participant["validator_extra_params"], - validator_extra_labels=participant["validator_extra_labels"], + use_separate_vc=participant["use_separate_vc"], + vc_type=participant["vc_type"], + vc_image=participant["vc_image"], + vc_log_level=participant["vc_log_level"], + vc_tolerations=participant["vc_tolerations"], + cl_extra_params=participant["cl_extra_params"], + cl_extra_labels=participant["cl_extra_labels"], + vc_extra_params=participant["vc_extra_params"], + vc_extra_env_vars=participant["vc_extra_env_vars"], + vc_extra_labels=participant["vc_extra_labels"], builder_network_params=participant["builder_network_params"], el_min_cpu=participant["el_min_cpu"], el_max_cpu=participant["el_max_cpu"], el_min_mem=participant["el_min_mem"], el_max_mem=participant["el_max_mem"], - bn_min_cpu=participant["bn_min_cpu"], - bn_max_cpu=participant["bn_max_cpu"], - bn_min_mem=participant["bn_min_mem"], - bn_max_mem=participant["bn_max_mem"], - v_min_cpu=participant["v_min_cpu"], - v_max_cpu=participant["v_max_cpu"], - v_min_mem=participant["v_min_mem"], - v_max_mem=participant["v_max_mem"], + cl_min_cpu=participant["cl_min_cpu"], + cl_max_cpu=participant["cl_max_cpu"], + cl_min_mem=participant["cl_min_mem"], + cl_max_mem=participant["cl_max_mem"], + vc_min_cpu=participant["vc_min_cpu"], + vc_max_cpu=participant["vc_max_cpu"], + vc_min_mem=participant["vc_min_mem"], + vc_max_mem=participant["vc_max_mem"], validator_count=participant["validator_count"], + 
tolerations=participant["tolerations"], + node_selectors=participant["node_selectors"], snooper_enabled=participant["snooper_enabled"], count=participant["count"], ethereum_metrics_exporter_enabled=participant[ @@ -296,7 +296,7 @@ def input_parser(plan, input_args): ), additional_services=result["additional_services"], wait_for_finalization=result["wait_for_finalization"], - global_client_log_level=result["global_client_log_level"], + global_log_level=result["global_log_level"], mev_type=result["mev_type"], snooper_enabled=result["snooper_enabled"], ethereum_metrics_exporter_enabled=result["ethereum_metrics_exporter_enabled"], @@ -344,62 +344,62 @@ def parse_network_params(input_args): actual_num_validators = 0 # validation of the above defaults for index, participant in enumerate(result["participants"]): - el_client_type = participant["el_client_type"] - cl_client_type = participant["cl_client_type"] - validator_client_type = participant["validator_client_type"] + el_type = participant["el_type"] + cl_type = participant["cl_type"] + vc_type = participant["vc_type"] - if cl_client_type in (NIMBUS_NODE_NAME) and ( + if cl_type in (NIMBUS_NODE_NAME) and ( result["network_params"]["seconds_per_slot"] < 12 ): fail("nimbus can't be run with slot times below 12 seconds") - el_image = participant["el_client_image"] + el_image = participant["el_image"] if el_image == "": - default_image = DEFAULT_EL_IMAGES.get(el_client_type, "") + default_image = DEFAULT_EL_IMAGES.get(el_type, "") if default_image == "": fail( "{0} received an empty image name and we don't have a default for it".format( - el_client_type + el_type ) ) - participant["el_client_image"] = default_image + participant["el_image"] = default_image - cl_image = participant["cl_client_image"] + cl_image = participant["cl_image"] if cl_image == "": - default_image = DEFAULT_CL_IMAGES.get(cl_client_type, "") + default_image = DEFAULT_CL_IMAGES.get(cl_type, "") if default_image == "": fail( "{0} received an empty 
image name and we don't have a default for it".format( - cl_client_type + cl_type ) ) - participant["cl_client_image"] = default_image + participant["cl_image"] = default_image - if participant["use_separate_validator_client"] == None: + if participant["use_separate_vc"] == None: # Default to false for CL clients that can run validator clients # in the same process. - if cl_client_type in ( - constants.CL_CLIENT_TYPE.nimbus, - constants.CL_CLIENT_TYPE.teku, + if cl_type in ( + constants.CL_TYPE.nimbus, + constants.CL_TYPE.teku, ): - participant["use_separate_validator_client"] = False + participant["use_separate_vc"] = False else: - participant["use_separate_validator_client"] = True + participant["use_separate_vc"] = True - if validator_client_type == "": + if vc_type == "": # Defaults to matching the chosen CL client - validator_client_type = cl_client_type - participant["validator_client_type"] = validator_client_type + vc_type = cl_type + participant["vc_type"] = vc_type - validator_client_image = participant["validator_client_image"] - if validator_client_image == "": + vc_image = participant["vc_image"] + if vc_image == "": if cl_image == "": # If the validator client image is also empty, default to the image for the chosen CL client - default_image = DEFAULT_VC_IMAGES.get(validator_client_type, "") + default_image = DEFAULT_VC_IMAGES.get(vc_type, "") else: - if cl_client_type == "prysm": + if cl_type == "prysm": default_image = cl_image.replace("beacon-chain", "validator") - elif cl_client_type == "nimbus": + elif cl_type == "nimbus": default_image = cl_image.replace( "nimbus-eth2", "nimbus-validator-client" ) @@ -408,10 +408,10 @@ def parse_network_params(input_args): if default_image == "": fail( "{0} received an empty image name and we don't have a default for it".format( - validator_client_type + vc_type ) ) - participant["validator_client_image"] = default_image + participant["vc_image"] = default_image snooper_enabled = participant["snooper_enabled"] 
if snooper_enabled == False: @@ -428,10 +428,10 @@ def parse_network_params(input_args): blobber_enabled = participant["blobber_enabled"] if blobber_enabled: # unless we are running lighthouse, we don't support blobber - if participant["cl_client_type"] != "lighthouse": + if participant["cl_type"] != "lighthouse": fail( "blobber is not supported for {0} client".format( - participant["cl_client_type"] + participant["cl_type"] ) ) @@ -458,11 +458,11 @@ def parse_network_params(input_args): actual_num_validators += participant["validator_count"] - beacon_extra_params = participant.get("beacon_extra_params", []) - participant["beacon_extra_params"] = beacon_extra_params + cl_extra_params = participant.get("cl_extra_params", []) + participant["cl_extra_params"] = cl_extra_params - validator_extra_params = participant.get("validator_extra_params", []) - participant["validator_extra_params"] = validator_extra_params + vc_extra_params = participant.get("vc_extra_params", []) + participant["vc_extra_params"] = vc_extra_params total_participant_count += participant["count"] @@ -586,91 +586,94 @@ def default_input_args(): "participants": participants, "network_params": network_params, "wait_for_finalization": False, - "global_client_log_level": "info", + "global_log_level": "info", "snooper_enabled": False, "ethereum_metrics_exporter_enabled": False, - "xatu_sentry_enabled": False, "parallel_keystore_generation": False, "disable_peer_scoring": False, + "persistent": False, + "mev_type": None, + "xatu_sentry_enabled": False, "global_tolerations": [], + "global_node_selectors": {}, } def default_network_params(): # this is temporary till we get params working return { - "preregistered_validator_keys_mnemonic": "giant issue aisle success illegal bike spike question tent bar rely arctic volcano long crawl hungry vocal artwork sniff fantasy very lucky have athlete", - "preregistered_validator_count": 0, - "num_validator_keys_per_node": 64, + "network": "kurtosis", "network_id": 
"3151908", "deposit_contract_address": "0x4242424242424242424242424242424242424242", "seconds_per_slot": 12, + "num_validator_keys_per_node": 64, + "preregistered_validator_keys_mnemonic": "giant issue aisle success illegal bike spike question tent bar rely arctic volcano long crawl hungry vocal artwork sniff fantasy very lucky have athlete", + "preregistered_validator_count": 0, "genesis_delay": 20, "max_churn": 8, "ejection_balance": 16000000000, "eth1_follow_distance": 2048, - "capella_fork_epoch": 0, - "deneb_fork_epoch": 500, - "electra_fork_epoch": None, - "network": "kurtosis", "min_validator_withdrawability_delay": 256, "shard_committee_period": 256, + "capella_fork_epoch": 0, + "deneb_fork_epoch": 4, + "electra_fork_epoch": None, "network_sync_base_url": "https://ethpandaops-ethereum-node-snapshots.ams3.digitaloceanspaces.com/", } def default_participant(): return { - "el_client_type": "geth", - "el_client_image": "", - "el_client_log_level": "", - "el_client_volume_size": 0, - "el_extra_params": [], + "el_type": "geth", + "el_image": "", + "el_log_level": "", "el_extra_env_vars": {}, "el_extra_labels": {}, + "el_extra_params": [], "el_tolerations": [], - "cl_client_type": "lighthouse", - "cl_client_image": "", - "cl_client_log_level": "", - "cl_client_volume_size": 0, - "use_separate_validator_client": None, - "validator_client_type": "", - "validator_client_log_level": "", - "validator_client_image": "", - "cl_tolerations": [], - "validator_tolerations": [], - "tolerations": [], - "node_selectors": {}, - "beacon_extra_params": [], - "beacon_extra_labels": {}, - "validator_extra_params": [], - "validator_extra_labels": {}, - "builder_network_params": None, + "el_volume_size": 0, "el_min_cpu": 0, "el_max_cpu": 0, "el_min_mem": 0, "el_max_mem": 0, - "bn_min_cpu": 0, - "bn_max_cpu": 0, - "bn_min_mem": 0, - "bn_max_mem": 0, - "v_min_cpu": 0, - "v_max_cpu": 0, - "v_min_mem": 0, - "v_max_mem": 0, + "cl_type": "lighthouse", + "cl_image": "", + "cl_log_level": 
"", + "cl_extra_env_vars": {}, + "cl_extra_labels": {}, + "cl_extra_params": [], + "cl_tolerations": [], + "cl_volume_size": 0, + "cl_min_cpu": 0, + "cl_max_cpu": 0, + "cl_min_mem": 0, + "cl_max_mem": 0, + "use_separate_vc": None, + "vc_type": "", + "vc_image": "", + "vc_log_level": "", + "vc_extra_env_vars": {}, + "vc_extra_labels": {}, + "vc_extra_params": [], + "vc_tolerations": [], + "vc_min_cpu": 0, + "vc_max_cpu": 0, + "vc_min_mem": 0, + "vc_max_mem": 0, "validator_count": None, + "node_selectors": {}, + "tolerations": [], + "count": 1, "snooper_enabled": False, "ethereum_metrics_exporter_enabled": False, "xatu_sentry_enabled": False, - "count": 1, "prometheus_config": { "scrape_interval": "15s", "labels": None, }, "blobber_enabled": False, "blobber_extra_params": [], - "global_tolerations": [], - "global_node_selectors": {}, + "builder_network_params": None, } @@ -742,14 +745,14 @@ def get_default_custom_flood_params(): def enrich_disable_peer_scoring(parsed_arguments_dict): for index, participant in enumerate(parsed_arguments_dict["participants"]): - if participant["cl_client_type"] == "lighthouse": - participant["beacon_extra_params"].append("--disable-peer-scoring") - if participant["cl_client_type"] == "prysm": - participant["beacon_extra_params"].append("--disable-peer-scorer") - if participant["cl_client_type"] == "teku": - participant["beacon_extra_params"].append("--Xp2p-gossip-scoring-enabled") - if participant["cl_client_type"] == "lodestar": - participant["beacon_extra_params"].append("--disablePeerScoring") + if participant["cl_type"] == "lighthouse": + participant["cl_extra_params"].append("--disable-peer-scoring") + if participant["cl_type"] == "prysm": + participant["cl_extra_params"].append("--disable-peer-scorer") + if participant["cl_type"] == "teku": + participant["cl_extra_params"].append("--Xp2p-gossip-scoring-enabled") + if participant["cl_type"] == "lodestar": + participant["cl_extra_params"].append("--disablePeerScoring") return 
parsed_arguments_dict @@ -762,36 +765,34 @@ def enrich_mev_extra_params(parsed_arguments_dict, mev_prefix, mev_port, mev_typ mev_url = "http://{0}-{1}-{2}-{3}:{4}".format( MEV_BOOST_SERVICE_NAME_PREFIX, index_str, - participant["cl_client_type"], - participant["el_client_type"], + participant["cl_type"], + participant["el_type"], mev_port, ) - if participant["cl_client_type"] == "lighthouse": - participant["validator_extra_params"].append("--builder-proposals") - participant["beacon_extra_params"].append("--builder={0}".format(mev_url)) - if participant["cl_client_type"] == "lodestar": - participant["validator_extra_params"].append("--builder") - participant["beacon_extra_params"].append("--builder") - participant["beacon_extra_params"].append( - "--builder.urls={0}".format(mev_url) - ) - if participant["cl_client_type"] == "nimbus": - participant["validator_extra_params"].append("--payload-builder=true") - participant["beacon_extra_params"].append("--payload-builder=true") - participant["beacon_extra_params"].append( + if participant["cl_type"] == "lighthouse": + participant["vc_extra_params"].append("--builder-proposals") + participant["cl_extra_params"].append("--builder={0}".format(mev_url)) + if participant["cl_type"] == "lodestar": + participant["vc_extra_params"].append("--builder") + participant["cl_extra_params"].append("--builder") + participant["cl_extra_params"].append("--builder.urls={0}".format(mev_url)) + if participant["cl_type"] == "nimbus": + participant["vc_extra_params"].append("--payload-builder=true") + participant["cl_extra_params"].append("--payload-builder=true") + participant["cl_extra_params"].append( "--payload-builder-url={0}".format(mev_url) ) - if participant["cl_client_type"] == "teku": - participant["validator_extra_params"].append( + if participant["cl_type"] == "teku": + participant["vc_extra_params"].append( "--validators-builder-registration-default-enabled=true" ) - participant["beacon_extra_params"].append( + 
participant["cl_extra_params"].append( "--builder-endpoint={0}".format(mev_url) ) - if participant["cl_client_type"] == "prysm": - participant["validator_extra_params"].append("--enable-builder") - participant["beacon_extra_params"].append( + if participant["cl_type"] == "prysm": + participant["vc_extra_params"].append("--enable-builder") + participant["cl_extra_params"].append( "--http-mev-relay={0}".format(mev_url) ) @@ -801,18 +802,12 @@ def enrich_mev_extra_params(parsed_arguments_dict, mev_prefix, mev_port, mev_typ ) if mev_type == "full": mev_participant = default_participant() - mev_participant["el_client_type"] = ( - mev_participant["el_client_type"] + "-builder" - ) + mev_participant["el_type"] = mev_participant["el_type"] + "-builder" mev_participant.update( { - "el_client_image": parsed_arguments_dict["mev_params"][ - "mev_builder_image" - ], - "cl_client_image": parsed_arguments_dict["mev_params"][ - "mev_builder_cl_image" - ], - "beacon_extra_params": [ + "el_image": parsed_arguments_dict["mev_params"]["mev_builder_image"], + "cl_image": parsed_arguments_dict["mev_params"]["mev_builder_cl_image"], + "cl_extra_params": [ "--always-prepare-payload", "--prepare-payload-lookahead", "12000", diff --git a/src/participant.star b/src/participant.star index 58ed4f4b5..d68d35a68 100644 --- a/src/participant.star +++ b/src/participant.star @@ -1,21 +1,21 @@ def new_participant( - el_client_type, - cl_client_type, - validator_client_type, - el_client_context, - cl_client_context, - validator_client_context, + el_type, + cl_type, + vc_type, + el_context, + cl_context, + vc_context, snooper_engine_context, ethereum_metrics_exporter_context, xatu_sentry_context, ): return struct( - el_client_type=el_client_type, - cl_client_type=cl_client_type, - validator_client_type=validator_client_type, - el_client_context=el_client_context, - cl_client_context=cl_client_context, - validator_client_context=validator_client_context, + el_type=el_type, + cl_type=cl_type, + 
vc_type=vc_type, + el_context=el_context, + cl_context=cl_context, + vc_context=vc_context, snooper_engine_context=snooper_engine_context, ethereum_metrics_exporter_context=ethereum_metrics_exporter_context, xatu_sentry_context=xatu_sentry_context, diff --git a/src/participant_network.star b/src/participant_network.star index 367ac2954..82dd6a97d 100644 --- a/src/participant_network.star +++ b/src/participant_network.star @@ -1,65 +1,28 @@ -validator_keystores = import_module( - "./prelaunch_data_generator/validator_keystores/validator_keystore_generator.star" -) - el_cl_genesis_data_generator = import_module( "./prelaunch_data_generator/el_cl_genesis/el_cl_genesis_generator.star" ) -el_cl_genesis_data = import_module( - "./prelaunch_data_generator/el_cl_genesis/el_cl_genesis_data.star" -) input_parser = import_module("./package_io/input_parser.star") - shared_utils = import_module("./shared_utils/shared_utils.star") - static_files = import_module("./static_files/static_files.star") - -geth = import_module("./el/geth/geth_launcher.star") -besu = import_module("./el/besu/besu_launcher.star") -erigon = import_module("./el/erigon/erigon_launcher.star") -nethermind = import_module("./el/nethermind/nethermind_launcher.star") -reth = import_module("./el/reth/reth_launcher.star") -ethereumjs = import_module("./el/ethereumjs/ethereumjs_launcher.star") -nimbus_eth1 = import_module("./el/nimbus-eth1/nimbus_launcher.star") - -lighthouse = import_module("./cl/lighthouse/lighthouse_launcher.star") -lodestar = import_module("./cl/lodestar/lodestar_launcher.star") -nimbus = import_module("./cl/nimbus/nimbus_launcher.star") -prysm = import_module("./cl/prysm/prysm_launcher.star") -teku = import_module("./cl/teku/teku_launcher.star") - -validator_client = import_module("./validator_client/validator_client_launcher.star") - -snooper = import_module("./snooper/snooper_engine_launcher.star") +constants = import_module("./package_io/constants.star") ethereum_metrics_exporter = 
import_module( "./ethereum_metrics_exporter/ethereum_metrics_exporter_launcher.star" ) -xatu_sentry = import_module("./xatu_sentry/xatu_sentry_launcher.star") - -genesis_constants = import_module( - "./prelaunch_data_generator/genesis_constants/genesis_constants.star" -) participant_module = import_module("./participant.star") -constants = import_module("./package_io/constants.star") - -BOOT_PARTICIPANT_INDEX = 0 - -# The time that the CL genesis generation step takes to complete, based off what we've seen -# This is in seconds -CL_GENESIS_DATA_GENERATION_TIME = 5 - -# Each CL node takes about this time to start up and start processing blocks, so when we create the CL -# genesis data we need to set the genesis timestamp in the future so that nodes don't miss important slots -# (e.g. Altair fork) -# TODO(old) Make this client-specific (currently this is Nimbus) -# This is in seconds -CL_NODE_STARTUP_TIME = 5 +xatu_sentry = import_module("./xatu_sentry/xatu_sentry_launcher.star") +launch_ephemery = import_module("./network_launcher/ephemery.star") +launch_public_network = import_module("./network_launcher/public_network.star") +launch_devnet = import_module("./network_launcher/devnet.star") +launch_kurtosis = import_module("./network_launcher/kurtosis.star") +launch_shadowfork = import_module("./network_launcher/shadowfork.star") -CL_CLIENT_CONTEXT_BOOTNODE = None +el_client_launcher = import_module("./el/el_launcher.star") +cl_client_launcher = import_module("./cl/cl_launcher.star") +vc = import_module("./vc/vc_launcher.star") def launch_participant_network( @@ -77,8 +40,8 @@ def launch_participant_network( parallel_keystore_generation=False, ): network_id = network_params.network_id - num_participants = len(participants) latest_block = "" + num_participants = len(participants) cancun_time = 0 prague_time = 0 shadowfork_block = "latest" @@ -96,180 +59,25 @@ def launch_participant_network( if ( constants.NETWORK_NAME.shadowfork in network_params.network ): # 
shadowfork requires some preparation - base_network = shared_utils.get_network_name(network_params.network) - # overload the network name to remove the shadowfork suffix - if constants.NETWORK_NAME.ephemery in base_network: - chain_id = plan.run_sh( - run="curl -s https://ephemery.dev/latest/config.yaml | yq .DEPOSIT_CHAIN_ID | tr -d '\n'", - image="linuxserver/yq", - ) - network_id = chain_id.output - else: - network_id = constants.NETWORK_ID[ - base_network - ] # overload the network id to match the network name - latest_block = plan.run_sh( # fetch the latest block - run="mkdir -p /shadowfork && \ - curl -o /shadowfork/latest_block.json " - + network_params.network_sync_base_url - + base_network - + "/geth/" - + shadowfork_block - + "/_snapshot_eth_getBlockByNumber.json", - image="badouralix/curl-jq", - store=[StoreSpec(src="/shadowfork", name="latest_blocks")], + latest_block, network_id = launch_shadowfork.shadowfork_prep( + plan, + network_params, + shadowfork_block, + participants, + global_tolerations, + global_node_selectors, ) - for index, participant in enumerate(participants): - tolerations = input_parser.get_client_tolerations( - participant.el_tolerations, - participant.tolerations, - global_tolerations, - ) - node_selectors = input_parser.get_client_node_selectors( - participant.node_selectors, - global_node_selectors, - ) - - cl_client_type = participant.cl_client_type - el_client_type = participant.el_client_type - - # Zero-pad the index using the calculated zfill value - index_str = shared_utils.zfill_custom( - index + 1, len(str(len(participants))) - ) - - el_service_name = "el-{0}-{1}-{2}".format( - index_str, el_client_type, cl_client_type - ) - shadowfork_data = plan.add_service( - name="shadowfork-{0}".format(el_service_name), - config=ServiceConfig( - image="alpine:3.19.1", - cmd=[ - "apk add --no-cache curl tar zstd && curl -s -L " - + network_params.network_sync_base_url - + base_network - + "/" - + el_client_type - + "/" - + 
shadowfork_block - + "/snapshot.tar.zst" - + " | tar -I zstd -xvf - -C /data/" - + el_client_type - + "/execution-data" - + " && touch /tmp/finished" - + " && tail -f /dev/null" - ], - entrypoint=["/bin/sh", "-c"], - files={ - "/data/" - + el_client_type - + "/execution-data": Directory( - persistent_key="data-{0}".format(el_service_name), - size=constants.VOLUME_SIZE[base_network][ - el_client_type + "_volume_size" - ], - ), - }, - tolerations=tolerations, - node_selectors=node_selectors, - ), - ) - for index, participant in enumerate(participants): - cl_client_type = participant.cl_client_type - el_client_type = participant.el_client_type - - # Zero-pad the index using the calculated zfill value - index_str = shared_utils.zfill_custom( - index + 1, len(str(len(participants))) - ) - - el_service_name = "el-{0}-{1}-{2}".format( - index_str, el_client_type, cl_client_type - ) - plan.wait( - service_name="shadowfork-{0}".format(el_service_name), - recipe=ExecRecipe(command=["cat", "/tmp/finished"]), - field="code", - assertion="==", - target_value=0, - interval="1s", - timeout="6h", # 6 hours should be enough for the biggest network - ) - # We are running a kurtosis or shadowfork network - plan.print("Generating cl validator key stores") - validator_data = None - if not parallel_keystore_generation: - validator_data = validator_keystores.generate_validator_keystores( - plan, network_params.preregistered_validator_keys_mnemonic, participants - ) - else: - validator_data = ( - validator_keystores.generate_valdiator_keystores_in_parallel( - plan, - network_params.preregistered_validator_keys_mnemonic, - participants, - ) - ) - - plan.print(json.indent(json.encode(validator_data))) - - # We need to send the same genesis time to both the EL and the CL to ensure that timestamp based forking works as expected - final_genesis_timestamp = get_final_genesis_timestamp( - plan, - network_params.genesis_delay - + CL_GENESIS_DATA_GENERATION_TIME - + num_participants * 
CL_NODE_STARTUP_TIME, + ( + total_number_of_validator_keys, + ethereum_genesis_generator_image, + final_genesis_timestamp, + validator_data, + ) = launch_kurtosis.launch( + plan, network_params, participants, parallel_keystore_generation ) - # if preregistered validator count is 0 (default) then calculate the total number of validators from the participants - total_number_of_validator_keys = network_params.preregistered_validator_count - - if network_params.preregistered_validator_count == 0: - for participant in participants: - total_number_of_validator_keys += participant.validator_count - - plan.print("Generating EL CL data") - - # we are running bellatrix genesis (deprecated) - will be removed in the future - if ( - network_params.capella_fork_epoch > 0 - and network_params.electra_fork_epoch == None - ): - ethereum_genesis_generator_image = ( - constants.ETHEREUM_GENESIS_GENERATOR.bellatrix_genesis - ) - # we are running capella genesis - default behavior - elif ( - network_params.capella_fork_epoch == 0 - and network_params.electra_fork_epoch == None - and network_params.deneb_fork_epoch > 0 - ): - ethereum_genesis_generator_image = ( - constants.ETHEREUM_GENESIS_GENERATOR.capella_genesis - ) - # we are running deneb genesis - experimental, soon to become default - elif network_params.deneb_fork_epoch == 0: - ethereum_genesis_generator_image = ( - constants.ETHEREUM_GENESIS_GENERATOR.deneb_genesis - ) - # we are running electra - experimental - elif network_params.electra_fork_epoch != None: - if network_params.electra_fork_epoch == 0: - ethereum_genesis_generator_image = ( - constants.ETHEREUM_GENESIS_GENERATOR.verkle_genesis - ) - else: - ethereum_genesis_generator_image = ( - constants.ETHEREUM_GENESIS_GENERATOR.verkle_support_genesis - ) - else: - fail( - "Unsupported fork epoch configuration, need to define either capella_fork_epoch, deneb_fork_epoch or electra_fork_epoch" - ) - el_cl_genesis_config_template = read_file( 
static_files.EL_CL_GENESIS_GENERATION_CONFIG_TEMPLATE_FILEPATH ) @@ -297,211 +105,47 @@ def launch_participant_network( ) elif network_params.network in constants.PUBLIC_NETWORKS: # We are running a public network - dummy = plan.run_sh( - run="mkdir /network-configs", - store=[StoreSpec(src="/network-configs/", name="el_cl_genesis_data")], - ) - el_cl_data = el_cl_genesis_data.new_el_cl_genesis_data( - dummy.files_artifacts[0], - constants.GENESIS_VALIDATORS_ROOT[network_params.network], - cancun_time, - prague_time, + ( + el_cl_data, + final_genesis_timestamp, + network_id, + validator_data, + ) = launch_public_network.launch( + plan, network_params.network, cancun_time, prague_time ) - final_genesis_timestamp = constants.GENESIS_TIME[network_params.network] - network_id = constants.NETWORK_ID[network_params.network] - validator_data = None elif network_params.network == constants.NETWORK_NAME.ephemery: - el_cl_genesis_data_uuid = plan.run_sh( - run="mkdir -p /network-configs/ && \ - curl -o latest.tar.gz https://ephemery.dev/latest.tar.gz && \ - tar xvzf latest.tar.gz -C /network-configs && \ - cat /network-configs/genesis_validators_root.txt", - image="badouralix/curl-jq", - store=[StoreSpec(src="/network-configs/", name="el_cl_genesis_data")], - ) - genesis_validators_root = el_cl_genesis_data_uuid.output - el_cl_data = el_cl_genesis_data.new_el_cl_genesis_data( - el_cl_genesis_data_uuid.files_artifacts[0], - genesis_validators_root, - cancun_time, - prague_time, - ) - final_genesis_timestamp = shared_utils.read_genesis_timestamp_from_config( - plan, el_cl_genesis_data_uuid.files_artifacts[0] - ) - network_id = shared_utils.read_genesis_network_id_from_config( - plan, el_cl_genesis_data_uuid.files_artifacts[0] - ) - validator_data = None + # We are running an ephemery network + ( + el_cl_data, + final_genesis_timestamp, + network_id, + validator_data, + ) = launch_ephemery.launch(plan, cancun_time, prague_time) else: # We are running a devnet - url = 
calculate_devnet_url(network_params.network) - el_cl_genesis_uuid = plan.upload_files( - src=url, - name="el_cl_genesis", - ) - el_cl_genesis_data_uuid = plan.run_sh( - run="mkdir -p /network-configs/ && mv /opt/* /network-configs/", - store=[StoreSpec(src="/network-configs/", name="el_cl_genesis_data")], - files={"/opt": el_cl_genesis_uuid}, - ) - genesis_validators_root = read_file(url + "/genesis_validators_root.txt") - - el_cl_data = el_cl_genesis_data.new_el_cl_genesis_data( - el_cl_genesis_data_uuid.files_artifacts[0], - genesis_validators_root, - cancun_time, - prague_time, - ) - final_genesis_timestamp = shared_utils.read_genesis_timestamp_from_config( - plan, el_cl_genesis_data_uuid.files_artifacts[0] - ) - network_id = shared_utils.read_genesis_network_id_from_config( - plan, el_cl_genesis_data_uuid.files_artifacts[0] - ) - validator_data = None - - el_launchers = { - constants.EL_CLIENT_TYPE.geth: { - "launcher": geth.new_geth_launcher( - el_cl_data, - jwt_file, - network_params.network, - network_id, - network_params.capella_fork_epoch, - el_cl_data.cancun_time, - el_cl_data.prague_time, - network_params.electra_fork_epoch, - ), - "launch_method": geth.launch, - }, - constants.EL_CLIENT_TYPE.gethbuilder: { - "launcher": geth.new_geth_launcher( - el_cl_data, - jwt_file, - network_params.network, - network_id, - network_params.capella_fork_epoch, - el_cl_data.cancun_time, - el_cl_data.prague_time, - network_params.electra_fork_epoch, - ), - "launch_method": geth.launch, - }, - constants.EL_CLIENT_TYPE.besu: { - "launcher": besu.new_besu_launcher( - el_cl_data, - jwt_file, - network_params.network, - ), - "launch_method": besu.launch, - }, - constants.EL_CLIENT_TYPE.erigon: { - "launcher": erigon.new_erigon_launcher( - el_cl_data, - jwt_file, - network_params.network, - network_id, - el_cl_data.cancun_time, - ), - "launch_method": erigon.launch, - }, - constants.EL_CLIENT_TYPE.nethermind: { - "launcher": nethermind.new_nethermind_launcher( - el_cl_data, - 
jwt_file, - network_params.network, - ), - "launch_method": nethermind.launch, - }, - constants.EL_CLIENT_TYPE.reth: { - "launcher": reth.new_reth_launcher( - el_cl_data, - jwt_file, - network_params.network, - ), - "launch_method": reth.launch, - }, - constants.EL_CLIENT_TYPE.ethereumjs: { - "launcher": ethereumjs.new_ethereumjs_launcher( - el_cl_data, - jwt_file, - network_params.network, - ), - "launch_method": ethereumjs.launch, - }, - constants.EL_CLIENT_TYPE.nimbus: { - "launcher": nimbus_eth1.new_nimbus_launcher( - el_cl_data, - jwt_file, - network_params.network, - ), - "launch_method": nimbus_eth1.launch, - }, - } - - all_el_client_contexts = [] - - for index, participant in enumerate(participants): - cl_client_type = participant.cl_client_type - el_client_type = participant.el_client_type - node_selectors = input_parser.get_client_node_selectors( - participant.node_selectors, - global_node_selectors, - ) - tolerations = input_parser.get_client_tolerations( - participant.el_tolerations, participant.tolerations, global_tolerations - ) - if el_client_type not in el_launchers: - fail( - "Unsupported launcher '{0}', need one of '{1}'".format( - el_client_type, ",".join([el.name for el in el_launchers.keys()]) - ) - ) - - el_launcher, launch_method = ( - el_launchers[el_client_type]["launcher"], - el_launchers[el_client_type]["launch_method"], - ) - - # Zero-pad the index using the calculated zfill value - index_str = shared_utils.zfill_custom(index + 1, len(str(len(participants)))) - - el_service_name = "el-{0}-{1}-{2}".format( - index_str, el_client_type, cl_client_type - ) - - el_client_context = launch_method( - plan, - el_launcher, - el_service_name, - participant.el_client_image, - participant.el_client_log_level, - global_log_level, - all_el_client_contexts, - participant.el_min_cpu, - participant.el_max_cpu, - participant.el_min_mem, - participant.el_max_mem, - participant.el_extra_params, - participant.el_extra_env_vars, - participant.el_extra_labels, 
- persistent, - participant.el_client_volume_size, - tolerations, - node_selectors, - ) - - # Add participant el additional prometheus metrics - for metrics_info in el_client_context.el_metrics_info: - if metrics_info != None: - metrics_info["config"] = participant.prometheus_config - - all_el_client_contexts.append(el_client_context) - - plan.print("Successfully added {0} EL participants".format(num_participants)) + ( + el_cl_data, + final_genesis_timestamp, + network_id, + validator_data, + ) = launch_devnet.launch(plan, network_params.network, cancun_time, prague_time) + + # Launch all execution layer clients + all_el_contexts = el_client_launcher.launch( + plan, + network_params, + el_cl_data, + jwt_file, + participants, + global_log_level, + global_node_selectors, + global_tolerations, + persistent, + network_id, + num_participants, + ) - plan.print("Launching CL network") + # Launch all consensus layer clients prysm_password_relative_filepath = ( validator_data.prysm_password_relative_filepath if network_params.network == constants.NETWORK_NAME.kurtosis @@ -512,181 +156,51 @@ def launch_participant_network( if network_params.network == constants.NETWORK_NAME.kurtosis else None ) - cl_launchers = { - constants.CL_CLIENT_TYPE.lighthouse: { - "launcher": lighthouse.new_lighthouse_launcher( - el_cl_data, jwt_file, network_params.network - ), - "launch_method": lighthouse.launch, - }, - constants.CL_CLIENT_TYPE.lodestar: { - "launcher": lodestar.new_lodestar_launcher( - el_cl_data, jwt_file, network_params.network - ), - "launch_method": lodestar.launch, - }, - constants.CL_CLIENT_TYPE.nimbus: { - "launcher": nimbus.new_nimbus_launcher( - el_cl_data, jwt_file, network_params.network, keymanager_file - ), - "launch_method": nimbus.launch, - }, - constants.CL_CLIENT_TYPE.prysm: { - "launcher": prysm.new_prysm_launcher( - el_cl_data, - jwt_file, - network_params.network, - prysm_password_relative_filepath, - prysm_password_artifact_uuid, - ), - "launch_method": 
prysm.launch, - }, - constants.CL_CLIENT_TYPE.teku: { - "launcher": teku.new_teku_launcher( - el_cl_data, - jwt_file, - network_params.network, - keymanager_file, - keymanager_p12_file, - ), - "launch_method": teku.launch, - }, - } - - all_snooper_engine_contexts = [] - all_cl_client_contexts = [] - all_ethereum_metrics_exporter_contexts = [] - all_xatu_sentry_contexts = [] - preregistered_validator_keys_for_nodes = ( - validator_data.per_node_keystores - if network_params.network == constants.NETWORK_NAME.kurtosis - or constants.NETWORK_NAME.shadowfork in network_params.network - else None + + ( + all_cl_contexts, + all_snooper_engine_contexts, + preregistered_validator_keys_for_nodes, + ) = cl_client_launcher.launch( + plan, + network_params, + el_cl_data, + jwt_file, + keymanager_file, + keymanager_p12_file, + participants, + all_el_contexts, + global_log_level, + global_node_selectors, + global_tolerations, + persistent, + network_id, + num_participants, + validator_data, + prysm_password_relative_filepath, + prysm_password_artifact_uuid, ) + ethereum_metrics_exporter_context = None + all_ethereum_metrics_exporter_contexts = [] + all_xatu_sentry_contexts = [] + all_vc_contexts = [] + # Some CL clients cannot run validator clients in the same process and need + # a separate validator client + _cls_that_need_separate_vc = [ + constants.CL_TYPE.prysm, + constants.CL_TYPE.lodestar, + constants.CL_TYPE.lighthouse, + ] for index, participant in enumerate(participants): - cl_client_type = participant.cl_client_type - el_client_type = participant.el_client_type - node_selectors = input_parser.get_client_node_selectors( - participant.node_selectors, - global_node_selectors, - ) - - if cl_client_type not in cl_launchers: - fail( - "Unsupported launcher '{0}', need one of '{1}'".format( - cl_client_type, ",".join([cl.name for cl in cl_launchers.keys()]) - ) - ) - - cl_launcher, launch_method = ( - cl_launchers[cl_client_type]["launcher"], - 
cl_launchers[cl_client_type]["launch_method"], - ) - + el_type = participant.el_type + cl_type = participant.cl_type + vc_type = participant.vc_type index_str = shared_utils.zfill_custom(index + 1, len(str(len(participants)))) - - cl_service_name = "cl-{0}-{1}-{2}".format( - index_str, cl_client_type, el_client_type - ) - new_cl_node_validator_keystores = None - if participant.validator_count != 0: - new_cl_node_validator_keystores = preregistered_validator_keys_for_nodes[ - index - ] - - el_client_context = all_el_client_contexts[index] - - cl_client_context = None - snooper_engine_context = None - if participant.snooper_enabled: - snooper_service_name = "snooper-{0}-{1}-{2}".format( - index_str, cl_client_type, el_client_type - ) - snooper_engine_context = snooper.launch( - plan, - snooper_service_name, - el_client_context, - node_selectors, - ) - plan.print( - "Successfully added {0} snooper participants".format( - snooper_engine_context - ) - ) - all_snooper_engine_contexts.append(snooper_engine_context) - - if index == 0: - cl_client_context = launch_method( - plan, - cl_launcher, - cl_service_name, - participant.cl_client_image, - participant.cl_client_log_level, - global_log_level, - CL_CLIENT_CONTEXT_BOOTNODE, - el_client_context, - new_cl_node_validator_keystores, - participant.bn_min_cpu, - participant.bn_max_cpu, - participant.bn_min_mem, - participant.bn_max_mem, - participant.snooper_enabled, - snooper_engine_context, - participant.blobber_enabled, - participant.blobber_extra_params, - participant.beacon_extra_params, - participant.beacon_extra_labels, - persistent, - participant.cl_client_volume_size, - participant.cl_tolerations, - participant.tolerations, - global_tolerations, - node_selectors, - participant.use_separate_validator_client, - ) - else: - boot_cl_client_ctx = all_cl_client_contexts - cl_client_context = launch_method( - plan, - cl_launcher, - cl_service_name, - participant.cl_client_image, - participant.cl_client_log_level, - 
global_log_level, - boot_cl_client_ctx, - el_client_context, - new_cl_node_validator_keystores, - participant.bn_min_cpu, - participant.bn_max_cpu, - participant.bn_min_mem, - participant.bn_max_mem, - participant.snooper_enabled, - snooper_engine_context, - participant.blobber_enabled, - participant.blobber_extra_params, - participant.beacon_extra_params, - participant.beacon_extra_labels, - persistent, - participant.cl_client_volume_size, - participant.cl_tolerations, - participant.tolerations, - global_tolerations, - node_selectors, - participant.use_separate_validator_client, - ) - - # Add participant cl additional prometheus labels - for metrics_info in cl_client_context.cl_nodes_metrics_info: - if metrics_info != None: - metrics_info["config"] = participant.prometheus_config - - all_cl_client_contexts.append(cl_client_context) - - ethereum_metrics_exporter_context = None - + el_context = all_el_contexts[index] + cl_context = all_cl_contexts[index] if participant.ethereum_metrics_exporter_enabled: - pair_name = "{0}-{1}-{2}".format(index_str, cl_client_type, el_client_type) + pair_name = "{0}-{1}-{2}".format(index_str, cl_type, el_type) ethereum_metrics_exporter_service_name = ( "ethereum-metrics-exporter-{0}".format(pair_name) @@ -696,9 +210,9 @@ def launch_participant_network( plan, pair_name, ethereum_metrics_exporter_service_name, - el_client_context, - cl_client_context, - node_selectors, + el_context, + cl_context, + participant.node_selectors, ) plan.print( "Successfully added {0} ethereum metrics exporter participants".format( @@ -711,18 +225,18 @@ def launch_participant_network( xatu_sentry_context = None if participant.xatu_sentry_enabled: - pair_name = "{0}-{1}-{2}".format(index_str, cl_client_type, el_client_type) + pair_name = "{0}-{1}-{2}".format(index_str, cl_type, el_type) xatu_sentry_service_name = "xatu-sentry-{0}".format(pair_name) xatu_sentry_context = xatu_sentry.launch( plan, xatu_sentry_service_name, - cl_client_context, + cl_context, 
xatu_sentry_params, network_params, pair_name, - node_selectors, + participant.node_selectors, ) plan.print( "Successfully added {0} xatu sentry participants".format( @@ -732,42 +246,22 @@ def launch_participant_network( all_xatu_sentry_contexts.append(xatu_sentry_context) - plan.print("Successfully added {0} CL participants".format(num_participants)) - - all_validator_client_contexts = [] - # Some CL clients cannot run validator clients in the same process and need - # a separate validator client - _cls_that_need_separate_vc = [ - constants.CL_CLIENT_TYPE.prysm, - constants.CL_CLIENT_TYPE.lodestar, - constants.CL_CLIENT_TYPE.lighthouse, - ] - for index, participant in enumerate(participants): - cl_client_type = participant.cl_client_type - validator_client_type = participant.validator_client_type + plan.print("Successfully added {0} CL participants".format(num_participants)) - if participant.use_separate_validator_client == None: + plan.print("Start adding validators for participant #{0}".format(index_str)) + if participant.use_separate_vc == None: # This should only be the case for the MEV participant, # the regular participants default to False/True - all_validator_client_contexts.append(None) + all_vc_contexts.append(None) continue - if ( - cl_client_type in _cls_that_need_separate_vc - and not participant.use_separate_validator_client - ): - fail("{0} needs a separate validator client!".format(cl_client_type)) + if cl_type in _cls_that_need_separate_vc and not participant.use_separate_vc: + fail("{0} needs a separate validator client!".format(cl_type)) - if not participant.use_separate_validator_client: - all_validator_client_contexts.append(None) + if not participant.use_separate_vc: + all_vc_contexts.append(None) continue - el_client_context = all_el_client_contexts[index] - cl_client_context = all_cl_client_contexts[index] - - # Zero-pad the index using the calculated zfill value - index_str = shared_utils.zfill_custom(index + 1, 
len(str(len(participants)))) - plan.print( "Using separate validator client for participant #{0}".format(index_str) ) @@ -776,55 +270,51 @@ def launch_participant_network( if participant.validator_count != 0: vc_keystores = preregistered_validator_keys_for_nodes[index] - validator_client_context = validator_client.launch( + vc_context = vc.launch( plan=plan, - launcher=validator_client.new_validator_client_launcher( - el_cl_genesis_data=el_cl_data - ), + launcher=vc.new_vc_launcher(el_cl_genesis_data=el_cl_data), keymanager_file=keymanager_file, keymanager_p12_file=keymanager_p12_file, - service_name="vc-{0}-{1}-{2}".format( - index_str, validator_client_type, el_client_type - ), - validator_client_type=validator_client_type, - image=participant.validator_client_image, - participant_log_level=participant.validator_client_log_level, + service_name="vc-{0}-{1}-{2}".format(index_str, vc_type, el_type), + vc_type=vc_type, + image=participant.vc_image, + participant_log_level=participant.vc_log_level, global_log_level=global_log_level, - cl_client_context=cl_client_context, - el_client_context=el_client_context, + cl_context=cl_context, + el_context=el_context, node_keystore_files=vc_keystores, - v_min_cpu=participant.v_min_cpu, - v_max_cpu=participant.v_max_cpu, - v_min_mem=participant.v_min_mem, - v_max_mem=participant.v_max_mem, - extra_params=participant.validator_extra_params, - extra_labels=participant.validator_extra_labels, + vc_min_cpu=participant.vc_min_cpu, + vc_max_cpu=participant.vc_max_cpu, + vc_min_mem=participant.vc_min_mem, + vc_max_mem=participant.vc_max_mem, + extra_params=participant.vc_extra_params, + extra_env_vars=participant.vc_extra_env_vars, + extra_labels=participant.vc_extra_labels, prysm_password_relative_filepath=prysm_password_relative_filepath, prysm_password_artifact_uuid=prysm_password_artifact_uuid, - validator_tolerations=participant.validator_tolerations, + vc_tolerations=participant.vc_tolerations, 
participant_tolerations=participant.tolerations, global_tolerations=global_tolerations, - node_selectors=node_selectors, - network=network_params.network, # TODO: remove when deneb rebase is done - electra_fork_epoch=network_params.electra_fork_epoch, # TODO: remove when deneb rebase is done + node_selectors=participant.node_selectors, + network=network_params.network, + electra_fork_epoch=network_params.electra_fork_epoch, ) - all_validator_client_contexts.append(validator_client_context) + all_vc_contexts.append(vc_context) - if validator_client_context and validator_client_context.metrics_info: - validator_client_context.metrics_info[ - "config" - ] = participant.prometheus_config + if vc_context and vc_context.metrics_info: + vc_context.metrics_info["config"] = participant.prometheus_config all_participants = [] for index, participant in enumerate(participants): - el_client_type = participant.el_client_type - cl_client_type = participant.cl_client_type - validator_client_type = participant.validator_client_type + el_type = participant.el_type + cl_type = participant.cl_type + vc_type = participant.vc_type + snooper_engine_context = None - el_client_context = all_el_client_contexts[index] - cl_client_context = all_cl_client_contexts[index] - validator_client_context = all_validator_client_contexts[index] + el_context = all_el_contexts[index] + cl_context = all_cl_contexts[index] + vc_context = all_vc_contexts[index] if participant.snooper_enabled: snooper_engine_context = all_snooper_engine_contexts[index] @@ -841,12 +331,12 @@ def launch_participant_network( xatu_sentry_context = all_xatu_sentry_contexts[index] participant_entry = participant_module.new_participant( - el_client_type, - cl_client_type, - validator_client_type, - el_client_context, - cl_client_context, - validator_client_context, + el_type, + cl_type, + vc_type, + el_context, + cl_context, + vc_context, snooper_engine_context, ethereum_metrics_exporter_context, xatu_sentry_context, @@ -860,44 
+350,3 @@ def launch_participant_network( el_cl_data.genesis_validators_root, el_cl_data.files_artifact_uuid, ) - - -# this is a python procedure so that Kurtosis can do idempotent runs -# time.now() runs everytime bringing non determinism -# note that the timestamp it returns is a string -def get_final_genesis_timestamp(plan, padding): - result = plan.run_python( - run=""" -import time -import sys -padding = int(sys.argv[1]) -print(int(time.time()+padding), end="") -""", - args=[str(padding)], - store=[StoreSpec(src="/tmp", name="final-genesis-timestamp")], - ) - return result.output - - -def calculate_devnet_url(network): - sf_suffix_mapping = {"hsf": "-hsf-", "gsf": "-gsf-", "ssf": "-ssf-"} - shadowfork = "sf-" in network - - if shadowfork: - for suffix, delimiter in sf_suffix_mapping.items(): - if delimiter in network: - network_parts = network.split(delimiter, 1) - network_type = suffix - else: - network_parts = network.split("-devnet-", 1) - network_type = "devnet" - - devnet_name, devnet_number = network_parts[0], network_parts[1] - devnet_category = devnet_name.split("-")[0] - devnet_subname = ( - devnet_name.split("-")[1] + "-" if len(devnet_name.split("-")) > 1 else "" - ) - - return "github.com/ethpandaops/{0}-devnets/network-configs/{1}{2}-{3}".format( - devnet_category, devnet_subname, network_type, devnet_number - ) diff --git a/src/prelaunch_data_generator/validator_keystores/validator_keystore_generator.star b/src/prelaunch_data_generator/validator_keystores/validator_keystore_generator.star index e2ac0bded..3f2a22ec9 100644 --- a/src/prelaunch_data_generator/validator_keystores/validator_keystore_generator.star +++ b/src/prelaunch_data_generator/validator_keystores/validator_keystore_generator.star @@ -134,8 +134,8 @@ def generate_validator_keystores(plan, mnemonic, participants): keystore_stop_index = (keystore_start_index + participant.validator_count) - 1 artifact_name = "{0}-{1}-{2}-{3}-{4}".format( padded_idx, - participant.cl_client_type, - 
participant.el_client_type, + participant.cl_type, + participant.el_type, keystore_start_index, keystore_stop_index, ) @@ -286,8 +286,8 @@ def generate_valdiator_keystores_in_parallel(plan, mnemonic, participants): keystore_stop_index = (keystore_start_index + participant.validator_count) - 1 artifact_name = "{0}-{1}-{2}-{3}-{4}".format( padded_idx, - participant.cl_client_type, - participant.el_client_type, + participant.cl_type, + participant.el_type, keystore_start_index, keystore_stop_index, ) diff --git a/src/prelaunch_data_generator/validator_keystores/validator_ranges_generator.star b/src/prelaunch_data_generator/validator_keystores/validator_ranges_generator.star index 943bcdd84..3004ba43c 100644 --- a/src/prelaunch_data_generator/validator_keystores/validator_ranges_generator.star +++ b/src/prelaunch_data_generator/validator_keystores/validator_ranges_generator.star @@ -5,12 +5,12 @@ shared_utils = import_module("../../shared_utils/shared_utils.star") def generate_validator_ranges( plan, config_template, - cl_client_contexts, + cl_contexts, participants, ): data = [] running_total_validator_count = 0 - for index, client in enumerate(cl_client_contexts): + for index, client in enumerate(cl_contexts): participant = participants[index] if participant.validator_count == 0: continue diff --git a/src/prometheus/prometheus_launcher.star b/src/prometheus/prometheus_launcher.star index acb83a557..e8a56bbd6 100644 --- a/src/prometheus/prometheus_launcher.star +++ b/src/prometheus/prometheus_launcher.star @@ -3,7 +3,7 @@ prometheus = import_module("github.com/kurtosis-tech/prometheus-package/main.sta EXECUTION_CLIENT_TYPE = "execution" BEACON_CLIENT_TYPE = "beacon" -VALIDATOR_CLIENT_TYPE = "validator" +vc_type = "validator" METRICS_INFO_NAME_KEY = "name" METRICS_INFO_URL_KEY = "url" @@ -21,18 +21,18 @@ MAX_MEMORY = 2048 def launch_prometheus( plan, - el_client_contexts, - cl_client_contexts, - validator_client_contexts, + el_contexts, + cl_contexts, + vc_contexts, 
additional_metrics_jobs, ethereum_metrics_exporter_contexts, xatu_sentry_contexts, global_node_selectors, ): metrics_jobs = get_metrics_jobs( - el_client_contexts, - cl_client_contexts, - validator_client_contexts, + el_contexts, + cl_contexts, + vc_contexts, additional_metrics_jobs, ethereum_metrics_exporter_contexts, xatu_sentry_contexts, @@ -51,16 +51,16 @@ def launch_prometheus( def get_metrics_jobs( - el_client_contexts, - cl_client_contexts, - validator_client_contexts, + el_contexts, + cl_contexts, + vc_contexts, additional_metrics_jobs, ethereum_metrics_exporter_contexts, xatu_sentry_contexts, ): metrics_jobs = [] # Adding execution clients metrics jobs - for context in el_client_contexts: + for context in el_contexts: if len(context.el_metrics_info) >= 1 and context.el_metrics_info[0] != None: execution_metrics_info = context.el_metrics_info[0] scrape_interval = PROMETHEUS_DEFAULT_SCRAPE_INTERVAL @@ -90,7 +90,7 @@ def get_metrics_jobs( ) ) # Adding consensus clients metrics jobs - for context in cl_client_contexts: + for context in cl_contexts: if ( len(context.cl_nodes_metrics_info) >= 1 and context.cl_nodes_metrics_info[0] != None @@ -123,7 +123,7 @@ def get_metrics_jobs( ) # Adding validator clients metrics jobs - for context in validator_client_contexts: + for context in vc_contexts: if context == None: continue metrics_info = context.metrics_info @@ -131,7 +131,7 @@ def get_metrics_jobs( scrape_interval = PROMETHEUS_DEFAULT_SCRAPE_INTERVAL labels = { "service": context.service_name, - "client_type": VALIDATOR_CLIENT_TYPE, + "client_type": vc_type, "client_name": context.client_name, } diff --git a/src/shared_utils/shared_utils.star b/src/shared_utils/shared_utils.star index da9f63b79..f540044e1 100644 --- a/src/shared_utils/shared_utils.star +++ b/src/shared_utils/shared_utils.star @@ -155,3 +155,44 @@ def get_network_name(network): network_name = network.split("-shadowfork")[0] return network_name + + +# this is a python procedure so that Kurtosis 
can do idempotent runs +# time.now() runs everytime bringing non determinism +# note that the timestamp it returns is a string +def get_final_genesis_timestamp(plan, padding): + result = plan.run_python( + run=""" +import time +import sys +padding = int(sys.argv[1]) +print(int(time.time()+padding), end="") +""", + args=[str(padding)], + store=[StoreSpec(src="/tmp", name="final-genesis-timestamp")], + ) + return result.output + + +def calculate_devnet_url(network): + sf_suffix_mapping = {"hsf": "-hsf-", "gsf": "-gsf-", "ssf": "-ssf-"} + shadowfork = "sf-" in network + + if shadowfork: + for suffix, delimiter in sf_suffix_mapping.items(): + if delimiter in network: + network_parts = network.split(delimiter, 1) + network_type = suffix + else: + network_parts = network.split("-devnet-", 1) + network_type = "devnet" + + devnet_name, devnet_number = network_parts[0], network_parts[1] + devnet_category = devnet_name.split("-")[0] + devnet_subname = ( + devnet_name.split("-")[1] + "-" if len(devnet_name.split("-")) > 1 else "" + ) + + return "github.com/ethpandaops/{0}-devnets/network-configs/{1}{2}-{3}".format( + devnet_category, devnet_subname, network_type, devnet_number + ) diff --git a/src/snooper/snooper_engine_launcher.star b/src/snooper/snooper_engine_launcher.star index 636f0edab..70fd68a6f 100644 --- a/src/snooper/snooper_engine_launcher.star +++ b/src/snooper/snooper_engine_launcher.star @@ -1,6 +1,6 @@ shared_utils = import_module("../shared_utils/shared_utils.star") input_parser = import_module("../package_io/input_parser.star") -el_client_context = import_module("../el/el_client_context.star") +el_context = import_module("../el/el_context.star") el_admin_node_info = import_module("../el/el_admin_node_info.star") snooper_engine_context = import_module("../snooper/snooper_engine_context.star") @@ -25,10 +25,10 @@ MIN_MEMORY = 10 MAX_MEMORY = 300 -def launch(plan, service_name, el_client_context, node_selectors): +def launch(plan, service_name, el_context, 
node_selectors): snooper_service_name = "{0}".format(service_name) - snooper_config = get_config(service_name, el_client_context, node_selectors) + snooper_config = get_config(service_name, el_context, node_selectors) snooper_service = plan.add_service(snooper_service_name, snooper_config) snooper_http_port = snooper_service.ports[SNOOPER_ENGINE_RPC_PORT_ID] @@ -37,10 +37,10 @@ def launch(plan, service_name, el_client_context, node_selectors): ) -def get_config(service_name, el_client_context, node_selectors): +def get_config(service_name, el_context, node_selectors): engine_rpc_port_num = "http://{0}:{1}".format( - el_client_context.ip_addr, - el_client_context.engine_rpc_port_num, + el_context.ip_addr, + el_context.engine_rpc_port_num, ) cmd = [ SNOOPER_BINARY_COMMAND, diff --git a/src/validator_client/lighthouse.star b/src/vc/lighthouse.star similarity index 66% rename from src/validator_client/lighthouse.star rename to src/vc/lighthouse.star index 9846691e9..70d22d486 100644 --- a/src/validator_client/lighthouse.star +++ b/src/vc/lighthouse.star @@ -1,17 +1,17 @@ constants = import_module("../package_io/constants.star") input_parser = import_module("../package_io/input_parser.star") shared_utils = import_module("../shared_utils/shared_utils.star") -validator_client_shared = import_module("./shared.star") +vc_shared = import_module("./shared.star") RUST_BACKTRACE_ENVVAR_NAME = "RUST_BACKTRACE" RUST_FULL_BACKTRACE_KEYWORD = "full" VERBOSITY_LEVELS = { - constants.GLOBAL_CLIENT_LOG_LEVEL.error: "error", - constants.GLOBAL_CLIENT_LOG_LEVEL.warn: "warn", - constants.GLOBAL_CLIENT_LOG_LEVEL.info: "info", - constants.GLOBAL_CLIENT_LOG_LEVEL.debug: "debug", - constants.GLOBAL_CLIENT_LOG_LEVEL.trace: "trace", + constants.GLOBAL_LOG_LEVEL.error: "error", + constants.GLOBAL_LOG_LEVEL.warn: "warn", + constants.GLOBAL_LOG_LEVEL.info: "info", + constants.GLOBAL_LOG_LEVEL.debug: "debug", + constants.GLOBAL_LOG_LEVEL.trace: "trace", } @@ -21,14 +21,15 @@ def get_config( 
participant_log_level, global_log_level, beacon_http_url, - cl_client_context, - el_client_context, + cl_context, + el_context, node_keystore_files, - v_min_cpu, - v_max_cpu, - v_min_mem, - v_max_mem, + vc_min_cpu, + vc_max_cpu, + vc_min_mem, + vc_max_mem, extra_params, + extra_env_vars, extra_labels, tolerations, node_selectors, @@ -40,17 +41,17 @@ def get_config( ) validator_keys_dirpath = shared_utils.path_join( - validator_client_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT, + vc_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT, node_keystore_files.raw_keys_relative_dirpath, ) validator_secrets_dirpath = shared_utils.path_join( - validator_client_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT, + vc_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT, node_keystore_files.raw_secrets_relative_dirpath, ) cmd = [ "lighthouse", - "validator_client", + "vc", "--debug-level=" + log_level, "--testnet-dir=" + constants.GENESIS_CONFIG_MOUNT_PATH_ON_CONTAINER, "--validators-dir=" + validator_keys_dirpath, @@ -63,7 +64,7 @@ def get_config( # burn address - If unset, the validator will scream in its logs "--suggested-fee-recipient=" + constants.VALIDATING_REWARDS_ACCOUNT, "--http", - "--http-port={0}".format(validator_client_shared.VALIDATOR_HTTP_PORT_NUM), + "--http-port={0}".format(vc_shared.VALIDATOR_HTTP_PORT_NUM), "--http-address=0.0.0.0", "--http-allow-origin=*", "--unencrypted-http-transport", @@ -71,14 +72,9 @@ def get_config( "--metrics", "--metrics-address=0.0.0.0", "--metrics-allow-origin=*", - "--metrics-port={0}".format( - validator_client_shared.VALIDATOR_CLIENT_METRICS_PORT_NUM - ), + "--metrics-port={0}".format(vc_shared.VALIDATOR_CLIENT_METRICS_PORT_NUM), # ^^^^^^^^^^^^^^^^^^^ PROMETHEUS CONFIG ^^^^^^^^^^^^^^^^^^^^^ - "--graffiti=" - + cl_client_context.client_name - + "-" - + el_client_context.client_name, + "--graffiti=" + cl_context.client_name + "-" + el_context.client_name, ] if not (constants.NETWORK_NAME.verkle in network or electra_fork_epoch != None): @@ -89,24 +85,25 @@ def 
get_config( files = { constants.GENESIS_DATA_MOUNTPOINT_ON_CLIENTS: el_cl_genesis_data.files_artifact_uuid, - validator_client_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT: node_keystore_files.files_artifact_uuid, + vc_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT: node_keystore_files.files_artifact_uuid, } - + env = {RUST_BACKTRACE_ENVVAR_NAME: RUST_FULL_BACKTRACE_KEYWORD} + env.update(extra_env_vars) return ServiceConfig( image=image, - ports=validator_client_shared.VALIDATOR_CLIENT_USED_PORTS, + ports=vc_shared.VALIDATOR_CLIENT_USED_PORTS, cmd=cmd, + env_vars=env, files=files, - env_vars={RUST_BACKTRACE_ENVVAR_NAME: RUST_FULL_BACKTRACE_KEYWORD}, - min_cpu=v_min_cpu, - max_cpu=v_max_cpu, - min_memory=v_min_mem, - max_memory=v_max_mem, + min_cpu=vc_min_cpu, + max_cpu=vc_max_cpu, + min_memory=vc_min_mem, + max_memory=vc_max_mem, labels=shared_utils.label_maker( - constants.VC_CLIENT_TYPE.lighthouse, + constants.VC_TYPE.lighthouse, constants.CLIENT_TYPES.validator, image, - cl_client_context.client_name, + cl_context.client_name, extra_labels, ), tolerations=tolerations, diff --git a/src/validator_client/lodestar.star b/src/vc/lodestar.star similarity index 61% rename from src/validator_client/lodestar.star rename to src/vc/lodestar.star index 23e02e044..2a674acc1 100644 --- a/src/validator_client/lodestar.star +++ b/src/vc/lodestar.star @@ -1,14 +1,14 @@ constants = import_module("../package_io/constants.star") input_parser = import_module("../package_io/input_parser.star") shared_utils = import_module("../shared_utils/shared_utils.star") -validator_client_shared = import_module("./shared.star") +vc_shared = import_module("./shared.star") VERBOSITY_LEVELS = { - constants.GLOBAL_CLIENT_LOG_LEVEL.error: "error", - constants.GLOBAL_CLIENT_LOG_LEVEL.warn: "warn", - constants.GLOBAL_CLIENT_LOG_LEVEL.info: "info", - constants.GLOBAL_CLIENT_LOG_LEVEL.debug: "debug", - constants.GLOBAL_CLIENT_LOG_LEVEL.trace: "trace", + constants.GLOBAL_LOG_LEVEL.error: "error", + 
constants.GLOBAL_LOG_LEVEL.warn: "warn", + constants.GLOBAL_LOG_LEVEL.info: "info", + constants.GLOBAL_LOG_LEVEL.debug: "debug", + constants.GLOBAL_LOG_LEVEL.trace: "trace", } @@ -18,14 +18,15 @@ def get_config( participant_log_level, global_log_level, beacon_http_url, - cl_client_context, - el_client_context, + cl_context, + el_context, node_keystore_files, - v_min_cpu, - v_max_cpu, - v_min_mem, - v_max_mem, + vc_min_cpu, + vc_max_cpu, + vc_min_mem, + vc_max_mem, extra_params, + extra_env_vars, extra_labels, tolerations, node_selectors, @@ -35,12 +36,12 @@ def get_config( ) validator_keys_dirpath = shared_utils.path_join( - validator_client_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT, + vc_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT, node_keystore_files.raw_keys_relative_dirpath, ) validator_secrets_dirpath = shared_utils.path_join( - validator_client_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT, + vc_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT, node_keystore_files.raw_secrets_relative_dirpath, ) @@ -56,20 +57,15 @@ def get_config( "--suggestedFeeRecipient=" + constants.VALIDATING_REWARDS_ACCOUNT, "--keymanager", "--keymanager.authEnabled=true", - "--keymanager.port={0}".format(validator_client_shared.VALIDATOR_HTTP_PORT_NUM), + "--keymanager.port={0}".format(vc_shared.VALIDATOR_HTTP_PORT_NUM), "--keymanager.address=0.0.0.0", "--keymanager.cors=*", # vvvvvvvvvvvvvvvvvvv PROMETHEUS CONFIG vvvvvvvvvvvvvvvvvvvvv "--metrics", "--metrics.address=0.0.0.0", - "--metrics.port={0}".format( - validator_client_shared.VALIDATOR_CLIENT_METRICS_PORT_NUM - ), + "--metrics.port={0}".format(vc_shared.VALIDATOR_CLIENT_METRICS_PORT_NUM), # ^^^^^^^^^^^^^^^^^^^ PROMETHEUS CONFIG ^^^^^^^^^^^^^^^^^^^^^ - "--graffiti=" - + cl_client_context.client_name - + "-" - + el_client_context.client_name, + "--graffiti=" + cl_context.client_name + "-" + el_context.client_name, "--useProduceBlockV3", ] @@ -79,24 +75,25 @@ def get_config( files = { constants.GENESIS_DATA_MOUNTPOINT_ON_CLIENTS: 
el_cl_genesis_data.files_artifact_uuid, - validator_client_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT: node_keystore_files.files_artifact_uuid, + vc_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT: node_keystore_files.files_artifact_uuid, } return ServiceConfig( image=image, - ports=validator_client_shared.VALIDATOR_CLIENT_USED_PORTS, + ports=vc_shared.VALIDATOR_CLIENT_USED_PORTS, cmd=cmd, + env_vars=extra_env_vars, files=files, - private_ip_address_placeholder=validator_client_shared.PRIVATE_IP_ADDRESS_PLACEHOLDER, - min_cpu=v_min_cpu, - max_cpu=v_max_cpu, - min_memory=v_min_mem, - max_memory=v_max_mem, + private_ip_address_placeholder=vc_shared.PRIVATE_IP_ADDRESS_PLACEHOLDER, + min_cpu=vc_min_cpu, + max_cpu=vc_max_cpu, + min_memory=vc_min_mem, + max_memory=vc_max_mem, labels=shared_utils.label_maker( - constants.VC_CLIENT_TYPE.lodestar, + constants.VC_TYPE.lodestar, constants.CLIENT_TYPES.validator, image, - cl_client_context.client_name, + cl_context.client_name, extra_labels, ), tolerations=tolerations, diff --git a/src/validator_client/nimbus.star b/src/vc/nimbus.star similarity index 62% rename from src/validator_client/nimbus.star rename to src/vc/nimbus.star index 164b35ac6..1b7a96461 100644 --- a/src/validator_client/nimbus.star +++ b/src/vc/nimbus.star @@ -1,6 +1,6 @@ constants = import_module("../package_io/constants.star") shared_utils = import_module("../shared_utils/shared_utils.star") -validator_client_shared = import_module("./shared.star") +vc_shared = import_module("./shared.star") def get_config( @@ -8,14 +8,15 @@ def get_config( image, keymanager_file, beacon_http_url, - cl_client_context, - el_client_context, + cl_context, + el_context, node_keystore_files, - v_min_cpu, - v_max_cpu, - v_min_mem, - v_max_mem, + vc_min_cpu, + vc_max_cpu, + vc_min_mem, + vc_max_mem, extra_params, + extra_env_vars, extra_labels, tolerations, node_selectors, @@ -24,11 +25,11 @@ def get_config( validator_secrets_dirpath = "" if node_keystore_files != None: 
validator_keys_dirpath = shared_utils.path_join( - validator_client_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT, + vc_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT, node_keystore_files.nimbus_keys_relative_dirpath, ) validator_secrets_dirpath = shared_utils.path_join( - validator_client_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT, + vc_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT, node_keystore_files.raw_secrets_relative_dirpath, ) @@ -38,20 +39,15 @@ def get_config( "--secrets-dir=" + validator_secrets_dirpath, "--suggested-fee-recipient=" + constants.VALIDATING_REWARDS_ACCOUNT, "--keymanager", - "--keymanager-port={0}".format(validator_client_shared.VALIDATOR_HTTP_PORT_NUM), + "--keymanager-port={0}".format(vc_shared.VALIDATOR_HTTP_PORT_NUM), "--keymanager-address=0.0.0.0", "--keymanager-allow-origin=*", "--keymanager-token-file=" + constants.KEYMANAGER_MOUNT_PATH_ON_CONTAINER, # vvvvvvvvvvvvvvvvvvv METRICS CONFIG vvvvvvvvvvvvvvvvvvvvv "--metrics", "--metrics-address=0.0.0.0", - "--metrics-port={0}".format( - validator_client_shared.VALIDATOR_CLIENT_METRICS_PORT_NUM - ), - "--graffiti=" - + cl_client_context.client_name - + "-" - + el_client_context.client_name, + "--metrics-port={0}".format(vc_shared.VALIDATOR_CLIENT_METRICS_PORT_NUM), + "--graffiti=" + cl_context.client_name + "-" + el_context.client_name, ] if len(extra_params) > 0: @@ -59,25 +55,26 @@ def get_config( cmd.extend([param for param in extra_params]) files = { - validator_client_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT: node_keystore_files.files_artifact_uuid, + vc_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT: node_keystore_files.files_artifact_uuid, constants.KEYMANAGER_MOUNT_PATH_ON_CLIENTS: keymanager_file, } return ServiceConfig( image=image, - ports=validator_client_shared.VALIDATOR_CLIENT_USED_PORTS, + ports=vc_shared.VALIDATOR_CLIENT_USED_PORTS, cmd=cmd, + env_vars=extra_env_vars, files=files, - private_ip_address_placeholder=validator_client_shared.PRIVATE_IP_ADDRESS_PLACEHOLDER, - min_cpu=v_min_cpu, - 
max_cpu=v_max_cpu, - min_memory=v_min_mem, - max_memory=v_max_mem, + private_ip_address_placeholder=vc_shared.PRIVATE_IP_ADDRESS_PLACEHOLDER, + min_cpu=vc_min_cpu, + max_cpu=vc_max_cpu, + min_memory=vc_min_mem, + max_memory=vc_max_mem, labels=shared_utils.label_maker( - constants.VC_CLIENT_TYPE.nimbus, + constants.VC_TYPE.nimbus, constants.CLIENT_TYPES.validator, image, - cl_client_context.client_name, + cl_context.client_name, extra_labels, ), user=User(uid=0, gid=0), diff --git a/src/validator_client/prysm.star b/src/vc/prysm.star similarity index 67% rename from src/validator_client/prysm.star rename to src/vc/prysm.star index e5ed58e4e..a29462685 100644 --- a/src/validator_client/prysm.star +++ b/src/vc/prysm.star @@ -1,6 +1,6 @@ constants = import_module("../package_io/constants.star") shared_utils = import_module("../shared_utils/shared_utils.star") -validator_client_shared = import_module("./shared.star") +vc_shared = import_module("./shared.star") PRYSM_PASSWORD_MOUNT_DIRPATH_ON_SERVICE_CONTAINER = "/prysm-password" PRYSM_BEACON_RPC_PORT = 4000 @@ -10,14 +10,15 @@ def get_config( el_cl_genesis_data, image, beacon_http_url, - cl_client_context, - el_client_context, + cl_context, + el_context, node_keystore_files, - v_min_cpu, - v_max_cpu, - v_min_mem, - v_max_mem, + vc_min_cpu, + vc_max_cpu, + vc_min_mem, + vc_max_mem, extra_params, + extra_env_vars, extra_labels, prysm_password_relative_filepath, prysm_password_artifact_uuid, @@ -25,7 +26,7 @@ def get_config( node_selectors, ): validator_keys_dirpath = shared_utils.path_join( - validator_client_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT, + vc_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT, node_keystore_files.prysm_relative_dirpath, ) validator_secrets_dirpath = shared_utils.path_join( @@ -40,7 +41,7 @@ def get_config( + "/config.yaml", "--beacon-rpc-provider=" + "{}:{}".format( - cl_client_context.ip_addr, + cl_context.ip_addr, PRYSM_BEACON_RPC_PORT, ), "--beacon-rest-api-provider=" + beacon_http_url, @@ -48,19 
+49,14 @@ def get_config( "--wallet-password-file=" + validator_secrets_dirpath, "--suggested-fee-recipient=" + constants.VALIDATING_REWARDS_ACCOUNT, "--rpc", - "--rpc-port={0}".format(validator_client_shared.VALIDATOR_HTTP_PORT_NUM), + "--rpc-port={0}".format(vc_shared.VALIDATOR_HTTP_PORT_NUM), "--rpc-host=0.0.0.0", # vvvvvvvvvvvvvvvvvvv METRICS CONFIG vvvvvvvvvvvvvvvvvvvvv "--disable-monitoring=false", "--monitoring-host=0.0.0.0", - "--monitoring-port={0}".format( - validator_client_shared.VALIDATOR_CLIENT_METRICS_PORT_NUM - ), + "--monitoring-port={0}".format(vc_shared.VALIDATOR_CLIENT_METRICS_PORT_NUM), # ^^^^^^^^^^^^^^^^^^^ METRICS CONFIG ^^^^^^^^^^^^^^^^^^^^^ - "--graffiti=" - + cl_client_context.client_name - + "-" - + el_client_context.client_name, + "--graffiti=" + cl_context.client_name + "-" + el_context.client_name, ] if len(extra_params) > 0: @@ -69,25 +65,26 @@ def get_config( files = { constants.GENESIS_DATA_MOUNTPOINT_ON_CLIENTS: el_cl_genesis_data.files_artifact_uuid, - validator_client_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT: node_keystore_files.files_artifact_uuid, + vc_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT: node_keystore_files.files_artifact_uuid, PRYSM_PASSWORD_MOUNT_DIRPATH_ON_SERVICE_CONTAINER: prysm_password_artifact_uuid, } return ServiceConfig( image=image, - ports=validator_client_shared.VALIDATOR_CLIENT_USED_PORTS, + ports=vc_shared.VALIDATOR_CLIENT_USED_PORTS, cmd=cmd, + env_vars=extra_env_vars, files=files, - private_ip_address_placeholder=validator_client_shared.PRIVATE_IP_ADDRESS_PLACEHOLDER, - min_cpu=v_min_cpu, - max_cpu=v_max_cpu, - min_memory=v_min_mem, - max_memory=v_max_mem, + private_ip_address_placeholder=vc_shared.PRIVATE_IP_ADDRESS_PLACEHOLDER, + min_cpu=vc_min_cpu, + max_cpu=vc_max_cpu, + min_memory=vc_min_mem, + max_memory=vc_max_mem, labels=shared_utils.label_maker( - constants.VC_CLIENT_TYPE.prysm, + constants.VC_TYPE.prysm, constants.CLIENT_TYPES.validator, image, - cl_client_context.client_name, + 
cl_context.client_name, extra_labels, ), tolerations=tolerations, diff --git a/src/validator_client/shared.star b/src/vc/shared.star similarity index 100% rename from src/validator_client/shared.star rename to src/vc/shared.star diff --git a/src/validator_client/teku.star b/src/vc/teku.star similarity index 69% rename from src/validator_client/teku.star rename to src/vc/teku.star index 2eb69134a..2e3615e51 100644 --- a/src/validator_client/teku.star +++ b/src/vc/teku.star @@ -1,6 +1,6 @@ constants = import_module("../package_io/constants.star") shared_utils = import_module("../shared_utils/shared_utils.star") -validator_client_shared = import_module("./shared.star") +vc_shared = import_module("./shared.star") def get_config( @@ -9,14 +9,15 @@ def get_config( keymanager_p12_file, image, beacon_http_url, - cl_client_context, - el_client_context, + cl_context, + el_context, node_keystore_files, - v_min_cpu, - v_max_cpu, - v_min_mem, - v_max_mem, + vc_min_cpu, + vc_max_cpu, + vc_min_mem, + vc_max_mem, extra_params, + extra_env_vars, extra_labels, tolerations, node_selectors, @@ -25,11 +26,11 @@ def get_config( validator_secrets_dirpath = "" if node_keystore_files != None: validator_keys_dirpath = shared_utils.path_join( - validator_client_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT, + vc_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT, node_keystore_files.teku_keys_relative_dirpath, ) validator_secrets_dirpath = shared_utils.path_join( - validator_client_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT, + vc_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT, node_keystore_files.teku_secrets_relative_dirpath, ) @@ -46,14 +47,12 @@ def get_config( "--validators-proposer-default-fee-recipient=" + constants.VALIDATING_REWARDS_ACCOUNT, "--validators-graffiti=" - + cl_client_context.client_name + + cl_context.client_name + "-" - + el_client_context.client_name, + + el_context.client_name, "--validator-api-enabled=true", "--validator-api-host-allowlist=*", - "--validator-api-port={0}".format( - 
validator_client_shared.VALIDATOR_HTTP_PORT_NUM - ), + "--validator-api-port={0}".format(vc_shared.VALIDATOR_HTTP_PORT_NUM), "--validator-api-interface=0.0.0.0", "--validator-api-keystore-file=" + constants.KEYMANAGER_P12_MOUNT_PATH_ON_CONTAINER, @@ -63,9 +62,7 @@ def get_config( "--metrics-enabled=true", "--metrics-host-allowlist=*", "--metrics-interface=0.0.0.0", - "--metrics-port={0}".format( - validator_client_shared.VALIDATOR_CLIENT_METRICS_PORT_NUM - ), + "--metrics-port={0}".format(vc_shared.VALIDATOR_CLIENT_METRICS_PORT_NUM), ] if len(extra_params) > 0: @@ -74,26 +71,27 @@ def get_config( files = { constants.GENESIS_DATA_MOUNTPOINT_ON_CLIENTS: el_cl_genesis_data.files_artifact_uuid, - validator_client_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT: node_keystore_files.files_artifact_uuid, + vc_shared.VALIDATOR_CLIENT_KEYS_MOUNTPOINT: node_keystore_files.files_artifact_uuid, constants.KEYMANAGER_MOUNT_PATH_ON_CLIENTS: keymanager_file, constants.KEYMANAGER_P12_MOUNT_PATH_ON_CLIENTS: keymanager_p12_file, } return ServiceConfig( image=image, - ports=validator_client_shared.VALIDATOR_CLIENT_USED_PORTS, + ports=vc_shared.VALIDATOR_CLIENT_USED_PORTS, cmd=cmd, + env_vars=extra_env_vars, files=files, - private_ip_address_placeholder=validator_client_shared.PRIVATE_IP_ADDRESS_PLACEHOLDER, - min_cpu=v_min_cpu, - max_cpu=v_max_cpu, - min_memory=v_min_mem, - max_memory=v_max_mem, + private_ip_address_placeholder=vc_shared.PRIVATE_IP_ADDRESS_PLACEHOLDER, + min_cpu=vc_min_cpu, + max_cpu=vc_max_cpu, + min_memory=vc_min_mem, + max_memory=vc_max_mem, labels=shared_utils.label_maker( - constants.VC_CLIENT_TYPE.teku, + constants.VC_TYPE.teku, constants.CLIENT_TYPES.validator, image, - cl_client_context.client_name, + cl_context.client_name, extra_labels, ), tolerations=tolerations, diff --git a/src/validator_client/validator_client_context.star b/src/vc/vc_context.star similarity index 84% rename from src/validator_client/validator_client_context.star rename to src/vc/vc_context.star 
index 07939582c..231a4258d 100644 --- a/src/validator_client/validator_client_context.star +++ b/src/vc/vc_context.star @@ -1,4 +1,4 @@ -def new_validator_client_context( +def new_vc_context( service_name, client_name, metrics_info, diff --git a/src/validator_client/validator_client_launcher.star b/src/vc/vc_launcher.star similarity index 60% rename from src/validator_client/validator_client_launcher.star rename to src/vc/vc_launcher.star index 0e9ab69cb..213c83655 100644 --- a/src/validator_client/validator_client_launcher.star +++ b/src/vc/vc_launcher.star @@ -1,14 +1,14 @@ input_parser = import_module("../package_io/input_parser.star") constants = import_module("../package_io/constants.star") node_metrics = import_module("../node_metrics_info.star") -validator_client_context = import_module("./validator_client_context.star") +vc_context = import_module("./vc_context.star") lighthouse = import_module("./lighthouse.star") lodestar = import_module("./lodestar.star") nimbus = import_module("./nimbus.star") prysm = import_module("./prysm.star") teku = import_module("./teku.star") -validator_client_shared = import_module("./shared.star") +vc_shared = import_module("./shared.star") # The defaults for min/max CPU/memory that the validator client can use MIN_CPU = 50 @@ -23,22 +23,23 @@ def launch( keymanager_file, keymanager_p12_file, service_name, - validator_client_type, + vc_type, image, participant_log_level, global_log_level, - cl_client_context, - el_client_context, + cl_context, + el_context, node_keystore_files, - v_min_cpu, - v_max_cpu, - v_min_mem, - v_max_mem, + vc_min_cpu, + vc_max_cpu, + vc_min_mem, + vc_max_mem, extra_params, + extra_env_vars, extra_labels, prysm_password_relative_filepath, prysm_password_artifact_uuid, - validator_tolerations, + vc_tolerations, participant_tolerations, global_tolerations, node_selectors, @@ -49,113 +50,121 @@ def launch( return None tolerations = input_parser.get_client_tolerations( - validator_tolerations, 
participant_tolerations, global_tolerations + vc_tolerations, participant_tolerations, global_tolerations ) beacon_http_url = "http://{}:{}".format( - cl_client_context.ip_addr, - cl_client_context.http_port_num, + cl_context.ip_addr, + cl_context.http_port_num, ) - v_min_cpu = int(v_min_cpu) if int(v_min_cpu) > 0 else MIN_CPU - v_max_cpu = int(v_max_cpu) if int(v_max_cpu) > 0 else MAX_CPU - v_min_mem = int(v_min_mem) if int(v_min_mem) > 0 else MIN_MEMORY - v_max_mem = int(v_max_mem) if int(v_max_mem) > 0 else MAX_MEMORY + vc_min_cpu = int(vc_min_cpu) if int(vc_min_cpu) > 0 else MIN_CPU + vc_max_cpu = int(vc_max_cpu) if int(vc_max_cpu) > 0 else MAX_CPU + vc_min_mem = int(vc_min_mem) if int(vc_min_mem) > 0 else MIN_MEMORY + vc_max_mem = int(vc_max_mem) if int(vc_max_mem) > 0 else MAX_MEMORY - if validator_client_type == constants.VC_CLIENT_TYPE.lighthouse: + if vc_type == constants.VC_TYPE.lighthouse: config = lighthouse.get_config( el_cl_genesis_data=launcher.el_cl_genesis_data, image=image, participant_log_level=participant_log_level, global_log_level=global_log_level, beacon_http_url=beacon_http_url, - cl_client_context=cl_client_context, - el_client_context=el_client_context, + cl_context=cl_context, + el_context=el_context, node_keystore_files=node_keystore_files, - v_min_cpu=v_min_cpu, - v_max_cpu=v_max_cpu, - v_min_mem=v_min_mem, - v_max_mem=v_max_mem, + vc_min_cpu=vc_min_cpu, + vc_max_cpu=vc_max_cpu, + vc_min_mem=vc_min_mem, + vc_max_mem=vc_max_mem, extra_params=extra_params, + extra_env_vars=extra_env_vars, extra_labels=extra_labels, tolerations=tolerations, node_selectors=node_selectors, network=network, # TODO: remove when deneb rebase is done electra_fork_epoch=electra_fork_epoch, # TODO: remove when deneb rebase is done ) - elif validator_client_type == constants.VC_CLIENT_TYPE.lodestar: + elif vc_type == constants.VC_TYPE.lodestar: config = lodestar.get_config( el_cl_genesis_data=launcher.el_cl_genesis_data, image=image, 
participant_log_level=participant_log_level, global_log_level=global_log_level, beacon_http_url=beacon_http_url, - cl_client_context=cl_client_context, - el_client_context=el_client_context, + cl_context=cl_context, + el_context=el_context, node_keystore_files=node_keystore_files, - v_min_cpu=v_min_cpu, - v_max_cpu=v_max_cpu, - v_min_mem=v_min_mem, - v_max_mem=v_max_mem, + vc_min_cpu=vc_min_cpu, + vc_max_cpu=vc_max_cpu, + vc_min_mem=vc_min_mem, + vc_max_mem=vc_max_mem, extra_params=extra_params, + extra_env_vars=extra_env_vars, extra_labels=extra_labels, tolerations=tolerations, node_selectors=node_selectors, ) - elif validator_client_type == constants.VC_CLIENT_TYPE.teku: + elif vc_type == constants.VC_TYPE.teku: config = teku.get_config( el_cl_genesis_data=launcher.el_cl_genesis_data, keymanager_file=keymanager_file, keymanager_p12_file=keymanager_p12_file, image=image, beacon_http_url=beacon_http_url, - cl_client_context=cl_client_context, - el_client_context=el_client_context, + cl_context=cl_context, + el_context=el_context, node_keystore_files=node_keystore_files, - v_min_cpu=v_min_cpu, - v_max_cpu=v_max_cpu, - v_min_mem=v_min_mem, - v_max_mem=v_max_mem, + vc_min_cpu=vc_min_cpu, + vc_max_cpu=vc_max_cpu, + vc_min_mem=vc_min_mem, + vc_max_mem=vc_max_mem, extra_params=extra_params, + extra_env_vars=extra_env_vars, extra_labels=extra_labels, tolerations=tolerations, node_selectors=node_selectors, ) - elif validator_client_type == constants.VC_CLIENT_TYPE.nimbus: + elif vc_type == constants.VC_TYPE.nimbus: config = nimbus.get_config( el_cl_genesis_data=launcher.el_cl_genesis_data, keymanager_file=keymanager_file, image=image, beacon_http_url=beacon_http_url, - cl_client_context=cl_client_context, - el_client_context=el_client_context, + cl_context=cl_context, + el_context=el_context, node_keystore_files=node_keystore_files, - v_min_cpu=v_min_cpu, - v_max_cpu=v_max_cpu, - v_min_mem=v_min_mem, - v_max_mem=v_max_mem, + vc_min_cpu=vc_min_cpu, + vc_max_cpu=vc_max_cpu, 
+ vc_min_mem=vc_min_mem, + vc_max_mem=vc_max_mem, extra_params=extra_params, + extra_env_vars=extra_env_vars, extra_labels=extra_labels, tolerations=tolerations, node_selectors=node_selectors, ) - elif validator_client_type == constants.VC_CLIENT_TYPE.prysm: + elif vc_type == constants.VC_TYPE.prysm: # Prysm VC only works with Prysm beacon node right now - if cl_client_context.client_name != constants.CL_CLIENT_TYPE.prysm: - fail("Prysm VC is only compatible with Prysm beacon node") + if cl_context.client_name != constants.CL_TYPE.prysm: + fail( + cl_context.client_name + + "Prysm VC is only compatible with Prysm beacon node" + ) config = prysm.get_config( el_cl_genesis_data=launcher.el_cl_genesis_data, image=image, beacon_http_url=beacon_http_url, - cl_client_context=cl_client_context, - el_client_context=el_client_context, + cl_context=cl_context, + el_context=el_context, node_keystore_files=node_keystore_files, - v_min_cpu=v_min_cpu, - v_max_cpu=v_max_cpu, - v_min_mem=v_min_mem, - v_max_mem=v_max_mem, + vc_min_cpu=vc_min_cpu, + vc_max_cpu=vc_max_cpu, + vc_min_mem=vc_min_mem, + vc_max_mem=vc_max_mem, extra_params=extra_params, + extra_env_vars=extra_env_vars, extra_labels=extra_labels, prysm_password_relative_filepath=prysm_password_relative_filepath, prysm_password_artifact_uuid=prysm_password_artifact_uuid, @@ -163,30 +172,28 @@ def launch( node_selectors=node_selectors, ) else: - fail("Unsupported validator_client_type: {0}".format(validator_client_type)) + fail("Unsupported vc_type: {0}".format(vc_type)) validator_service = plan.add_service(service_name, config) validator_metrics_port = validator_service.ports[ - validator_client_shared.VALIDATOR_CLIENT_METRICS_PORT_ID + vc_shared.VALIDATOR_CLIENT_METRICS_PORT_ID ] validator_metrics_url = "{0}:{1}".format( validator_service.ip_address, validator_metrics_port.number ) validator_node_metrics_info = node_metrics.new_node_metrics_info( - service_name, validator_client_shared.METRICS_PATH, validator_metrics_url + 
service_name, vc_shared.METRICS_PATH, validator_metrics_url ) - validator_http_port = validator_service.ports[ - validator_client_shared.VALIDATOR_HTTP_PORT_ID - ] + validator_http_port = validator_service.ports[vc_shared.VALIDATOR_HTTP_PORT_ID] - return validator_client_context.new_validator_client_context( + return vc_context.new_vc_context( service_name=service_name, - client_name=validator_client_type, + client_name=vc_type, metrics_info=validator_node_metrics_info, ) -def new_validator_client_launcher(el_cl_genesis_data): +def new_vc_launcher(el_cl_genesis_data): return struct(el_cl_genesis_data=el_cl_genesis_data) diff --git a/src/xatu_sentry/xatu_sentry_launcher.star b/src/xatu_sentry/xatu_sentry_launcher.star index 39077ec37..c0964ee4b 100644 --- a/src/xatu_sentry/xatu_sentry_launcher.star +++ b/src/xatu_sentry/xatu_sentry_launcher.star @@ -18,7 +18,7 @@ MAX_MEMORY = 1024 def launch( plan, xatu_sentry_service_name, - cl_client_context, + cl_context, xatu_sentry_params, network_params, pair_name, @@ -30,8 +30,8 @@ def launch( str(METRICS_PORT_NUMBER), pair_name, "http://{}:{}".format( - cl_client_context.ip_addr, - cl_client_context.http_port_num, + cl_context.ip_addr, + cl_context.http_port_num, ), xatu_sentry_params.xatu_server_addr, network_params.network, From a96953d05da156006ec82374e9a3fbfabf85635e Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 8 Mar 2024 12:29:43 +0100 Subject: [PATCH 33/33] chore(main): release 2.0.0 (#493) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit :robot: I have created a release *beep* *boop* --- ## [2.0.0](https://github.com/kurtosis-tech/ethereum-package/compare/1.4.0...2.0.0) (2024-03-08) ### ⚠ BREAKING CHANGES * participant_network & rename participant fields. 
([#508](https://github.com/kurtosis-tech/ethereum-package/issues/508)) * add node selectors features ([#491](https://github.com/kurtosis-tech/ethereum-package/issues/491)) ### Features * add keymanager to all validator processes ([#502](https://github.com/kurtosis-tech/ethereum-package/issues/502)) ([836eda4](https://github.com/kurtosis-tech/ethereum-package/commit/836eda4eed3776dd406d354343655c0ff8b9d2b6)) * add nimbus-eth1 ([#496](https://github.com/kurtosis-tech/ethereum-package/issues/496)) ([d599729](https://github.com/kurtosis-tech/ethereum-package/commit/d599729295aa3274d23e4e8e99b56288cde3fc04)) * add node selectors features ([#491](https://github.com/kurtosis-tech/ethereum-package/issues/491)) ([316d42f](https://github.com/kurtosis-tech/ethereum-package/commit/316d42fbaeb2d7bc1d580823a6c70b1c2dfe3746)) * allow more detailed additional test configurations in assertoor_params ([#498](https://github.com/kurtosis-tech/ethereum-package/issues/498)) ([fe2de7e](https://github.com/kurtosis-tech/ethereum-package/commit/fe2de7e5a5e2446ebb0a0b191f5aa6783e132426)) * enable api in assertoor config ([#495](https://github.com/kurtosis-tech/ethereum-package/issues/495)) ([9ceae9c](https://github.com/kurtosis-tech/ethereum-package/commit/9ceae9c74405db4e1ab6e02de541577d078434ae)) * enable dencun-genesis ([#500](https://github.com/kurtosis-tech/ethereum-package/issues/500)) ([beb764f](https://github.com/kurtosis-tech/ethereum-package/commit/beb764fb9a18fcb09cb7d3d9ee48e4826595512d)) * make snapshot url configurable ([#507](https://github.com/kurtosis-tech/ethereum-package/issues/507)) ([6fa0475](https://github.com/kurtosis-tech/ethereum-package/commit/6fa04751cd1277a4870dc45144e15ffa5d637b93)) * parameterize mev-boost args ([#400](https://github.com/kurtosis-tech/ethereum-package/issues/400)) ([e48483a](https://github.com/kurtosis-tech/ethereum-package/commit/e48483a130ba227dafd0d0fd9ee66c6cecc3bfce)) * separate validator clients from CL clients 
([#497](https://github.com/kurtosis-tech/ethereum-package/issues/497)) ([90da2c3](https://github.com/kurtosis-tech/ethereum-package/commit/90da2c33a77b4a0ac620ae665899963256a1ae0a)) ### Bug Fixes * fix end index in validator ranges file ([#509](https://github.com/kurtosis-tech/ethereum-package/issues/509)) ([da55be8](https://github.com/kurtosis-tech/ethereum-package/commit/da55be84861e93ce777076e545abee35ff2d51ce)) * lh vc flag logic ([#506](https://github.com/kurtosis-tech/ethereum-package/issues/506)) ([bc5e725](https://github.com/kurtosis-tech/ethereum-package/commit/bc5e725edf8c917d409e6de6ce838797ad166173)) * nimbus-eth1 advertise proper extip ([#501](https://github.com/kurtosis-tech/ethereum-package/issues/501)) ([1d5a779](https://github.com/kurtosis-tech/ethereum-package/commit/1d5a7792c8175d1fc85e424b5ddf60baec551821)) * README global node selector ([#504](https://github.com/kurtosis-tech/ethereum-package/issues/504)) ([f9343a2](https://github.com/kurtosis-tech/ethereum-package/commit/f9343a2914456196e1209336c426b6ad44958428)) * use the cl as the default validator image if none are defined ([#503](https://github.com/kurtosis-tech/ethereum-package/issues/503)) ([181dd04](https://github.com/kurtosis-tech/ethereum-package/commit/181dd04c2db17c58cb9370b0d24e12e4c191a13d)) ### Code Refactoring * participant_network & rename participant fields. ([#508](https://github.com/kurtosis-tech/ethereum-package/issues/508)) ([fab341b](https://github.com/kurtosis-tech/ethereum-package/commit/fab341b158329b9e8c2b590dc63127dfd1d2495f)) --- This PR was generated with [Release Please](https://github.com/googleapis/release-please). See [documentation](https://github.com/googleapis/release-please#release-please). 
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- CHANGELOG.md | 34 ++++++++++++++++++++++++++++++++++ version.txt | 2 +- 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9211c8b72..f0807a983 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,39 @@ # Changelog +## [2.0.0](https://github.com/kurtosis-tech/ethereum-package/compare/1.4.0...2.0.0) (2024-03-08) + + +### ⚠ BREAKING CHANGES + +* participant_network & rename participant fields. ([#508](https://github.com/kurtosis-tech/ethereum-package/issues/508)) +* add node selectors features ([#491](https://github.com/kurtosis-tech/ethereum-package/issues/491)) + +### Features + +* add keymanager to all validator processes ([#502](https://github.com/kurtosis-tech/ethereum-package/issues/502)) ([836eda4](https://github.com/kurtosis-tech/ethereum-package/commit/836eda4eed3776dd406d354343655c0ff8b9d2b6)) +* add nimbus-eth1 ([#496](https://github.com/kurtosis-tech/ethereum-package/issues/496)) ([d599729](https://github.com/kurtosis-tech/ethereum-package/commit/d599729295aa3274d23e4e8e99b56288cde3fc04)) +* add node selectors features ([#491](https://github.com/kurtosis-tech/ethereum-package/issues/491)) ([316d42f](https://github.com/kurtosis-tech/ethereum-package/commit/316d42fbaeb2d7bc1d580823a6c70b1c2dfe3746)) +* allow more detailed additional test configurations in assertoor_params ([#498](https://github.com/kurtosis-tech/ethereum-package/issues/498)) ([fe2de7e](https://github.com/kurtosis-tech/ethereum-package/commit/fe2de7e5a5e2446ebb0a0b191f5aa6783e132426)) +* enable api in assertoor config ([#495](https://github.com/kurtosis-tech/ethereum-package/issues/495)) ([9ceae9c](https://github.com/kurtosis-tech/ethereum-package/commit/9ceae9c74405db4e1ab6e02de541577d078434ae)) +* enable dencun-genesis ([#500](https://github.com/kurtosis-tech/ethereum-package/issues/500)) 
([beb764f](https://github.com/kurtosis-tech/ethereum-package/commit/beb764fb9a18fcb09cb7d3d9ee48e4826595512d)) +* make snapshot url configurable ([#507](https://github.com/kurtosis-tech/ethereum-package/issues/507)) ([6fa0475](https://github.com/kurtosis-tech/ethereum-package/commit/6fa04751cd1277a4870dc45144e15ffa5d637b93)) +* parameterize mev-boost args ([#400](https://github.com/kurtosis-tech/ethereum-package/issues/400)) ([e48483a](https://github.com/kurtosis-tech/ethereum-package/commit/e48483a130ba227dafd0d0fd9ee66c6cecc3bfce)) +* separate validator clients from CL clients ([#497](https://github.com/kurtosis-tech/ethereum-package/issues/497)) ([90da2c3](https://github.com/kurtosis-tech/ethereum-package/commit/90da2c33a77b4a0ac620ae665899963256a1ae0a)) + + +### Bug Fixes + +* fix end index in validator ranges file ([#509](https://github.com/kurtosis-tech/ethereum-package/issues/509)) ([da55be8](https://github.com/kurtosis-tech/ethereum-package/commit/da55be84861e93ce777076e545abee35ff2d51ce)) +* lh vc flag logic ([#506](https://github.com/kurtosis-tech/ethereum-package/issues/506)) ([bc5e725](https://github.com/kurtosis-tech/ethereum-package/commit/bc5e725edf8c917d409e6de6ce838797ad166173)) +* nimbus-eth1 advertise proper extip ([#501](https://github.com/kurtosis-tech/ethereum-package/issues/501)) ([1d5a779](https://github.com/kurtosis-tech/ethereum-package/commit/1d5a7792c8175d1fc85e424b5ddf60baec551821)) +* README global node selector ([#504](https://github.com/kurtosis-tech/ethereum-package/issues/504)) ([f9343a2](https://github.com/kurtosis-tech/ethereum-package/commit/f9343a2914456196e1209336c426b6ad44958428)) +* use the cl as the default validator image if none are defined ([#503](https://github.com/kurtosis-tech/ethereum-package/issues/503)) ([181dd04](https://github.com/kurtosis-tech/ethereum-package/commit/181dd04c2db17c58cb9370b0d24e12e4c191a13d)) + + +### Code Refactoring + +* participant_network & rename participant fields. 
([#508](https://github.com/kurtosis-tech/ethereum-package/issues/508)) ([fab341b](https://github.com/kurtosis-tech/ethereum-package/commit/fab341b158329b9e8c2b590dc63127dfd1d2495f)) + ## [1.4.0](https://github.com/kurtosis-tech/ethereum-package/compare/1.3.0...1.4.0) (2024-02-09) diff --git a/version.txt b/version.txt index 88c5fb891..227cea215 100644 --- a/version.txt +++ b/version.txt @@ -1 +1 @@ -1.4.0 +2.0.0