Mirror of https://github.com/ChainSafe/lodestar.git (synced 2026-01-08 23:28:10 -05:00)
Merge branch 'unstable' into nh/migrate-pnpm
@@ -1,7 +1,5 @@
 # We use these images during sim and e2e tests
-# This is the last version which supports pre/post merge chains in the same network
-# All newer versions only work with post merge chains
-GETH_DOCKER_IMAGE=ethereum/client-go:v1.16.2
+GETH_DOCKER_IMAGE=ethereum/client-go:v1.16.7
 # Use either image or local binary for the testing
 GETH_BINARY_DIR=
 LIGHTHOUSE_DOCKER_IMAGE=ethpandaops/lighthouse:unstable-d235f2c
58 .github/workflows/test-sim-merge.yml vendored
@@ -1,58 +0,0 @@
-name: Sim merge execution/builder tests
-
-concurrency:
-  # If PR, cancel prev commits. head_ref = source branch name on pull_request, null if push
-  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
-  cancel-in-progress: true
-
-on:
-  push:
-    # We intentionally don't run push on feature branches. See PR for rational.
-    branches: [unstable, stable]
-  pull_request:
-  workflow_dispatch:
-
-env:
-  GETH_IMAGE: ethereum/client-go:v1.10.25
-  NETHERMIND_IMAGE: nethermind/nethermind:1.14.3
-  MERGEMOCK_IMAGE: g11tech/mergemock:latest
-  GETH_WITHDRAWALS_IMAGE: g11tech/geth:withdrawalsfeb8
-  ETHEREUMJS_WITHDRAWALS_IMAGE: g11tech/ethereumjs:blobs-b6b63
-  NETHERMIND_WITHDRAWALS_IMAGE: nethermindeth/nethermind:withdrawals_yolo
-  ETHEREUMJS_BLOBS_IMAGE: g11tech/ethereumjs:blobs-b6b63
-
-jobs:
-  sim-merge-tests:
-    name: Sim merge tests
-    runs-on: buildjet-4vcpu-ubuntu-2204
-    steps:
-      - uses: actions/checkout@v4
-      - uses: "./.github/actions/setup-and-build"
-        with:
-          node: 24
-
-      - name: Pull Geth
-        run: docker pull $GETH_IMAGE
-
-      - name: Pull Nethermind
-        run: docker pull $NETHERMIND_IMAGE
-
-      - name: Pull mergemock
-        run: docker pull $MERGEMOCK_IMAGE
-
-      - name: Test Lodestar <> mergemock relay
-        run: pnpm test:sim:mergemock
-        working-directory: packages/beacon-node
-        env:
-          EL_BINARY_DIR: ${{ env.MERGEMOCK_IMAGE }}
-          EL_SCRIPT_DIR: mergemock
-          LODESTAR_PRESET: mainnet
-          ENGINE_PORT: 8551
-          ETH_PORT: 8661
-
-      - name: Upload debug log test files
-        if: ${{ always() }}
-        uses: actions/upload-artifact@v4
-        with:
-          name: debug-test-logs
-          path: packages/beacon-node/test-logs
@@ -273,7 +273,6 @@
     "**/packages/beacon-node/src/db/buckets.ts",
     "**/packages/beacon-node/src/execution/engine/mock.ts",
     "**/packages/beacon-node/src/execution/engine/types.ts",
-    "**/packages/beacon-node/src/eth1/provider/eth1Provider.ts",
     "**/packages/validator/src/buckets.ts",
     "**/packages/prover/src/types.ts",
     "**/prover/src/utils/process.ts",
@@ -212,19 +212,6 @@
           "range": true,
           "refId": "attestations"
         },
-        {
-          "datasource": {
-            "type": "prometheus",
-            "uid": "${DS_PROMETHEUS}"
-          },
-          "editorMode": "code",
-          "expr": "rate(beacon_block_production_execution_steps_seconds_sum{step=\"eth1DataAndDeposits\"}[$rate_interval])\n/\nrate(beacon_block_production_execution_steps_seconds_count{step=\"eth1DataAndDeposits\"}[$rate_interval])",
-          "hide": false,
-          "instant": false,
-          "legendFormat": "{{step}}",
-          "range": true,
-          "refId": "eth1DataAndDeposits"
-        },
         {
          "datasource": {
            "type": "prometheus",
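The deleted panel's query (like the surviving panels around it) uses the standard Prometheus idiom for mean step duration: the per-second rate of the `_seconds_sum` series divided by the rate of the matching `_seconds_count` series over the same window, so for a window $\Delta t$:

    \[
    \overline{T}_{\mathrm{step}}
      = \frac{\operatorname{rate}(\mathtt{\ldots\_seconds\_sum}[\Delta t])}
             {\operatorname{rate}(\mathtt{\ldots\_seconds\_count}[\Delta t])}
      = \frac{\text{seconds accumulated during } \Delta t}{\text{step executions during } \Delta t}
    \]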
@@ -388,19 +375,6 @@
           "range": true,
           "refId": "attestations"
         },
-        {
-          "datasource": {
-            "type": "prometheus",
-            "uid": "${DS_PROMETHEUS}"
-          },
-          "editorMode": "code",
-          "expr": "rate(beacon_block_production_builder_steps_seconds_sum{step=\"eth1DataAndDeposits\"}[$rate_interval])\n/\nrate(beacon_block_production_builder_steps_seconds_count{step=\"eth1DataAndDeposits\"}[$rate_interval])",
-          "hide": false,
-          "instant": false,
-          "legendFormat": "{{step}}",
-          "range": true,
-          "refId": "eth1DataAndDeposits"
-        },
         {
          "datasource": {
            "type": "prometheus",
File diff suppressed because it is too large
@@ -1825,18 +1825,6 @@
           "range": true,
           "refId": "A"
         },
-        {
-          "datasource": {
-            "type": "prometheus",
-            "uid": "${DS_PROMETHEUS}"
-          },
-          "exemplar": false,
-          "expr": "rate(lodestar_eth1_http_client_request_time_seconds_sum{routeId=\"getBlockNumber\"}[$rate_interval])\n/\nrate(lodestar_eth1_http_client_request_time_seconds_count{routeId=\"getBlockNumber\"}[$rate_interval])",
-          "hide": false,
-          "interval": "",
-          "legendFormat": "eth1_getBlockNumber_roundtrip",
-          "refId": "B"
-        },
         {
          "datasource": {
            "type": "prometheus",
@@ -101,4 +101,3 @@ To set up a local testnet with a Post-Merge configuration, you may need to add t
 
 - `--params.ALTAIR_FORK_EPOCH 0`
 - `--params.BELLATRIX_FORK_EPOCH 0`
-- `--terminal-total-difficulty-override 0`
@@ -2,7 +2,6 @@
   "packages": [
     "packages/*"
   ],
   "npmClient": "pnpm",
   "version": "1.37.0",
   "stream": true,
   "command": {
@@ -47,6 +47,7 @@
     "@chainsafe/biomejs-config": "^1.0.0",
+    "@lodestar/params": "workspace:",
     "@lerna-lite/cli": "^4.9.4",
     "@lerna-lite/exec": "^4.9.4",
     "@lerna-lite/publish": "^4.9.4",
     "@lerna-lite/run": "^4.9.4",
     "@lerna-lite/version": "^4.9.4",
@@ -11,7 +11,7 @@
   "bugs": {
     "url": "https://github.com/ChainSafe/lodestar/issues"
   },
-  "version": "1.37.0",
+  "version": "1.38.0",
   "type": "module",
   "exports": {
     ".": {
@@ -13,13 +13,6 @@ export {block, pool, state, rewards};
 
 export type {BlockHeaderResponse, BlockId} from "./block.js";
 export {BroadcastValidation} from "./block.js";
-export type {
-  AttestationsRewards,
-  BlockRewards,
-  IdealAttestationsReward,
-  SyncCommitteeRewards,
-  TotalAttestationsReward,
-} from "./rewards.js";
 // TODO: Review if re-exporting all these types is necessary
 export type {
   EpochCommitteeResponse,
@@ -2,6 +2,7 @@ import {ValueOf} from "@chainsafe/ssz";
 import {ChainForkConfig} from "@lodestar/config";
 import {ForkPostElectra, ForkPreElectra, isForkPostElectra} from "@lodestar/params";
 import {
+  ArrayOf,
   AttesterSlashing,
   CommitteeIndex,
   SingleAttestation,
@@ -12,7 +13,6 @@
   ssz,
 } from "@lodestar/types";
 import {
-  ArrayOf,
   EmptyArgs,
   EmptyMeta,
   EmptyMetaCodec,
@@ -1,112 +1,12 @@
-import {ContainerType, ValueOf} from "@chainsafe/ssz";
 import {ChainForkConfig} from "@lodestar/config";
-import {Epoch, ssz} from "@lodestar/types";
-import {ArrayOf, JsonOnlyReq} from "../../../utils/codecs.js";
+import {Epoch, rewards} from "@lodestar/types";
+import {JsonOnlyReq} from "../../../utils/codecs.js";
 import {Endpoint, RouteDefinitions, Schema} from "../../../utils/index.js";
 import {ExecutionOptimisticAndFinalizedCodec, ExecutionOptimisticAndFinalizedMeta} from "../../../utils/metadata.js";
 import {fromValidatorIdsStr, toValidatorIdsStr} from "../../../utils/serdes.js";
 import {BlockArgs} from "./block.js";
 import {ValidatorId} from "./state.js";
 
-const BlockRewardsType = new ContainerType(
-  {
-    /** Proposer of the block, the proposer index who receives these rewards */
-    proposerIndex: ssz.ValidatorIndex,
-    /** Total block reward, equal to attestations + sync_aggregate + proposer_slashings + attester_slashings */
-    total: ssz.UintNum64,
-    /** Block reward component due to included attestations */
-    attestations: ssz.UintNum64,
-    /** Block reward component due to included sync_aggregate */
-    syncAggregate: ssz.UintNum64,
-    /** Block reward component due to included proposer_slashings */
-    proposerSlashings: ssz.UintNum64,
-    /** Block reward component due to included attester_slashings */
-    attesterSlashings: ssz.UintNum64,
-  },
-  {jsonCase: "eth2"}
-);
-
-const AttestationsRewardType = new ContainerType(
-  {
-    /** Reward for head vote. Could be negative to indicate penalty */
-    head: ssz.UintNum64,
-    /** Reward for target vote. Could be negative to indicate penalty */
-    target: ssz.UintNum64,
-    /** Reward for source vote. Could be negative to indicate penalty */
-    source: ssz.UintNum64,
-    /** Inclusion delay reward (phase0 only) */
-    inclusionDelay: ssz.UintNum64,
-    /** Inactivity penalty. Should be a negative number to indicate penalty */
-    inactivity: ssz.UintNum64,
-  },
-  {jsonCase: "eth2"}
-);
-
-const IdealAttestationsRewardsType = new ContainerType(
-  {
-    ...AttestationsRewardType.fields,
-    effectiveBalance: ssz.UintNum64,
-  },
-  {jsonCase: "eth2"}
-);
-
-const TotalAttestationsRewardsType = new ContainerType(
-  {
-    ...AttestationsRewardType.fields,
-    validatorIndex: ssz.ValidatorIndex,
-  },
-  {jsonCase: "eth2"}
-);
-
-const AttestationsRewardsType = new ContainerType(
-  {
-    idealRewards: ArrayOf(IdealAttestationsRewardsType),
-    totalRewards: ArrayOf(TotalAttestationsRewardsType),
-  },
-  {jsonCase: "eth2"}
-);
-
-const SyncCommitteeRewardsType = ArrayOf(
-  new ContainerType(
-    {
-      validatorIndex: ssz.ValidatorIndex,
-      reward: ssz.UintNum64,
-    },
-    {jsonCase: "eth2"}
-  )
-);
-
-/**
- * Rewards info for a single block. Every reward value is in Gwei.
- */
-export type BlockRewards = ValueOf<typeof BlockRewardsType>;
-
-/**
- * Rewards for a single set of (ideal or actual depending on usage) attestations. Reward value is in Gwei
- */
-export type AttestationsReward = ValueOf<typeof AttestationsRewardType>;
-
-/**
- * Rewards info for ideal attestations ie. Maximum rewards could be earned by making timely head, target and source vote.
- * `effectiveBalance` is in Gwei
- */
-export type IdealAttestationsReward = ValueOf<typeof IdealAttestationsRewardsType>;
-
-/**
- * Rewards info for actual attestations
- */
-export type TotalAttestationsReward = ValueOf<typeof TotalAttestationsRewardsType>;
-
-export type AttestationsRewards = ValueOf<typeof AttestationsRewardsType>;
-
-/**
- * Rewards info for sync committee participation. Every reward value is in Gwei.
- * Note: In the case that block proposer is present in `SyncCommitteeRewards`, the reward value only reflects rewards for
- * participating in sync committee. Please refer to `BlockRewards.syncAggregate` for rewards of proposer including sync committee
- * outputs into their block
- */
-export type SyncCommitteeRewards = ValueOf<typeof SyncCommitteeRewardsType>;
-
 export type Endpoints = {
   /**
    * Get block rewards
@@ -116,7 +16,7 @@ export type Endpoints = {
     "GET",
     BlockArgs,
     {params: {block_id: string}},
-    BlockRewards,
+    rewards.BlockRewards,
     ExecutionOptimisticAndFinalizedMeta
   >;
 
@@ -133,7 +33,7 @@ export type Endpoints = {
      validatorIds?: ValidatorId[];
    },
    {params: {epoch: number}; body: string[]},
-    AttestationsRewards,
+    rewards.AttestationsRewards,
    ExecutionOptimisticAndFinalizedMeta
  >;
 
@@ -148,7 +48,7 @@ export type Endpoints = {
      validatorIds?: ValidatorId[];
    },
    {params: {block_id: string}; body: string[]},
-    SyncCommitteeRewards,
+    rewards.SyncCommitteeRewards,
    ExecutionOptimisticAndFinalizedMeta
  >;
 };
@@ -164,7 +64,7 @@ export function getDefinitions(_config: ChainForkConfig): RouteDefinitions<Endpo
       schema: {params: {block_id: Schema.StringRequired}},
     },
     resp: {
-      data: BlockRewardsType,
+      data: rewards.BlockRewardsType,
       meta: ExecutionOptimisticAndFinalizedCodec,
     },
   },
@@ -186,7 +86,7 @@ export function getDefinitions(_config: ChainForkConfig): RouteDefinitions<Endpo
      },
    }),
    resp: {
-      data: AttestationsRewardsType,
+      data: rewards.AttestationsRewardsType,
      meta: ExecutionOptimisticAndFinalizedCodec,
    },
  },
@@ -208,7 +108,7 @@ export function getDefinitions(_config: ChainForkConfig): RouteDefinitions<Endpo
      },
    }),
    resp: {
-      data: SyncCommitteeRewardsType,
+      data: rewards.SyncCommitteeRewardsType,
      meta: ExecutionOptimisticAndFinalizedCodec,
    },
  },
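After this refactor the reward SSZ containers and their value types live in `@lodestar/types` under a `rewards` namespace instead of being declared inline in the route file. A minimal consumer-side sketch, assuming the namespace re-exports the names visible in this diff (`BlockRewardsType`, `BlockRewards`):

    import {rewards} from "@lodestar/types";

    // The route definition points at the shared SSZ container...
    const respDataType = rewards.BlockRewardsType;

    // ...and handlers use the matching value type. Every reward value is in Gwei,
    // per the container's doc comments above.
    function logBlockReward(r: rewards.BlockRewards): void {
      console.log(`proposer ${r.proposerIndex} earned ${r.total} Gwei`, {
        attestations: r.attestations,
        syncAggregate: r.syncAggregate,
      });
    }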
@@ -2,6 +2,7 @@ import {ContainerType, ValueOf} from "@chainsafe/ssz";
 import {ChainForkConfig} from "@lodestar/config";
 import {MAX_VALIDATORS_PER_COMMITTEE} from "@lodestar/params";
 import {
+  ArrayOf,
   CommitteeIndex,
   Epoch,
   RootHex,
@@ -13,7 +14,7 @@
   phase0,
   ssz,
 } from "@lodestar/types";
-import {ArrayOf, JsonOnlyReq} from "../../../utils/codecs.js";
+import {JsonOnlyReq} from "../../../utils/codecs.js";
 import {Endpoint, RequestCodec, RouteDefinitions, Schema} from "../../../utils/index.js";
 import {
   ExecutionOptimisticAndFinalizedCodec,
@@ -1,8 +1,7 @@
 import {ContainerType, ValueOf} from "@chainsafe/ssz";
 import {ChainForkConfig, SpecJson} from "@lodestar/config";
-import {ssz} from "@lodestar/types";
+import {ArrayOf, ssz} from "@lodestar/types";
 import {
-  ArrayOf,
   EmptyArgs,
   EmptyMeta,
   EmptyMetaCodec,
@@ -1,8 +1,7 @@
 import {ContainerType, Type, ValueOf} from "@chainsafe/ssz";
 import {ChainForkConfig} from "@lodestar/config";
-import {BeaconState, StringType, fulu, ssz} from "@lodestar/types";
+import {ArrayOf, BeaconState, StringType, fulu, ssz} from "@lodestar/types";
 import {
-  ArrayOf,
   EmptyArgs,
   EmptyMeta,
   EmptyMetaCodec,
@@ -1,8 +1,7 @@
 import {ContainerType, Type, ValueOf} from "@chainsafe/ssz";
 import {ChainForkConfig} from "@lodestar/config";
-import {BeaconState, Epoch, RootHex, Slot, ssz} from "@lodestar/types";
+import {ArrayOf, BeaconState, Epoch, RootHex, Slot, ssz} from "@lodestar/types";
 import {
-  ArrayOf,
   EmptyArgs,
   EmptyMeta,
   EmptyRequest,
@@ -1,8 +1,7 @@
 import {ContainerType, OptionalType, ValueOf} from "@chainsafe/ssz";
 import {ChainForkConfig} from "@lodestar/config";
-import {StringType, fulu, ssz, stringType} from "@lodestar/types";
+import {ArrayOf, StringType, fulu, ssz, stringType} from "@lodestar/types";
 import {
-  ArrayOf,
   EmptyArgs,
   EmptyMeta,
   EmptyMetaCodec,
@@ -1,9 +1,8 @@
 import {CompactMultiProof, ProofType} from "@chainsafe/persistent-merkle-tree";
 import {ByteListType, ContainerType} from "@chainsafe/ssz";
 import {ChainForkConfig} from "@lodestar/config";
-import {ssz} from "@lodestar/types";
+import {ArrayOf, ssz} from "@lodestar/types";
 import {fromHex, toHex} from "@lodestar/utils";
-import {ArrayOf} from "../../utils/codecs.js";
 import {Endpoint, RouteDefinitions, Schema} from "../../utils/index.js";
 import {VersionCodec, VersionMeta} from "../../utils/metadata.js";
@@ -8,6 +8,7 @@ import {
   isForkPostElectra,
 } from "@lodestar/params";
 import {
+  ArrayOf,
   Attestation,
   BLSSignature,
   BeaconBlock,
@@ -28,7 +29,6 @@
 } from "@lodestar/types";
 import {fromHex, toHex, toRootHex} from "@lodestar/utils";
 import {
-  ArrayOf,
   EmptyMeta,
   EmptyMetaCodec,
   EmptyResponseCodec,
@@ -498,6 +498,10 @@ export type Endpoints = {
    * a validator client to correctly determine if one of its validators has been selected to
    * perform an aggregation duty in this slot.
    *
+   * Validator clients running in a distributed validator cluster must query this endpoint
+   * at the start of an epoch for the current and lookahead (next) epochs for all validators
+   * that have attester duties in the current and lookahead epochs.
+   *
    * Note that this endpoint is not implemented by the beacon node and will return a 501 error
    *
    * Returns an array of threshold aggregated beacon committee selection proofs
@@ -521,6 +525,9 @@
    * a validator client to correctly determine if one of its validators has been selected to
    * perform a sync committee contribution (sync aggregation) duty in this slot.
    *
+   * Validator clients running in a distributed validator cluster must query this endpoint
+   * at the start of each slot for all validators that are included in the current sync committee.
+   *
    * Note that this endpoint is not implemented by the beacon node and will return a 501 error
    *
    * Returns an array of threshold aggregated sync committee selection proofs
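The added comments describe when a distributed-validator client must fetch threshold-aggregated selection proofs. What the proof decides is the spec's `is_aggregator` check: hash the slot signature and test its first 8 bytes against a committee-size-dependent modulus. A self-contained sketch of that check (per the phase0 spec, `TARGET_AGGREGATORS_PER_COMMITTEE = 16`; Node's built-in sha256 stands in for the client's hasher):

    import {createHash} from "node:crypto";

    const TARGET_AGGREGATORS_PER_COMMITTEE = 16;

    // Mirrors the spec's is_aggregator(): a validator aggregates if the first 8 bytes
    // of hash(selection_proof), read as a little-endian uint64, are divisible by
    // max(1, committee_length / TARGET_AGGREGATORS_PER_COMMITTEE).
    function isAggregatorFromProof(committeeLength: number, selectionProof: Uint8Array): boolean {
      const modulo = Math.max(1, Math.floor(committeeLength / TARGET_AGGREGATORS_PER_COMMITTEE));
      const digest = createHash("sha256").update(selectionProof).digest();
      return digest.readBigUInt64LE(0) % BigInt(modulo) === 0n;
    }

This is why a DV validator client needs the threshold-aggregated proof back from the middleware: only the full (aggregated) signature hashes to the value the rest of the network will check.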
@@ -1,6 +1,7 @@
 import {ChainForkConfig} from "@lodestar/config";
 import {ForkName, VALIDATOR_REGISTRY_LIMIT, isForkPostDeneb} from "@lodestar/params";
 import {
+  ArrayOf,
   BLSPubkey,
   ExecutionPayload,
   ExecutionPayloadAndBlobsBundle,
@@ -14,7 +15,6 @@
 } from "@lodestar/types";
 import {fromHex, toPubkeyHex, toRootHex} from "@lodestar/utils";
 import {
-  ArrayOf,
   EmptyArgs,
   EmptyMeta,
   EmptyRequest,
@@ -1,4 +1,4 @@
-import {ArrayType, ListBasicType, ListCompositeType, Type, isBasicType, isCompositeType} from "@chainsafe/ssz";
+import {Type} from "@chainsafe/ssz";
 import {ForkName} from "@lodestar/params";
 import {objectToExpectedCase} from "@lodestar/utils";
 import {
@@ -68,16 +68,6 @@ export const EmptyResponseCodec: ResponseCodec<EmptyResponseEndpoint> = {
   isEmpty: true,
 };
 
-export function ArrayOf<T>(elementType: Type<T>, limit = Infinity): ArrayType<Type<T>, unknown, unknown> {
-  if (isCompositeType(elementType)) {
-    return new ListCompositeType(elementType, limit) as unknown as ArrayType<Type<T>, unknown, unknown>;
-  }
-  if (isBasicType(elementType)) {
-    return new ListBasicType(elementType, limit) as unknown as ArrayType<Type<T>, unknown, unknown>;
-  }
-  throw Error(`Unknown type ${elementType.typeName}`);
-}
-
 export function WithMeta<T, M extends {version: ForkName}>(getType: (m: M) => Type<T>): ResponseDataCodec<T, M> {
   return {
     toJson: (data, meta: M) => getType(meta).toJson(data),
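`ArrayOf` is the only symbol this file loses: as the import hunks above show, it now ships from `@lodestar/types` rather than `utils/codecs.js`, picking `ListCompositeType` or `ListBasicType` by element kind exactly as the deleted body did. A small usage sketch against the removed helper's signature (`(elementType, limit = Infinity)`), with element types that exist in `ssz`:

    import {ArrayOf, ssz} from "@lodestar/types";

    // Basic element type -> ListBasicType under the hood.
    const ValidatorIndicesType = ArrayOf(ssz.ValidatorIndex);

    // Composite element type -> ListCompositeType under the hood, with an explicit limit.
    const AttestationDataListType = ArrayOf(ssz.phase0.AttestationData, 128);

    // The result behaves like any SSZ type, e.g. for the routes' JSON codecs:
    const json = ValidatorIndicesType.toJson([1, 2, 3]);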
@@ -16,7 +16,6 @@ describe("beacon / config", () => {
     PRESET_BASE: "mainnet",
     DEPOSIT_CONTRACT_ADDRESS: "0xff50ed3d0ec03ac01d4c79aad74928bff48a7b2b",
     GENESIS_FORK_VERSION: "0x00001020",
-    TERMINAL_TOTAL_DIFFICULTY: "115792089237316195423570985008687907853269984665640564039457584007913129639936",
     MIN_GENESIS_TIME: "1606824000",
   };
 
@@ -11,7 +11,7 @@
   "bugs": {
     "url": "https://github.com/ChainSafe/lodestar/issues"
   },
-  "version": "1.37.0",
+  "version": "1.38.0",
   "type": "module",
   "exports": {
     ".": {
@@ -39,11 +39,6 @@
       "types": "./lib/db/index.d.ts",
       "import": "./lib/db/index.js"
     },
-    "./eth1": {
-      "bun": "./src/eth1/index.ts",
-      "types": "./lib/eth1/index.d.ts",
-      "import": "./lib/eth1/index.js"
-    },
     "./metrics": {
       "bun": "./src/metrics/index.ts",
       "types": "./lib/metrics/index.d.ts",
@@ -103,7 +98,6 @@
     "test:unit": "vitest run --project unit --project unit-minimal",
     "test:e2e": "vitest run --project e2e --project e2e-mainnet",
     "test:sim": "vitest run test/sim/**/*.test.ts",
-    "test:sim:mergemock": "vitest run test/sim/mergemock.test.ts",
     "test:sim:blobs": "vitest run test/sim/4844-interop.test.ts",
     "download-spec-tests": "node --loader=ts-node/esm test/spec/downloadTests.ts",
     "test:spec:bls": "vitest run --project spec-minimal test/spec/bls/",
@@ -127,7 +121,6 @@
     "@chainsafe/ssz": "^1.2.2",
     "@chainsafe/threads": "^1.11.3",
     "@crate-crypto/node-eth-kzg": "0.9.1",
-    "@ethersproject/abi": "^5.7.0",
     "@fastify/bearer-auth": "^10.0.1",
     "@fastify/cors": "^10.0.1",
     "@fastify/swagger": "^9.0.0",
@@ -5,7 +5,7 @@ import {blockToHeader} from "@lodestar/state-transition";
 import {RootHex, SignedBeaconBlock, Slot} from "@lodestar/types";
 import {IBeaconChain} from "../../../../chain/interface.js";
 import {GENESIS_SLOT} from "../../../../constants/index.js";
-import {rootHexRegex} from "../../../../eth1/provider/utils.js";
+import {rootHexRegex} from "../../../../execution/engine/utils.js";
 import {ApiError, ValidationError} from "../../errors.js";
 
 export function toBeaconHeaderResponse(
@@ -95,7 +95,7 @@ export function getBeaconStateApi({
       const {state, executionOptimistic, finalized} = await getState(stateId);
       const currentEpoch = getCurrentEpoch(state);
       const {validators, balances} = state; // Get the validators sub tree once for all the loop
-      const {pubkey2index} = chain.getHeadState().epochCtx;
+      const {pubkey2index} = chain;
 
       const validatorResponses: routes.beacon.ValidatorResponse[] = [];
       if (validatorIds.length) {
@@ -154,7 +154,7 @@ export function getBeaconStateApi({
 
     async postStateValidatorIdentities({stateId, validatorIds = []}) {
       const {state, executionOptimistic, finalized} = await getState(stateId);
-      const {pubkey2index} = chain.getHeadState().epochCtx;
+      const {pubkey2index} = chain;
 
       let validatorIdentities: routes.beacon.ValidatorIdentities;
 
@@ -187,7 +187,7 @@ export function getBeaconStateApi({
 
     async getStateValidator({stateId, validatorId}) {
       const {state, executionOptimistic, finalized} = await getState(stateId);
-      const {pubkey2index} = chain.getHeadState().epochCtx;
+      const {pubkey2index} = chain;
 
       const resp = getStateValidatorIndex(validatorId, state, pubkey2index);
       if (!resp.valid) {
@@ -212,10 +212,9 @@
       if (validatorIds.length) {
         assertUniqueItems(validatorIds, "Duplicate validator IDs provided");
 
-        const headState = chain.getHeadState();
         const balances: routes.beacon.ValidatorBalance[] = [];
         for (const id of validatorIds) {
-          const resp = getStateValidatorIndex(id, state, headState.epochCtx.pubkey2index);
+          const resp = getStateValidatorIndex(id, state, chain.pubkey2index);
 
           if (resp.valid) {
             balances.push({
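These hunks all make the same substitution: the pubkey-to-index map is read from a single chain-level cache rather than from the head state's epoch context on every call. A reduced sketch of the shape, narrowed to the fields visible in this diff (`chain.pubkey2index`, `get()` returning `null` on a miss):

    import {PubkeyIndexMap} from "@chainsafe/pubkey-index-map";

    interface ChainLike {
      // Global, append-only cache shared by all states (see the chain.ts hunks below).
      pubkey2index: PubkeyIndexMap;
      getHeadState(): {epochCtx: {pubkey2index: PubkeyIndexMap}};
    }

    function lookupIndex(chain: ChainLike, pubkey: Uint8Array): number | null {
      // Before: reach through the head state on every request.
      // const index = chain.getHeadState().epochCtx.pubkey2index.get(pubkey);
      // After: read the single chain-level cache directly.
      return chain.pubkey2index.get(pubkey);
    }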
@@ -1511,7 +1511,7 @@ export function getValidatorApi(
 
       const filteredRegistrations = registrations.filter((registration) => {
         const {pubkey} = registration.message;
-        const validatorIndex = headState.epochCtx.pubkey2index.get(pubkey);
+        const validatorIndex = chain.pubkey2index.get(pubkey);
         if (validatorIndex === null) return false;
 
         const validator = headState.validators.getReadonly(validatorIndex);
@@ -1,4 +1,3 @@
-import {ChainForkConfig} from "@lodestar/config";
 import {ExecutionStatus, ProtoBlock} from "@lodestar/fork-choice";
 import {ForkName, isForkPostFulu} from "@lodestar/params";
 import {
@@ -7,8 +6,7 @@ import {
   computeEpochAtSlot,
   isStateValidatorsNodesPopulated,
 } from "@lodestar/state-transition";
-import {IndexedAttestation, bellatrix, deneb} from "@lodestar/types";
-import {Logger, toRootHex} from "@lodestar/utils";
+import {IndexedAttestation, deneb} from "@lodestar/types";
 import type {BeaconChain} from "../chain.js";
 import {BlockError, BlockErrorCode} from "../errors/index.js";
 import {BlockProcessOpts} from "../options.js";
@@ -18,7 +16,6 @@ import {ImportBlockOpts} from "./types.js";
 import {DENEB_BLOWFISH_BANNER} from "./utils/blowfishBanner.js";
 import {ELECTRA_GIRAFFE_BANNER} from "./utils/giraffeBanner.js";
 import {CAPELLA_OWL_BANNER} from "./utils/ownBanner.js";
-import {POS_PANDA_MERGE_TRANSITION_BANNER} from "./utils/pandaMergeTransitionBanner.js";
 import {FULU_ZEBRA_BANNER} from "./utils/zebraBanner.js";
 import {verifyBlocksDataAvailability} from "./verifyBlocksDataAvailability.js";
 import {SegmentExecStatus, verifyBlocksExecutionPayload} from "./verifyBlocksExecutionPayloads.js";
@@ -103,7 +100,6 @@ export async function verifyBlocksInEpoch(
       : Promise.resolve({
           execAborted: null,
           executionStatuses: blocks.map((_blk) => ExecutionStatus.Syncing),
-          mergeBlockFound: null,
         } as SegmentExecStatus);
 
   // Store indexed attestations for each block to avoid recomputing them during import
@@ -143,6 +139,8 @@
     // All signatures at once
     opts.skipVerifyBlockSignatures !== true
       ? verifyBlocksSignatures(
+          this.config,
+          this.index2pubkey,
           this.bls,
           this.logger,
           this.metrics,
@@ -162,12 +160,6 @@
   ]);
 
   if (opts.verifyOnly !== true) {
-    if (segmentExecStatus.execAborted === null && segmentExecStatus.mergeBlockFound !== null) {
-      // merge block found and is fully valid = state transition + signatures + execution payload.
-      // TODO: Will this banner be logged during syncing?
-      logOnPowBlock(this.logger, this.config, segmentExecStatus.mergeBlockFound);
-    }
-
     const fromForkBoundary = this.config.getForkBoundaryAtEpoch(computeEpochAtSlot(parentBlock.slot));
     const toForkBoundary = this.config.getForkBoundaryAtEpoch(computeEpochAtSlot(lastBlock.message.slot));
 
@@ -250,16 +242,3 @@
     abortController.abort();
   }
 }
-
-function logOnPowBlock(logger: Logger, config: ChainForkConfig, mergeBlock: bellatrix.BeaconBlock): void {
-  const mergeBlockHash = toRootHex(config.getForkTypes(mergeBlock.slot).BeaconBlock.hashTreeRoot(mergeBlock));
-  const mergeExecutionHash = toRootHex(mergeBlock.body.executionPayload.blockHash);
-  const mergePowHash = toRootHex(mergeBlock.body.executionPayload.parentHash);
-  logger.info(POS_PANDA_MERGE_TRANSITION_BANNER);
-  logger.info("Execution transitioning from PoW to PoS!!!");
-  logger.info("Importing block referencing terminal PoW block", {
-    blockHash: mergeBlockHash,
-    executionHash: mergeExecutionHash,
-    powHash: mergePowHash,
-  });
-}
@@ -6,19 +6,16 @@
   LVHValidResponse,
   MaybeValidExecutionStatus,
   ProtoBlock,
-  assertValidTerminalPowBlock,
 } from "@lodestar/fork-choice";
-import {ForkSeq, SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY} from "@lodestar/params";
+import {ForkSeq} from "@lodestar/params";
 import {
   CachedBeaconStateAllForks,
   isExecutionBlockBodyType,
   isExecutionEnabled,
   isExecutionStateType,
-  isMergeTransitionBlock as isMergeTransitionBlockFn,
 } from "@lodestar/state-transition";
-import {Slot, bellatrix, electra} from "@lodestar/types";
+import {bellatrix, electra} from "@lodestar/types";
 import {ErrorAborted, Logger, toRootHex} from "@lodestar/utils";
-import {IEth1ForBlockProduction} from "../../eth1/index.js";
 import {ExecutionPayloadStatus, IExecutionEngine} from "../../execution/engine/interface.js";
 import {Metrics} from "../../metrics/metrics.js";
 import {IClock} from "../../util/clock.js";
@@ -29,7 +26,6 @@ import {IBlockInput} from "./blockInput/types.js";
 import {ImportBlockOpts} from "./types.js";
 
 export type VerifyBlockExecutionPayloadModules = {
-  eth1: IEth1ForBlockProduction;
   executionEngine: IExecutionEngine;
   clock: IClock;
   logger: Logger;
@@ -44,9 +40,8 @@ export type SegmentExecStatus =
       execAborted: null;
       executionStatuses: MaybeValidExecutionStatus[];
       executionTime: number;
-      mergeBlockFound: bellatrix.BeaconBlock | null;
     }
-  | {execAborted: ExecAbortType; invalidSegmentLVH?: LVHInvalidResponse; mergeBlockFound: null};
+  | {execAborted: ExecAbortType; invalidSegmentLVH?: LVHInvalidResponse};
 
 type VerifyExecutionErrorResponse =
   | {executionStatus: ExecutionStatus.Invalid; lvhResponse: LVHInvalidResponse; execError: BlockError}
@@ -72,7 +67,6 @@ export async function verifyBlocksExecutionPayload(
   opts: BlockProcessOpts & ImportBlockOpts
 ): Promise<SegmentExecStatus> {
   const executionStatuses: MaybeValidExecutionStatus[] = [];
-  let mergeBlockFound: bellatrix.BeaconBlock | null = null;
   const recvToValLatency = Date.now() / 1000 - (opts.seenTimestampSec ?? Date.now() / 1000);
   const lastBlock = blockInputs.at(-1);
 
@@ -96,57 +90,9 @@
   // will either validate or prune invalid blocks
   //
   // We need to track and keep updating if its safe to optimistically import these blocks.
   // The following is how we determine for a block if its safe:
   //
-  // (but we need to modify this check for this segment of blocks because it checks if the
-  // parent of any block imported in forkchoice is post-merge and currently we could only
-  // have blocks[0]'s parent imported in the chain as this is no longer one by one verify +
-  // import.)
-  //
-  //
-  // When to import such blocks:
-  // From: https://github.com/ethereum/consensus-specs/pull/2844
-  // A block MUST NOT be optimistically imported, unless either of the following
-  // conditions are met:
-  //
-  // 1. Parent of the block has execution
-  //
-  //    Since with the sync optimizations, the previous block might not have been in the
-  //    forkChoice yet, so the below check could fail for safeSlotsToImportOptimistically
-  //
-  //    Luckily, we can depend on the preState0 to see if we are already post merge w.r.t
-  //    the blocks we are importing.
-  //
-  //    Or in other words if
-  //    - block status is syncing
-  //    - and we are not in a post merge world and is parent is not optimistically safe
-  //    - and we are syncing close to the chain head i.e. clock slot
-  //    - and parent is optimistically safe
-  //
-  //    then throw error
-  //
-  //
-  //    - if we haven't yet imported a post merge ancestor in forkchoice i.e.
-  //    - and we are syncing close to the clockSlot, i.e. merge Transition could be underway
-  //
-  //
-  // 2. The current slot (as per the system clock) is at least
-  //    SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY ahead of the slot of the block being
-  //    imported.
-  //    This means that the merge transition could be underway and we can't afford to import
-  //    a block which is not fully validated as it could affect liveliness of the network.
-  //
-  //
-  // For this segment of blocks:
-  // We are optimistically safe with respect to this entire block segment if:
-  // - all the blocks are way behind the current slot
-  // - or we have already imported a post-merge parent of first block of this chain in forkchoice
-  const currentSlot = chain.clock.currentSlot;
-  const safeSlotsToImportOptimistically = opts.safeSlotsToImportOptimistically ?? SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY;
-  let isOptimisticallySafe =
-    parentBlock.executionStatus !== ExecutionStatus.PreMerge ||
-    lastBlock.slot + safeSlotsToImportOptimistically < currentSlot;
 
   for (let blockIndex = 0; blockIndex < blockInputs.length; blockIndex++) {
     const blockInput = blockInputs[blockIndex];
     // If blocks are invalid in consensus the main promise could resolve before this loop ends.
@@ -154,14 +100,7 @@
     if (signal.aborted) {
       throw new ErrorAborted("verifyBlockExecutionPayloads");
     }
-    const verifyResponse = await verifyBlockExecutionPayload(
-      chain,
-      blockInput,
-      preState0,
-      opts,
-      isOptimisticallySafe,
-      currentSlot
-    );
+    const verifyResponse = await verifyBlockExecutionPayload(chain, blockInput, preState0);
 
     // If execError has happened, then we need to extract the segmentExecStatus and return
     if (verifyResponse.execError !== null) {
@@ -170,75 +109,7 @@
 
     // If we are here then its because executionStatus is one of MaybeValidExecutionStatus
     const {executionStatus} = verifyResponse;
-    // It becomes optimistically safe for following blocks if a post-merge block is deemed fit
-    // for import. If it would not have been safe verifyBlockExecutionPayload would have
-    // returned execError and loop would have been aborted
-    if (executionStatus !== ExecutionStatus.PreMerge) {
-      isOptimisticallySafe = true;
-    }
     executionStatuses.push(executionStatus);
-
-    const blockBody = blockInput.getBlock().message.body;
-    const isMergeTransitionBlock =
-      // If the merge block is found, stop the search as the isMergeTransitionBlockFn condition
-      // will still evaluate to true for the following blocks leading to errors (while syncing)
-      // as the preState0 still belongs to the pre state of the first block on segment
-      mergeBlockFound === null &&
-      isExecutionStateType(preState0) &&
-      isExecutionBlockBodyType(blockBody) &&
-      isMergeTransitionBlockFn(preState0, blockBody);
-
-    // If this is a merge transition block, check to ensure if it references
-    // a valid terminal PoW block.
-    //
-    // However specs define this check to be run inside forkChoice's onBlock
-    // (https://github.com/ethereum/consensus-specs/blob/dev/specs/bellatrix/fork-choice.md#on_block)
-    // but we perform the check here (as inspired from the lighthouse impl)
-    //
-    // Reasons:
-    // 1. If the block is not valid, we should fail early and not wait till
-    //    forkChoice import.
-    // 2. It makes logical sense to pair it with the block validations and
-    //    deal it with the external services like eth1 tracker here than
-    //    in import block
-    if (isMergeTransitionBlock) {
-      const mergeBlock = blockInput.getBlock().message as bellatrix.BeaconBlock;
-      const mergeBlockHash = toRootHex(chain.config.getForkTypes(mergeBlock.slot).BeaconBlock.hashTreeRoot(mergeBlock));
-      const powBlockRootHex = toRootHex(mergeBlock.body.executionPayload.parentHash);
-      const powBlock = await chain.eth1.getPowBlock(powBlockRootHex).catch((error) => {
-        // Lets just warn the user here, errors if any will be reported on
-        // `assertValidTerminalPowBlock` checks
-        chain.logger.warn(
-          "Error fetching terminal PoW block referred in the merge transition block",
-          {powBlockHash: powBlockRootHex, mergeBlockHash},
-          error
-        );
-        return null;
-      });
-
-      const powBlockParent =
-        powBlock &&
-        (await chain.eth1.getPowBlock(powBlock.parentHash).catch((error) => {
-          // Lets just warn the user here, errors if any will be reported on
-          // `assertValidTerminalPowBlock` checks
-          chain.logger.warn(
-            "Error fetching parent of the terminal PoW block referred in the merge transition block",
-            {powBlockParentHash: powBlock.parentHash, powBlock: powBlockRootHex, mergeBlockHash},
-            error
-          );
-          return null;
-        }));
-
-      // executionStatus will never == ExecutionStatus.PreMerge if it's the mergeBlock. But gotta make TS happy =D
-      if (executionStatus === ExecutionStatus.PreMerge) {
-        throw Error("Merge block must not have executionStatus == PreMerge");
-      }
-
-      assertValidTerminalPowBlock(chain.config, mergeBlock, {executionStatus, powBlock, powBlockParent});
-      // Valid execution payload, but may not be in a valid beacon chain block. Delay printing the POS ACTIVATED banner
-      // to the end of the verify block routine, which confirms that this block is fully valid.
-      mergeBlockFound = mergeBlock;
-    }
   }
 
   const executionTime = Date.now();
@@ -265,7 +136,6 @@
     execAborted: null,
    executionStatuses,
    executionTime,
-    mergeBlockFound,
  };
 }
 
@@ -275,28 +145,20 @@
 export async function verifyBlockExecutionPayload(
   chain: VerifyBlockExecutionPayloadModules,
   blockInput: IBlockInput,
-  preState0: CachedBeaconStateAllForks,
-  opts: BlockProcessOpts,
-  isOptimisticallySafe: boolean,
-  currentSlot: Slot
+  preState0: CachedBeaconStateAllForks
 ): Promise<VerifyBlockExecutionResponse> {
   const block = blockInput.getBlock();
   /** Not null if execution is enabled */
   const executionPayloadEnabled =
     isExecutionStateType(preState0) &&
     isExecutionBlockBodyType(block.message.body) &&
-    // Safe to use with a state previous to block's preState. isMergeComplete can only transition from false to true.
-    // - If preState0 is after merge block: condition is true, and will always be true
-    // - If preState0 is before merge block: the block could lie but then state transition function will throw above
-    // It is kinda safe to send non-trusted payloads to the execution client because at most it can trigger sync.
-    // TODO: If this becomes a problem, do some basic verification beforehand, like checking the proposer signature.
     isExecutionEnabled(preState0, block.message)
      ? block.message.body.executionPayload
      : null;
 
   if (!executionPayloadEnabled) {
-    // isExecutionEnabled() -> false
-    return {executionStatus: ExecutionStatus.PreMerge, execError: null} as VerifyBlockExecutionResponse;
+    // Pre-merge block, no execution payload to verify
+    return {executionStatus: ExecutionStatus.PreMerge, lvhResponse: undefined, execError: null};
   }
 
   // TODO: Handle better notifyNewPayload() returning error is syncing
@@ -343,24 +205,10 @@
   }
 
-    // Accepted and Syncing have the same treatment, as final validation of block is pending
+    // Post-merge, we're always safe to optimistically import
     case ExecutionPayloadStatus.ACCEPTED:
-    case ExecutionPayloadStatus.SYNCING: {
-      // Check if the entire segment was deemed safe or, this block specifically itself if not in
-      // the safeSlotsToImportOptimistically window of current slot, then we can import else
-      // we need to throw and not import his block
-      const safeSlotsToImportOptimistically =
-        opts.safeSlotsToImportOptimistically ?? SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY;
-      if (!isOptimisticallySafe && blockInput.slot + safeSlotsToImportOptimistically >= currentSlot) {
-        const execError = new BlockError(block, {
-          code: BlockErrorCode.EXECUTION_ENGINE_ERROR,
-          execStatus: ExecutionPayloadStatus.UNSAFE_OPTIMISTIC_STATUS,
-          errorMessage: `not safe to import ${execResult.status} payload within ${opts.safeSlotsToImportOptimistically} of currentSlot`,
-        });
-        return {executionStatus: null, execError} as VerifyBlockExecutionResponse;
-      }
-
+    case ExecutionPayloadStatus.SYNCING:
       return {executionStatus: ExecutionStatus.Syncing, execError: null};
   }
 
   // If the block has is not valid, or it referenced an invalid terminal block then the
   // block is invalid, however it has no bearing on any forkChoice cleanup
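The deleted code enforced the optimistic-import rule from ethereum/consensus-specs#2844 quoted in the removed comments: an ACCEPTED/SYNCING payload may be imported only if a post-merge ancestor is already in fork choice, or the block trails the clock by at least SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY. With pre-merge chains unsupported, every block trivially satisfies the first condition, so the guard goes away. A condensed sketch of the retired check (simplified types, not a current API):

    type Slot = number;

    function isSafeToImportOptimistically(
      parentIsPostMerge: boolean, // a post-merge ancestor is already in fork choice
      blockSlot: Slot,
      currentSlot: Slot,
      safeSlotsToImportOptimistically: Slot
    ): boolean {
      // Condition 1 of the spec rule: the parent of the block has execution.
      if (parentIsPostMerge) return true;
      // Condition 2: the block is far enough behind the clock that the merge
      // transition cannot be in progress around it.
      return blockSlot + safeSlotsToImportOptimistically < currentSlot;
    }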
@@ -1,4 +1,5 @@
-import {CachedBeaconStateAllForks, getBlockSignatureSets} from "@lodestar/state-transition";
+import {BeaconConfig} from "@lodestar/config";
+import {CachedBeaconStateAllForks, Index2PubkeyCache, getBlockSignatureSets} from "@lodestar/state-transition";
 import {IndexedAttestation, SignedBeaconBlock} from "@lodestar/types";
 import {Logger} from "@lodestar/utils";
 import {Metrics} from "../../metrics/metrics.js";
@@ -15,6 +16,8 @@ import {ImportBlockOpts} from "./types.js";
  * Since all data is known in advance all signatures are verified at once in parallel.
  */
 export async function verifyBlocksSignatures(
+  config: BeaconConfig,
+  index2pubkey: Index2PubkeyCache,
   bls: IBlsVerifier,
   logger: Logger,
   metrics: Metrics | null,
@@ -25,6 +28,7 @@
 ): Promise<{verifySignaturesTime: number}> {
   const isValidPromises: Promise<boolean>[] = [];
   const recvToValLatency = Date.now() / 1000 - (opts.seenTimestampSec ?? Date.now() / 1000);
+  const currentSyncCommitteeIndexed = preState0.epochCtx.currentSyncCommitteeIndexed;
 
   // Verifies signatures after running state transition, so all SyncCommittee signed roots are known at this point.
   // We must ensure block.slot <= state.slot before running getAllBlockSignatureSets().
@@ -38,9 +42,16 @@
       : //
        // Verify signatures per block to track which block is invalid
        bls.verifySignatureSets(
-          getBlockSignatureSets(preState0, block, indexedAttestationsByBlock[i], {
-            skipProposerSignature: opts.validProposerSignature,
-          })
+          getBlockSignatureSets(
+            config,
+            index2pubkey,
+            currentSyncCommitteeIndexed,
+            block,
+            indexedAttestationsByBlock[i],
+            {
+              skipProposerSignature: opts.validProposerSignature,
+            }
+          )
        );
 
   // getBlockSignatureSets() takes 45ms in benchmarks for 2022Q2 mainnet blocks (100 sigs). When syncing a 32 blocks
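The surrounding code batches every signature set of the block segment into one BLS verification and, per the comment kept above, re-verifies per block only to locate an invalid one: batch verification reports only "all valid" or "something failed". A self-contained sketch of that two-phase strategy with a stand-in batch primitive (`SigSet` and `verifyBatch` are hypothetical, not Lodestar APIs):

    type SigSet = {valid(): Promise<boolean>}; // stand-in for a BLS signature set

    // Hypothetical batch primitive: resolves true only if every set verifies.
    async function verifyBatch(sets: SigSet[]): Promise<boolean> {
      const results = await Promise.all(sets.map((s) => s.valid()));
      return results.every(Boolean);
    }

    // Two-phase strategy mirroring this module: batch first, then verify per block
    // only on failure, so the invalid block can be identified and penalized.
    async function verifyBlocksFast(setsByBlock: SigSet[][]): Promise<number | null> {
      if (await verifyBatch(setsByBlock.flat())) return null; // all blocks valid
      for (let i = 0; i < setsByBlock.length; i++) {
        if (!(await verifyBatch(setsByBlock[i]))) return i; // first invalid block
      }
      return null;
    }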
@@ -3,7 +3,7 @@ import {PrivateKey} from "@libp2p/interface";
 import {PubkeyIndexMap} from "@chainsafe/pubkey-index-map";
 import {CompositeTypeAny, TreeView, Type} from "@chainsafe/ssz";
 import {BeaconConfig} from "@lodestar/config";
-import {CheckpointWithHex, ExecutionStatus, IForkChoice, ProtoBlock, UpdateHeadOpt} from "@lodestar/fork-choice";
+import {CheckpointWithHex, IForkChoice, ProtoBlock, UpdateHeadOpt} from "@lodestar/fork-choice";
 import {LoggerNode} from "@lodestar/logger/node";
 import {EFFECTIVE_BALANCE_INCREMENT, GENESIS_SLOT, SLOTS_PER_EPOCH, isForkPostElectra} from "@lodestar/params";
 import {
@@ -14,13 +14,14 @@
   EpochShuffling,
   Index2PubkeyCache,
   computeAnchorCheckpoint,
+  computeAttestationsRewards,
+  computeBlockRewards,
   computeEndSlotAtEpoch,
   computeEpochAtSlot,
   computeStartSlotAtEpoch,
-  createCachedBeaconState,
+  computeSyncCommitteeRewards,
   getEffectiveBalanceIncrementsZeroInactive,
   getEffectiveBalancesFromStateBytes,
-  isCachedBeaconState,
   processSlots,
 } from "@lodestar/state-transition";
 import {
@@ -38,12 +39,12 @@
   Wei,
   isBlindedBeaconBlock,
   phase0,
+  rewards,
 } from "@lodestar/types";
 import {Logger, fromHex, gweiToWei, isErrorAborted, pruneSetToMax, sleep, toRootHex} from "@lodestar/utils";
 import {ProcessShutdownCallback} from "@lodestar/validator";
 import {GENESIS_EPOCH, ZERO_HASH} from "../constants/index.js";
 import {IBeaconDb} from "../db/index.js";
-import {IEth1ForBlockProduction} from "../eth1/index.js";
 import {BuilderStatus} from "../execution/builder/http.js";
 import {IExecutionBuilder, IExecutionEngine} from "../execution/index.js";
 import {Metrics} from "../metrics/index.js";
@@ -80,9 +81,6 @@ import {AssembledBlockType, BlockType, ProduceResult} from "./produceBlock/index
 import {BlockAttributes, produceBlockBody, produceCommonBlockBody} from "./produceBlock/produceBlockBody.js";
 import {QueuedStateRegenerator, RegenCaller} from "./regen/index.js";
 import {ReprocessController} from "./reprocess.js";
-import {AttestationsRewards, computeAttestationsRewards} from "./rewards/attestationsRewards.js";
-import {BlockRewards, computeBlockRewards} from "./rewards/blockRewards.js";
-import {SyncCommitteeRewards, computeSyncCommitteeRewards} from "./rewards/syncCommitteeRewards.js";
 import {
   SeenAggregators,
   SeenAttesters,
@@ -117,7 +115,6 @@ const DEFAULT_MAX_CACHED_PRODUCED_RESULTS = 4;
 export class BeaconChain implements IBeaconChain {
   readonly genesisTime: UintNum64;
   readonly genesisValidatorsRoot: Root;
-  readonly eth1: IEth1ForBlockProduction;
   readonly executionEngine: IExecutionEngine;
   readonly executionBuilder?: IExecutionBuilder;
   // Expose config for convenience in modularized functions
@@ -144,7 +141,7 @@
   readonly aggregatedAttestationPool: AggregatedAttestationPool;
   readonly syncCommitteeMessagePool: SyncCommitteeMessagePool;
   readonly syncContributionAndProofPool;
-  readonly opPool = new OpPool();
+  readonly opPool: OpPool;
 
   // Gossip seen cache
   readonly seenAttesters = new SeenAttesters();
@@ -206,6 +203,8 @@
     {
       privateKey,
       config,
+      pubkey2index,
+      index2pubkey,
      db,
      dbName,
      dataDir,
@@ -216,12 +215,13 @@
      validatorMonitor,
      anchorState,
      isAnchorStateFinalized,
-      eth1,
      executionEngine,
      executionBuilder,
    }: {
      privateKey: PrivateKey;
      config: BeaconConfig;
+      pubkey2index: PubkeyIndexMap;
+      index2pubkey: Index2PubkeyCache;
      db: IBeaconDb;
      dbName: string;
      dataDir: string;
@@ -231,9 +231,8 @@
      clock?: IClock;
      metrics: Metrics | null;
      validatorMonitor: ValidatorMonitor | null;
-      anchorState: BeaconStateAllForks;
+      anchorState: CachedBeaconStateAllForks;
      isAnchorStateFinalized: boolean;
-      eth1: IEth1ForBlockProduction;
      executionEngine: IExecutionEngine;
      executionBuilder?: IExecutionBuilder;
    }
@@ -248,7 +247,6 @@
     this.genesisTime = anchorState.genesisTime;
     this.anchorStateLatestBlockSlot = anchorState.latestBlockHeader.slot;
     this.genesisValidatorsRoot = anchorState.genesisValidatorsRoot;
-    this.eth1 = eth1;
     this.executionEngine = executionEngine;
     this.executionBuilder = executionBuilder;
     const signal = this.abortController.signal;
@@ -265,6 +263,7 @@
     this.aggregatedAttestationPool = new AggregatedAttestationPool(this.config, metrics);
     this.syncCommitteeMessagePool = new SyncCommitteeMessagePool(config, clock, this.opts?.preaggregateSlotDistance);
     this.syncContributionAndProofPool = new SyncContributionAndProofPool(config, clock, metrics, logger);
+    this.opPool = new OpPool(config);
 
     this.seenAggregatedAttestations = new SeenAggregatedAttestations(metrics);
     this.seenContributionAndProof = new SeenContributionAndProof(metrics);
@@ -291,39 +290,25 @@
       logger,
     });
 
-    // Restore state caches
-    // anchorState may already by a CachedBeaconState. If so, don't create the cache again, since deserializing all
-    // pubkeys takes ~30 seconds for 350k keys (mainnet 2022Q2).
-    // When the BeaconStateCache is created in eth1 genesis builder it may be incorrect. Until we can ensure that
-    // it's safe to re-use _ANY_ BeaconStateCache, this option is disabled by default and only used in tests.
-    const cachedState =
-      isCachedBeaconState(anchorState) && opts.skipCreateStateCacheIfAvailable
-        ? anchorState
-        : createCachedBeaconState(anchorState, {
-            config,
-            pubkey2index: new PubkeyIndexMap(),
-            index2pubkey: [],
-          });
-    this._earliestAvailableSlot = cachedState.slot;
-
-    this.shufflingCache = cachedState.epochCtx.shufflingCache = new ShufflingCache(metrics, logger, this.opts, [
+    this._earliestAvailableSlot = anchorState.slot;
+    this.shufflingCache = anchorState.epochCtx.shufflingCache = new ShufflingCache(metrics, logger, this.opts, [
       {
-        shuffling: cachedState.epochCtx.previousShuffling,
-        decisionRoot: cachedState.epochCtx.previousDecisionRoot,
+        shuffling: anchorState.epochCtx.previousShuffling,
+        decisionRoot: anchorState.epochCtx.previousDecisionRoot,
      },
      {
-        shuffling: cachedState.epochCtx.currentShuffling,
-        decisionRoot: cachedState.epochCtx.currentDecisionRoot,
+        shuffling: anchorState.epochCtx.currentShuffling,
+        decisionRoot: anchorState.epochCtx.currentDecisionRoot,
      },
      {
-        shuffling: cachedState.epochCtx.nextShuffling,
-        decisionRoot: cachedState.epochCtx.nextDecisionRoot,
+        shuffling: anchorState.epochCtx.nextShuffling,
+        decisionRoot: anchorState.epochCtx.nextDecisionRoot,
      },
    ]);
 
-    // Persist single global instance of state caches
-    this.pubkey2index = cachedState.epochCtx.pubkey2index;
-    this.index2pubkey = cachedState.epochCtx.index2pubkey;
+    // Global cache of validators pubkey/index mapping
+    this.pubkey2index = pubkey2index;
+    this.index2pubkey = index2pubkey;
 
     const fileDataStore = opts.nHistoricalStatesFileDataStore ?? true;
     const blockStateCache = this.opts.nHistoricalStates
@@ -339,6 +324,7 @@
       this.cpStateDatastore = fileDataStore ? new FileCPStateDatastore(dataDir) : new DbCPStateDatastore(this.db);
       checkpointStateCache = new PersistentCheckpointStateCache(
        {
+          config,
          metrics,
          logger,
          clock,
@@ -353,15 +339,15 @@
     }
 
     const {checkpoint} = computeAnchorCheckpoint(config, anchorState);
-    blockStateCache.add(cachedState);
-    blockStateCache.setHeadState(cachedState);
-    checkpointStateCache.add(checkpoint, cachedState);
+    blockStateCache.add(anchorState);
+    blockStateCache.setHeadState(anchorState);
+    checkpointStateCache.add(checkpoint, anchorState);
 
     const forkChoice = initializeForkChoice(
      config,
      emitter,
      clock.currentSlot,
-      cachedState,
+      anchorState,
      isAnchorStateFinalized,
      opts,
      this.justifiedBalancesGetter.bind(this),
@@ -417,15 +403,6 @@
       signal
     );
 
-    // Stop polling eth1 data if anchor state is in Electra AND deposit_requests_start_index is reached
-    const anchorStateFork = this.config.getForkName(anchorState.slot);
-    if (isForkPostElectra(anchorStateFork)) {
-      const {eth1DepositIndex, depositRequestsStartIndex} = anchorState as BeaconStateElectra;
-      if (eth1DepositIndex === Number(depositRequestsStartIndex)) {
-        this.eth1.stopPollingEth1Data();
-      }
-    }
-
     // always run PrepareNextSlotScheduler except for fork_choice spec tests
     if (!opts?.disablePrepareNextSlot) {
       new PrepareNextSlotScheduler(this, this.config, metrics, this.logger, signal);
@@ -757,7 +734,7 @@
       RegenCaller.produceBlock
     );
     const proposerIndex = state.epochCtx.getBeaconProposer(slot);
-    const proposerPubKey = state.epochCtx.index2pubkey[proposerIndex].toBytes();
+    const proposerPubKey = this.index2pubkey[proposerIndex].toBytes();
 
     const {body, produceResult, executionPayloadValue, shouldOverrideBuilder} = await produceBlockBody.call(
       this,
@@ -1177,17 +1154,6 @@
     this.seenAggregatedAttestations.prune(epoch);
     this.seenBlockAttesters.prune(epoch);
     this.beaconProposerCache.prune(epoch);
-
-    // Poll for merge block in the background to speed-up block production. Only if:
-    // - after BELLATRIX_FORK_EPOCH
-    // - Beacon node synced
-    // - head state not isMergeTransitionComplete
-    if (this.config.BELLATRIX_FORK_EPOCH - epoch < 1) {
-      const head = this.forkChoice.getHead();
-      if (epoch - computeEpochAtSlot(head.slot) < 5 && head.executionStatus === ExecutionStatus.PreMerge) {
-        this.eth1.startPollingMergeBlock();
-      }
-    }
   }
 
   protected onNewHead(head: ProtoBlock): void {
@@ -1320,7 +1286,7 @@
     }
   }
 
-  async getBlockRewards(block: BeaconBlock | BlindedBeaconBlock): Promise<BlockRewards> {
+  async getBlockRewards(block: BeaconBlock | BlindedBeaconBlock): Promise<rewards.BlockRewards> {
     let preState = this.regen.getPreStateSync(block);
 
     if (preState === null) {
@@ -1331,13 +1297,13 @@
 
     const postState = this.regen.getStateSync(toRootHex(block.stateRoot)) ?? undefined;
 
-    return computeBlockRewards(block, preState.clone(), postState?.clone());
+    return computeBlockRewards(this.config, block, preState.clone(), postState?.clone());
   }
 
   async getAttestationsRewards(
     epoch: Epoch,
     validatorIds?: (ValidatorIndex | string)[]
-  ): Promise<{rewards: AttestationsRewards; executionOptimistic: boolean; finalized: boolean}> {
+  ): Promise<{rewards: rewards.AttestationsRewards; executionOptimistic: boolean; finalized: boolean}> {
     // We use end slot of (epoch + 1) to ensure we have seen all attestations. On-time or late. Any late attestation beyond this slot is not considered
     const slot = computeEndSlotAtEpoch(epoch + 1);
     const stateResult = await this.getStateBySlot(slot, {allowRegen: false}); // No regen if state not in cache
@@ -1355,7 +1321,7 @@
       throw Error(`State is not in cache for slot ${slot}`);
     }
 
-    const rewards = await computeAttestationsRewards(epoch, cachedState, this.config, validatorIds);
+    const rewards = await computeAttestationsRewards(this.config, this.pubkey2index, cachedState, validatorIds);
 
     return {rewards, executionOptimistic, finalized};
   }
@@ -1363,7 +1329,7 @@
   async getSyncCommitteeRewards(
     block: BeaconBlock | BlindedBeaconBlock,
     validatorIds?: (ValidatorIndex | string)[]
-  ): Promise<SyncCommitteeRewards> {
+  ): Promise<rewards.SyncCommitteeRewards> {
     let preState = this.regen.getPreStateSync(block);
 
     if (preState === null) {
@@ -1372,6 +1338,6 @@
 
     preState = processSlots(preState, block.slot); // Dial preState's slot to block.slot
 
-    return computeSyncCommitteeRewards(block, preState.clone(), validatorIds);
+    return computeSyncCommitteeRewards(this.config, this.index2pubkey, block, preState.clone(), validatorIds);
   }
 }
@@ -1,190 +0,0 @@
|
||||
import {Tree, toGindex} from "@chainsafe/persistent-merkle-tree";
|
||||
import {BeaconConfig, ChainForkConfig} from "@lodestar/config";
|
||||
import {GENESIS_EPOCH, GENESIS_SLOT} from "@lodestar/params";
|
||||
import {
|
||||
BeaconStateAllForks,
|
||||
CachedBeaconStateAllForks,
|
||||
applyDeposits,
|
||||
applyEth1BlockHash,
|
||||
applyTimestamp,
|
||||
createCachedBeaconState,
|
||||
createEmptyEpochCacheImmutableData,
|
||||
getActiveValidatorIndices,
|
||||
getGenesisBeaconState,
|
||||
getTemporaryBlockHeader,
|
||||
} from "@lodestar/state-transition";
|
||||
import {phase0, ssz} from "@lodestar/types";
|
||||
import {Logger} from "@lodestar/utils";
|
||||
import {DepositTree} from "../../db/repositories/depositDataRoot.js";
|
||||
import {IEth1Provider} from "../../eth1/index.js";
|
||||
import {IEth1StreamParams} from "../../eth1/interface.js";
|
||||
import {getDepositsAndBlockStreamForGenesis, getDepositsStream} from "../../eth1/stream.js";
|
||||
import {GenesisResult, IGenesisBuilder} from "./interface.js";
|
||||
|
||||
export type GenesisBuilderKwargs = {
|
||||
config: ChainForkConfig;
|
||||
eth1Provider: IEth1Provider;
|
||||
logger: Logger;
|
||||
|
||||
/** Use to restore pending progress */
|
||||
pendingStatus?: {
|
||||
state: BeaconStateAllForks;
|
||||
depositTree: DepositTree;
|
||||
lastProcessedBlockNumber: number;
|
||||
};
|
||||
|
||||
signal?: AbortSignal;
|
||||
maxBlocksPerPoll?: number;
|
||||
};
|
||||
|
||||
export class GenesisBuilder implements IGenesisBuilder {
|
||||
// Expose state to persist on error
|
||||
readonly state: CachedBeaconStateAllForks;
|
||||
readonly depositTree: DepositTree;
|
||||
/** Is null if no block has been processed yet */
|
||||
lastProcessedBlockNumber: number | null = null;
|
||||
|
||||
private readonly config: BeaconConfig;
|
||||
private readonly eth1Provider: IEth1Provider;
|
||||
private readonly logger: Logger;
|
||||
private readonly signal?: AbortSignal;
|
||||
private readonly eth1Params: IEth1StreamParams;
|
||||
private readonly depositCache = new Set<number>();
|
||||
private readonly fromBlock: number;
|
||||
private readonly logEvery = 30 * 1000;
|
||||
private lastLog = 0;
|
||||
/** Current count of active validators in the state */
|
||||
private activatedValidatorCount: number;
|
||||
|
||||
constructor({config, eth1Provider, logger, signal, pendingStatus, maxBlocksPerPoll}: GenesisBuilderKwargs) {
|
||||
// at genesis builder, there is no genesis validator so we don't have a real BeaconConfig
|
||||
// but we need BeaconConfig to temporarily create CachedBeaconState, the cast here is safe since we don't use any getDomain here
|
||||
// the use of state as CachedBeaconState is just for convenient, GenesisResult returns TreeView anyway
|
||||
this.eth1Provider = eth1Provider;
|
||||
this.logger = logger;
|
||||
this.signal = signal;
|
||||
this.eth1Params = {
|
||||
...config,
|
||||
maxBlocksPerPoll: maxBlocksPerPoll ?? 10000,
|
||||
};
|
||||
|
||||
let stateView: BeaconStateAllForks;
|
||||
|
||||
if (pendingStatus) {
|
||||
this.logger.info("Restoring pending genesis state", {block: pendingStatus.lastProcessedBlockNumber});
|
||||
stateView = pendingStatus.state;
|
||||
this.depositTree = pendingStatus.depositTree;
|
||||
this.fromBlock = Math.max(pendingStatus.lastProcessedBlockNumber + 1, this.eth1Provider.deployBlock);
|
||||
} else {
|
||||
stateView = getGenesisBeaconState(
|
||||
config,
|
||||
ssz.phase0.Eth1Data.defaultValue(),
|
||||
getTemporaryBlockHeader(config, config.getForkTypes(GENESIS_SLOT).BeaconBlock.defaultValue())
|
||||
);
|
||||
this.depositTree = ssz.phase0.DepositDataRootList.defaultViewDU();
|
||||
this.fromBlock = this.eth1Provider.deployBlock;
|
||||
}
|
||||
|
||||
// TODO - PENDING: Ensure EpochCacheImmutableData is created only once
|
||||
this.state = createCachedBeaconState(stateView, createEmptyEpochCacheImmutableData(config, stateView));
|
||||
this.config = this.state.config;
|
||||
this.activatedValidatorCount = getActiveValidatorIndices(stateView, GENESIS_EPOCH).length;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get eth1 deposit events and blocks and apply to this.state until we found genesis.
|
||||
*/
|
||||
async waitForGenesis(): Promise<GenesisResult> {
|
||||
await this.eth1Provider.validateContract();
|
||||
|
||||
// Load data from data from this.db.depositData, this.db.depositDataRoot
|
||||
// And start from a more recent fromBlock
|
||||
const blockNumberValidatorGenesis = await this.waitForGenesisValidators();
|
||||
|
||||
const depositsAndBlocksStream = getDepositsAndBlockStreamForGenesis(
|
||||
blockNumberValidatorGenesis,
|
||||
this.eth1Provider,
|
||||
this.eth1Params,
|
||||
this.signal
|
||||
);
|
||||
|
||||
for await (const [depositEvents, block] of depositsAndBlocksStream) {
|
||||
this.applyDeposits(depositEvents);
|
||||
applyTimestamp(this.config, this.state, block.timestamp);
|
||||
applyEth1BlockHash(this.state, block.blockHash);
|
||||
this.lastProcessedBlockNumber = block.blockNumber;
|
||||
|
||||
if (
|
||||
this.state.genesisTime >= this.config.MIN_GENESIS_TIME &&
|
||||
this.activatedValidatorCount >= this.config.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT
|
||||
) {
|
||||
this.logger.info("Found genesis state", {blockNumber: block.blockNumber});
|
||||
return {
|
||||
state: this.state,
|
||||
depositTree: this.depositTree,
|
||||
block,
|
||||
};
|
||||
}
|
||||
|
||||
this.throttledLog(`Waiting for min genesis time ${block.timestamp} / ${this.config.MIN_GENESIS_TIME}`);
|
||||
}
|
||||
|
||||
throw Error("depositsStream stopped without a valid genesis state");
|
||||
}
|
||||
|
||||
/**
|
||||
* First phase of waiting for genesis.
|
||||
* Stream deposits events in batches as big as possible without querying block data
|
||||
* @returns Block number at which there are enough active validators is state for genesis
|
||||
*/
|
||||
private async waitForGenesisValidators(): Promise<number> {
|
||||
const depositsStream = getDepositsStream(this.fromBlock, this.eth1Provider, this.eth1Params, this.signal);
|
||||
|
||||
for await (const {depositEvents, blockNumber} of depositsStream) {
|
||||
this.applyDeposits(depositEvents);
|
||||
this.lastProcessedBlockNumber = blockNumber;
|
||||
|
||||
if (this.activatedValidatorCount >= this.config.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT) {
|
||||
this.logger.info("Found enough genesis validators", {blockNumber});
|
||||
return blockNumber;
|
||||
}
|
||||
|
||||
this.throttledLog(
|
||||
`Found ${this.state.validators.length} / ${this.config.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT} validators to genesis`
|
||||
);
|
||||
}
|
||||
|
||||
throw Error("depositsStream stopped without a valid genesis state");
|
||||
}
|
||||
|
||||
private applyDeposits(depositEvents: phase0.DepositEvent[]): void {
|
||||
const newDeposits = depositEvents
|
||||
.filter((depositEvent) => !this.depositCache.has(depositEvent.index))
|
||||
.map((depositEvent) => {
|
||||
this.depositCache.add(depositEvent.index);
|
||||
this.depositTree.push(ssz.phase0.DepositData.hashTreeRoot(depositEvent.depositData));
|
||||
const gindex = toGindex(this.depositTree.type.depth, BigInt(depositEvent.index));
|
||||
|
||||
// Apply changes from the push above
|
||||
this.depositTree.commit();
|
||||
const depositTreeNode = this.depositTree.node;
|
||||
return {
|
||||
proof: new Tree(depositTreeNode).getSingleProof(gindex),
|
||||
data: depositEvent.depositData,
|
||||
};
|
||||
});
|
||||
|
||||
const {activatedValidatorCount} = applyDeposits(this.config, this.state, newDeposits, this.depositTree);
|
||||
this.activatedValidatorCount += activatedValidatorCount;
|
||||
|
||||
// TODO: If necessary persist deposits here to this.db.depositData, this.db.depositDataRoot
|
||||
}
|
||||
|
||||
/** Throttle genesis generation status log to prevent spamming */
|
||||
private throttledLog(message: string): void {
|
||||
if (Date.now() - this.lastLog > this.logEvery) {
|
||||
this.lastLog = Date.now();
|
||||
this.logger.info(message);
|
||||
}
|
||||
}
|
||||
}
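
Editor's note: the deleted builder above generated deposit inclusion proofs incrementally. A hedged sketch of that pattern in isolation, reusing the same @chainsafe/persistent-merkle-tree and @lodestar/types calls the removed code relied on (the helper name is illustrative):

import {Tree, toGindex} from "@chainsafe/persistent-merkle-tree";
import {ssz} from "@lodestar/types";

const depositTree = ssz.phase0.DepositDataRootList.defaultViewDU();

// Append one deposit-data root and return its Merkle single-proof.
function proofForDeposit(index: number, depositDataRoot: Uint8Array): Uint8Array[] {
  depositTree.push(depositDataRoot);
  depositTree.commit(); // flush the pending push before reading the backing node
  const gindex = toGindex(depositTree.type.depth, BigInt(index));
  return new Tree(depositTree.node).getSingleProof(gindex);
}
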
@@ -1,14 +0,0 @@
import {CompositeViewDU, VectorCompositeType} from "@chainsafe/ssz";
import {CachedBeaconStateAllForks} from "@lodestar/state-transition";
import {ssz} from "@lodestar/types";
import {Eth1Block} from "../../eth1/interface.js";

export type GenesisResult = {
state: CachedBeaconStateAllForks;
depositTree: CompositeViewDU<VectorCompositeType<typeof ssz.Root>>;
block: Eth1Block;
};

export interface IGenesisBuilder {
waitForGenesis: () => Promise<GenesisResult>;
}
@@ -1,37 +1,11 @@
import {ChainForkConfig} from "@lodestar/config";
import {ZERO_HASH} from "@lodestar/params";
import {
BeaconStateAllForks,
CachedBeaconStateAllForks,
computeEpochAtSlot,
computeStartSlotAtEpoch,
} from "@lodestar/state-transition";
import {BeaconStateAllForks, computeEpochAtSlot, computeStartSlotAtEpoch} from "@lodestar/state-transition";
import {SignedBeaconBlock, ssz} from "@lodestar/types";
import {Logger, toHex, toRootHex} from "@lodestar/utils";
import {GENESIS_SLOT} from "../constants/index.js";
import {IBeaconDb} from "../db/index.js";
import {Eth1Provider} from "../eth1/index.js";
import {Eth1Options} from "../eth1/options.js";
import {Metrics} from "../metrics/index.js";
import {GenesisBuilder} from "./genesis/genesis.js";
import {GenesisResult} from "./genesis/interface.js";

export async function persistGenesisResult(
db: IBeaconDb,
genesisResult: GenesisResult,
genesisBlock: SignedBeaconBlock
): Promise<void> {
await Promise.all([
db.stateArchive.add(genesisResult.state),
db.blockArchive.add(genesisBlock),
db.depositDataRoot.putList(genesisResult.depositTree.getAllReadonlyValues()),
db.eth1Data.put(genesisResult.block.timestamp, {
...genesisResult.block,
depositCount: genesisResult.depositTree.length,
depositRoot: genesisResult.depositTree.hashTreeRoot(),
}),
]);
}

export async function persistAnchorState(
config: ChainForkConfig,
@@ -75,76 +49,6 @@ export function createGenesisBlock(config: ChainForkConfig, genesisState: Beacon
return genesisBlock;
}

/**
* Initialize and persist a genesis state and related data
*/
export async function initStateFromEth1({
config,
db,
logger,
opts,
signal,
}: {
config: ChainForkConfig;
db: IBeaconDb;
logger: Logger;
opts: Eth1Options;
signal: AbortSignal;
}): Promise<CachedBeaconStateAllForks> {
logger.info("Listening to eth1 for genesis state");

const statePreGenesis = await db.preGenesisState.get();
const depositTree = await db.depositDataRoot.getDepositRootTree();
const lastProcessedBlockNumber = await db.preGenesisStateLastProcessedBlock.get();

const builder = new GenesisBuilder({
config,
eth1Provider: new Eth1Provider(config, {...opts, logger}, signal),
logger,
signal,
pendingStatus:
statePreGenesis && depositTree !== undefined && lastProcessedBlockNumber != null
? {state: statePreGenesis, depositTree, lastProcessedBlockNumber}
: undefined,
});

try {
const genesisResult = await builder.waitForGenesis();

// Note: .hashTreeRoot() automatically commits()
const genesisBlock = createGenesisBlock(config, genesisResult.state);
const types = config.getForkTypes(GENESIS_SLOT);
const stateRoot = genesisResult.state.hashTreeRoot();
const blockRoot = types.BeaconBlock.hashTreeRoot(genesisBlock.message);

logger.info("Initializing genesis state", {
stateRoot: toRootHex(stateRoot),
blockRoot: toRootHex(blockRoot),
validatorCount: genesisResult.state.validators.length,
});

await persistGenesisResult(db, genesisResult, genesisBlock);

logger.verbose("Clearing pending genesis state if any");
await db.preGenesisState.delete();
await db.preGenesisStateLastProcessedBlock.delete();

return genesisResult.state;
} catch (e) {
if (builder.lastProcessedBlockNumber != null) {
logger.info("Persisting genesis state", {block: builder.lastProcessedBlockNumber});

// Commit changes before serializing
builder.state.commit();

await db.preGenesisState.put(builder.state);
await db.depositDataRoot.putList(builder.depositTree.getAllReadonlyValues());
await db.preGenesisStateLastProcessedBlock.put(builder.lastProcessedBlockNumber);
}
throw e;
}
}

/**
* Restore the latest beacon state from db
*/

@@ -23,9 +23,9 @@ import {
altair,
capella,
phase0,
rewards,
} from "@lodestar/types";
import {Logger} from "@lodestar/utils";
import {IEth1ForBlockProduction} from "../eth1/index.js";
import {IExecutionBuilder, IExecutionEngine} from "../execution/index.js";
import {Metrics} from "../metrics/metrics.js";
import {BufferPool} from "../util/bufferPool.js";
@@ -49,9 +49,6 @@ import {IChainOptions} from "./options.js";
import {AssembledBlockType, BlockAttributes, BlockType, ProduceResult} from "./produceBlock/produceBlockBody.js";
import {IStateRegenerator, RegenCaller} from "./regen/index.js";
import {ReprocessController} from "./reprocess.js";
import {AttestationsRewards} from "./rewards/attestationsRewards.js";
import {BlockRewards} from "./rewards/blockRewards.js";
import {SyncCommitteeRewards} from "./rewards/syncCommitteeRewards.js";
import {
SeenAggregators,
SeenAttesters,
@@ -88,7 +85,6 @@ export interface IBeaconChain {
readonly genesisTime: UintNum64;
readonly genesisValidatorsRoot: Root;
readonly earliestAvailableSlot: Slot;
readonly eth1: IEth1ForBlockProduction;
readonly executionEngine: IExecutionEngine;
readonly executionBuilder?: IExecutionBuilder;
// Expose config for convenience in modularized functions
@@ -257,15 +253,15 @@ export interface IBeaconChain {
regenCanAcceptWork(): boolean;
blsThreadPoolCanAcceptWork(): boolean;

getBlockRewards(blockRef: BeaconBlock | BlindedBeaconBlock): Promise<BlockRewards>;
getBlockRewards(blockRef: BeaconBlock | BlindedBeaconBlock): Promise<rewards.BlockRewards>;
getAttestationsRewards(
epoch: Epoch,
validatorIds?: (ValidatorIndex | string)[]
): Promise<{rewards: AttestationsRewards; executionOptimistic: boolean; finalized: boolean}>;
): Promise<{rewards: rewards.AttestationsRewards; executionOptimistic: boolean; finalized: boolean}>;
getSyncCommitteeRewards(
blockRef: BeaconBlock | BlindedBeaconBlock,
validatorIds?: (ValidatorIndex | string)[]
): Promise<SyncCommitteeRewards>;
): Promise<rewards.SyncCommitteeRewards>;
}

export type SSZObjectType =

@@ -1,6 +1,6 @@
import {Signature, aggregateSignatures} from "@chainsafe/blst";
import {BitArray} from "@chainsafe/ssz";
import {ChainForkConfig} from "@lodestar/config";
import {BeaconConfig} from "@lodestar/config";
import {IForkChoice} from "@lodestar/fork-choice";
import {
ForkName,
@@ -162,7 +162,7 @@ export class AggregatedAttestationPool {
private lowestPermissibleSlot = 0;

constructor(
private readonly config: ChainForkConfig,
private readonly config: BeaconConfig,
private readonly metrics: Metrics | null = null
) {
metrics?.opPool.aggregatedAttestationPool.attDataPerSlot.addCollect(() => this.onScrapeMetrics(metrics));
@@ -249,7 +249,7 @@ export class AggregatedAttestationPool {
const stateEpoch = state.epochCtx.epoch;
const statePrevEpoch = stateEpoch - 1;

const notSeenValidatorsFn = getNotSeenValidatorsFn(state);
const notSeenValidatorsFn = getNotSeenValidatorsFn(this.config, state);
const validateAttestationDataFn = getValidateAttestationDataFn(forkChoice, state);

const attestationsByScore: AttestationWithScore[] = [];
@@ -362,7 +362,7 @@ export class AggregatedAttestationPool {
const statePrevEpoch = stateEpoch - 1;
const rootCache = new RootCache(state);

const notSeenValidatorsFn = getNotSeenValidatorsFn(state);
const notSeenValidatorsFn = getNotSeenValidatorsFn(this.config, state);
const validateAttestationDataFn = getValidateAttestationDataFn(forkChoice, state);

const slots = Array.from(this.attestationGroupByIndexByDataHexBySlot.keys()).sort((a, b) => b - a);
@@ -656,7 +656,7 @@ export class MatchingDataAttestationGroup {
private readonly attestations: AttestationWithIndex[] = [];

constructor(
private readonly config: ChainForkConfig,
private readonly config: BeaconConfig,
readonly committee: Uint32Array,
readonly data: phase0.AttestationData
) {}
@@ -864,9 +864,9 @@ export function aggregateConsolidation({byCommittee, attData}: AttestationsConso
* Pre-compute participation from a CachedBeaconStateAllForks, for use to check if an attestation's committee
* has already attested or not.
*/
export function getNotSeenValidatorsFn(state: CachedBeaconStateAllForks): GetNotSeenValidatorsFn {
export function getNotSeenValidatorsFn(config: BeaconConfig, state: CachedBeaconStateAllForks): GetNotSeenValidatorsFn {
const stateSlot = state.slot;
if (state.config.getForkName(stateSlot) === ForkName.phase0) {
if (config.getForkName(stateSlot) === ForkName.phase0) {
// Get attestations to be included in a phase0 block.
// As we are close to altair, this is not really important, it's mainly for e2e.
// The performance is not great due to the different BeaconState data structure to altair.

@@ -1,3 +1,4 @@
import {BeaconConfig} from "@lodestar/config";
import {Id, Repository} from "@lodestar/db";
import {
BLS_WITHDRAWAL_PREFIX,
@@ -51,6 +52,8 @@ export class OpPool {
/** Map of validator index -> SignedBLSToExecutionChange */
private readonly blsToExecutionChanges = new Map<ValidatorIndex, SignedBLSToExecutionChangeVersioned>();

constructor(private readonly config: BeaconConfig) {}

// Getters for metrics

get attesterSlashingsSize(): number {
@@ -191,9 +194,8 @@ export class OpPool {
phase0.SignedVoluntaryExit[],
capella.SignedBLSToExecutionChange[],
] {
const {config} = state;
const stateEpoch = computeEpochAtSlot(state.slot);
const stateFork = config.getForkSeq(state.slot);
const stateFork = this.config.getForkSeq(state.slot);
const toBeSlashedIndices = new Set<ValidatorIndex>();
const proposerSlashings: phase0.ProposerSlashing[] = [];

@@ -265,7 +267,7 @@ export class OpPool {
// a future fork.
isVoluntaryExitSignatureIncludable(
stateFork,
config.getForkSeq(computeStartSlotAtEpoch(voluntaryExit.message.epoch))
this.config.getForkSeq(computeStartSlotAtEpoch(voluntaryExit.message.epoch))
)
) {
voluntaryExits.push(voluntaryExit);
@@ -368,14 +370,13 @@ export class OpPool {
* Prune if validator has already exited at or before the finalized checkpoint of the head.
*/
private pruneVoluntaryExits(headState: CachedBeaconStateAllForks): void {
const {config} = headState;
const headStateFork = config.getForkSeq(headState.slot);
const headStateFork = this.config.getForkSeq(headState.slot);
const finalizedEpoch = headState.finalizedCheckpoint.epoch;

for (const [key, voluntaryExit] of this.voluntaryExits.entries()) {
// VoluntaryExit messages signed in the previous fork become invalid and can never be included in any future
// block, so just drop as the head state advances into the next fork.
if (config.getForkSeq(computeStartSlotAtEpoch(voluntaryExit.message.epoch)) < headStateFork) {
if (this.config.getForkSeq(computeStartSlotAtEpoch(voluntaryExit.message.epoch)) < headStateFork) {
this.voluntaryExits.delete(key);
}

@@ -392,9 +393,8 @@ export class OpPool {
* to opPool once gossipsub seen cache TTL passes.
*/
private pruneBlsToExecutionChanges(headBlock: SignedBeaconBlock, headState: CachedBeaconStateAllForks): void {
const {config} = headState;
const recentBlsToExecutionChanges =
config.getForkSeq(headBlock.message.slot) >= ForkSeq.capella
this.config.getForkSeq(headBlock.message.slot) >= ForkSeq.capella
? (headBlock as capella.SignedBeaconBlock).message.body.blsToExecutionChanges
: [];


@@ -1,4 +1,3 @@
import {SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY} from "@lodestar/params";
import {defaultOptions as defaultValidatorOptions} from "@lodestar/validator";
import {DEFAULT_ARCHIVE_MODE} from "./archiveStore/constants.js";
import {ArchiveMode, ArchiveStoreOpts} from "./archiveStore/interface.js";
@@ -56,10 +55,6 @@ export type BlockProcessOpts = {
* Will double processing times. Use only for debugging purposes.
*/
disableBlsBatchVerify?: boolean;
/**
* Override SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY
*/
safeSlotsToImportOptimistically?: number;
/**
* Assert progressive balances the same to EpochTransitionCache
*/
@@ -109,7 +104,6 @@ export const defaultChainOptions: IChainOptions = {
proposerBoost: true,
proposerBoostReorg: true,
computeUnrealized: true,
safeSlotsToImportOptimistically: SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY,
suggestedFeeRecipient: defaultValidatorOptions.suggestedFeeRecipient,
serveHistoricalState: false,
assertCorrectProgressiveBalances: false,

@@ -1,9 +1,8 @@
import {routes} from "@lodestar/api";
import {ChainForkConfig} from "@lodestar/config";
import {getSafeExecutionBlockHash} from "@lodestar/fork-choice";
import {ForkPostBellatrix, ForkSeq, SLOTS_PER_EPOCH, isForkPostElectra} from "@lodestar/params";
import {ForkPostBellatrix, ForkSeq, SLOTS_PER_EPOCH} from "@lodestar/params";
import {
BeaconStateElectra,
CachedBeaconStateAllForks,
CachedBeaconStateExecutions,
StateHashTreeRootSource,
@@ -197,7 +196,7 @@ export class PrepareNextSlotScheduler {
this.chain.opts.emitPayloadAttributes === true &&
this.chain.emitter.listenerCount(routes.events.EventType.payloadAttributes)
) {
const data = await getPayloadAttributesForSSE(fork as ForkPostBellatrix, this.chain, {
const data = getPayloadAttributesForSSE(fork as ForkPostBellatrix, this.chain, {
prepareState: updatedPrepareState,
prepareSlot,
parentBlockRoot: fromHex(headRoot),
@@ -222,9 +221,6 @@ export class PrepareNextSlotScheduler {
}
this.metrics?.precomputeNextEpochTransition.hits.set(previousHits ?? 0);

// Check if we can stop polling eth1 data
this.stopEth1Polling();

this.logger.verbose("Completed PrepareNextSlotScheduler epoch transition", {
nextEpoch,
headSlot,
@@ -252,27 +248,4 @@ export class PrepareNextSlotScheduler {
state.hashTreeRoot();
hashTreeRootTimer?.();
}

/**
* Stop eth1 data polling after eth1_deposit_index has reached deposit_requests_start_index in Electra as described in EIP-6110
*/
stopEth1Polling(): void {
// Only continue if eth1 is still polling and finalized checkpoint is in Electra. State regen is expensive
if (this.chain.eth1.isPollingEth1Data()) {
const finalizedCheckpoint = this.chain.forkChoice.getFinalizedCheckpoint();
const checkpointFork = this.config.getForkInfoAtEpoch(finalizedCheckpoint.epoch).name;

if (isForkPostElectra(checkpointFork)) {
const finalizedState = this.chain.getStateByCheckpoint(finalizedCheckpoint)?.state;

if (
finalizedState !== undefined &&
finalizedState.eth1DepositIndex === Number((finalizedState as BeaconStateElectra).depositRequestsStartIndex)
) {
// Signal eth1 to stop polling eth1Data
this.chain.eth1.stopPollingEth1Data();
}
}
}
}
}
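
Editor's note: with legacy eth1 polling removed above, the EIP-6110 cutoff check goes away with it. A hedged sketch of the retired condition, with types narrowed for illustration:

// The legacy deposit queue is exhausted once the finalized post-electra state's
// eth1_deposit_index has caught up to deposit_requests_start_index (EIP-6110),
// at which point eth1-data polling served no further purpose.
function legacyDepositsExhausted(finalizedState: {
  eth1DepositIndex: number;
  depositRequestsStartIndex: bigint;
}): boolean {
  return finalizedState.eth1DepositIndex === Number(finalizedState.depositRequestsStartIndex);
}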

@@ -17,10 +17,8 @@ import {
CachedBeaconStateCapella,
CachedBeaconStateExecutions,
computeTimeAtSlot,
getCurrentEpoch,
getExpectedWithdrawals,
getRandaoMix,
isMergeTransitionComplete,
} from "@lodestar/state-transition";
import {
BLSPubkey,
@@ -44,13 +42,10 @@ import {
deneb,
electra,
fulu,
ssz,
sszTypesFor,
} from "@lodestar/types";
import {Logger, sleep, toHex, toPubkeyHex, toRootHex} from "@lodestar/utils";
import {ZERO_HASH, ZERO_HASH_HEX} from "../../constants/index.js";
import {IEth1ForBlockProduction} from "../../eth1/index.js";
import {numToQuantity} from "../../eth1/provider/utils.js";
import {ZERO_HASH_HEX} from "../../constants/index.js";
import {numToQuantity} from "../../execution/engine/utils.js";
import {
IExecutionBuilder,
IExecutionEngine,
@@ -83,7 +78,6 @@ export enum BlockProductionStep {
voluntaryExits = "voluntaryExits",
blsToExecutionChanges = "blsToExecutionChanges",
attestations = "attestations",
eth1DataAndDeposits = "eth1DataAndDeposits",
syncAggregate = "syncAggregate",
executionPayload = "executionPayload",
}
@@ -171,7 +165,7 @@ export async function produceBlockBody<T extends BlockType>(
// even though shouldOverrideBuilder is relevant for the engine response, for simplicity of typing
// we just return it undefined for the builder which anyway doesn't get consumed downstream
let shouldOverrideBuilder: boolean | undefined;
const fork = currentState.config.getForkName(blockSlot);
const fork = this.config.getForkName(blockSlot);
const produceResult = {
type: blockType,
fork,
@@ -337,14 +331,6 @@ export async function produceBlockBody<T extends BlockType>(
feeRecipient
);

if (prepareRes.isPremerge) {
return {
...prepareRes,
executionPayload: sszTypesFor(fork).ExecutionPayload.defaultValue(),
executionPayloadValue: BigInt(0),
};
}

const {prepType, payloadId} = prepareRes;
Object.assign(logMeta, {executionPayloadPrepType: prepType});

@@ -366,37 +352,14 @@ export async function produceBlockBody<T extends BlockType>(

return {...prepareRes, ...payloadRes};
})().catch((e) => {
// catch payload fetch here, because there is still a recovery path possible if we
// are pre-merge. We don't care the same for builder segment as the execution block
// will take over if the builder flow was activated and errors
this.metrics?.blockPayload.payloadFetchErrors.inc();

if (!isMergeTransitionComplete(currentState as CachedBeaconStateBellatrix)) {
this.logger?.warn(
"Fetch payload from the execution failed, however since we are still pre-merge proceeding with an empty one.",
{},
e as Error
);
// ok we don't have an execution payload here, so we can assign an empty one
// if pre-merge
return {
isPremerge: true as const,
executionPayload: sszTypesFor(fork).ExecutionPayload.defaultValue(),
executionPayloadValue: BigInt(0),
};
}
// since merge transition is complete, we need a valid payload even if with an
// empty (transactions) one. defaultValue isn't gonna cut it!
throw e;
});

const [engineRes, commonBlockBody] = await Promise.all([enginePromise, commonBlockBodyPromise]);
blockBody = Object.assign({}, commonBlockBody) as AssembledBodyType<BlockType.Blinded>;

if (engineRes.isPremerge) {
(blockBody as BeaconBlockBody<ForkPostBellatrix & ForkPreGloas>).executionPayload = engineRes.executionPayload;
executionPayloadValue = engineRes.executionPayloadValue;
} else {
{
const {prepType, payloadId, executionPayload, blobsBundle, executionRequests} = engineRes;
shouldOverrideBuilder = engineRes.shouldOverrideBuilder;

@@ -504,15 +467,10 @@ export async function produceBlockBody<T extends BlockType>(
}

/**
* Produce ExecutionPayload for pre-merge, merge, and post-merge.
*
* Expects `eth1MergeBlockFinder` to be actively searching for blocks well in advance to being called.
*
* @returns PayloadId = pow block found, null = pow NOT found
* Produce ExecutionPayload for post-merge.
*/
export async function prepareExecutionPayload(
chain: {
eth1: IEth1ForBlockProduction;
executionEngine: IExecutionEngine;
config: ChainForkConfig;
},
@@ -523,14 +481,8 @@ export async function prepareExecutionPayload(
finalizedBlockHash: RootHex,
state: CachedBeaconStateExecutions,
suggestedFeeRecipient: string
): Promise<{isPremerge: true} | {isPremerge: false; prepType: PayloadPreparationType; payloadId: PayloadId}> {
const parentHashRes = await getExecutionPayloadParentHash(chain, state);
if (parentHashRes.isPremerge) {
// Return null only if the execution is pre-merge
return {isPremerge: true};
}

const {parentHash} = parentHashRes;
): Promise<{prepType: PayloadPreparationType; payloadId: PayloadId}> {
const parentHash = state.latestExecutionPayloadHeader.blockHash;
const timestamp = computeTimeAtSlot(chain.config, state.slot, state.genesisTime);
const prevRandao = getRandaoMix(state, state.epochCtx.epoch);

@@ -586,12 +538,11 @@ export async function prepareExecutionPayload(
// We are only returning payloadId here because prepareExecutionPayload is also called from
// prepareNextSlot, which is an advance call to execution engine to start building payload
// Actual payload isn't produced till getPayload is called.
return {isPremerge: false, payloadId, prepType};
return {payloadId, prepType};
}

async function prepareExecutionPayloadHeader(
chain: {
eth1: IEth1ForBlockProduction;
executionBuilder?: IExecutionBuilder;
config: ChainForkConfig;
},
@@ -608,53 +559,13 @@ async function prepareExecutionPayloadHeader(
throw Error("executionBuilder required");
}

const parentHashRes = await getExecutionPayloadParentHash(chain, state);
if (parentHashRes.isPremerge) {
throw Error("External builder disabled pre-merge");
}

const {parentHash} = parentHashRes;
const parentHash = state.latestExecutionPayloadHeader.blockHash;
return chain.executionBuilder.getHeader(fork, state.slot, parentHash, proposerPubKey);
}

export async function getExecutionPayloadParentHash(
chain: {
eth1: IEth1ForBlockProduction;
config: ChainForkConfig;
},
state: CachedBeaconStateExecutions
): Promise<{isPremerge: true} | {isPremerge: false; parentHash: Root}> {
// Use different POW block hash parent for block production based on merge status.
// Returned value of null == using an empty ExecutionPayload value
if (isMergeTransitionComplete(state)) {
// Post-merge, normal payload
return {isPremerge: false, parentHash: state.latestExecutionPayloadHeader.blockHash};
}

if (
!ssz.Root.equals(chain.config.TERMINAL_BLOCK_HASH, ZERO_HASH) &&
getCurrentEpoch(state) < chain.config.TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
) {
throw new Error(
`InvalidMergeTBH epoch: expected >= ${
chain.config.TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
}, actual: ${getCurrentEpoch(state)}`
);
}

const terminalPowBlockHash = await chain.eth1.getTerminalPowBlock();
if (terminalPowBlockHash === null) {
// Pre-merge, no prepare payload call is needed
return {isPremerge: true};
}
// Signify merge via producing on top of the last PoW block
return {isPremerge: false, parentHash: terminalPowBlockHash};
}

export async function getPayloadAttributesForSSE(
export function getPayloadAttributesForSSE(
fork: ForkPostBellatrix,
chain: {
eth1: IEth1ForBlockProduction;
config: ChainForkConfig;
},
{
@@ -663,30 +574,23 @@ export async function getPayloadAttributesForSSE(
parentBlockRoot,
feeRecipient,
}: {prepareState: CachedBeaconStateExecutions; prepareSlot: Slot; parentBlockRoot: Root; feeRecipient: string}
): Promise<SSEPayloadAttributes> {
const parentHashRes = await getExecutionPayloadParentHash(chain, prepareState);

if (!parentHashRes.isPremerge) {
const {parentHash} = parentHashRes;
const payloadAttributes = preparePayloadAttributes(fork, chain, {
prepareState,
prepareSlot,
parentBlockRoot,
feeRecipient,
});

const ssePayloadAttributes: SSEPayloadAttributes = {
proposerIndex: prepareState.epochCtx.getBeaconProposer(prepareSlot),
proposalSlot: prepareSlot,
parentBlockNumber: prepareState.latestExecutionPayloadHeader.blockNumber,
parentBlockRoot,
parentBlockHash: parentHash,
payloadAttributes,
};
return ssePayloadAttributes;
}

throw Error("The execution is still pre-merge");
): SSEPayloadAttributes {
const parentHash = prepareState.latestExecutionPayloadHeader.blockHash;
const payloadAttributes = preparePayloadAttributes(fork, chain, {
prepareState,
prepareSlot,
parentBlockRoot,
feeRecipient,
});
const ssePayloadAttributes: SSEPayloadAttributes = {
proposerIndex: prepareState.epochCtx.getBeaconProposer(prepareSlot),
proposalSlot: prepareSlot,
parentBlockNumber: prepareState.latestExecutionPayloadHeader.blockNumber,
parentBlockRoot,
parentBlockHash: parentHash,
payloadAttributes,
};
return ssePayloadAttributes;
}

function preparePayloadAttributes(
@@ -740,7 +644,7 @@ export async function produceCommonBlockBody<T extends BlockType>(
? this.metrics?.executionBlockProductionTimeSteps
: this.metrics?.builderBlockProductionTimeSteps;

const fork = currentState.config.getForkName(slot);
const fork = this.config.getForkName(slot);

// TODO:
// Iterate through the naive aggregation pool and ensure all the attestations from there
@@ -762,20 +666,17 @@ export async function produceCommonBlockBody<T extends BlockType>(
step: BlockProductionStep.attestations,
});

const endEth1DataAndDeposits = stepsMetrics?.startTimer();
const {eth1Data, deposits} = await this.eth1.getEth1DataAndDeposits(currentState);
endEth1DataAndDeposits?.({
step: BlockProductionStep.eth1DataAndDeposits,
});

const blockBody: Omit<CommonBlockBody, "blsToExecutionChanges" | "syncAggregate"> = {
randaoReveal,
graffiti,
eth1Data,
// Eth1 data voting is no longer required since electra
eth1Data: currentState.eth1Data,
proposerSlashings,
attesterSlashings,
attestations,
deposits,
// Since electra, deposits are processed by the execution layer,
// we no longer support handling deposits from earlier forks.
deposits: [],
voluntaryExits,
};
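
Editor's note: the added lines above pin down the post-electra behavior this change assumes. A hedged sketch of those two defaults as a standalone helper (the name is illustrative, not part of the diff):

// Post-electra, eth1-data voting is retired: the body echoes the pre-state's
// eth1Data, and deposits flow through the execution layer, so the legacy
// deposits list stays empty.
function electraEth1Fields<E>(currentState: {eth1Data: E}): {eth1Data: E; deposits: never[]} {
  return {eth1Data: currentState.eth1Data, deposits: []};
}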


@@ -1,4 +1,5 @@
import {routes} from "@lodestar/api";
import {BeaconConfig} from "@lodestar/config";
import {
CachedBeaconStateAllForks,
computeStartSlotAtEpoch,
@@ -24,6 +25,7 @@ export type PersistentCheckpointStateCacheOpts = {
};

type PersistentCheckpointStateCacheModules = {
config: BeaconConfig;
metrics?: Metrics | null;
logger: Logger;
clock?: IClock | null;
@@ -107,6 +109,7 @@ export class PersistentCheckpointStateCache implements CheckpointStateCache {
private readonly cache: MapTracker<CacheKey, CacheItem>;
/** Epoch -> Set<blockRoot> */
private readonly epochIndex = new MapDef<Epoch, Set<RootHex>>(() => new Set<string>());
private readonly config: BeaconConfig;
private readonly metrics: Metrics | null | undefined;
private readonly logger: Logger;
private readonly clock: IClock | null | undefined;
@@ -120,10 +123,20 @@ export class PersistentCheckpointStateCache implements CheckpointStateCache {
private readonly bufferPool?: BufferPool | null;

constructor(
{metrics, logger, clock, signal, datastore, blockStateCache, bufferPool}: PersistentCheckpointStateCacheModules,
{
config,
metrics,
logger,
clock,
signal,
datastore,
blockStateCache,
bufferPool,
}: PersistentCheckpointStateCacheModules,
opts: PersistentCheckpointStateCacheOpts
) {
this.cache = new MapTracker(metrics?.cpStateCache);
this.config = config;
if (metrics) {
this.metrics = metrics;
metrics.cpStateCache.size.addCollect(() => {
@@ -484,7 +497,7 @@ export class PersistentCheckpointStateCache implements CheckpointStateCache {
}

const blockSlot = state.slot;
const processCPStatesTimeMs = state.config.getSlotComponentDurationMs(PROCESS_CHECKPOINT_STATES_BPS);
const processCPStatesTimeMs = this.config.getSlotComponentDurationMs(PROCESS_CHECKPOINT_STATES_BPS);
// we always have clock in production, fallback value is only for test
const msFromSlot = this.clock?.msFromSlot(blockSlot) ?? processCPStatesTimeMs;
const msToProcessCPStates = processCPStatesTimeMs - msFromSlot;

@@ -245,6 +245,18 @@ async function validateAggregateAndProof(
});
}

// Same race-condition check as above for seen aggregators
if (
!skipValidationKnownAttesters &&
chain.seenAggregatedAttestations.isKnown(targetEpoch, attIndex, attDataRootHex, aggregationBits)
) {
throw new AttestationError(GossipAction.IGNORE, {
code: AttestationErrorCode.ATTESTERS_ALREADY_KNOWN,
targetEpoch,
aggregateRoot: attDataRootHex,
});
}

chain.seenAggregators.add(targetEpoch, aggregatorIndex);
chain.seenAggregatedAttestations.add(
targetEpoch,

@@ -43,7 +43,7 @@ export async function validateAttesterSlashing(
// [REJECT] All of the conditions within process_attester_slashing pass validation.
try {
// verifySignature = false, verified in batch below
assertValidAttesterSlashing(state, attesterSlashing, false);
assertValidAttesterSlashing(chain.index2pubkey, state, attesterSlashing, false);
} catch (e) {
throw new AttesterSlashingError(GossipAction.REJECT, {
code: AttesterSlashingErrorCode.INVALID,
@@ -51,7 +51,12 @@ export async function validateAttesterSlashing(
});
}

const signatureSets = getAttesterSlashingSignatureSets(state, attesterSlashing);
const signatureSets = getAttesterSlashingSignatureSets(
chain.config,
chain.index2pubkey,
state.slot,
attesterSlashing
);
if (!(await chain.bls.verifySignatureSets(signatureSets, {batchable: true, priority: prioritizeBls}))) {
throw new AttesterSlashingError(GossipAction.REJECT, {
code: AttesterSlashingErrorCode.INVALID,

@@ -137,7 +137,11 @@ export async function validateGossipBlobSidecar(
// [REJECT] The proposer signature, signed_beacon_block.signature, is valid with respect to the proposer_index pubkey.
const signature = blobSidecar.signedBlockHeader.signature;
if (!chain.seenBlockInputCache.isVerifiedProposerSignature(blobSlot, blockHex, signature)) {
const signatureSet = getBlockHeaderProposerSignatureSetByParentStateSlot(blockState, blobSidecar.signedBlockHeader);
const signatureSet = getBlockHeaderProposerSignatureSetByParentStateSlot(
chain.index2pubkey,
blockState,
blobSidecar.signedBlockHeader
);
// Don't batch so verification is not delayed
if (!(await chain.bls.verifySignatureSets([signatureSet], {verifyOnMainThread: true}))) {
throw new BlobSidecarGossipError(GossipAction.REJECT, {
@@ -240,7 +244,11 @@ export async function validateBlockBlobSidecars(
const signature = firstSidecarSignedBlockHeader.signature;
if (!chain.seenBlockInputCache.isVerifiedProposerSignature(blockSlot, blockRootHex, signature)) {
const headState = await chain.getHeadState();
const signatureSet = getBlockHeaderProposerSignatureSetByHeaderSlot(headState, firstSidecarSignedBlockHeader);
const signatureSet = getBlockHeaderProposerSignatureSetByHeaderSlot(
chain.index2pubkey,
headState,
firstSidecarSignedBlockHeader
);

if (
!(await chain.bls.verifySignatureSets([signatureSet], {

@@ -154,7 +154,7 @@ export async function validateGossipBlock(

// [REJECT] The proposer signature, signed_beacon_block.signature, is valid with respect to the proposer_index pubkey.
if (!chain.seenBlockInputCache.isVerifiedProposerSignature(blockSlot, blockRoot, signedBlock.signature)) {
const signatureSet = getBlockProposerSignatureSet(blockState, signedBlock);
const signatureSet = getBlockProposerSignatureSet(chain.config, chain.index2pubkey, signedBlock);
// Don't batch so verification is not delayed
if (!(await chain.bls.verifySignatureSets([signatureSet], {verifyOnMainThread: true}))) {
throw new BlockGossipError(GossipAction.REJECT, {

@@ -41,7 +41,7 @@ async function validateBlsToExecutionChange(
// NOTE: No need to advance head state since the signature's fork is handled with `broadcastedOnFork`,
// and changes relevant to `isValidBlsToExecutionChange()` happen only on processBlock(), not processEpoch()
const state = chain.getHeadState();
const {config} = state;
const {config} = chain;

// [REJECT] All of the conditions within process_bls_to_execution_change pass validation.
// verifySignature = false, verified in batch below

@@ -135,6 +135,7 @@ export async function validateGossipDataColumnSidecar(
const signature = dataColumnSidecar.signedBlockHeader.signature;
if (!chain.seenBlockInputCache.isVerifiedProposerSignature(blockHeader.slot, blockRootHex, signature)) {
const signatureSet = getBlockHeaderProposerSignatureSetByParentStateSlot(
chain.index2pubkey,
blockState,
dataColumnSidecar.signedBlockHeader
);
@@ -336,7 +337,11 @@ export async function validateBlockDataColumnSidecars(
const signature = firstSidecarSignedBlockHeader.signature;
if (!chain.seenBlockInputCache.isVerifiedProposerSignature(slot, rootHex, signature)) {
const headState = await chain.getHeadState();
const signatureSet = getBlockHeaderProposerSignatureSetByHeaderSlot(headState, firstSidecarSignedBlockHeader);
const signatureSet = getBlockHeaderProposerSignatureSetByHeaderSlot(
chain.index2pubkey,
headState,
firstSidecarSignedBlockHeader
);

if (
!(await chain.bls.verifySignatureSets([signatureSet], {

@@ -44,7 +44,12 @@ async function validateProposerSlashing(
});
}

const signatureSets = getProposerSlashingSignatureSets(state, proposerSlashing);
const signatureSets = getProposerSlashingSignatureSets(
chain.config,
chain.index2pubkey,
state.slot,
proposerSlashing
);
if (!(await chain.bls.verifySignatureSets(signatureSets, {batchable: true, priority: prioritizeBls}))) {
throw new ProposerSlashingError(GossipAction.REJECT, {
code: ProposerSlashingErrorCode.INVALID,

@@ -14,7 +14,7 @@ export function getAggregateAndProofSigningRoot(
epoch: Epoch,
aggregateAndProof: SignedAggregateAndProof
): Uint8Array {
// previously, we call `const aggregatorDomain = state.config.getDomain(state.slot, DOMAIN_AGGREGATE_AND_PROOF, slot);`
// previously, we call `const aggregatorDomain = config.getDomain(state.slot, DOMAIN_AGGREGATE_AND_PROOF, slot);`
// at fork boundary, it's required to dial to target epoch https://github.com/ChainSafe/lodestar/blob/v1.11.3/packages/beacon-node/src/chain/validation/attestation.ts#L573
// instead of that, just use the fork of slot in the attestation data
const slot = computeStartSlotAtEpoch(epoch);

@@ -1,18 +1,21 @@
import {BeaconConfig} from "@lodestar/config";
import {DOMAIN_CONTRIBUTION_AND_PROOF} from "@lodestar/params";
import {
CachedBeaconStateAllForks,
ISignatureSet,
Index2PubkeyCache,
SignatureSetType,
computeSigningRoot,
} from "@lodestar/state-transition";
import {altair, ssz} from "@lodestar/types";

export function getContributionAndProofSignatureSet(
config: BeaconConfig,
index2pubkey: Index2PubkeyCache,
state: CachedBeaconStateAllForks,
signedContributionAndProof: altair.SignedContributionAndProof
): ISignatureSet {
const {epochCtx} = state;
const domain = state.config.getDomain(
const domain = config.getDomain(
state.slot,
DOMAIN_CONTRIBUTION_AND_PROOF,
signedContributionAndProof.message.contribution.slot
@@ -20,7 +23,7 @@ export function getContributionAndProofSignatureSet(
const signingData = signedContributionAndProof.message;
return {
type: SignatureSetType.single,
pubkey: epochCtx.index2pubkey[signedContributionAndProof.message.aggregatorIndex],
pubkey: index2pubkey[signedContributionAndProof.message.aggregatorIndex],
signingRoot: computeSigningRoot(ssz.altair.ContributionAndProof, signingData, domain),
signature: signedContributionAndProof.signature,
};

@@ -1,21 +1,25 @@
import {BeaconConfig} from "@lodestar/config";
import {DOMAIN_SYNC_COMMITTEE} from "@lodestar/params";
import {
CachedBeaconStateAllForks,
ISignatureSet,
Index2PubkeyCache,
SignatureSetType,
computeSigningRoot,
} from "@lodestar/state-transition";
import {altair, ssz} from "@lodestar/types";

export function getSyncCommitteeSignatureSet(
config: BeaconConfig,
index2pubkey: Index2PubkeyCache,
state: CachedBeaconStateAllForks,
syncCommittee: altair.SyncCommitteeMessage
): ISignatureSet {
const domain = state.config.getDomain(state.slot, DOMAIN_SYNC_COMMITTEE, syncCommittee.slot);
const domain = config.getDomain(state.slot, DOMAIN_SYNC_COMMITTEE, syncCommittee.slot);

return {
type: SignatureSetType.single,
pubkey: state.epochCtx.index2pubkey[syncCommittee.validatorIndex],
pubkey: index2pubkey[syncCommittee.validatorIndex],
signingRoot: computeSigningRoot(ssz.Root, syncCommittee.beaconBlockRoot, domain),
signature: syncCommittee.signature,
};

@@ -1,14 +1,16 @@
import {PublicKey} from "@chainsafe/blst";
import {BeaconConfig} from "@lodestar/config";
import {DOMAIN_SYNC_COMMITTEE} from "@lodestar/params";
import {CachedBeaconStateAltair, ISignatureSet, SignatureSetType, computeSigningRoot} from "@lodestar/state-transition";
import {altair, ssz} from "@lodestar/types";

export function getSyncCommitteeContributionSignatureSet(
config: BeaconConfig,
state: CachedBeaconStateAltair,
contribution: altair.SyncCommitteeContribution,
pubkeys: PublicKey[]
): ISignatureSet {
const domain = state.config.getDomain(state.slot, DOMAIN_SYNC_COMMITTEE, contribution.slot);
const domain = config.getDomain(state.slot, DOMAIN_SYNC_COMMITTEE, contribution.slot);
return {
type: SignatureSetType.aggregate,
pubkeys,

@@ -1,17 +1,20 @@
import {BeaconConfig} from "@lodestar/config";
import {DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF} from "@lodestar/params";
import {
CachedBeaconStateAllForks,
ISignatureSet,
Index2PubkeyCache,
SignatureSetType,
computeSigningRoot,
} from "@lodestar/state-transition";
import {altair, ssz} from "@lodestar/types";

export function getSyncCommitteeSelectionProofSignatureSet(
config: BeaconConfig,
index2pubkey: Index2PubkeyCache,
state: CachedBeaconStateAllForks,
contributionAndProof: altair.ContributionAndProof
): ISignatureSet {
const {epochCtx, config} = state;
const slot = contributionAndProof.contribution.slot;
const domain = config.getDomain(state.slot, DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF, slot);
const signingData: altair.SyncAggregatorSelectionData = {
@@ -20,7 +23,7 @@ export function getSyncCommitteeSelectionProofSignatureSet(
};
return {
type: SignatureSetType.single,
pubkey: epochCtx.index2pubkey[contributionAndProof.aggregatorIndex],
pubkey: index2pubkey[contributionAndProof.aggregatorIndex],
signingRoot: computeSigningRoot(ssz.altair.SyncAggregatorSelectionData, signingData, domain),
signature: contributionAndProof.selectionProof,
};

@@ -89,7 +89,7 @@ async function validateSyncCommitteeSigOnly(
syncCommittee: altair.SyncCommitteeMessage,
prioritizeBls = false
): Promise<void> {
const signatureSet = getSyncCommitteeSignatureSet(headState, syncCommittee);
const signatureSet = getSyncCommitteeSignatureSet(chain.config, chain.index2pubkey, headState, syncCommittee);
if (!(await chain.bls.verifySignatureSets([signatureSet], {batchable: true, priority: prioritizeBls}))) {
throw new SyncCommitteeError(GossipAction.REJECT, {
code: SyncCommitteeErrorCode.INVALID_SIGNATURE,

@@ -21,6 +21,7 @@ export async function validateSyncCommitteeGossipContributionAndProof(
const contributionAndProof = signedContributionAndProof.message;
const {contribution, aggregatorIndex} = contributionAndProof;
const {subcommitteeIndex, slot} = contribution;
const {index2pubkey} = chain;

const headState = chain.getHeadState();
validateGossipSyncCommitteeExceptSig(chain, headState, subcommitteeIndex, {
@@ -73,20 +74,23 @@ export async function validateSyncCommitteeGossipContributionAndProof(
// i.e. state.validators[contribution_and_proof.aggregator_index].pubkey in get_sync_subcommittee_pubkeys(state, contribution.subcommittee_index).
// > Checked in validateGossipSyncCommitteeExceptSig()

const participantPubkeys = syncCommitteeParticipantIndices.map(
(validatorIndex) => headState.epochCtx.index2pubkey[validatorIndex]
);
const participantPubkeys = syncCommitteeParticipantIndices.map((validatorIndex) => index2pubkey[validatorIndex]);
const signatureSets = [
// [REJECT] The contribution_and_proof.selection_proof is a valid signature of the SyncAggregatorSelectionData
// derived from the contribution by the validator with index contribution_and_proof.aggregator_index.
getSyncCommitteeSelectionProofSignatureSet(headState, contributionAndProof),
getSyncCommitteeSelectionProofSignatureSet(chain.config, index2pubkey, headState, contributionAndProof),

// [REJECT] The aggregator signature, signed_contribution_and_proof.signature, is valid.
getContributionAndProofSignatureSet(headState, signedContributionAndProof),
getContributionAndProofSignatureSet(chain.config, index2pubkey, headState, signedContributionAndProof),

// [REJECT] The aggregate signature is valid for the message beacon_block_root and aggregate pubkey derived from
// the participation info in aggregation_bits for the subcommittee specified by the contribution.subcommittee_index.
getSyncCommitteeContributionSignatureSet(headState as CachedBeaconStateAltair, contribution, participantPubkeys),
getSyncCommitteeContributionSignatureSet(
chain.config,
headState as CachedBeaconStateAltair,
contribution,
participantPubkeys
),
];

if (!(await chain.bls.verifySignatureSets(signatureSets, {batchable: true}))) {

@@ -59,7 +59,7 @@ async function validateVoluntaryExit(
});
}

const signatureSet = getVoluntaryExitSignatureSet(state, voluntaryExit);
const signatureSet = getVoluntaryExitSignatureSet(chain.config, chain.index2pubkey, state.slot, voluntaryExit);
if (!(await chain.bls.verifySignatureSets([signatureSet], {batchable: true, priority: prioritizeBls}))) {
throw new VoluntaryExitError(GossipAction.REJECT, {
code: VoluntaryExitErrorCode.INVALID_SIGNATURE,

@@ -1,5 +1,6 @@
import {ChainForkConfig} from "@lodestar/config";
import {Db, LevelDbControllerMetrics} from "@lodestar/db";
import {Db, LevelDbControllerMetrics, encodeKey} from "@lodestar/db";
import {Bucket} from "./buckets.js";
import {IBeaconDb} from "./interface.js";
import {CheckpointStateRepository} from "./repositories/checkpointState.js";
import {
@@ -14,16 +15,12 @@ import {
CheckpointHeaderRepository,
DataColumnSidecarArchiveRepository,
DataColumnSidecarRepository,
DepositDataRootRepository,
DepositEventRepository,
Eth1DataRepository,
ProposerSlashingRepository,
StateArchiveRepository,
SyncCommitteeRepository,
SyncCommitteeWitnessRepository,
VoluntaryExitRepository,
} from "./repositories/index.js";
import {PreGenesisState, PreGenesisStateLastProcessedBlock} from "./single/index.js";

export type BeaconDbModules = {
config: ChainForkConfig;
@@ -45,14 +42,8 @@ export class BeaconDb implements IBeaconDb {
voluntaryExit: VoluntaryExitRepository;
proposerSlashing: ProposerSlashingRepository;
attesterSlashing: AttesterSlashingRepository;
depositEvent: DepositEventRepository;
blsToExecutionChange: BLSToExecutionChangeRepository;

depositDataRoot: DepositDataRootRepository;
eth1Data: Eth1DataRepository;
preGenesisState: PreGenesisState;
preGenesisStateLastProcessedBlock: PreGenesisStateLastProcessedBlock;

// lightclient
bestLightClientUpdate: BestLightClientUpdateRepository;
checkpointHeader: CheckpointHeaderRepository;
@@ -80,11 +71,6 @@ export class BeaconDb implements IBeaconDb {
this.blsToExecutionChange = new BLSToExecutionChangeRepository(config, db);
this.proposerSlashing = new ProposerSlashingRepository(config, db);
this.attesterSlashing = new AttesterSlashingRepository(config, db);
this.depositEvent = new DepositEventRepository(config, db);
this.depositDataRoot = new DepositDataRootRepository(config, db);
this.eth1Data = new Eth1DataRepository(config, db);
this.preGenesisState = new PreGenesisState(config, db);
this.preGenesisStateLastProcessedBlock = new PreGenesisStateLastProcessedBlock(config, db);

// lightclient
this.bestLightClientUpdate = new BestLightClientUpdateRepository(config, db);
@@ -110,4 +96,40 @@ export class BeaconDb implements IBeaconDb {
// TODO: Enable once it's deemed safe
// await this.block.batchDelete(await this.block.keys());
}

async deleteDeprecatedEth1Data(): Promise<void> {
const deprecatedBuckets = [
Bucket.phase0_eth1Data,
Bucket.index_depositDataRoot,
Bucket.phase0_depositData,
Bucket.phase0_depositEvent,
Bucket.phase0_preGenesisState,
Bucket.phase0_preGenesisStateLastProcessedBlock,
];

for (const bucket of deprecatedBuckets) {
await this.deleteBucketData(bucket);
}
}

private async deleteBucketData(bucket: Bucket): Promise<void> {
const minKey = encodeKey(bucket, Buffer.alloc(0));
const maxKey = encodeKey(bucket + 1, Buffer.alloc(0));

// Batch delete to avoid loading all keys into memory at once
const BATCH_DELETE_SIZE = 1000;
let keysBatch: Uint8Array[] = [];

for await (const key of this.db.keysStream({gte: minKey, lt: maxKey})) {
keysBatch.push(key);
if (keysBatch.length >= BATCH_DELETE_SIZE) {
await this.db.batchDelete(keysBatch);
keysBatch = [];
}
}

if (keysBatch.length > 0) {
await this.db.batchDelete(keysBatch);
}
}
}
|
||||
|
||||
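A note on why the `[minKey, maxKey)` range in `deleteBucketData` above captures exactly one bucket: keys are stored with the bucket id as a fixed-size prefix, so every key of bucket `N` sorts strictly below the empty key of bucket `N + 1`. A simplified stand-in for `encodeKey` (the real `@lodestar/db` helper may use a wider big-endian prefix) makes this concrete:

// Simplified sketch; assumes bucket ids fit in one byte
function encodeKeySketch(bucket: number, key: Uint8Array): Uint8Array {
  const out = new Uint8Array(1 + key.length);
  out[0] = bucket; // bucket id as prefix
  out.set(key, 1);
  return out;
}

const minKeySketch = encodeKeySketch(8, new Uint8Array(0)); // [0x08] -> first possible key of bucket 8
const maxKeySketch = encodeKeySketch(9, new Uint8Array(0)); // [0x09] -> first possible key of bucket 9
// Every key of bucket 8 satisfies minKey <= key < maxKey under bytewise comparison
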
@@ -16,14 +16,16 @@ export enum Bucket {
  index_mainChain = 6, // Slot -> Root<BeaconBlock>
  // justified, finalized state and block hashes
  index_chainInfo = 7, // Key -> Number64 | stateHash | blockHash
  // eth1 processing
  phase0_eth1Data = 8, // timestamp -> Eth1Data
  index_depositDataRoot = 9, // depositIndex -> Root<DepositData>
  /** @deprecated Eth1 deposit tracking is not required since electra, only kept around to delete data from existing databases */
  phase0_eth1Data = 8,
  /** @deprecated Eth1 deposit tracking is not required since electra, only kept around to delete data from existing databases */
  index_depositDataRoot = 9,

  // op pool
  // phase0_attestation = 10, // DEPRECATED on v0.25.0
  // phase0_aggregateAndProof = 11, // Root -> AggregateAndProof, DEPRECATED on v0.27.0
  phase0_depositData = 12, // [DEPRECATED] index -> DepositData
  /** @deprecated Eth1 deposit tracking is not required since electra, only kept around to delete data from existing databases */
  phase0_depositData = 12,
  phase0_exit = 13, // ValidatorIndex -> VoluntaryExit
  phase0_proposerSlashing = 14, // ValidatorIndex -> ProposerSlashing
  allForks_attesterSlashing = 15, // Root -> AttesterSlashing
@@ -32,15 +34,18 @@ export enum Bucket {
  allForks_checkpointState = 17, // Root -> BeaconState

  // allForks_pendingBlock = 25, // Root -> SignedBeaconBlock // DEPRECATED on v0.30.0
  phase0_depositEvent = 19, // depositIndex -> DepositEvent
  /** @deprecated Eth1 deposit tracking is not required since electra, only kept around to delete data from existing databases */
  phase0_depositEvent = 19,

  index_stateArchiveRootIndex = 26, // State Root -> slot

  deneb_blobSidecars = 27, // DENEB BeaconBlockRoot -> BlobSidecars
  deneb_blobSidecarsArchive = 28, // DENEB BeaconBlockSlot -> BlobSidecars

  phase0_preGenesisState = 30, // Single = phase0.BeaconState
  phase0_preGenesisStateLastProcessedBlock = 31, // Single = Uint8
  /** @deprecated Genesis from eth1 is no longer supported, only kept around to delete data from existing databases */
  phase0_preGenesisState = 30,
  /** @deprecated Genesis from eth1 is no longer supported, only kept around to delete data from existing databases */
  phase0_preGenesisStateLastProcessedBlock = 31,

  // Lightclient server
  // altair_bestUpdatePerCommitteePeriod = 30, // DEPRECATED on v0.32.0

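The hunks above keep the deprecated bucket ids in the enum rather than deleting them: the integer values must never be reused for new data while old databases may still contain those keys. TypeScript numeric enums also give a reverse mapping for free, which is what a helper like `getBucketNameByValue` can rely on; a minimal sketch:

enum BucketSketch {
  phase0_eth1Data = 8,
  index_depositDataRoot = 9,
}

// Numeric enums are double-indexed: value -> name lookups work directly
const bucketName = BucketSketch[8]; // "phase0_eth1Data", e.g. for labeling keys during migrations
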
@@ -12,16 +12,12 @@ import {
  CheckpointHeaderRepository,
  DataColumnSidecarArchiveRepository,
  DataColumnSidecarRepository,
  DepositDataRootRepository,
  DepositEventRepository,
  Eth1DataRepository,
  ProposerSlashingRepository,
  StateArchiveRepository,
  SyncCommitteeRepository,
  SyncCommitteeWitnessRepository,
  VoluntaryExitRepository,
} from "./repositories/index.js";
import {PreGenesisState, PreGenesisStateLastProcessedBlock} from "./single/index.js";

/**
 * The DB service manages the data layer of the beacon chain
@@ -48,17 +44,8 @@ export interface IBeaconDb {
  voluntaryExit: VoluntaryExitRepository;
  proposerSlashing: ProposerSlashingRepository;
  attesterSlashing: AttesterSlashingRepository;
  depositEvent: DepositEventRepository;
  blsToExecutionChange: BLSToExecutionChangeRepository;

  // eth1 processing
  preGenesisState: PreGenesisState;
  preGenesisStateLastProcessedBlock: PreGenesisStateLastProcessedBlock;

  // all deposit data roots and merkle tree
  depositDataRoot: DepositDataRootRepository;
  eth1Data: Eth1DataRepository;

  // lightclient
  bestLightClientUpdate: BestLightClientUpdateRepository;
  checkpointHeader: CheckpointHeaderRepository;
@@ -69,6 +56,8 @@ export interface IBeaconDb {

  pruneHotDb(): Promise<void>;

  deleteDeprecatedEth1Data(): Promise<void>;

  /** Close the connection to the db instance and close the db store. */
  close(): Promise<void>;
  /** To inject metrics after CLI initialization */

@@ -1,80 +0,0 @@
import {ByteVectorType, CompositeViewDU, ListCompositeType} from "@chainsafe/ssz";
import {ChainForkConfig} from "@lodestar/config";
import {Db, KeyValue, Repository} from "@lodestar/db";
import {Root, ssz} from "@lodestar/types";
import {bytesToInt} from "@lodestar/utils";
import {Bucket, getBucketNameByValue} from "../buckets.js";

// TODO: Review where it is best to put this type
export type DepositTree = CompositeViewDU<ListCompositeType<ByteVectorType>>;

export class DepositDataRootRepository extends Repository<number, Root> {
  private depositRootTree?: DepositTree;

  constructor(config: ChainForkConfig, db: Db) {
    const bucket = Bucket.index_depositDataRoot;
    super(config, db, bucket, ssz.Root, getBucketNameByValue(bucket));
  }

  decodeKey(data: Buffer): number {
    return bytesToInt(super.decodeKey(data) as unknown as Uint8Array, "be");
  }

  // depositDataRoots stored by depositData index
  getId(_value: Root): number {
    throw new Error("Unable to create depositIndex from root");
  }

  async put(index: number, value: Root): Promise<void> {
    await super.put(index, value);
    await this.depositRootTreeSet(index, value);
  }

  async batchPut(items: KeyValue<number, Root>[]): Promise<void> {
    await super.batchPut(items);
    for (const {key, value} of items) {
      await this.depositRootTreeSet(key, value);
    }
  }

  async putList(roots: Root[]): Promise<void> {
    await this.batchPut(roots.map((root, index) => ({key: index, value: root})));
  }

  async batchPutValues(values: {index: number; root: Root}[]): Promise<void> {
    await this.batchPut(
      values.map(({index, root}) => ({
        key: index,
        value: root,
      }))
    );
  }

  async getDepositRootTree(): Promise<DepositTree> {
    if (!this.depositRootTree) {
      const values = await this.values();
      this.depositRootTree = ssz.phase0.DepositDataRootList.toViewDU(values);
    }
    return this.depositRootTree;
  }

  async getDepositRootTreeAtIndex(depositIndex: number): Promise<DepositTree> {
    const depositRootTree = await this.getDepositRootTree();
    return depositRootTree.sliceTo(depositIndex);
  }

  private async depositRootTreeSet(index: number, value: Uint8Array): Promise<void> {
    const depositRootTree = await this.getDepositRootTree();

    // TODO: Review and fix properly
    if (index > depositRootTree.length) {
      throw Error(`Error setting depositRootTree index ${index} > length ${depositRootTree.length}`);
    }

    if (index === depositRootTree.length) {
      depositRootTree.push(value);
    } else {
      depositRootTree.set(index, value);
    }
  }
}

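Worth noting about `getDepositRootTreeAtIndex` in the deleted repository above: merkle proofs must be produced against the tree size the chain acknowledges (`eth1Data.depositCount`), not against the full locally-known tree. A sketch of the intended call, with `sliceTo` assumed to keep leaves `0..index` inclusive as in the ssz ViewDU API:

// Illustrative only; `repo` stands in for the deleted DepositDataRootRepository
declare const repo: {getDepositRootTreeAtIndex(depositIndex: number): Promise<{hashTreeRoot(): Uint8Array}>};

async function depositRootAtCount(depositCount: number): Promise<Uint8Array> {
  // Proofs for deposits 0..depositCount-1 are computed on a depositCount-leaf tree,
  // even if the local tree already holds more roots
  const tree = await repo.getDepositRootTreeAtIndex(depositCount - 1);
  return tree.hashTreeRoot();
}
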
@@ -1,32 +0,0 @@
import {ChainForkConfig} from "@lodestar/config";
import {Db, Repository} from "@lodestar/db";
import {phase0, ssz} from "@lodestar/types";
import {Bucket, getBucketNameByValue} from "../buckets.js";

/**
 * DepositData indexed by deposit index
 * Removed when included on chain or old
 */
export class DepositEventRepository extends Repository<number, phase0.DepositEvent> {
  constructor(config: ChainForkConfig, db: Db) {
    const bucket = Bucket.phase0_depositEvent;
    super(config, db, bucket, ssz.phase0.DepositEvent, getBucketNameByValue(bucket));
  }

  async deleteOld(depositCount: number): Promise<void> {
    const firstDepositIndex = await this.firstKey();
    if (firstDepositIndex === null) {
      return;
    }
    await this.batchDelete(Array.from({length: depositCount - firstDepositIndex}, (_, i) => i + firstDepositIndex));
  }

  async batchPutValues(depositEvents: phase0.DepositEvent[]): Promise<void> {
    await this.batchPut(
      depositEvents.map((depositEvent) => ({
        key: depositEvent.index,
        value: depositEvent,
      }))
    );
  }
}

@@ -1,33 +0,0 @@
import {ChainForkConfig} from "@lodestar/config";
import {Db, Repository} from "@lodestar/db";
import {phase0, ssz} from "@lodestar/types";
import {bytesToInt} from "@lodestar/utils";
import {Bucket, getBucketNameByValue} from "../buckets.js";

export class Eth1DataRepository extends Repository<number, phase0.Eth1DataOrdered> {
  constructor(config: ChainForkConfig, db: Db) {
    const bucket = Bucket.phase0_eth1Data;
    super(config, db, bucket, ssz.phase0.Eth1DataOrdered, getBucketNameByValue(bucket));
  }

  decodeKey(data: Buffer): number {
    return bytesToInt(super.decodeKey(data) as unknown as Uint8Array, "be");
  }

  getId(_value: phase0.Eth1Data): number {
    throw new Error("Unable to create timestamp from block hash");
  }

  async batchPutValues(eth1Datas: (phase0.Eth1DataOrdered & {timestamp: number})[]): Promise<void> {
    await this.batchPut(
      eth1Datas.map((eth1Data) => ({
        key: eth1Data.timestamp,
        value: eth1Data,
      }))
    );
  }

  async deleteOld(timestamp: number): Promise<void> {
    await this.batchDelete(await this.keys({lt: timestamp}));
  }
}

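The deleted `Eth1DataRepository` keyed entries by timestamp decoded with `bytesToInt(..., "be")`: big-endian integer keys preserve numeric order under bytewise comparison, which is what made `deleteOld` a single `{lt: timestamp}` range query. A self-contained sketch of the encoding property:

function intToBytesBE(value: number, length = 8): Uint8Array {
  const out = new Uint8Array(length);
  for (let i = length - 1; i >= 0; i--) {
    out[i] = value % 256;
    value = Math.floor(value / 256);
  }
  return out;
}

// Bytewise order of the encodings matches numeric order of the timestamps:
const keyA = intToBytesBE(1700000000);
const keyB = intToBytesBE(1700000001);
// keyA < keyB byte by byte, so a {lt: timestamp} scan prunes exactly the old entries
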
@@ -8,9 +8,6 @@ export {BlockArchiveRepository} from "./blockArchive.js";
export {BLSToExecutionChangeRepository} from "./blsToExecutionChange.js";
export {DataColumnSidecarRepository} from "./dataColumnSidecar.js";
export {DataColumnSidecarArchiveRepository} from "./dataColumnSidecarArchive.js";
export {DepositDataRootRepository} from "./depositDataRoot.js";
export {DepositEventRepository} from "./depositEvent.js";
export {Eth1DataRepository} from "./eth1Data.js";
export {BestLightClientUpdateRepository} from "./lightclientBestUpdate.js";
export {CheckpointHeaderRepository} from "./lightclientCheckpointHeader.js";
export {SyncCommitteeRepository} from "./lightclientSyncCommittee.js";

@@ -1,2 +0,0 @@
export {PreGenesisState} from "./preGenesisState.js";
export {PreGenesisStateLastProcessedBlock} from "./preGenesisStateLastProcessedBlock.js";

@@ -1,37 +0,0 @@
import {ChainForkConfig} from "@lodestar/config";
import {Db, DbReqOpts} from "@lodestar/db";
import {ForkAll, GENESIS_SLOT} from "@lodestar/params";
import {BeaconStateAllForks} from "@lodestar/state-transition";
import {SSZTypesFor} from "@lodestar/types";
import {Bucket, getBucketNameByValue} from "../buckets.js";

export class PreGenesisState {
  private readonly config: ChainForkConfig;
  private readonly bucket: Bucket;
  private readonly db: Db;
  private readonly key: Uint8Array;
  private readonly type: SSZTypesFor<ForkAll, "BeaconState">;
  private readonly dbReqOpts: DbReqOpts;

  constructor(config: ChainForkConfig, db: Db) {
    this.config = config;
    this.db = db;
    this.bucket = Bucket.phase0_preGenesisState;
    this.key = new Uint8Array([this.bucket]);
    this.type = this.config.getForkTypes(GENESIS_SLOT).BeaconState;
    this.dbReqOpts = {bucketId: getBucketNameByValue(this.bucket)};
  }

  async put(value: BeaconStateAllForks): Promise<void> {
    await this.db.put(this.key, value.serialize(), this.dbReqOpts);
  }

  async get(): Promise<BeaconStateAllForks | null> {
    const value = await this.db.get(this.key, this.dbReqOpts);
    return value ? this.type.deserializeToViewDU(value) : null;
  }

  async delete(): Promise<void> {
    await this.db.delete(this.key, this.dbReqOpts);
  }
}

@@ -1,34 +0,0 @@
import {UintNumberType} from "@chainsafe/ssz";
import {ChainForkConfig} from "@lodestar/config";
import {Db, DbReqOpts} from "@lodestar/db";
import {ssz} from "@lodestar/types";
import {Bucket, getBucketNameByValue} from "../buckets.js";

export class PreGenesisStateLastProcessedBlock {
  private readonly bucket: Bucket;
  private readonly type: UintNumberType;
  private readonly db: Db;
  private readonly key: Uint8Array;
  private readonly dbReqOpts: DbReqOpts;

  constructor(_config: ChainForkConfig, db: Db) {
    this.db = db;
    this.type = ssz.UintNum64;
    this.bucket = Bucket.phase0_preGenesisStateLastProcessedBlock;
    this.key = new Uint8Array([this.bucket]);
    this.dbReqOpts = {bucketId: getBucketNameByValue(this.bucket)};
  }

  async put(value: number): Promise<void> {
    await this.db.put(this.key, this.type.serialize(value), this.dbReqOpts);
  }

  async get(): Promise<number | null> {
    const value = await this.db.get(this.key, this.dbReqOpts);
    return value ? this.type.deserialize(value) : null;
  }

  async delete(): Promise<void> {
    await this.db.delete(this.key, this.dbReqOpts);
  }
}

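Both deleted `single/` classes above follow the same pattern: one value per bucket, stored under the one-byte key `[bucket]`. A generic self-contained sketch of that pattern (`MiniDb` is a hypothetical stand-in for the `Db` controller):

interface MiniDb {
  put(key: Uint8Array, value: Uint8Array): Promise<void>;
  get(key: Uint8Array): Promise<Uint8Array | null>;
  delete(key: Uint8Array): Promise<void>;
}

class SingleValue<T> {
  private readonly key: Uint8Array;

  constructor(
    bucket: number,
    private readonly db: MiniDb,
    private readonly serialize: (value: T) => Uint8Array,
    private readonly deserialize: (bytes: Uint8Array) => T
  ) {
    this.key = new Uint8Array([bucket]); // the bucket id itself is the key
  }

  async put(value: T): Promise<void> {
    await this.db.put(this.key, this.serialize(value));
  }

  async get(): Promise<T | null> {
    const bytes = await this.db.get(this.key);
    return bytes ? this.deserialize(bytes) : null;
  }

  async delete(): Promise<void> {
    await this.db.delete(this.key);
  }
}
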
@@ -1,40 +0,0 @@
import {LodestarError} from "@lodestar/utils";

export enum Eth1ErrorCode {
  /** Deposit index too high */
  DEPOSIT_INDEX_TOO_HIGH = "ETH1_ERROR_DEPOSIT_INDEX_TOO_HIGH",
  /** Not enough deposits in DB */
  NOT_ENOUGH_DEPOSITS = "ETH1_ERROR_NOT_ENOUGH_DEPOSITS",
  /** Too many deposits returned by DB */
  TOO_MANY_DEPOSITS = "ETH1_ERROR_TOO_MANY_DEPOSITS",
  /** Deposit root tree does not match current eth1Data */
  WRONG_DEPOSIT_ROOT = "ETH1_ERROR_WRONG_DEPOSIT_ROOT",

  /** No deposits found for block range */
  NO_DEPOSITS_FOR_BLOCK_RANGE = "ETH1_ERROR_NO_DEPOSITS_FOR_BLOCK_RANGE",
  /** No depositRoot for depositCount */
  NO_DEPOSIT_ROOT = "ETH1_ERROR_NO_DEPOSIT_ROOT",
  /** Not enough deposit roots for index */
  NOT_ENOUGH_DEPOSIT_ROOTS = "ETH1_ERROR_NOT_ENOUGH_DEPOSIT_ROOTS",

  /** Attempted to insert a duplicate log for same index into the Eth1DepositsCache */
  DUPLICATE_DISTINCT_LOG = "ETH1_ERROR_DUPLICATE_DISTINCT_LOG",
  /** Attempted to insert a log with index != prev + 1 into the Eth1DepositsCache */
  NON_CONSECUTIVE_LOGS = "ETH1_ERROR_NON_CONSECUTIVE_LOGS",
  /** Expected a deposit log in the db for the index, missing log implies a corrupted db */
  MISSING_DEPOSIT_LOG = "ETH1_ERROR_MISSING_DEPOSIT_LOG",
}

export type Eth1ErrorType =
  | {code: Eth1ErrorCode.DEPOSIT_INDEX_TOO_HIGH; depositIndex: number; depositCount: number}
  | {code: Eth1ErrorCode.NOT_ENOUGH_DEPOSITS; len: number; expectedLen: number}
  | {code: Eth1ErrorCode.TOO_MANY_DEPOSITS; len: number; expectedLen: number}
  | {code: Eth1ErrorCode.WRONG_DEPOSIT_ROOT; root: string; expectedRoot: string}
  | {code: Eth1ErrorCode.NO_DEPOSITS_FOR_BLOCK_RANGE; fromBlock: number; toBlock: number}
  | {code: Eth1ErrorCode.NO_DEPOSIT_ROOT; depositCount: number}
  | {code: Eth1ErrorCode.NOT_ENOUGH_DEPOSIT_ROOTS; index: number; treeLength: number}
  | {code: Eth1ErrorCode.DUPLICATE_DISTINCT_LOG; newIndex: number; lastLogIndex: number}
  | {code: Eth1ErrorCode.NON_CONSECUTIVE_LOGS; newIndex: number; lastLogIndex: number}
  | {code: Eth1ErrorCode.MISSING_DEPOSIT_LOG; newIndex: number; lastLogIndex: number};

export class Eth1Error extends LodestarError<Eth1ErrorType> {}

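The deleted error module shows the typed-error pattern used across Lodestar: an enum of codes plus a discriminated union of metadata shapes. Assuming `LodestarError` exposes that metadata object as `.type` (as its generic parameter suggests), callers get full type narrowing when switching on the code; a sketch:

function describeEth1Error(e: Eth1Error): string {
  const meta = e.type; // assumed: LodestarError exposes its metadata object as `.type`
  switch (meta.code) {
    case Eth1ErrorCode.NOT_ENOUGH_DEPOSITS:
      return `got ${meta.len} deposits, expected ${meta.expectedLen}`; // fields narrowed by `code`
    case Eth1ErrorCode.NON_CONSECUTIVE_LOGS:
      return `log index ${meta.newIndex} does not follow ${meta.lastLogIndex}`;
    default:
      return meta.code;
  }
}
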
@@ -1,26 +0,0 @@
import {ChainForkConfig} from "@lodestar/config";
import {phase0} from "@lodestar/types";
import {IBeaconDb} from "../db/index.js";

export class Eth1DataCache {
  db: IBeaconDb;
  config: ChainForkConfig;

  constructor(config: ChainForkConfig, db: IBeaconDb) {
    this.config = config;
    this.db = db;
  }

  async get({timestampRange}: {timestampRange: {gte: number; lte: number}}): Promise<phase0.Eth1DataOrdered[]> {
    return this.db.eth1Data.values(timestampRange);
  }

  async add(eth1Datas: (phase0.Eth1DataOrdered & {timestamp: number})[]): Promise<void> {
    await this.db.eth1Data.batchPutValues(eth1Datas);
  }

  async getHighestCachedBlockNumber(): Promise<number | null> {
    const highestEth1Data = await this.db.eth1Data.lastValue();
    return highestEth1Data?.blockNumber ?? null;
  }
}

@@ -1,410 +0,0 @@
import {ChainForkConfig} from "@lodestar/config";
import {
  BeaconStateAllForks,
  CachedBeaconStateAllForks,
  CachedBeaconStateElectra,
  becomesNewEth1Data,
} from "@lodestar/state-transition";
import {phase0, ssz} from "@lodestar/types";
import {ErrorAborted, Logger, TimeoutError, fromHex, isErrorAborted, sleep} from "@lodestar/utils";
import {IBeaconDb} from "../db/index.js";
import {Metrics} from "../metrics/index.js";
import {Eth1DataCache} from "./eth1DataCache.js";
import {Eth1DepositsCache} from "./eth1DepositsCache.js";
import {Eth1DataAndDeposits, EthJsonRpcBlockRaw, IEth1Provider} from "./interface.js";
import {Eth1Options} from "./options.js";
import {parseEth1Block} from "./provider/eth1Provider.js";
import {HttpRpcError} from "./provider/jsonRpcHttpClient.js";
import {isJsonRpcTruncatedError} from "./provider/utils.js";
import {getDeposits} from "./utils/deposits.js";
import {getEth1VotesToConsider, pickEth1Vote} from "./utils/eth1Vote.js";

const MAX_BLOCKS_PER_BLOCK_QUERY = 1000;
const MIN_BLOCKS_PER_BLOCK_QUERY = 10;

const MAX_BLOCKS_PER_LOG_QUERY = 1000;
const MIN_BLOCKS_PER_LOG_QUERY = 10;

/** Eth1 blocks happen every 14s approx, no need to update too often once synced */
const AUTO_UPDATE_PERIOD_MS = 60 * 1000;
/** Prevent infinite loops */
const MIN_UPDATE_PERIOD_MS = 1 * 1000;
/** Milliseconds to wait after getting 429 Too Many Requests */
const RATE_LIMITED_WAIT_MS = 30 * 1000;
/** Min time to wait on auto update loop on unknown error */
const MIN_WAIT_ON_ERROR_MS = 1 * 1000;

/** Number of blocks to download if the node detects it is lagging behind due to an inaccurate
 * relationship between block-number-based follow distance and time-based follow distance. */
const ETH1_FOLLOW_DISTANCE_DELTA_IF_SLOW = 32;

/** The absolute minimum follow distance to enforce when downloading catchup batches, from LH */
const ETH_MIN_FOLLOW_DISTANCE = 64;

export type Eth1DepositDataTrackerModules = {
  config: ChainForkConfig;
  db: IBeaconDb;
  metrics: Metrics | null;
  logger: Logger;
  signal: AbortSignal;
};

/**
 * Main class handling eth1 data fetching, processing and storing.
 * Upon instantiation, starts fetching deposits and blocks at regular intervals
 */
export class Eth1DepositDataTracker {
  private config: ChainForkConfig;
  private logger: Logger;
  private signal: AbortSignal;
  private readonly metrics: Metrics | null;

  // Internal modules, state
  private depositsCache: Eth1DepositsCache;
  private eth1DataCache: Eth1DataCache;
  private lastProcessedDepositBlockNumber: number | null = null;

  /** Dynamically adjusted follow distance */
  private eth1FollowDistance: number;
  /** Dynamically adjusted batch size to fetch blocks */
  private eth1GetBlocksBatchSizeDynamic = MAX_BLOCKS_PER_BLOCK_QUERY;
  /** Dynamically adjusted batch size to fetch deposit logs */
  private eth1GetLogsBatchSizeDynamic = MAX_BLOCKS_PER_LOG_QUERY;
  private readonly forcedEth1DataVote: phase0.Eth1Data | null;
  /** To stop `runAutoUpdate()` in addition to AbortSignal */
  private stopPolling: boolean;

  constructor(
    opts: Eth1Options,
    {config, db, metrics, logger, signal}: Eth1DepositDataTrackerModules,
    private readonly eth1Provider: IEth1Provider
  ) {
    this.config = config;
    this.metrics = metrics;
    this.logger = logger;
    this.signal = signal;
    this.eth1Provider = eth1Provider;
    this.depositsCache = new Eth1DepositsCache(opts, config, db);
    this.eth1DataCache = new Eth1DataCache(config, db);
    this.eth1FollowDistance = config.ETH1_FOLLOW_DISTANCE;
    this.stopPolling = false;

    this.forcedEth1DataVote = opts.forcedEth1DataVote
      ? ssz.phase0.Eth1Data.deserialize(fromHex(opts.forcedEth1DataVote))
      : null;

    if (opts.depositContractDeployBlock === undefined) {
      this.logger.warn("No depositContractDeployBlock provided");
    }

    if (metrics) {
      // Set constant value once
      metrics?.eth1.eth1FollowDistanceSecondsConfig.set(config.SECONDS_PER_ETH1_BLOCK * config.ETH1_FOLLOW_DISTANCE);
      metrics.eth1.eth1FollowDistanceDynamic.addCollect(() => {
        metrics.eth1.eth1FollowDistanceDynamic.set(this.eth1FollowDistance);
        metrics.eth1.eth1GetBlocksBatchSizeDynamic.set(this.eth1GetBlocksBatchSizeDynamic);
        metrics.eth1.eth1GetLogsBatchSizeDynamic.set(this.eth1GetLogsBatchSizeDynamic);
      });
    }

    if (opts.enabled) {
      this.runAutoUpdate().catch((e: Error) => {
        if (!(e instanceof ErrorAborted)) {
          this.logger.error("Error on eth1 loop", {}, e);
        }
      });
    }
  }

  isPollingEth1Data(): boolean {
    return !this.stopPolling;
  }

  stopPollingEth1Data(): void {
    this.stopPolling = true;
  }

  /**
   * Return eth1Data and deposits ready for block production for a given state
   */
  async getEth1DataAndDeposits(state: CachedBeaconStateAllForks): Promise<Eth1DataAndDeposits> {
    if (
      state.epochCtx.isPostElectra() &&
      state.eth1DepositIndex >= (state as CachedBeaconStateElectra).depositRequestsStartIndex
    ) {
      // No need to poll eth1Data since Electra deprecates the mechanism after depositRequestsStartIndex is reached
      return {eth1Data: state.eth1Data, deposits: []};
    }
    const eth1Data = this.forcedEth1DataVote ?? (await this.getEth1Data(state));
    const deposits = await this.getDeposits(state, eth1Data);
    return {eth1Data, deposits};
  }

  /**
   * Returns an eth1Data vote for a given state.
   * Requires internal caches to be updated regularly to return good results
   */
  private async getEth1Data(state: BeaconStateAllForks): Promise<phase0.Eth1Data> {
    try {
      const eth1VotesToConsider = await getEth1VotesToConsider(
        this.config,
        state,
        this.eth1DataCache.get.bind(this.eth1DataCache)
      );
      return pickEth1Vote(state, eth1VotesToConsider);
    } catch (e) {
      // Note: In case there's a DB issue, don't stop a block proposal. Just vote for current eth1Data
      this.logger.error("CRITICAL: Error reading valid votes, voting for current eth1Data", {}, e as Error);
      return state.eth1Data;
    }
  }

  /**
   * Returns deposits to be included for a given state and eth1Data vote.
   * Requires internal caches to be updated regularly to return good results
   */
  private async getDeposits(
    state: CachedBeaconStateAllForks,
    eth1DataVote: phase0.Eth1Data
  ): Promise<phase0.Deposit[]> {
    // No new deposits have to be included, continue
    if (eth1DataVote.depositCount === state.eth1DepositIndex) {
      return [];
    }

    // TODO: Review if this is optimal
    // Convert to view first to hash once and compare hashes
    const eth1DataVoteView = ssz.phase0.Eth1Data.toViewDU(eth1DataVote);

    // Eth1 data may change due to the vote included in this block
    const newEth1Data = becomesNewEth1Data(state, eth1DataVoteView) ? eth1DataVoteView : state.eth1Data;
    return getDeposits(state, newEth1Data, this.depositsCache.get.bind(this.depositsCache));
  }

  /**
   * Abortable async update loop: once caught up it waits AUTO_UPDATE_PERIOD_MS between runs,
   * and never runs more often than every MIN_UPDATE_PERIOD_MS
   */
  private async runAutoUpdate(): Promise<void> {
    let lastRunMs = 0;

    while (!this.signal.aborted && !this.stopPolling) {
      lastRunMs = Date.now();

      try {
        const hasCaughtUp = await this.update();

        this.metrics?.eth1.depositTrackerIsCaughtup.set(hasCaughtUp ? 1 : 0);

        if (hasCaughtUp) {
          const sleepTimeMs = Math.max(AUTO_UPDATE_PERIOD_MS + lastRunMs - Date.now(), MIN_UPDATE_PERIOD_MS);
          await sleep(sleepTimeMs, this.signal);
        }
      } catch (e) {
        this.metrics?.eth1.depositTrackerUpdateErrors.inc(1);

        // From Infura: 429 Too Many Requests
        if (e instanceof HttpRpcError && e.status === 429) {
          this.logger.debug("Eth1 provider rate limited", {}, e);
          await sleep(RATE_LIMITED_WAIT_MS, this.signal);
          // only log error if state switched from online to some other state
        } else if (!isErrorAborted(e)) {
          await sleep(MIN_WAIT_ON_ERROR_MS, this.signal);
        }
      }
    }
  }

  /**
   * Update the deposit and block cache, returning an error if either fails
   * @returns true if it has caught up to the remote follow block
   */
  private async update(): Promise<boolean> {
    const remoteHighestBlock = await this.eth1Provider.getBlockNumber();
    this.metrics?.eth1.remoteHighestBlock.set(remoteHighestBlock);

    const remoteFollowBlock = remoteHighestBlock - this.eth1FollowDistance;

    // If remoteFollowBlock is not at or beyond deployBlock, there is no need to
    // fetch and track any deposit data yet
    if (remoteFollowBlock < (this.eth1Provider.deployBlock ?? 0)) return true;

    const hasCaughtUpDeposits = await this.updateDepositCache(remoteFollowBlock);
    const hasCaughtUpBlocks = await this.updateBlockCache(remoteFollowBlock);
    return hasCaughtUpDeposits && hasCaughtUpBlocks;
  }

  /**
   * Fetch deposit events from remote eth1 node up to follow-distance block
   * @returns true if it has caught up to the remote follow block
   */
  private async updateDepositCache(remoteFollowBlock: number): Promise<boolean> {
    const lastProcessedDepositBlockNumber = await this.getLastProcessedDepositBlockNumber();
    // The DB may contain deposits from a different chain making lastProcessedDepositBlockNumber > current chain tip
    // The Math.min() fixes those rare scenarios where fromBlock > toBlock
    const fromBlock = Math.min(remoteFollowBlock, this.getFromBlockToFetch(lastProcessedDepositBlockNumber));
    const toBlock = Math.min(remoteFollowBlock, fromBlock + this.eth1GetLogsBatchSizeDynamic - 1);

    let depositEvents: phase0.DepositEvent[];
    try {
      depositEvents = await this.eth1Provider.getDepositEvents(fromBlock, toBlock);
      // Increase the batch size linearly even if we scale down exponentially (half each time)
      this.eth1GetLogsBatchSizeDynamic = Math.min(
        MAX_BLOCKS_PER_LOG_QUERY,
        this.eth1GetLogsBatchSizeDynamic + MIN_BLOCKS_PER_LOG_QUERY
      );
    } catch (e) {
      if (isJsonRpcTruncatedError(e as Error) || e instanceof TimeoutError) {
        this.eth1GetLogsBatchSizeDynamic = Math.max(
          MIN_BLOCKS_PER_LOG_QUERY,
          Math.floor(this.eth1GetLogsBatchSizeDynamic / 2)
        );
      }
      throw e;
    }

    this.logger.verbose("Fetched deposits", {depositCount: depositEvents.length, fromBlock, toBlock});
    this.metrics?.eth1.depositEventsFetched.inc(depositEvents.length);

    await this.depositsCache.add(depositEvents);
    // Store the `toBlock` since that block may not contain deposits
    this.lastProcessedDepositBlockNumber = toBlock;
    this.metrics?.eth1.lastProcessedDepositBlockNumber.set(toBlock);

    return toBlock >= remoteFollowBlock;
  }

  /**
   * Fetch block headers from a remote eth1 node up to follow-distance block
   *
   * depositRoot and depositCount are inferred from already fetched deposits.
   * Calling get_deposit_root() on the smart contract for a non-latest block requires an
   * archive node, something most users don't have access to.
   * @returns true if it has caught up to the remote follow timestamp
   */
  private async updateBlockCache(remoteFollowBlock: number): Promise<boolean> {
    const lastCachedBlock = await this.eth1DataCache.getHighestCachedBlockNumber();
    // lastProcessedDepositBlockNumber sets the upper bound of the possible block range to fetch in this update
    const lastProcessedDepositBlockNumber = await this.getLastProcessedDepositBlockNumber();
    // lowestEventBlockNumber sets a lower bound of the possible block range to fetch in this update
    const lowestEventBlockNumber = await this.depositsCache.getLowestDepositEventBlockNumber();

    // We are all caught up if:
    // 1. lowestEventBlockNumber is null = no deposits have been fetched or found yet.
    //    So there are no useful blocks to fetch until at least 1 deposit is found.
    // 2. The remoteFollowBlock is behind the lowestEventBlockNumber. This can happen
    //    if the EL's data was wiped and restarted. Not exiting here would otherwise
    //    cause a NO_DEPOSITS_FOR_BLOCK_RANGE error
    if (
      lowestEventBlockNumber === null ||
      lastProcessedDepositBlockNumber === null ||
      remoteFollowBlock < lowestEventBlockNumber
    ) {
      return true;
    }

    // Cap the upper limit of fromBlock with remoteFollowBlock in case deployBlock is set to a different network value
    const fromBlock = Math.min(
      remoteFollowBlock,
      // Fetch from the last cached block or the lowest known deposit block number
      Math.max(this.getFromBlockToFetch(lastCachedBlock), lowestEventBlockNumber)
    );
    const toBlock = Math.min(
      remoteFollowBlock,
      fromBlock + this.eth1GetBlocksBatchSizeDynamic - 1, // Block range is inclusive
      lastProcessedDepositBlockNumber
    );

    let blocksRaw: EthJsonRpcBlockRaw[];
    try {
      blocksRaw = await this.eth1Provider.getBlocksByNumber(fromBlock, toBlock);
      // Increase the batch size linearly even if we scale down exponentially (half each time)
      this.eth1GetBlocksBatchSizeDynamic = Math.min(
        MAX_BLOCKS_PER_BLOCK_QUERY,
        this.eth1GetBlocksBatchSizeDynamic + MIN_BLOCKS_PER_BLOCK_QUERY
      );
    } catch (e) {
      if (isJsonRpcTruncatedError(e as Error) || e instanceof TimeoutError) {
        this.eth1GetBlocksBatchSizeDynamic = Math.max(
          MIN_BLOCKS_PER_BLOCK_QUERY,
          Math.floor(this.eth1GetBlocksBatchSizeDynamic / 2)
        );
      }
      throw e;
    }
    const blocks = blocksRaw.map(parseEth1Block);

    this.logger.verbose("Fetched eth1 blocks", {blockCount: blocks.length, fromBlock, toBlock});
    this.metrics?.eth1.blocksFetched.inc(blocks.length);
    this.metrics?.eth1.lastFetchedBlockBlockNumber.set(toBlock);
    const lastBlock = blocks.at(-1);
    if (lastBlock) {
      this.metrics?.eth1.lastFetchedBlockTimestamp.set(lastBlock.timestamp);
    }

    const eth1Datas = await this.depositsCache.getEth1DataForBlocks(blocks, lastProcessedDepositBlockNumber);
    await this.eth1DataCache.add(eth1Datas);

    // Note: ETH1_FOLLOW_DISTANCE_SECONDS = ETH1_FOLLOW_DISTANCE * SECONDS_PER_ETH1_BLOCK
    // Deposit tracker must fetch blocks and deposits up to ETH1_FOLLOW_DISTANCE_SECONDS,
    // measured in time not blocks. To vote on valid votes it must populate up to the time based follow distance.
    // If it assumes SECONDS_PER_ETH1_BLOCK but block times are:
    // - slower: Cache will not contain all blocks
    // - faster: Cache will contain all required blocks + some ahead of timed follow distance
    //
    // For mainnet we must fetch blocks up until block.timestamp < now - 28672 sec. Based on follow distance:
    // Block times | actual follow distance
    // 14          | 2048
    // 20          | 1434
    // 30          | 956
    // 60          | 478
    //
    // So if, after fetching the block at ETH1_FOLLOW_DISTANCE, its timestamp is not greater than
    // ETH1_FOLLOW_DISTANCE_SECONDS, reduce ETH1_FOLLOW_DISTANCE by a small delta and fetch more blocks.
    // Otherwise, if the last fetched block is above ETH1_FOLLOW_DISTANCE_SECONDS, increase
    // ETH1_FOLLOW_DISTANCE back up towards the configured value.

    if (toBlock < remoteFollowBlock) {
      return false;
    }

    if (!lastBlock) {
      return true;
    }

    const remoteFollowBlockTimestamp =
      Math.round(Date.now() / 1000) - this.config.SECONDS_PER_ETH1_BLOCK * this.config.ETH1_FOLLOW_DISTANCE;
    const blockAfterTargetTimestamp = blocks.find((block) => block.timestamp >= remoteFollowBlockTimestamp);

    if (blockAfterTargetTimestamp) {
      // Caught up to target timestamp; increase eth1FollowDistance, capped at config.ETH1_FOLLOW_DISTANCE.
      // If the block that's right above the timestamp has been fetched now, use it to compute the precise delta.
      const delta = Math.max(lastBlock.blockNumber - blockAfterTargetTimestamp.blockNumber, 1);
      this.eth1FollowDistance = Math.min(this.eth1FollowDistance + delta, this.config.ETH1_FOLLOW_DISTANCE);

      return true;
    }
    // Blocks are slower than expected, reduce eth1FollowDistance. Limit min ETH_MIN_FOLLOW_DISTANCE
    const delta =
      this.eth1FollowDistance -
      Math.max(this.eth1FollowDistance - ETH1_FOLLOW_DISTANCE_DELTA_IF_SLOW, ETH_MIN_FOLLOW_DISTANCE);
    this.eth1FollowDistance = this.eth1FollowDistance - delta;

    // Even if the blocks are slow, once there is no further room to reduce the follow distance
    // we are effectively caught up; call it quits for now, else this leads to an incessant
    // poll on the EL
    return delta === 0;
  }

  private getFromBlockToFetch(lastCachedBlock: number | null): number {
    if (lastCachedBlock === null) {
      return this.eth1Provider.deployBlock ?? 0;
    }
    return lastCachedBlock + 1;
  }

  private async getLastProcessedDepositBlockNumber(): Promise<number | null> {
    if (this.lastProcessedDepositBlockNumber === null) {
      this.lastProcessedDepositBlockNumber = await this.depositsCache.getHighestDepositEventBlockNumber();
    }
    return this.lastProcessedDepositBlockNumber;
  }
}

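A worked example of the time-vs-block follow distance trade-off described in `updateBlockCache` above, using the mainnet numbers from its comment table:

const SECONDS_PER_ETH1_BLOCK = 14;
const ETH1_FOLLOW_DISTANCE = 2048;
// The tracker targets a fixed time window, not a fixed block count
const followTimeSec = ETH1_FOLLOW_DISTANCE * SECONDS_PER_ETH1_BLOCK; // 28672s ~ 8h

function actualFollowDistance(observedBlockTimeSec: number): number {
  return Math.round(followTimeSec / observedBlockTimeSec);
}

actualFollowDistance(14); // 2048
actualFollowDistance(20); // 1434
actualFollowDistance(30); // 956
actualFollowDistance(60); // 478
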
@@ -1,141 +0,0 @@
import {byteArrayEquals} from "@chainsafe/ssz";
import {ChainForkConfig} from "@lodestar/config";
import {FilterOptions} from "@lodestar/db";
import {phase0, ssz} from "@lodestar/types";
import {IBeaconDb} from "../db/index.js";
import {Eth1Error, Eth1ErrorCode} from "./errors.js";
import {Eth1Block} from "./interface.js";
import {getDepositsWithProofs} from "./utils/deposits.js";
import {getEth1DataForBlocks} from "./utils/eth1Data.js";
import {assertConsecutiveDeposits} from "./utils/eth1DepositEvent.js";

export class Eth1DepositsCache {
  unsafeAllowDepositDataOverwrite: boolean;
  db: IBeaconDb;
  config: ChainForkConfig;

  constructor(opts: {unsafeAllowDepositDataOverwrite?: boolean}, config: ChainForkConfig, db: IBeaconDb) {
    this.config = config;
    this.db = db;
    this.unsafeAllowDepositDataOverwrite = opts.unsafeAllowDepositDataOverwrite ?? false;
  }

  /**
   * Returns a list of `Deposit` objects, within the given deposit index `range`.
   *
   * The `depositCount` is used to generate the proofs for the `Deposits`. For example, if we
   * have 100 proofs, but the Ethereum Consensus chain only acknowledges 50 of them, we must produce our
   * proofs with respect to a tree size of 50.
   */
  async get(indexRange: FilterOptions<number>, eth1Data: phase0.Eth1Data): Promise<phase0.Deposit[]> {
    const depositEvents = await this.db.depositEvent.values(indexRange);
    const depositRootTree = await this.db.depositDataRoot.getDepositRootTree();
    return getDepositsWithProofs(depositEvents, depositRootTree, eth1Data);
  }

  /**
   * Add log to cache
   * This function enforces that `logs` are imported one-by-one with consecutive indexes
   */
  async add(depositEvents: phase0.DepositEvent[]): Promise<void> {
    assertConsecutiveDeposits(depositEvents);

    const lastLog = await this.db.depositEvent.lastValue();
    const firstEvent = depositEvents[0];

    // Check, validate and skip if we got any deposit events already present in DB.
    // This can happen if the remote eth1/EL resets its head in these three scenarios:
    // 1. Remote eth1/EL resynced/restarted from a head behind its previous head pre-merge
    // 2. In a post merge scenario, Lodestar restarted from finalized state from DB which
    //    generally is a few epochs behind the last synced head. This causes the eth1 tracker to reset
    //    and refetch the deposits as the lodestar syncs further along (post merge there is a 1-1
    //    correspondence between EL and CL blocks)
    // 3. The EL reorged beyond the eth1 follow distance.
    //
    // Scenarios 1. & 2. are benign and we handle them below by checking that the duplicate log fetched
    // is the same as the one written in DB. Refer to this issue for some data dump of how this happens
    // https://github.com/ChainSafe/lodestar/issues/3674
    //
    // If the duplicate log fetched is not the same as written in DB then it's probably scenario 3.
    // which would be a catastrophic event for the network (or we messed up real bad!!!).
    //
    // So we provide for a way to overwrite this log without deleting the full db via the
    // --unsafeAllowDepositDataOverwrite cli flag which will just overwrite the previous tracker data
    // if any. This option as indicated by its name is unsafe and to be only used if you know what
    // you are doing.
    if (lastLog !== null && firstEvent !== undefined) {
      const newIndex = firstEvent.index;
      const lastLogIndex = lastLog.index;

      if (!this.unsafeAllowDepositDataOverwrite && firstEvent.index <= lastLog.index) {
        // lastLogIndex - newIndex + 1 events are duplicate since this is a consecutive log
        // as asserted by assertConsecutiveDeposits. Splice those events out from depositEvents.
        const skipEvents = depositEvents.splice(0, lastLogIndex - newIndex + 1);
        // After splicing, skipEvents will contain the duplicate events to be checked and validated,
        // and the rest of the remaining events in depositEvents can be safely written to DB to
        // move the tracker along.
        for (const depositEvent of skipEvents) {
          const prevDBSerializedEvent = await this.db.depositEvent.getBinary(depositEvent.index);
          if (!prevDBSerializedEvent) {
            throw new Eth1Error({code: Eth1ErrorCode.MISSING_DEPOSIT_LOG, newIndex, lastLogIndex});
          }
          const serializedEvent = ssz.phase0.DepositEvent.serialize(depositEvent);
          if (!byteArrayEquals(prevDBSerializedEvent, serializedEvent)) {
            throw new Eth1Error({code: Eth1ErrorCode.DUPLICATE_DISTINCT_LOG, newIndex, lastLogIndex});
          }
        }
      } else if (newIndex > lastLogIndex + 1) {
        // Deposit events need to be consecutive, given the way our tracker fetches them. If a deposit event
        // is not consecutive it means either our tracker, or the corresponding eth1/EL
        // node, or the database has messed up. All these failures are critical and the tracker
        // shouldn't proceed without the resolution of this error.
        throw new Eth1Error({code: Eth1ErrorCode.NON_CONSECUTIVE_LOGS, newIndex, lastLogIndex});
      }
    }

    const depositRoots = depositEvents.map((depositEvent) => ({
      index: depositEvent.index,
      root: ssz.phase0.DepositData.hashTreeRoot(depositEvent.depositData),
    }));

    // Store events after verifying that data is consecutive
    // depositDataRoot will throw if adding non consecutive roots
    await this.db.depositDataRoot.batchPutValues(depositRoots);
    await this.db.depositEvent.batchPutValues(depositEvents);
  }

  /**
   * Appends partial eth1 data (depositRoot, depositCount) in a block range (inclusive)
   * Returned array is sequential and ascending in blockNumber
   */
  async getEth1DataForBlocks(
    blocks: Eth1Block[],
    lastProcessedDepositBlockNumber: number | null
  ): Promise<(phase0.Eth1Data & Eth1Block)[]> {
    const highestBlock = blocks.at(-1)?.blockNumber;
    return getEth1DataForBlocks(
      blocks,
      this.db.depositEvent.valuesStream({lte: highestBlock, reverse: true}),
      await this.db.depositDataRoot.getDepositRootTree(),
      lastProcessedDepositBlockNumber
    );
  }

  /**
   * Returns the highest blockNumber stored in DB if any
   */
  async getHighestDepositEventBlockNumber(): Promise<number | null> {
    const latestEvent = await this.db.depositEvent.lastValue();
    return latestEvent?.blockNumber || null;
  }

  /**
   * Returns the lowest blockNumber stored in DB if any
   */
  async getLowestDepositEventBlockNumber(): Promise<number | null> {
    const firstEvent = await this.db.depositEvent.firstValue();
    return firstEvent?.blockNumber || null;
  }
}

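A minimal sketch of the consecutiveness invariant that `add` in the deleted cache relies on (`assertConsecutiveDeposits` is assumed to implement something equivalent); it is what makes the duplicate-splicing arithmetic `lastLogIndex - newIndex + 1` above safe:

function assertConsecutive(indexes: number[]): void {
  for (let i = 1; i < indexes.length; i++) {
    if (indexes[i] !== indexes[i - 1] + 1) {
      throw Error(`non-consecutive deposit index at position ${i}: ${indexes[i - 1]} -> ${indexes[i]}`);
    }
  }
}

assertConsecutive([5, 6, 7]); // ok: against a DB tip at index 6, duplicates are exactly the first 2 items
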
@@ -1,328 +0,0 @@
|
||||
import {ChainConfig} from "@lodestar/config";
|
||||
import {RootHex} from "@lodestar/types";
|
||||
import {Logger, pruneSetToMax, toRootHex} from "@lodestar/utils";
|
||||
import {ZERO_HASH_HEX} from "../constants/index.js";
|
||||
import {Metrics} from "../metrics/index.js";
|
||||
import {enumToIndexMap} from "../util/enum.js";
|
||||
import {EthJsonRpcBlockRaw, IEth1Provider, PowMergeBlock, PowMergeBlockTimestamp, TDProgress} from "./interface.js";
|
||||
import {dataToRootHex, quantityToBigint, quantityToNum} from "./provider/utils.js";
|
||||
|
||||
export enum StatusCode {
|
||||
STOPPED = "STOPPED",
|
||||
SEARCHING = "SEARCHING",
|
||||
FOUND = "FOUND",
|
||||
}
|
||||
|
||||
type Status =
|
||||
| {code: StatusCode.STOPPED}
|
||||
| {code: StatusCode.SEARCHING}
|
||||
| {code: StatusCode.FOUND; mergeBlock: PowMergeBlock};
|
||||
|
||||
/** For metrics, index order = declaration order of StatusCode */
|
||||
const statusCodeIdx = enumToIndexMap(StatusCode);
|
||||
|
||||
/**
|
||||
* Bounds `blocksByHashCache` cache, imposing a max distance between highest and lowest block numbers.
|
||||
* In case of extreme forking the cache might grow unbounded.
|
||||
*/
|
||||
const MAX_CACHE_POW_BLOCKS = 1024;
|
||||
|
||||
const MAX_TD_RENDER_VALUE = Number.MAX_SAFE_INTEGER;
|
||||
|
||||
export type Eth1MergeBlockTrackerModules = {
|
||||
config: ChainConfig;
|
||||
logger: Logger;
|
||||
signal: AbortSignal;
|
||||
metrics: Metrics | null;
|
||||
};
|
||||
|
||||
// get_pow_block_at_total_difficulty
|
||||
|
||||
/**
|
||||
* Follows the eth1 chain to find a (or multiple?) merge blocks that cross the threshold of total terminal difficulty
|
||||
*
|
||||
* Finding the mergeBlock could be done in demand when proposing pre-merge blocks. However, that would slow block
|
||||
* production during the weeks between BELLATRIX_EPOCH and TTD.
|
||||
*/
|
||||
export class Eth1MergeBlockTracker {
|
||||
private readonly config: ChainConfig;
|
||||
private readonly logger: Logger;
|
||||
private readonly metrics: Metrics | null;
|
||||
|
||||
private readonly blocksByHashCache = new Map<RootHex, PowMergeBlock>();
|
||||
private readonly intervals: NodeJS.Timeout[] = [];
|
||||
|
||||
private status: Status;
|
||||
private latestEth1Block: PowMergeBlockTimestamp | null = null;
|
||||
private getTerminalPowBlockFromEth1Promise: Promise<PowMergeBlock | null> | null = null;
|
||||
private readonly safeTDFactor: bigint;
|
||||
|
||||
constructor(
|
||||
{config, logger, signal, metrics}: Eth1MergeBlockTrackerModules,
|
||||
private readonly eth1Provider: IEth1Provider
|
||||
) {
|
||||
this.config = config;
|
||||
this.logger = logger;
|
||||
this.metrics = metrics;
|
||||
|
||||
this.status = {code: StatusCode.STOPPED};
|
||||
|
||||
signal.addEventListener("abort", () => this.close(), {once: true});
|
||||
|
||||
this.safeTDFactor = getSafeTDFactor(this.config.TERMINAL_TOTAL_DIFFICULTY);
|
||||
const scaledTTD = this.config.TERMINAL_TOTAL_DIFFICULTY / this.safeTDFactor;
|
||||
|
||||
// Only run metrics if necessary
|
||||
if (metrics) {
|
||||
// TTD can't be dynamically changed during execution, register metric once
|
||||
metrics.eth1.eth1MergeTTD.set(Number(scaledTTD as bigint));
|
||||
metrics.eth1.eth1MergeTDFactor.set(Number(this.safeTDFactor as bigint));
|
||||
|
||||
metrics.eth1.eth1MergeStatus.addCollect(() => {
|
||||
// Set merge ttd, merge status and merge block status
|
||||
metrics.eth1.eth1MergeStatus.set(statusCodeIdx[this.status.code]);
|
||||
|
||||
if (this.latestEth1Block !== null) {
|
||||
// Set latestBlock stats
|
||||
metrics.eth1.eth1LatestBlockNumber.set(this.latestEth1Block.number);
|
||||
metrics.eth1.eth1LatestBlockTD.set(Number(this.latestEth1Block.totalDifficulty / this.safeTDFactor));
|
||||
metrics.eth1.eth1LatestBlockTimestamp.set(this.latestEth1Block.timestamp);
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the most recent POW block that satisfies the merge block condition
|
||||
*/
|
||||
async getTerminalPowBlock(): Promise<PowMergeBlock | null> {
|
||||
switch (this.status.code) {
|
||||
case StatusCode.STOPPED:
|
||||
// If not module is not polling fetch the mergeBlock explicitly
|
||||
return this.getTerminalPowBlockFromEth1();
|
||||
|
||||
case StatusCode.SEARCHING:
|
||||
// Assume that polling would have found the block
|
||||
return null;
|
||||
|
||||
case StatusCode.FOUND:
|
||||
return this.status.mergeBlock;
|
||||
}
|
||||
}
|
||||
|
||||
getTDProgress(): TDProgress | null {
|
||||
if (this.latestEth1Block === null) {
|
||||
return this.latestEth1Block;
|
||||
}
|
||||
|
||||
const tdDiff = this.config.TERMINAL_TOTAL_DIFFICULTY - this.latestEth1Block.totalDifficulty;
|
||||
|
||||
if (tdDiff > BigInt(0)) {
|
||||
return {
|
||||
ttdHit: false,
|
||||
tdFactor: this.safeTDFactor,
|
||||
tdDiffScaled: Number((tdDiff / this.safeTDFactor) as bigint),
|
||||
ttd: this.config.TERMINAL_TOTAL_DIFFICULTY,
|
||||
td: this.latestEth1Block.totalDifficulty,
|
||||
timestamp: this.latestEth1Block.timestamp,
|
||||
};
|
||||
}
|
||||
return {
|
||||
ttdHit: true,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a POW block by hash checking the local cache first
|
||||
*/
|
||||
async getPowBlock(powBlockHash: string): Promise<PowMergeBlock | null> {
|
||||
// Check cache first
|
||||
const cachedBlock = this.blocksByHashCache.get(powBlockHash);
|
||||
if (cachedBlock) {
|
||||
return cachedBlock;
|
||||
}
|
||||
|
||||
// Fetch from node
|
||||
const blockRaw = await this.eth1Provider.getBlockByHash(powBlockHash);
|
||||
if (blockRaw) {
|
||||
const block = toPowBlock(blockRaw);
|
||||
this.cacheBlock(block);
|
||||
return block;
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Should only start polling for mergeBlock if:
|
||||
* - after BELLATRIX_FORK_EPOCH
|
||||
* - Beacon node synced
|
||||
* - head state not isMergeTransitionComplete
|
||||
*/
|
||||
startPollingMergeBlock(): void {
|
||||
if (this.status.code !== StatusCode.STOPPED) {
|
||||
return;
|
||||
}
|
||||
|
||||
this.status = {code: StatusCode.SEARCHING};
|
||||
this.logger.info("Starting search for terminal POW block", {
|
||||
TERMINAL_TOTAL_DIFFICULTY: this.config.TERMINAL_TOTAL_DIFFICULTY,
|
||||
});
|
||||
|
||||
const interval = setInterval(() => {
|
||||
// Preemptively try to find merge block and cache it if found.
|
||||
// Future callers of getTerminalPowBlock() will re-use the cached found mergeBlock.
|
||||
this.getTerminalPowBlockFromEth1().catch((e) => {
|
||||
this.logger.error("Error on findMergeBlock", {}, e as Error);
|
||||
this.metrics?.eth1.eth1PollMergeBlockErrors.inc();
|
||||
});
|
||||
}, this.config.SECONDS_PER_ETH1_BLOCK * 1000);
|
||||
|
||||
this.intervals.push(interval);
|
||||
}
|
||||
|
||||
private close(): void {
|
||||
this.intervals.forEach(clearInterval);
|
||||
}
|
||||
|
||||
private async getTerminalPowBlockFromEth1(): Promise<PowMergeBlock | null> {
|
||||
if (!this.getTerminalPowBlockFromEth1Promise) {
|
||||
this.getTerminalPowBlockFromEth1Promise = this.internalGetTerminalPowBlockFromEth1()
|
||||
.then((mergeBlock) => {
|
||||
// Persist found merge block here to affect both caller paths:
|
||||
// - internal searcher
|
||||
// - external caller if STOPPED
|
||||
if (mergeBlock && this.status.code !== StatusCode.FOUND) {
|
||||
if (this.status.code === StatusCode.SEARCHING) {
|
||||
this.close();
|
||||
}
|
||||
|
||||
this.logger.info("Terminal POW block found!", {
|
||||
hash: mergeBlock.blockHash,
|
||||
number: mergeBlock.number,
|
||||
totalDifficulty: mergeBlock.totalDifficulty,
|
||||
});
|
||||
|
||||
this.status = {code: StatusCode.FOUND, mergeBlock};
|
||||
this.metrics?.eth1.eth1MergeBlockDetails.set(
|
||||
{
|
||||
terminalBlockHash: mergeBlock.blockHash,
|
||||
// Convert all number/bigints to string labels
|
||||
terminalBlockNumber: mergeBlock.number.toString(10),
|
||||
terminalBlockTD: mergeBlock.totalDifficulty.toString(10),
|
||||
},
|
||||
1
|
||||
);
|
||||
}
|
||||
|
||||
return mergeBlock;
|
||||
})
|
||||
.finally(() => {
|
||||
this.getTerminalPowBlockFromEth1Promise = null;
|
||||
});
|
||||
} else {
|
||||
// This should no happen, since getTerminalPowBlockFromEth1() should resolve faster than SECONDS_PER_ETH1_BLOCK.
|
||||
// else something is wrong: the el-cl comms are two slow, or the backsearch got stuck in a deep search.
|
||||
this.metrics?.eth1.getTerminalPowBlockPromiseCacheHit.inc();
|
||||
}
|
||||
|
||||
return this.getTerminalPowBlockFromEth1Promise;
|
||||
}
|
||||
|
||||
/**
|
||||
* **internal** + **unsafe** since it can create multiple backward searches that overload the eth1 client.
|
||||
* Must be called in a wrapper to ensure that there's only once concurrent call to this fn.
|
||||
*/
|
||||
private async internalGetTerminalPowBlockFromEth1(): Promise<PowMergeBlock | null> {
|
||||
// Search merge block by hash
|
||||
// Terminal block hash override takes precedence over terminal total difficulty
|
||||
const terminalBlockHash = toRootHex(this.config.TERMINAL_BLOCK_HASH);
|
||||
if (terminalBlockHash !== ZERO_HASH_HEX) {
|
||||
const block = await this.getPowBlock(terminalBlockHash);
|
||||
if (block) {
|
||||
return block;
|
||||
}
|
||||
// if a TERMINAL_BLOCK_HASH other than ZERO_HASH is configured and we can't find it, return NONE
|
||||
return null;
|
||||
}
|
||||
|
||||
// Search merge block by TTD
|
||||
const latestBlockRaw = await this.eth1Provider.getBlockByNumber("latest");
|
||||
if (!latestBlockRaw) {
|
||||
throw Error("getBlockByNumber('latest') returned null");
|
||||
}
|
||||
|
||||
let block = toPowBlock(latestBlockRaw);
|
||||
this.latestEth1Block = {...block, timestamp: quantityToNum(latestBlockRaw.timestamp)};
|
||||
this.cacheBlock(block);
|
||||
|
||||
// This code path to look backwards for the merge block is only necessary if:
|
||||
// - The network has not yet found the merge block
|
||||
// - There are descendants of the merge block in the eth1 chain
|
||||
// For the search below to require more than a few hops, multiple block proposers in a row must fail to detect
|
||||
// an existing merge block. Such situation is extremely unlikely, so this search is left un-optimized. Since
|
||||
// this class can start eagerly looking for the merge block when not necessary, startPollingMergeBlock() should
|
||||
// only be called when there is certainty that a mergeBlock search is necessary.
|
||||
|
||||
while (true) {
|
||||
if (block.totalDifficulty < this.config.TERMINAL_TOTAL_DIFFICULTY) {
|
||||
// TTD not reached yet
|
||||
return null;
|
||||
}
|
||||
|
||||
// else block.totalDifficulty >= this.config.TERMINAL_TOTAL_DIFFICULTY
|
||||
// Potential mergeBlock! Must find the first block that passes TTD
|
||||
|
||||
// Allow genesis block to reach TTD https://github.com/ethereum/consensus-specs/pull/2719
|
||||
if (block.parentHash === ZERO_HASH_HEX) {
|
||||
return block;
|
||||
}
|
||||
|
||||
const parent = await this.getPowBlock(block.parentHash);
|
||||
if (!parent) {
|
||||
throw Error(`Unknown parent of block with TD>TTD ${block.parentHash}`);
|
||||
}
|
||||
|
||||
this.metrics?.eth1.eth1ParentBlocksFetched.inc();
|
||||
|
||||
// block.td > TTD && parent.td < TTD => block is mergeBlock
|
||||
if (parent.totalDifficulty < this.config.TERMINAL_TOTAL_DIFFICULTY) {
|
||||
// Is terminal total difficulty block AND has verified block -> parent relationship
|
||||
return block;
|
||||
}
|
||||
block = parent;
|
||||
}
|
||||
}

  private cacheBlock(block: PowMergeBlock): void {
    this.blocksByHashCache.set(block.blockHash, block);
    pruneSetToMax(this.blocksByHashCache, MAX_CACHE_POW_BLOCKS);
  }
}

export function toPowBlock(block: EthJsonRpcBlockRaw): PowMergeBlock {
  // Validate untrusted data from API
  return {
    number: quantityToNum(block.number),
    blockHash: dataToRootHex(block.hash),
    parentHash: dataToRootHex(block.parentHash),
    totalDifficulty: quantityToBigint(block.totalDifficulty),
  };
}
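
// Example usage of toPowBlock with illustrative field values; any malformed
// QUANTITY or DATA field makes the conversion throw.
const rawBlock: EthJsonRpcBlockRaw = {
  number: "0x1b4",
  hash: "0x" + "aa".repeat(32),
  parentHash: "0x" + "bb".repeat(32),
  totalDifficulty: "0x684de10dc5c03f006b6",
  timestamp: "0x55ba467c",
};
const powBlock = toPowBlock(rawBlock); // {number: 436, blockHash: "0xaaaa...", totalDifficulty: <bigint>, ...}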

/**
 * TTD values can be very large, for xDAI > 1e45. So scale down.
 * To be safe, TTD should be rendered as a number < MAX_TD_RENDER_VALUE ~= 9e15 (Number.MAX_SAFE_INTEGER)
 */
export function getSafeTDFactor(ttd: bigint): bigint {
  const safeIntegerMult = ttd / BigInt(MAX_TD_RENDER_VALUE);

  // TTD < MAX_TD_RENDER_VALUE, no need to scale down
  if (safeIntegerMult === BigInt(0)) {
    return BigInt(1);
  }

  // Return the closest power of 10 to ensure TD < max
  const safeIntegerMultDigits = safeIntegerMult.toString(10).length;
  return BigInt(10) ** BigInt(safeIntegerMultDigits);
}
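
// Example (mainnet TTD, assuming MAX_TD_RENDER_VALUE ~= 9e15): ttd / MAX_TD_RENDER_VALUE
// has 7 digits, so the factor is 10^7 and the scaled-down value fits in a JS number.
const ttd = BigInt("58750000000000000000000");
const factor = getSafeTDFactor(ttd); // 10_000_000n
const ttdScaled = Number(ttd / factor); // 5_875_000_000_000_000, safe to render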

@@ -1,157 +0,0 @@
import {CachedBeaconStateAllForks} from "@lodestar/state-transition";
import {Root} from "@lodestar/types";
import {fromHex} from "@lodestar/utils";
import {Eth1DepositDataTracker, Eth1DepositDataTrackerModules} from "./eth1DepositDataTracker.js";
import {Eth1MergeBlockTracker, Eth1MergeBlockTrackerModules} from "./eth1MergeBlockTracker.js";
import {Eth1DataAndDeposits, IEth1ForBlockProduction, IEth1Provider, PowMergeBlock, TDProgress} from "./interface.js";
import {Eth1Options} from "./options.js";
import {Eth1Provider} from "./provider/eth1Provider.js";
export {Eth1Provider};
export type {IEth1ForBlockProduction, IEth1Provider};

// This module encapsulates all consumer functionality of the execution node (formerly eth1). The beacon node
// has to:
//
// - For genesis, the beacon node must follow the eth1 chain: get all deposit events + blocks within that range.
//   Once the genesis conditions are met, start the POS chain with the resulting state. The logic is similar to the
//   two points below, but the implementation is specialized for each scenario.
//
// - Follow the eth1 block chain to validate eth1Data votes. It needs all consecutive blocks within a specific range
//   and at a distance from the head.
//   ETH1_FOLLOW_DISTANCE          uint64(2**11) (= 2,048)  Eth1 blocks  ~8 hours
//   EPOCHS_PER_ETH1_VOTING_PERIOD uint64(2**6)  (= 64)     epochs       ~6.8 hours
//
// - Fetch ALL deposit events from the deposit contract to build the deposit tree and validate future merkle proofs.
//   Then it must follow deposit events at a distance roughly similar to the `ETH1_FOLLOW_DISTANCE` parameter above.
//
// - [New bellatrix]: After BELLATRIX_FORK_EPOCH, it must fetch the block with hash
//   `state.eth1_data.block_hash` to compute `terminal_total_difficulty`. Note this may change with
//   https://github.com/ethereum/consensus-specs/issues/2603.
//
// - [New bellatrix]: On block production post BELLATRIX_FORK_EPOCH, pre merge, the beacon node must find the merge
//   block crossing the `terminal_total_difficulty` boundary and include it in the block. After the merge, block
//   production will just use `execution_engine.assemble_block` without fetching individual blocks.
//
// - [New bellatrix]: Fork-choice must validate the merge block ensuring it crossed the `terminal_total_difficulty`
//   boundary, so it must fetch the POW block referenced in the merge block + its POW parent block.
//
// With the merge the beacon node has to follow the eth1 chain at two distances:
// 1. At `ETH1_FOLLOW_DISTANCE` for eth1Data to be re-org safe
// 2. At the head to get the first merge block, tolerating possible re-orgs
//
// Both streams of blocks should not be merged, since it's harder to guard against re-orgs from (2) to (1).

export function initializeEth1ForBlockProduction(
  opts: Eth1Options,
  modules: Pick<Eth1DepositDataTrackerModules, "db" | "config" | "metrics" | "logger" | "signal">
): IEth1ForBlockProduction {
  if (opts.enabled) {
    return new Eth1ForBlockProduction(opts, {
      config: modules.config,
      db: modules.db,
      metrics: modules.metrics,
      logger: modules.logger,
      signal: modules.signal,
    });
  }
  return new Eth1ForBlockProductionDisabled();
}

export class Eth1ForBlockProduction implements IEth1ForBlockProduction {
  private readonly eth1DepositDataTracker: Eth1DepositDataTracker | null;
  private readonly eth1MergeBlockTracker: Eth1MergeBlockTracker;

  constructor(
    opts: Eth1Options,
    modules: Eth1DepositDataTrackerModules & Eth1MergeBlockTrackerModules & {eth1Provider?: IEth1Provider}
  ) {
    const eth1Provider =
      modules.eth1Provider ||
      new Eth1Provider(
        modules.config,
        {...opts, logger: modules.logger},
        modules.signal,
        modules.metrics?.eth1HttpClient
      );

    this.eth1DepositDataTracker = opts.disableEth1DepositDataTracker
      ? null
      : new Eth1DepositDataTracker(opts, modules, eth1Provider);

    this.eth1MergeBlockTracker = new Eth1MergeBlockTracker(modules, eth1Provider);
  }

  async getEth1DataAndDeposits(state: CachedBeaconStateAllForks): Promise<Eth1DataAndDeposits> {
    if (this.eth1DepositDataTracker === null) {
      return {eth1Data: state.eth1Data, deposits: []};
    }
    return this.eth1DepositDataTracker.getEth1DataAndDeposits(state);
  }

  async getTerminalPowBlock(): Promise<Root | null> {
    const block = await this.eth1MergeBlockTracker.getTerminalPowBlock();
    return block && fromHex(block.blockHash);
  }

  getPowBlock(powBlockHash: string): Promise<PowMergeBlock | null> {
    return this.eth1MergeBlockTracker.getPowBlock(powBlockHash);
  }

  getTDProgress(): TDProgress | null {
    return this.eth1MergeBlockTracker.getTDProgress();
  }

  startPollingMergeBlock(): void {
    this.eth1MergeBlockTracker.startPollingMergeBlock();
  }

  isPollingEth1Data(): boolean {
    return this.eth1DepositDataTracker?.isPollingEth1Data() ?? false;
  }

  stopPollingEth1Data(): void {
    this.eth1DepositDataTracker?.stopPollingEth1Data();
  }
}

/**
 * Disabled version of Eth1ForBlockProduction
 * May produce invalid blocks by not adding new deposits and voting for the same eth1Data
 */
export class Eth1ForBlockProductionDisabled implements IEth1ForBlockProduction {
  /**
   * Returns the same eth1Data as in the state and no deposits
   * May produce invalid blocks if deposits have to be added
   */
  async getEth1DataAndDeposits(state: CachedBeaconStateAllForks): Promise<Eth1DataAndDeposits> {
    return {eth1Data: state.eth1Data, deposits: []};
  }

  /**
   * Will miss the opportunity to propose the merge block but will still produce valid blocks
   */
  async getTerminalPowBlock(): Promise<Root | null> {
    return null;
  }

  /** Will not be able to validate the merge block */
  async getPowBlock(_powBlockHash: string): Promise<PowMergeBlock | null> {
    throw Error("eth1 must be enabled to verify merge block");
  }

  getTDProgress(): TDProgress | null {
    return null;
  }

  isPollingEth1Data(): boolean {
    return false;
  }

  startPollingMergeBlock(): void {
    // Ignore
  }

  stopPollingEth1Data(): void {
    // Ignore
  }
}

@@ -1,131 +0,0 @@
import {BeaconConfig} from "@lodestar/config";
import {CachedBeaconStateAllForks} from "@lodestar/state-transition";
import {Root, RootHex, phase0} from "@lodestar/types";

export type EthJsonRpcBlockRaw = {
  /** the block number. null when it's a pending block. `"0x1b4"` */
  number: string;
  /** 32 Bytes - hash of the block. null when it's a pending block. `"0xdc0818cf78f21a8e70579cb46a43643f78291264dda342ae31049421c82d21ae"` */
  hash: string;
  /** 32 Bytes - hash of the parent block. `"0xe99e022112df268087ea7eafaf4790497fd21dbeeb6bd7a1721df161a6657a54"` */
  parentHash: string;
  /**
   * integer of the total difficulty of the chain until this block. `"0x78ed983323d"`.
   * The current mainnet value is 0x684de10dc5c03f006b6, 75 bits, so it requires a bigint.
   */
  totalDifficulty: string;
  /** the unix timestamp for when the block was collated. `"0x55ba467c"` */
  timestamp: string;
};

export interface IEth1Provider {
  deployBlock: number;
  getBlockNumber(): Promise<number>;
  /** Returns HTTP code 200 + value=null if block is not found */
  getBlockByNumber(blockNumber: number | "latest"): Promise<EthJsonRpcBlockRaw | null>;
  /** Returns HTTP code 200 + value=null if block is not found */
  getBlockByHash(blockHashHex: string): Promise<EthJsonRpcBlockRaw | null>;
  /** null returns are ignored, may return a different number of blocks than expected */
  getBlocksByNumber(fromBlock: number, toBlock: number): Promise<EthJsonRpcBlockRaw[]>;
  getDepositEvents(fromBlock: number, toBlock: number): Promise<phase0.DepositEvent[]>;
  validateContract(): Promise<void>;
  getState(): Eth1ProviderState;
}

export enum Eth1ProviderState {
  ONLINE = "ONLINE",
  OFFLINE = "OFFLINE",
  ERROR = "ERROR",
  AUTH_FAILED = "AUTH_FAILED",
}

export type Eth1DataAndDeposits = {
  eth1Data: phase0.Eth1Data;
  deposits: phase0.Deposit[];
};

export interface IEth1ForBlockProduction {
  getEth1DataAndDeposits(state: CachedBeaconStateAllForks): Promise<Eth1DataAndDeposits>;

  /** Returns the most recent POW block that satisfies the merge block condition */
  getTerminalPowBlock(): Promise<Root | null>;
  /** Get a POW block by hash, checking the local cache first */
  getPowBlock(powBlockHash: string): Promise<PowMergeBlock | null>;

  /** Get current TD progress for the log notifier */
  getTDProgress(): TDProgress | null;

  /**
   * Should only start polling for mergeBlock if:
   * - after BELLATRIX_FORK_EPOCH
   * - the beacon node is synced
   * - the head state is not isMergeTransitionComplete
   */
  startPollingMergeBlock(): void;

  isPollingEth1Data(): boolean;

  /**
   * Should stop polling eth1Data after an Electra block is finalized AND deposit_requests_start_index is reached
   */
  stopPollingEth1Data(): void;
}

/** Differs from phase0.Eth1Block in that it includes blockHash */
export type Eth1Block = {
  blockHash: Uint8Array;
  blockNumber: number;
  timestamp: number;
};

export type PowMergeBlock = {
  number: number;
  blockHash: RootHex;
  parentHash: RootHex;
  totalDifficulty: bigint;
};

export type PowMergeBlockTimestamp = PowMergeBlock & {
  /** in seconds */
  timestamp: number;
};

export type TDProgress =
  | {
      ttdHit: false;
      /** Power of ten by which tdDiffScaled is scaled down */
      tdFactor: bigint;
      /** (TERMINAL_TOTAL_DIFFICULTY - block.totalDifficulty) / tdFactor */
      tdDiffScaled: number;
      /** TERMINAL_TOTAL_DIFFICULTY */
      ttd: bigint;
      /** totalDifficulty of the latest fetched eth1 block */
      td: bigint;
      /** timestamp in sec of the latest fetched eth1 block */
      timestamp: number;
    }
  | {ttdHit: true};

export type BatchDepositEvents = {
  depositEvents: phase0.DepositEvent[];
  blockNumber: number;
};

export type Eth1Streamer = {
  getDepositsStream(fromBlock: number): AsyncGenerator<BatchDepositEvents>;
  getDepositsAndBlockStreamForGenesis(fromBlock: number): AsyncGenerator<[phase0.DepositEvent[], phase0.Eth1Block]>;
};

export type IEth1StreamParams = Pick<
  BeaconConfig,
  "ETH1_FOLLOW_DISTANCE" | "MIN_GENESIS_TIME" | "GENESIS_DELAY" | "SECONDS_PER_ETH1_BLOCK"
> & {
  maxBlocksPerPoll: number;
};

export type IJson = string | number | boolean | undefined | IJson[] | {[key: string]: IJson};

export interface RpcPayload<P = IJson[]> {
  method: string;
  params: P;
}

@@ -1,28 +0,0 @@
export type Eth1Options = {
  enabled?: boolean;
  disableEth1DepositDataTracker?: boolean;
  providerUrls?: string[];
  /**
   * jwtSecretHex is the JWT secret used when the eth1 modules must query the JWT-auth
   * protected engine endpoints.
   */
  jwtSecretHex?: string;
  jwtId?: string;
  jwtVersion?: string;
  depositContractDeployBlock?: number;
  unsafeAllowDepositDataOverwrite?: boolean;
  /**
   * Vote for a specific eth1_data regardless of validity and existing votes.
   * Hex-encoded ssz-serialized Eth1Data type.
   */
  forcedEth1DataVote?: string;
};

export const DEFAULT_PROVIDER_URLS = ["http://localhost:8545"];

export const defaultEth1Options: Eth1Options = {
  enabled: true,
  providerUrls: DEFAULT_PROVIDER_URLS,
  depositContractDeployBlock: 0,
  unsafeAllowDepositDataOverwrite: false,
};

@@ -1,229 +0,0 @@
import {ChainConfig} from "@lodestar/config";
import {Logger} from "@lodestar/logger";
import {phase0} from "@lodestar/types";
import {
  FetchError,
  createElapsedTimeTracker,
  fromHex,
  isErrorAborted,
  isFetchError,
  toHex,
  toPrintableUrl,
} from "@lodestar/utils";
import {HTTP_CONNECTION_ERROR_CODES, HTTP_FATAL_ERROR_CODES} from "../../execution/engine/utils.js";
import {isValidAddress} from "../../util/address.js";
import {linspace} from "../../util/numpy.js";
import {Eth1Block, Eth1ProviderState, EthJsonRpcBlockRaw, IEth1Provider} from "../interface.js";
import {DEFAULT_PROVIDER_URLS, Eth1Options} from "../options.js";
import {depositEventTopics, parseDepositLog} from "../utils/depositContract.js";
import {
  ErrorJsonRpcResponse,
  HttpRpcError,
  JsonRpcHttpClient,
  JsonRpcHttpClientEvent,
  JsonRpcHttpClientMetrics,
  ReqOpts,
} from "./jsonRpcHttpClient.js";
import {dataToBytes, isJsonRpcTruncatedError, numToQuantity, quantityToNum} from "./utils.js";

/**
 * Binds return types to Ethereum JSON RPC methods
 */
type EthJsonRpcReturnTypes = {
  eth_getBlockByNumber: EthJsonRpcBlockRaw | null;
  eth_getBlockByHash: EthJsonRpcBlockRaw | null;
  eth_blockNumber: string;
  eth_getCode: string;
  eth_getLogs: {
    removed: boolean;
    logIndex: string;
    transactionIndex: string;
    transactionHash: string;
    blockHash: string;
    blockNumber: string;
    address: string;
    data: string;
    topics: string[];
  }[];
};

// Define static options once to prevent extra allocations
const getBlocksByNumberOpts: ReqOpts = {routeId: "getBlockByNumber_batched"};
const getBlockByNumberOpts: ReqOpts = {routeId: "getBlockByNumber"};
const getBlockByHashOpts: ReqOpts = {routeId: "getBlockByHash"};
const getBlockNumberOpts: ReqOpts = {routeId: "getBlockNumber"};
const getLogsOpts: ReqOpts = {routeId: "getLogs"};

const isOneMinutePassed = createElapsedTimeTracker({minElapsedTime: 60_000});

export class Eth1Provider implements IEth1Provider {
  readonly deployBlock: number;
  private readonly depositContractAddress: string;
  private readonly rpc: JsonRpcHttpClient;
  // The default state is ONLINE; it is updated when an HTTP error is received
  private state: Eth1ProviderState = Eth1ProviderState.ONLINE;
  private logger?: Logger;

  constructor(
    config: Pick<ChainConfig, "DEPOSIT_CONTRACT_ADDRESS">,
    opts: Pick<Eth1Options, "depositContractDeployBlock" | "providerUrls" | "jwtSecretHex" | "jwtId" | "jwtVersion"> & {
      logger?: Logger;
    },
    signal?: AbortSignal,
    metrics?: JsonRpcHttpClientMetrics | null
  ) {
    this.logger = opts.logger;
    this.deployBlock = opts.depositContractDeployBlock ?? 0;
    this.depositContractAddress = toHex(config.DEPOSIT_CONTRACT_ADDRESS);

    const providerUrls = opts.providerUrls ?? DEFAULT_PROVIDER_URLS;
    this.rpc = new JsonRpcHttpClient(providerUrls, {
      signal,
      // Don't fall back on a truncated-response error. Throw early and let the retry on this class handle it
      shouldNotFallback: isJsonRpcTruncatedError,
      jwtSecret: opts.jwtSecretHex ? fromHex(opts.jwtSecretHex) : undefined,
      jwtId: opts.jwtId,
      jwtVersion: opts.jwtVersion,
      metrics: metrics,
    });
    this.logger?.info("Eth1 provider", {urls: providerUrls.map(toPrintableUrl).toString()});

    this.rpc.emitter.on(JsonRpcHttpClientEvent.RESPONSE, () => {
      const oldState = this.state;
      this.state = Eth1ProviderState.ONLINE;

      if (oldState !== Eth1ProviderState.ONLINE) {
        this.logger?.info("Eth1 provider is back online", {oldState, newState: this.state});
      }
    });

    this.rpc.emitter.on(JsonRpcHttpClientEvent.ERROR, ({error}) => {
      if (isErrorAborted(error)) {
        this.state = Eth1ProviderState.ONLINE;
      } else if ((error as unknown) instanceof HttpRpcError || (error as unknown) instanceof ErrorJsonRpcResponse) {
        this.state = Eth1ProviderState.ERROR;
      } else if (error && isFetchError(error) && HTTP_FATAL_ERROR_CODES.includes((error as FetchError).code)) {
        this.state = Eth1ProviderState.OFFLINE;
      } else if (error && isFetchError(error) && HTTP_CONNECTION_ERROR_CODES.includes((error as FetchError).code)) {
        this.state = Eth1ProviderState.AUTH_FAILED;
      }

      if (this.state !== Eth1ProviderState.ONLINE && isOneMinutePassed()) {
        this.logger?.error(
          "Eth1 provider error",
          {
            state: this.state,
            lastErrorAt: new Date(Date.now() - isOneMinutePassed.msSinceLastCall).toLocaleTimeString(),
          },
          error
        );
      }
    });
  }

  getState(): Eth1ProviderState {
    return this.state;
  }

  async validateContract(): Promise<void> {
    if (!isValidAddress(this.depositContractAddress)) {
      throw Error(`Invalid contract address: ${this.depositContractAddress}`);
    }

    const code = await this.getCode(this.depositContractAddress);
    if (!code || code === "0x") {
      throw new Error(`There is no deposit contract at the given address: ${this.depositContractAddress}`);
    }
  }

  async getDepositEvents(fromBlock: number, toBlock: number): Promise<phase0.DepositEvent[]> {
    const logsRawArr = await this.getLogs({
      fromBlock,
      toBlock,
      address: this.depositContractAddress,
      topics: depositEventTopics,
    });
    return logsRawArr.flat(1).map((log) => parseDepositLog(log));
  }

  /**
   * Fetches an arbitrary range of block numbers in a single batch request
   */
  async getBlocksByNumber(fromBlock: number, toBlock: number): Promise<EthJsonRpcBlockRaw[]> {
    const method = "eth_getBlockByNumber";
    const blocksArr = await this.rpc.fetchBatch<EthJsonRpcReturnTypes[typeof method]>(
      linspace(fromBlock, toBlock).map((blockNumber) => ({method, params: [numToQuantity(blockNumber), false]})),
      getBlocksByNumberOpts
    );
    const blocks: EthJsonRpcBlockRaw[] = [];
    for (const block of blocksArr.flat(1)) {
      if (block) blocks.push(block);
    }
    return blocks;
  }

  async getBlockByNumber(blockNumber: number | "latest"): Promise<EthJsonRpcBlockRaw | null> {
    const method = "eth_getBlockByNumber";
    const blockNumberHex = typeof blockNumber === "string" ? blockNumber : numToQuantity(blockNumber);
    return this.rpc.fetch<EthJsonRpcReturnTypes[typeof method]>(
      // false = include only transaction roots, not full objects
      {method, params: [blockNumberHex, false]},
      getBlockByNumberOpts
    );
  }

  async getBlockByHash(blockHashHex: string): Promise<EthJsonRpcBlockRaw | null> {
    const method = "eth_getBlockByHash";
    return this.rpc.fetch<EthJsonRpcReturnTypes[typeof method]>(
      // false = include only transaction roots, not full objects
      {method, params: [blockHashHex, false]},
      getBlockByHashOpts
    );
  }

  async getBlockNumber(): Promise<number> {
    const method = "eth_blockNumber";
    const blockNumberRaw = await this.rpc.fetch<EthJsonRpcReturnTypes[typeof method]>(
      {method, params: []},
      getBlockNumberOpts
    );
    return parseInt(blockNumberRaw, 16);
  }

  async getCode(address: string): Promise<string> {
    const method = "eth_getCode";
    return this.rpc.fetch<EthJsonRpcReturnTypes[typeof method]>({method, params: [address, "latest"]});
  }

  async getLogs(options: {
    fromBlock: number;
    toBlock: number;
    address: string;
    topics: string[];
  }): Promise<{blockNumber: number; data: string; topics: string[]}[]> {
    const method = "eth_getLogs";
    const hexOptions = {
      ...options,
      fromBlock: numToQuantity(options.fromBlock),
      toBlock: numToQuantity(options.toBlock),
    };
    const logsRaw = await this.rpc.fetch<EthJsonRpcReturnTypes[typeof method]>(
      {method, params: [hexOptions]},
      getLogsOpts
    );
    return logsRaw.map((logRaw) => ({
      blockNumber: parseInt(logRaw.blockNumber, 16),
      data: logRaw.data,
      topics: logRaw.topics,
    }));
  }
}

export function parseEth1Block(blockRaw: EthJsonRpcBlockRaw): Eth1Block {
  if (typeof blockRaw !== "object") throw Error("block is not an object");
  return {
    blockHash: dataToBytes(blockRaw.hash, 32),
    blockNumber: quantityToNum(blockRaw.number, "block.number"),
    timestamp: quantityToNum(blockRaw.timestamp, "block.timestamp"),
  };
}

@@ -1,136 +0,0 @@
import {RootHex} from "@lodestar/types";
import {bigIntToBytes, bytesToBigInt, fromHex, fromHexInto, toHex} from "@lodestar/utils";
import {ErrorParseJson} from "./jsonRpcHttpClient.js";

/** QUANTITY as defined in ethereum execution layer JSON RPC https://eth.wiki/json-rpc/API */
export type QUANTITY = string;
/** DATA as defined in ethereum execution layer JSON RPC https://eth.wiki/json-rpc/API */
export type DATA = string;

export const rootHexRegex = /^0x[a-fA-F0-9]{64}$/;

export function numberToHex(n: number | bigint): string {
  return "0x" + n.toString(16);
}

export function isJsonRpcTruncatedError(error: Error): boolean {
  return (
    // Truncated responses usually arrive as HTTP 200, but since the body is truncated the JSON will be invalid
    error instanceof ErrorParseJson ||
    // Otherwise guess the Infura error message of too many events
    (error instanceof Error && error.message.includes("query returned more than 10000 results")) ||
    // Nethermind enforces limits on JSON RPC batch calls
    (error instanceof Error && error.message.toLowerCase().includes("batch size limit exceeded"))
  );
}

export function bytesToHex(bytes: Uint8Array): string {
  // Handle the special case in Ethereum hex formatting where values may be a single hex character:
  // 0x0, 0x1 are valid values
  if (bytes.length === 1 && bytes[0] <= 0xf) {
    return "0x" + bytes[0].toString(16);
  }

  return toHex(bytes);
}
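
// Examples: single-byte values <= 0xf get the compact QUANTITY-style form,
// everything else falls through to the generic toHex() encoding.
bytesToHex(new Uint8Array([0x01])); // "0x1"
bytesToHex(new Uint8Array([0xff])); // "0xff"
bytesToHex(new Uint8Array([0x01, 0x02])); // "0x0102"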

/**
 * QUANTITY as defined in ethereum execution layer JSON RPC https://eth.wiki/json-rpc/API
 *
 * When encoding QUANTITIES (integers, numbers): encode as hex, prefix with “0x”, the most compact representation (slight exception: zero should be represented as “0x0”). Examples:
 * - 0x41 (65 in decimal)
 * - 0x400 (1024 in decimal)
 * - WRONG: 0x (should always have at least one digit - zero is “0x0”)
 * - WRONG: 0x0400 (no leading zeroes allowed)
 * - WRONG: ff (must be prefixed 0x)
 */
export function numToQuantity(num: number | bigint): QUANTITY {
  return "0x" + num.toString(16);
}

/**
 * QUANTITY as defined in ethereum execution layer JSON RPC https://eth.wiki/json-rpc/API
 */
export function quantityToNum(hex: QUANTITY, id = ""): number {
  const num = parseInt(hex, 16);
  if (Number.isNaN(num) || num < 0) throw Error(`Invalid hex decimal ${id} '${hex}'`);
  return num;
}
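
// Examples of the QUANTITY rules documented above:
numToQuantity(65); // "0x41"
numToQuantity(1024); // "0x400"
numToQuantity(0); // "0x0" (zero keeps a single digit)
quantityToNum("0x400"); // 1024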

/**
 * QUANTITY as defined in ethereum execution layer JSON RPC https://eth.wiki/json-rpc/API.
 * Type-safe fn to convert a hex string to bigint, since the BigInt constructor param is typed as any
 */
export function quantityToBigint(hex: QUANTITY, id = ""): bigint {
  try {
    return BigInt(hex);
  } catch (e) {
    throw Error(`Invalid hex bigint ${id} '${hex}': ${(e as Error).message}`);
  }
}

/**
 * QUANTITY as defined in ethereum execution layer JSON RPC https://eth.wiki/json-rpc/API.
 */
export function quantityToBytes(hex: QUANTITY): Uint8Array {
  const bn = quantityToBigint(hex);
  return bigIntToBytes(bn, 32, "le");
}

/**
 * QUANTITY as defined in ethereum execution layer JSON RPC https://eth.wiki/json-rpc/API.
 * Compress a 32 ByteVector into a QUANTITY
 */
export function bytesToQuantity(bytes: Uint8Array): QUANTITY {
  const bn = bytesToBigInt(bytes, "le");
  return numToQuantity(bn);
}
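
// Example round trip: a QUANTITY packed into a 32-byte little-endian vector and back.
const bytes32 = quantityToBytes("0x1b4"); // Uint8Array(32): [0xb4, 0x01, 0, ...]
bytesToQuantity(bytes32); // "0x1b4"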

/**
 * DATA as defined in ethereum execution layer JSON RPC https://eth.wiki/json-rpc/API
 *
 * When encoding UNFORMATTED DATA (byte arrays, account addresses, hashes, bytecode arrays): encode as hex, prefix with
 * “0x”, two hex digits per byte. Examples:
 *
 * - 0x41 (size 1, “A”)
 * - 0x004200 (size 3, “\0B\0”)
 * - 0x (size 0, “”)
 * - WRONG: 0xf0f0f (must be even number of digits)
 * - WRONG: 004200 (must be prefixed 0x)
 */
export function bytesToData(bytes: Uint8Array): DATA {
  return toHex(bytes);
}

/**
 * DATA as defined in ethereum execution layer JSON RPC https://eth.wiki/json-rpc/API
 */
export function dataToBytes(hex: DATA, fixedLength: number | null): Uint8Array {
  try {
    const bytes = fromHex(hex);
    if (fixedLength != null && bytes.length !== fixedLength) {
      throw Error(`Wrong data length ${bytes.length} expected ${fixedLength}`);
    }
    return bytes;
  } catch (e) {
    (e as Error).message = `Invalid hex string: ${(e as Error).message}`;
    throw e;
  }
}

/**
 * Convert DATA into a preallocated buffer
 * fromHexInto will throw if the buffer's length is not the same as the decoded hex length
 */
export function dataIntoBytes(hex: DATA, buffer: Uint8Array): Uint8Array {
  fromHexInto(hex, buffer);
  return buffer;
}

/**
 * DATA as defined in ethereum execution layer JSON RPC https://eth.wiki/json-rpc/API
 */
export function dataToRootHex(hex: DATA, id = ""): RootHex {
  if (!rootHexRegex.test(hex)) throw Error(`Invalid hex root ${id} '${hex}'`);
  return hex;
}

@@ -1,75 +0,0 @@
import {phase0} from "@lodestar/types";
import {sleep} from "@lodestar/utils";
import {BatchDepositEvents, Eth1Block, IEth1Provider, IEth1StreamParams} from "./interface.js";
import {parseEth1Block} from "./provider/eth1Provider.js";
import {groupDepositEventsByBlock} from "./utils/groupDepositEventsByBlock.js";
import {optimizeNextBlockDiffForGenesis} from "./utils/optimizeNextBlockDiffForGenesis.js";

/**
 * Phase 1 of genesis building.
 * Not enough validators yet, so only stream deposits
 * @param signal Aborting makes the stream return after the current while-loop cycle; also aborts the internal sleep
 */
export async function* getDepositsStream(
  fromBlock: number,
  provider: IEth1Provider,
  params: IEth1StreamParams,
  signal?: AbortSignal
): AsyncGenerator<BatchDepositEvents> {
  fromBlock = Math.max(fromBlock, provider.deployBlock);

  while (true) {
    const remoteFollowBlock = await getRemoteFollowBlock(provider, params);
    const toBlock = Math.min(remoteFollowBlock, fromBlock + params.maxBlocksPerPoll);
    const logs = await provider.getDepositEvents(fromBlock, toBlock);
    for (const batchedDeposits of groupDepositEventsByBlock(logs)) {
      yield batchedDeposits;
    }

    fromBlock = toBlock;

    // If the head is reached, sleep for an eth1 block. Throws if the signal is aborted
    await sleep(toBlock >= remoteFollowBlock ? params.SECONDS_PER_ETH1_BLOCK * 1000 : 10, signal);
  }
}

/**
 * Phase 2 of genesis building.
 * There are enough validators, so stream deposits and blocks
 * @param signal Aborting makes the stream return after the current while-loop cycle; also aborts the internal sleep
 */
export async function* getDepositsAndBlockStreamForGenesis(
  fromBlock: number,
  provider: IEth1Provider,
  params: IEth1StreamParams,
  signal?: AbortSignal
): AsyncGenerator<[phase0.DepositEvent[], Eth1Block]> {
  fromBlock = Math.max(fromBlock, provider.deployBlock);
  fromBlock = Math.min(fromBlock, await getRemoteFollowBlock(provider, params));
  let toBlock = fromBlock; // First, fetch only the first block

  while (true) {
    const [logs, blockRaw] = await Promise.all([
      provider.getDepositEvents(fromBlock, toBlock),
      provider.getBlockByNumber(toBlock),
    ]);

    if (!blockRaw) throw Error(`No block found for number ${toBlock}`);
    const block = parseEth1Block(blockRaw);

    yield [logs, block];

    const remoteFollowBlock = await getRemoteFollowBlock(provider, params);
    const nextBlockDiff = optimizeNextBlockDiffForGenesis(block, params);
    fromBlock = toBlock;
    toBlock = Math.min(remoteFollowBlock, fromBlock + Math.min(nextBlockDiff, params.maxBlocksPerPoll));

    // If the head is reached, sleep for an eth1 block. Throws if the signal is aborted
    await sleep(toBlock >= remoteFollowBlock ? params.SECONDS_PER_ETH1_BLOCK * 1000 : 10, signal);
  }
}

async function getRemoteFollowBlock(provider: IEth1Provider, params: IEth1StreamParams): Promise<number> {
  const remoteHighestBlock = await provider.getBlockNumber();
  return Math.max(remoteHighestBlock - params.ETH1_FOLLOW_DISTANCE, 0);
}
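
// Example (illustrative numbers): with the remote head at block 20_000 and
// ETH1_FOLLOW_DISTANCE = 2048, getRemoteFollowBlock resolves to 17_952; young
// chains shorter than the follow distance clamp to 0 instead of going negative.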

@@ -1,37 +0,0 @@
import {Interface} from "@ethersproject/abi";
import {phase0, ssz} from "@lodestar/types";
import {fromHex} from "@lodestar/utils";

const depositEventFragment =
  "event DepositEvent(bytes pubkey, bytes withdrawal_credentials, bytes amount, bytes signature, bytes index)";

const depositContractInterface = new Interface([depositEventFragment]);

/**
 * Precomputed topics of DepositEvent logs
 */
export const depositEventTopics = [depositContractInterface.getEventTopic("DepositEvent")];

/**
 * Parse a DepositEvent log
 */
export function parseDepositLog(log: {blockNumber: number; data: string; topics: string[]}): phase0.DepositEvent {
  const event = depositContractInterface.parseLog(log);
  const values = event.args;
  if (values === undefined) throw Error(`DepositEvent at ${log.blockNumber} has no values`);
  return {
    blockNumber: log.blockNumber,
    index: parseHexNumLittleEndian(values.index),
    depositData: {
      pubkey: fromHex(values.pubkey),
      withdrawalCredentials: fromHex(values.withdrawal_credentials),
      amount: parseHexNumLittleEndian(values.amount),
      signature: fromHex(values.signature),
    },
  };
}

function parseHexNumLittleEndian(hex: string): number {
  // Can't use parseInt() because the value is a hex string in little endian
  return ssz.UintNum64.deserialize(fromHex(hex));
}

@@ -1,70 +0,0 @@
import {Tree, toGindex} from "@chainsafe/persistent-merkle-tree";
import {FilterOptions} from "@lodestar/db";
import {CachedBeaconStateAllForks, getEth1DepositCount} from "@lodestar/state-transition";
import {phase0, ssz} from "@lodestar/types";
import {toRootHex} from "@lodestar/utils";
import {DepositTree} from "../../db/repositories/depositDataRoot.js";
import {Eth1Error, Eth1ErrorCode} from "../errors.js";

export type DepositGetter<T> = (indexRange: FilterOptions<number>, eth1Data: phase0.Eth1Data) => Promise<T[]>;

export async function getDeposits<T>(
  // eth1_deposit_index represents the next deposit index to be added
  state: CachedBeaconStateAllForks,
  eth1Data: phase0.Eth1Data,
  depositsGetter: DepositGetter<T>
): Promise<T[]> {
  const depositIndex = state.eth1DepositIndex;
  const depositCount = eth1Data.depositCount;

  if (depositIndex > depositCount) {
    throw new Eth1Error({code: Eth1ErrorCode.DEPOSIT_INDEX_TOO_HIGH, depositIndex, depositCount});
  }

  const depositsLen = getEth1DepositCount(state, eth1Data);

  if (depositsLen === 0) {
    return []; // If depositsLen === 0, we can return early since no deposits will be returned from depositsGetter
  }

  const indexRange = {gte: depositIndex, lt: depositIndex + depositsLen};
  const deposits = await depositsGetter(indexRange, eth1Data);

  if (deposits.length < depositsLen) {
    throw new Eth1Error({code: Eth1ErrorCode.NOT_ENOUGH_DEPOSITS, len: deposits.length, expectedLen: depositsLen});
  }

  if (deposits.length > depositsLen) {
    throw new Eth1Error({code: Eth1ErrorCode.TOO_MANY_DEPOSITS, len: deposits.length, expectedLen: depositsLen});
  }

  return deposits;
}

export function getDepositsWithProofs(
  depositEvents: phase0.DepositEvent[],
  depositRootTree: DepositTree,
  eth1Data: phase0.Eth1Data
): phase0.Deposit[] {
  // Get the tree at this particular depositCount to compute correct proofs
  const viewAtDepositCount = depositRootTree.sliceTo(eth1Data.depositCount - 1);

  const depositRoot = viewAtDepositCount.hashTreeRoot();

  if (!ssz.Root.equals(depositRoot, eth1Data.depositRoot)) {
    throw new Eth1Error({
      code: Eth1ErrorCode.WRONG_DEPOSIT_ROOT,
      root: toRootHex(depositRoot),
      expectedRoot: toRootHex(eth1Data.depositRoot),
    });
  }

  // Already committed by the .hashTreeRoot() call above
  const treeAtDepositCount = new Tree(viewAtDepositCount.node);
  const depositTreeDepth = viewAtDepositCount.type.depth;

  return depositEvents.map((log) => ({
    proof: treeAtDepositCount.getSingleProof(toGindex(depositTreeDepth, BigInt(log.index))),
    data: log.depositData,
  }));
}

@@ -1,100 +0,0 @@
import {Root, phase0} from "@lodestar/types";
import {DepositTree} from "../../db/repositories/depositDataRoot.js";
import {binarySearchLte} from "../../util/binarySearch.js";
import {Eth1Error, Eth1ErrorCode} from "../errors.js";
import {Eth1Block} from "../interface.js";

type BlockNumber = number;

/**
 * Attaches partial eth1 data (depositRoot, depositCount) to a sequence of blocks.
 * The eth1 data per block is inferred from the sparse eth1 data obtained from the deposit logs
 */
export async function getEth1DataForBlocks(
  blocks: Eth1Block[],
  depositDescendingStream: AsyncIterable<phase0.DepositEvent>,
  depositRootTree: DepositTree,
  lastProcessedDepositBlockNumber: BlockNumber | null
): Promise<(phase0.Eth1Data & Eth1Block)[]> {
  // Exclude blocks for which there is no valid eth1 data deposit
  if (lastProcessedDepositBlockNumber !== null) {
    blocks = blocks.filter((block) => block.blockNumber <= lastProcessedDepositBlockNumber);
  }

  // A valid block can be constructed using the previous `state.eth1Data`, don't throw
  if (blocks.length === 0) {
    return [];
  }

  // Collect the latest deposit of each blockNumber in a block number range
  const fromBlock = blocks[0].blockNumber;
  const toBlock = blocks.at(-1)?.blockNumber as number;
  const depositsByBlockNumber = await getDepositsByBlockNumber(fromBlock, toBlock, depositDescendingStream);
  if (depositsByBlockNumber.length === 0) {
    throw new Eth1Error({code: Eth1ErrorCode.NO_DEPOSITS_FOR_BLOCK_RANGE, fromBlock, toBlock});
  }

  // Precompute a map of depositCount => depositRoot (from depositRootTree)
  const depositCounts = depositsByBlockNumber.map((event) => event.index + 1);
  const depositRootByDepositCount = getDepositRootByDepositCount(depositCounts, depositRootTree);

  const eth1Datas: (phase0.Eth1Data & Eth1Block)[] = [];
  for (const block of blocks) {
    const deposit = binarySearchLte(depositsByBlockNumber, block.blockNumber, (event) => event.blockNumber);
    const depositCount = deposit.index + 1;
    const depositRoot = depositRootByDepositCount.get(depositCount);
    if (depositRoot === undefined) {
      throw new Eth1Error({code: Eth1ErrorCode.NO_DEPOSIT_ROOT, depositCount});
    }
    eth1Datas.push({...block, depositCount, depositRoot});
  }
  return eth1Datas;
}

/**
 * Collect depositCount by blockNumber from a stream matching a block number range.
 * For a given blockNumber, its depositCount is equal to the index + 1 of the
 * closest deposit event whose deposit.blockNumber <= blockNumber
 * @returns array ascending by blockNumber
 */
export async function getDepositsByBlockNumber(
  fromBlock: BlockNumber,
  toBlock: BlockNumber,
  depositEventDescendingStream: AsyncIterable<phase0.DepositEvent>
): Promise<phase0.DepositEvent[]> {
  const depositCountMap = new Map<BlockNumber, phase0.DepositEvent>();
  // Consume the descending stream until passing the first deposit below the range's lower bound (inclusive)
  for await (const deposit of depositEventDescendingStream) {
    if (deposit.blockNumber <= toBlock && !depositCountMap.has(deposit.blockNumber)) {
      depositCountMap.set(deposit.blockNumber, deposit);
    }
    if (deposit.blockNumber < fromBlock) {
      break;
    }
  }

  return Array.from(depositCountMap.values()).sort((a, b) => a.blockNumber - b.blockNumber);
}

/**
 * Precompute a map of depositCount => depositRoot from a depositRootTree filled beforehand
 */
export function getDepositRootByDepositCount(depositCounts: number[], depositRootTree: DepositTree): Map<number, Root> {
  // Unique + sort numerically in descending order
  depositCounts = [...new Set(depositCounts)].sort((a, b) => b - a);

  if (depositCounts.length > 0) {
    const maxIndex = depositCounts[0] - 1;
    const treeLength = depositRootTree.length - 1;
    if (maxIndex > treeLength) {
      throw new Eth1Error({code: Eth1ErrorCode.NOT_ENOUGH_DEPOSIT_ROOTS, index: maxIndex, treeLength});
    }
  }

  const depositRootByDepositCount = new Map<number, Root>();
  for (const depositCount of depositCounts) {
    depositRootTree = depositRootTree.sliceTo(depositCount - 1);
    depositRootByDepositCount.set(depositCount, depositRootTree.hashTreeRoot());
  }
  return depositRootByDepositCount;
}

@@ -1,12 +0,0 @@
/**
 * Assert that an array of deposits is consecutive and ascending
 */
export function assertConsecutiveDeposits(depositEvents: {index: number}[]): void {
  for (let i = 0; i < depositEvents.length - 1; i++) {
    const indexLeft = depositEvents[i].index;
    const indexRight = depositEvents[i + 1].index;
    if (indexLeft !== indexRight - 1) {
      throw Error(`Non consecutive deposits. deposit[${i}] = ${indexLeft}, deposit[${i + 1}] = ${indexRight}`);
    }
  }
}
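
// Examples:
assertConsecutiveDeposits([{index: 5}, {index: 6}, {index: 7}]); // passes
assertConsecutiveDeposits([{index: 5}, {index: 7}]); // throws: non consecutive deposits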

@@ -1,142 +0,0 @@
import {ChainForkConfig} from "@lodestar/config";
import {EPOCHS_PER_ETH1_VOTING_PERIOD, SLOTS_PER_EPOCH, isForkPostElectra} from "@lodestar/params";
import {BeaconStateAllForks, BeaconStateElectra, computeTimeAtSlot} from "@lodestar/state-transition";
import {RootHex, phase0} from "@lodestar/types";
import {toRootHex} from "@lodestar/utils";

export type Eth1DataGetter = ({
  timestampRange,
}: {
  timestampRange: {gte: number; lte: number};
}) => Promise<phase0.Eth1Data[]>;

export async function getEth1VotesToConsider(
  config: ChainForkConfig,
  state: BeaconStateAllForks,
  eth1DataGetter: Eth1DataGetter
): Promise<phase0.Eth1Data[]> {
  const fork = config.getForkName(state.slot);
  if (isForkPostElectra(fork)) {
    const {eth1DepositIndex, depositRequestsStartIndex} = state as BeaconStateElectra;
    if (eth1DepositIndex === Number(depositRequestsStartIndex)) {
      return state.eth1DataVotes.getAllReadonly();
    }
  }

  const periodStart = votingPeriodStartTime(config, state);
  const {SECONDS_PER_ETH1_BLOCK, ETH1_FOLLOW_DISTANCE} = config;

  // Modified version of the spec function to fetch the required range directly from the DB
  return (
    await eth1DataGetter({
      timestampRange: {
        // Spec v0.12.2
        // is_candidate_block =
        //   block.timestamp + SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE <= period_start &&
        //   block.timestamp + SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE * 2 >= period_start
        lte: periodStart - SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE,
        gte: periodStart - SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE * 2,
      },
    })
  ).filter((eth1Data) => eth1Data.depositCount >= state.eth1Data.depositCount);
}

export function pickEth1Vote(state: BeaconStateAllForks, votesToConsider: phase0.Eth1Data[]): phase0.Eth1Data {
  const votesToConsiderKeys = new Set<string>();
  for (const eth1Data of votesToConsider) {
    votesToConsiderKeys.add(getEth1DataKey(eth1Data));
  }

  const eth1DataHashToEth1Data = new Map<RootHex, phase0.Eth1Data>();
  const eth1DataVoteCountByRoot = new Map<RootHex, number>();
  const eth1DataVotesOrder: RootHex[] = [];

  // BeaconStateAllForks is always represented as a tree with a hashing cache.
  // To check equality it's cheaper to use hashTreeRoot as keys.
  // However `votesToConsider` is an array of values since those are read from the DB.
  // TODO: Optimize the cache of known votes, to prevent re-hashing stored values.
  // Note: for low validator counts it's not very important, since this runs once per proposal
  const eth1DataVotes = state.eth1DataVotes.getAllReadonly();
  for (const eth1DataVote of eth1DataVotes) {
    const rootHex = getEth1DataKey(eth1DataVote);

    if (votesToConsiderKeys.has(rootHex)) {
      const prevVoteCount = eth1DataVoteCountByRoot.get(rootHex);
      eth1DataVoteCountByRoot.set(rootHex, 1 + (prevVoteCount ?? 0));

      // Cache the eth1DataVote to root Map only once per root
      if (prevVoteCount === undefined) {
        eth1DataHashToEth1Data.set(rootHex, eth1DataVote);
        eth1DataVotesOrder.push(rootHex);
      }
    }
  }

  const eth1DataRootsMaxVotes = getKeysWithMaxValue(eth1DataVoteCountByRoot);

  // No votes, vote for the last valid vote
  if (eth1DataRootsMaxVotes.length === 0) {
    return votesToConsider.at(-1) ?? state.eth1Data;
  }

  // If there's a single winning vote with a majority, vote for that one
  if (eth1DataRootsMaxVotes.length === 1) {
    return eth1DataHashToEth1Data.get(eth1DataRootsMaxVotes[0]) ?? state.eth1Data;
  }

  // If there are multiple winning votes, vote for the latest one
  const latestMostVotedRoot =
    eth1DataVotesOrder[Math.max(...eth1DataRootsMaxVotes.map((root) => eth1DataVotesOrder.indexOf(root)))];
  return eth1DataHashToEth1Data.get(latestMostVotedRoot) ?? state.eth1Data;
}

/**
 * Returns the array of keys with the max value. May return 0, 1 or more keys
 */
function getKeysWithMaxValue<T>(map: Map<T, number>): T[] {
  const entries = Array.from(map.entries());
  let keysMax: T[] = [];
  let valueMax = -Infinity;

  for (const [key, value] of entries) {
    if (value > valueMax) {
      keysMax = [key];
      valueMax = value;
    } else if (value === valueMax) {
      keysMax.push(key);
    }
  }

  return keysMax;
}
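
// Example: ties are preserved, so pickEth1Vote must break them by recency.
getKeysWithMaxValue(new Map([["a", 2], ["b", 2], ["c", 1]])); // ["a", "b"]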

/**
 * Keyed by fastSerializeEth1Data(). votesToConsider is read from the DB as struct and always has a length of 2048.
 * `state.eth1DataVotes` has a length between 0 and ETH1_FOLLOW_DISTANCE with an equal probability of each value.
 * So, to get the faster average time keying both votesToConsider and state.eth1DataVotes, it's better to use
 * fastSerializeEth1Data(). However, a long term solution is to cache valid votes in memory and prevent having
 * to recompute their key on every proposal.
 *
 * With `fastSerializeEth1Data()`: avg time 20 ms/op
 * ✓ pickEth1Vote - no votes    233.0587 ops/s    4.290764 ms/op   -     121 runs   1.02 s
 * ✓ pickEth1Vote - max votes   29.21546 ops/s    34.22845 ms/op   -      25 runs   1.38 s
 *
 * With `toHexString(ssz.phase0.Eth1Data.hashTreeRoot(eth1Data))`: avg time 23 ms/op
 * ✓ pickEth1Vote - no votes    46.12341 ops/s    21.68096 ms/op   -     133 runs   3.40 s
 * ✓ pickEth1Vote - max votes   37.89912 ops/s    26.38583 ms/op   -      29 runs   1.27 s
 */
function getEth1DataKey(eth1Data: phase0.Eth1Data): string {
  return fastSerializeEth1Data(eth1Data);
}

/**
 * Serialize an eth1Data value to a unique string ID. It is only used for comparison.
 */
export function fastSerializeEth1Data(eth1Data: phase0.Eth1Data): string {
  return toRootHex(eth1Data.blockHash) + eth1Data.depositCount.toString(16) + toRootHex(eth1Data.depositRoot);
}

export function votingPeriodStartTime(config: ChainForkConfig, state: BeaconStateAllForks): number {
  const eth1VotingPeriodStartSlot = state.slot - (state.slot % (EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH));
  return computeTimeAtSlot(config, eth1VotingPeriodStartSlot, state.genesisTime);
}
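
// Worked example (mainnet preset: EPOCHS_PER_ETH1_VOTING_PERIOD = 64, SLOTS_PER_EPOCH = 32):
// the voting period spans 64 * 32 = 2048 slots, so for state.slot = 70_000 the period
// start slot is 70_000 - (70_000 % 2048) = 69_632, and the returned time is
// genesisTime + 69_632 * SECONDS_PER_SLOT.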

@@ -1,19 +0,0 @@
import {phase0} from "@lodestar/types";
import {BatchDepositEvents} from "../interface.js";

/**
 * Return deposit events grouped by block number and sorted by deposit index.
 * Blocks without events are omitted
 * @param depositEvents deposit events of a block range
 */
export function groupDepositEventsByBlock(depositEvents: phase0.DepositEvent[]): BatchDepositEvents[] {
  depositEvents.sort((event1, event2) => event1.index - event2.index);
  const depositsByBlockMap = new Map<number, phase0.DepositEvent[]>();
  for (const deposit of depositEvents) {
    depositsByBlockMap.set(deposit.blockNumber, [...(depositsByBlockMap.get(deposit.blockNumber) || []), deposit]);
  }
  return Array.from(depositsByBlockMap.entries()).map(([blockNumber, depositEvents]) => ({
    blockNumber,
    depositEvents,
  }));
}
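
// Example (depositData fields elided; a real phase0.DepositEvent also carries depositData):
// input:  [{index: 2, blockNumber: 11}, {index: 0, blockNumber: 10}, {index: 1, blockNumber: 10}]
// output: [
//   {blockNumber: 10, depositEvents: [{index: 0}, {index: 1}]},
//   {blockNumber: 11, depositEvents: [{index: 2}]},
// ]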

@@ -1,18 +0,0 @@
import {ChainConfig} from "@lodestar/config";

/**
 * Utility for fetching the min-genesis-time block during genesis building.
 * Returns an approximation of the next block diff to fetch to progressively
 * get closer to the block that satisfies the min genesis time condition
 */
export function optimizeNextBlockDiffForGenesis(
  lastFetchedBlock: {timestamp: number},
  params: Pick<ChainConfig, "MIN_GENESIS_TIME" | "GENESIS_DELAY" | "SECONDS_PER_ETH1_BLOCK">
): number {
  const timeToGenesis = params.MIN_GENESIS_TIME - params.GENESIS_DELAY - lastFetchedBlock.timestamp;
  const numBlocksToGenesis = Math.floor(timeToGenesis / params.SECONDS_PER_ETH1_BLOCK);
  if (numBlocksToGenesis <= 2) {
    return 1;
  }
  return Math.max(1, Math.floor(numBlocksToGenesis / 2));
}
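
// Worked example (illustrative values): with MIN_GENESIS_TIME - GENESIS_DELAY
// 1_000 s ahead of the last fetched block and SECONDS_PER_ETH1_BLOCK = 14,
// numBlocksToGenesis = floor(1000 / 14) = 71, so the next fetch jumps
// floor(71 / 2) = 35 blocks — halving the remaining distance each round until
// within 2 blocks of the target.
const nextDiff = optimizeNextBlockDiffForGenesis(
  {timestamp: 1_000_000},
  {MIN_GENESIS_TIME: 1_001_000, GENESIS_DELAY: 0, SECONDS_PER_ETH1_BLOCK: 14}
); // 35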

@@ -4,14 +4,6 @@ import {BlobsBundle, ExecutionPayload, ExecutionRequests, Root, RootHex, Wei} fr
import {BlobAndProof} from "@lodestar/types/deneb";
import {BlobAndProofV2} from "@lodestar/types/fulu";
import {strip0xPrefix} from "@lodestar/utils";
import {
  ErrorJsonRpcResponse,
  HttpRpcError,
  IJsonRpcHttpClient,
  JsonRpcHttpClientEvent,
  ReqOpts,
} from "../../eth1/provider/jsonRpcHttpClient.js";
import {bytesToData, numToQuantity} from "../../eth1/provider/utils.js";
import {Metrics} from "../../metrics/index.js";
import {EPOCHS_PER_BATCH} from "../../sync/constants.js";
import {getLodestarClientVersion} from "../../util/metadata.js";

@@ -27,6 +19,13 @@ import {
  PayloadId,
  VersionedHashes,
} from "./interface.js";
import {
  ErrorJsonRpcResponse,
  HttpRpcError,
  IJsonRpcHttpClient,
  JsonRpcHttpClientEvent,
  ReqOpts,
} from "./jsonRpcHttpClient.js";
import {PayloadIdCache} from "./payloadIdCache.js";
import {
  BLOB_AND_PROOF_V2_RPC_BYTES,

@@ -45,7 +44,7 @@ import {
  serializePayloadAttributes,
  serializeVersionedHashes,
} from "./types.js";
import {getExecutionEngineState} from "./utils.js";
import {bytesToData, getExecutionEngineState, numToQuantity} from "./utils.js";

export type ExecutionEngineModules = {
  signal: AbortSignal;

@@ -194,15 +193,12 @@ export class ExecutionEngineHttp implements IExecutionEngine {
   * 1. {status: INVALID_BLOCK_HASH, latestValidHash: null, validationError:
   *    errorMessage | null} if the blockHash validation has failed
   *
   * 2. {status: INVALID_TERMINAL_BLOCK, latestValidHash: null, validationError:
   *    errorMessage | null} if terminal block conditions are not satisfied
   *
   * 3. {status: SYNCING, latestValidHash: null, validationError: null} if the payload
   * 2. {status: SYNCING, latestValidHash: null, validationError: null} if the payload
   *    extends the canonical chain and requisite data for its validation is missing
   *    with the payload status obtained from the Payload validation process if the payload
   *    has been fully validated while processing the call
   *
   * 4. {status: ACCEPTED, latestValidHash: null, validationError: null} if the
   * 3. {status: ACCEPTED, latestValidHash: null, validationError: null} if the
   *    following conditions are met:
   *    i) the blockHash of the payload is valid
   *    ii) the payload doesn't extend the canonical chain

@@ -330,16 +326,11 @@ export class ExecutionEngineHttp implements IExecutionEngine {
   *    errorMessage | null}, payloadId: null}
   *    obtained from the Payload validation process if the payload is deemed INVALID
   *
   * 3. {payloadStatus: {status: INVALID_TERMINAL_BLOCK, latestValidHash: null,
   *    validationError: errorMessage | null}, payloadId: null}
   *    either obtained from the Payload validation process or as a result of validating a
   *    PoW block referenced by forkchoiceState.headBlockHash
   *
   * 4. {payloadStatus: {status: VALID, latestValidHash: forkchoiceState.headBlockHash,
   * 3. {payloadStatus: {status: VALID, latestValidHash: forkchoiceState.headBlockHash,
   *    validationError: null}, payloadId: null}
   *    if the payload is deemed VALID and a build process hasn't been started
   *
   * 5. {payloadStatus: {status: VALID, latestValidHash: forkchoiceState.headBlockHash,
   * 4. {payloadStatus: {status: VALID, latestValidHash: forkchoiceState.headBlockHash,
   *    validationError: null}, payloadId: buildProcessId}
   *    if the payload is deemed VALID and the build process has begun.
   *

@@ -1,5 +1,4 @@
|
||||
import {fromHex, toPrintableUrl} from "@lodestar/utils";
|
||||
import {JsonRpcHttpClient} from "../../eth1/provider/jsonRpcHttpClient.js";
|
||||
import {ExecutionEngineDisabled} from "./disabled.js";
|
||||
import {
|
||||
ExecutionEngineHttp,
|
||||
@@ -8,6 +7,7 @@ import {
|
||||
defaultExecutionEngineHttpOpts,
|
||||
} from "./http.js";
|
||||
import {IExecutionEngine} from "./interface.js";
|
||||
import {JsonRpcHttpClient} from "./jsonRpcHttpClient.js";
|
||||
import {ExecutionEngineMockBackend, ExecutionEngineMockOpts} from "./mock.js";
|
||||
import {ExecutionEngineMockJsonRpcClient, JsonRpcBackend} from "./utils.js";
|
||||
|
||||
|
||||
@@ -9,9 +9,9 @@ import {
|
||||
import {BlobsBundle, ExecutionPayload, ExecutionRequests, Root, RootHex, Wei, capella} from "@lodestar/types";
|
||||
import {BlobAndProof} from "@lodestar/types/deneb";
|
||||
import {BlobAndProofV2} from "@lodestar/types/fulu";
|
||||
import {DATA} from "../../eth1/provider/utils.js";
|
||||
import {PayloadId, PayloadIdCache, WithdrawalV1} from "./payloadIdCache.js";
|
||||
import {ExecutionPayloadBody} from "./types.js";
|
||||
import {DATA} from "./utils.js";
|
||||
|
||||
export {PayloadIdCache, type PayloadId, type WithdrawalV1};
|
||||
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
import {EventEmitter} from "node:events";
|
||||
import {StrictEventEmitter} from "strict-event-emitter-types";
|
||||
import {ErrorAborted, Gauge, Histogram, TimeoutError, fetch, isValidHttpUrl, retry} from "@lodestar/utils";
|
||||
import {IJson, RpcPayload} from "../interface.js";
|
||||
import {JwtClaim, encodeJwtToken} from "./jwt.js";
|
||||
import {IJson, RpcPayload} from "./utils.js";
|
||||
|
||||
export enum JsonRpcHttpClientEvent {
|
||||
/**
|
||||
@@ -9,9 +9,8 @@ import {
  ForkSeq,
} from "@lodestar/params";
import {ExecutionPayload, RootHex, bellatrix, deneb, ssz} from "@lodestar/types";
import {fromHex, toHex, toRootHex} from "@lodestar/utils";
import {fromHex, toRootHex} from "@lodestar/utils";
import {ZERO_HASH_HEX} from "../../constants/index.js";
import {quantityToNum} from "../../eth1/provider/utils.js";
import {INTEROP_BLOCK_HASH} from "../../node/utils/interop/state.js";
import {kzgCommitmentToVersionedHash} from "../../util/blobs.js";
import {kzg} from "../../util/kzg.js";
@@ -29,7 +28,7 @@ import {
  serializeExecutionPayload,
  serializeExecutionRequests,
} from "./types.js";
import {JsonRpcBackend} from "./utils.js";
import {JsonRpcBackend, quantityToNum} from "./utils.js";

const INTEROP_GAS_LIMIT = 30e6;
const PRUNE_PAYLOAD_ID_AFTER_MS = 5000;
@@ -70,7 +69,7 @@ export class ExecutionEngineMockBackend implements JsonRpcBackend {
  finalizedBlockHash = ZERO_HASH_HEX;
  readonly payloadIdCache = new PayloadIdCache();

  /** Known valid blocks, both pre-merge and post-merge */
  /** Known valid blocks */
  private readonly validBlocks = new Map<RootHex, ExecutionBlock>();
  /** Preparing payloads to be retrieved via engine_getPayloadV1 */
  private readonly preparingPayloads = new Map<number, PreparedPayload>();
@@ -135,18 +134,6 @@ export class ExecutionEngineMockBackend implements JsonRpcBackend {
    return [] as ExecutionPayloadBodyRpc[];
  }

  /**
   * Mock manipulator to add more known blocks to this mock.
   */
  addPowBlock(powBlock: bellatrix.PowBlock): void {
    this.validBlocks.set(toHex(powBlock.blockHash), {
      parentHash: toHex(powBlock.parentHash),
      blockHash: toHex(powBlock.blockHash),
      timestamp: 0,
      blockNumber: 0,
    });
  }

  /**
   * Mock manipulator to add predefined responses before execution engine client calls
   */
@@ -258,7 +245,7 @@ export class ExecutionEngineMockBackend implements JsonRpcBackend {
    // section of the EIP. Additionally, if this validation fails, client software MUST NOT update the forkchoice
    // state and MUST NOT begin a payload build process.
    //
    // > TODO
    // > N/A: All networks have completed the merge transition

    // 4. Before updating the forkchoice state, client software MUST ensure the validity of the payload referenced by
    // forkchoiceState.headBlockHash, and MAY validate the payload while processing the call. The validation process
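To make the quoted spec steps concrete: a minimal sketch (assumed names, not the mock's actual implementation) of gating the forkchoice update on the head block being known and valid, per step 4 above:

  // If the payload referenced by forkchoiceState.headBlockHash is not known to be
  // valid, the forkchoice state must not be updated and no build process may start.
  function forkchoiceUpdated(validBlocks: Set<string>, headBlockHash: string): {status: string} {
    if (!validBlocks.has(headBlockHash)) {
      // Unknown head: keep the previous forkchoice state and signal syncing
      return {status: "SYNCING"};
    }
    return {status: "VALID"};
  }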
@@ -1,7 +1,7 @@
import {SLOTS_PER_EPOCH} from "@lodestar/params";
import {pruneSetToMax} from "@lodestar/utils";
import {DATA, QUANTITY} from "../../eth1/provider/utils.js";
import {PayloadAttributesRpc} from "./types.js";
import {DATA, QUANTITY} from "./utils.js";

// Ideally this only needs to be set to the max number of head reorgs
const MAX_PAYLOAD_IDS = SLOTS_PER_EPOCH;
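The bound above exists so the payload-id cache cannot grow without limit across reorgs. A hand-rolled sketch of such a prune step (illustrative only, not the actual pruneSetToMax from @lodestar/utils):

  // Drop oldest entries until the map holds at most maxItems. Assumes insertion
  // order approximates age, which holds for a Map that is only appended to.
  function pruneMapToMax<K, V>(map: Map<K, V>, maxItems: number): void {
    for (const key of map.keys()) {
      if (map.size <= maxItems) break;
      map.delete(key);
    }
  }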
@@ -23,6 +23,14 @@ import {
} from "@lodestar/types";
import {BlobAndProof} from "@lodestar/types/deneb";
import {BlobAndProofV2} from "@lodestar/types/fulu";
import {
  ExecutionPayloadStatus,
  ExecutionRequestType,
  PayloadAttributes,
  VersionedHashes,
  isExecutionRequestType,
} from "./interface.js";
import {WithdrawalV1} from "./payloadIdCache.js";
import {
  DATA,
  QUANTITY,
@@ -32,15 +40,7 @@ import {
  numToQuantity,
  quantityToBigint,
  quantityToNum,
} from "../../eth1/provider/utils.js";
import {
  ExecutionPayloadStatus,
  ExecutionRequestType,
  PayloadAttributes,
  VersionedHashes,
  isExecutionRequestType,
} from "./interface.js";
import {WithdrawalV1} from "./payloadIdCache.js";
} from "./utils.js";

export type EngineApiRpcParamTypes = {
  /**
@@ -1,14 +1,120 @@
import {isErrorAborted, isFetchError} from "@lodestar/utils";
import {IJson, RpcPayload} from "../../eth1/interface.js";
import {bigIntToBytes, bytesToBigInt, fromHex, fromHexInto, isErrorAborted, isFetchError, toHex} from "@lodestar/utils";
import {isQueueErrorAborted} from "../../util/queue/errors.js";
import {ExecutionEngineState, ExecutionPayloadStatus} from "./interface.js";
import {
  ErrorJsonRpcResponse,
  HttpRpcError,
  IJsonRpcHttpClient,
  JsonRpcHttpClientEvent,
  JsonRpcHttpClientEventEmitter,
} from "../../eth1/provider/jsonRpcHttpClient.js";
import {isQueueErrorAborted} from "../../util/queue/errors.js";
import {ExecutionEngineState, ExecutionPayloadStatus} from "./interface.js";
} from "./jsonRpcHttpClient.js";

/** QUANTITY as defined in ethereum execution layer JSON RPC https://eth.wiki/json-rpc/API */
export type QUANTITY = string;
/** DATA as defined in ethereum execution layer JSON RPC https://eth.wiki/json-rpc/API */
export type DATA = string;

export const rootHexRegex = /^0x[a-fA-F0-9]{64}$/;

export type IJson = string | number | boolean | undefined | IJson[] | {[key: string]: IJson};

export interface RpcPayload<P = IJson[]> {
  method: string;
  params: P;
}

/**
 * QUANTITY as defined in ethereum execution layer JSON RPC https://eth.wiki/json-rpc/API
 *
 * When encoding QUANTITIES (integers, numbers): encode as hex, prefix with “0x”, the most compact representation (slight exception: zero should be represented as “0x0”). Examples:
 * - 0x41 (65 in decimal)
 * - 0x400 (1024 in decimal)
 * - WRONG: 0x (should always have at least one digit - zero is “0x0”)
 * - WRONG: 0x0400 (no leading zeroes allowed)
 * - WRONG: ff (must be prefixed 0x)
 */
export function numToQuantity(num: number | bigint): QUANTITY {
  return "0x" + num.toString(16);
}

/**
 * QUANTITY as defined in ethereum execution layer JSON RPC https://eth.wiki/json-rpc/API
 */
export function quantityToNum(hex: QUANTITY, id = ""): number {
  const num = parseInt(hex, 16);
  if (Number.isNaN(num) || num < 0) throw Error(`Invalid hex decimal ${id} '${hex}'`);
  return num;
}

/**
 * QUANTITY as defined in ethereum execution layer JSON RPC https://eth.wiki/json-rpc/API.
 * Typesafe fn to convert hex string to bigint. The BigInt constructor param is any
 */
export function quantityToBigint(hex: QUANTITY, id = ""): bigint {
  try {
    return BigInt(hex);
  } catch (e) {
    throw Error(`Invalid hex bigint ${id} '${hex}': ${(e as Error).message}`);
  }
}

/**
 * QUANTITY as defined in ethereum execution layer JSON RPC https://eth.wiki/json-rpc/API.
 */
export function quantityToBytes(hex: QUANTITY): Uint8Array {
  const bn = quantityToBigint(hex);
  return bigIntToBytes(bn, 32, "le");
}

/**
 * QUANTITY as defined in ethereum execution layer JSON RPC https://eth.wiki/json-rpc/API.
 * Compress a 32 ByteVector into a QUANTITY
 */
export function bytesToQuantity(bytes: Uint8Array): QUANTITY {
  const bn = bytesToBigInt(bytes, "le");
  return numToQuantity(bn);
}

/**
 * DATA as defined in ethereum execution layer JSON RPC https://eth.wiki/json-rpc/API
 *
 * When encoding UNFORMATTED DATA (byte arrays, account addresses, hashes, bytecode arrays): encode as hex, prefix with
 * “0x”, two hex digits per byte. Examples:
 *
 * - 0x41 (size 1, “A”)
 * - 0x004200 (size 3, “\0B\0”)
 * - 0x (size 0, “”)
 * - WRONG: 0xf0f0f (must be even number of digits)
 * - WRONG: 004200 (must be prefixed 0x)
 */
export function bytesToData(bytes: Uint8Array): DATA {
  return toHex(bytes);
}

/**
 * DATA as defined in ethereum execution layer JSON RPC https://eth.wiki/json-rpc/API
 */
export function dataToBytes(hex: DATA, fixedLength: number | null): Uint8Array {
  try {
    const bytes = fromHex(hex);
    if (fixedLength != null && bytes.length !== fixedLength) {
      throw Error(`Wrong data length ${bytes.length} expected ${fixedLength}`);
    }
    return bytes;
  } catch (e) {
    (e as Error).message = `Invalid hex string: ${(e as Error).message}`;
    throw e;
  }
}

/**
 * Convert DATA into a preallocated buffer
 * fromHexInto will throw if buffer's length is not the same as the decoded hex length
 */
export function dataIntoBytes(hex: DATA, buffer: Uint8Array): Uint8Array {
  fromHexInto(hex, buffer);
  return buffer;
}

export type JsonRpcBackend = {
  // biome-ignore lint/suspicious/noExplicitAny: We need to use `any` type here

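A quick illustration of the encoding rules these helpers enforce; the expected values follow directly from the doc comments above:

  numToQuantity(65);          // "0x41"
  numToQuantity(1024n);       // "0x400"
  quantityToNum("0x400");     // 1024
  quantityToBigint("0x400");  // 1024n
  bytesToData(new Uint8Array([0x00, 0x42, 0x00])); // "0x004200"
  dataToBytes("0x004200", 3);  // Uint8Array [0, 66, 0]
  dataToBytes("0x004200", 32); // throws: wrong data length 3, expected 32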
@@ -2,11 +2,10 @@

export type {RestApiServerMetrics, RestApiServerModules, RestApiServerOpts} from "./api/rest/base.js";
export {RestApiServer} from "./api/rest/base.js";
export {checkAndPersistAnchorState, initStateFromDb, initStateFromEth1} from "./chain/index.js";
export {checkAndPersistAnchorState, initStateFromDb} from "./chain/index.js";
export {DbCPStateDatastore} from "./chain/stateCache/datastore/db.js";
export {FileCPStateDatastore} from "./chain/stateCache/datastore/file.js";
export {BeaconDb, type IBeaconDb} from "./db/index.js";
export {Eth1Provider, type IEth1Provider} from "./eth1/index.js";
// Export metrics utilities to de-duplicate validator metrics
export {
  type HttpMetricsServer,
@@ -1619,150 +1619,6 @@ export function createLodestarMetrics(
      }),
    },

    eth1: {
      depositTrackerIsCaughtup: register.gauge({
        name: "lodestar_eth1_deposit_tracker_is_caughtup",
        help: "Eth1 deposit is caught up 0=false 1=true",
      }),
      depositTrackerUpdateErrors: register.gauge({
        name: "lodestar_eth1_deposit_tracker_update_errors_total",
        help: "Eth1 deposit update loop errors total",
      }),
      remoteHighestBlock: register.gauge({
        name: "lodestar_eth1_remote_highest_block",
        help: "Eth1 current highest block number",
      }),
      depositEventsFetched: register.gauge({
        name: "lodestar_eth1_deposit_events_fetched_total",
        help: "Eth1 deposit events fetched total",
      }),
      lastProcessedDepositBlockNumber: register.gauge({
        name: "lodestar_eth1_last_processed_deposit_block_number",
        help: "Eth1 deposit tracker lastProcessedDepositBlockNumber",
      }),
      blocksFetched: register.gauge({
        name: "lodestar_eth1_blocks_fetched_total",
        help: "Eth1 blocks fetched total",
      }),
      lastFetchedBlockBlockNumber: register.gauge({
        name: "lodestar_eth1_last_fetched_block_block_number",
        help: "Eth1 deposit tracker last fetched block's block number",
      }),
      lastFetchedBlockTimestamp: register.gauge({
        name: "lodestar_eth1_last_fetched_block_timestamp",
        help: "Eth1 deposit tracker last fetched block's timestamp",
      }),
      eth1FollowDistanceSecondsConfig: register.gauge({
        name: "lodestar_eth1_follow_distance_seconds_config",
        help: "Constant with value = SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE",
      }),
      eth1FollowDistanceDynamic: register.gauge({
        name: "lodestar_eth1_follow_distance_dynamic",
        help: "Eth1 dynamic follow distance changed by the deposit tracker if blocks are slow",
      }),
      eth1GetBlocksBatchSizeDynamic: register.gauge({
        name: "lodestar_eth1_blocks_batch_size_dynamic",
        help: "Dynamic batch size to fetch blocks",
      }),
      eth1GetLogsBatchSizeDynamic: register.gauge({
        name: "lodestar_eth1_logs_batch_size_dynamic",
        help: "Dynamic batch size to fetch deposit logs",
      }),

      // Merge Search info
      eth1MergeStatus: register.gauge({
        name: "lodestar_eth1_merge_status",
        help: "Eth1 Merge Status 0 PRE_MERGE 1 SEARCHING 2 FOUND 3 POST_MERGE",
      }),
      eth1MergeTDFactor: register.gauge({
        name: "lodestar_eth1_merge_td_factor",
        help: "Factor used to scale down the TTD set for the merge",
      }),
      eth1MergeTTD: register.gauge({
        name: "lodestar_eth1_merge_ttd",
        help: "TTD set for the merge scaled down by td_factor",
      }),

      eth1PollMergeBlockErrors: register.gauge({
        name: "lodestar_eth1_poll_merge_block_errors_total",
        help: "Total count of errors polling merge block",
      }),
      getTerminalPowBlockPromiseCacheHit: register.gauge({
        name: "lodestar_eth1_get_terminal_pow_block_promise_cache_hit_total",
        help: "Total count of skipped runs in poll merge block, because a previous promise existed",
      }),
      eth1ParentBlocksFetched: register.gauge({
        name: "lodestar_eth1_parent_blocks_fetched_total",
        help: "Total count of parent blocks fetched searching for merge block",
      }),

      // Latest block details
      eth1LatestBlockTD: register.gauge({
        name: "lodestar_eth1_latest_block_ttd",
        help: "Eth1 latest block TD scaled down by td_factor",
      }),
      eth1LatestBlockNumber: register.gauge({
        name: "lodestar_eth1_latest_block_number",
        help: "Eth1 latest block number",
      }),
      eth1LatestBlockTimestamp: register.gauge({
        name: "lodestar_eth1_latest_block_timestamp",
        help: "Eth1 latest block timestamp",
      }),

      // Merge details
      eth1MergeBlockDetails: register.gauge<{
        terminalBlockHash: string;
        terminalBlockNumber: string;
        terminalBlockTD: string;
      }>({
        name: "lodestar_eth1_merge_block_details",
        help: "If found then 1 with terminal block details",
        labelNames: ["terminalBlockHash", "terminalBlockNumber", "terminalBlockTD"],
      }),
    },

    eth1HttpClient: {
      requestTime: register.histogram<{routeId: string}>({
        name: "lodestar_eth1_http_client_request_time_seconds",
        help: "eth1 JsonHttpClient - histogram of roundtrip request times",
        labelNames: ["routeId"],
        // Provide max resolution on problematic values around 1 second
        buckets: [0.1, 0.5, 1, 2, 5, 15],
      }),
      streamTime: register.histogram<{routeId: string}>({
        name: "lodestar_eth1_http_client_stream_time_seconds",
        help: "eth1 JsonHttpClient - streaming time by routeId",
        labelNames: ["routeId"],
        // Provide max resolution on problematic values around 1 second
        buckets: [0.1, 0.5, 1, 2, 5, 15],
      }),
      requestErrors: register.gauge<{routeId: string}>({
        name: "lodestar_eth1_http_client_request_errors_total",
        help: "eth1 JsonHttpClient - total count of request errors",
        labelNames: ["routeId"],
      }),
      retryCount: register.gauge<{routeId: string}>({
        name: "lodestar_eth1_http_client_request_retries_total",
        help: "eth1 JsonHttpClient - total count of request retries",
        labelNames: ["routeId"],
      }),
      requestUsedFallbackUrl: register.gauge<{routeId: string}>({
        name: "lodestar_eth1_http_client_request_used_fallback_url_total",
        help: "eth1 JsonHttpClient - total count of requests on fallback url(s)",
        labelNames: ["routeId"],
      }),
      activeRequests: register.gauge<{routeId: string}>({
        name: "lodestar_eth1_http_client_active_requests",
        help: "eth1 JsonHttpClient - current count of active requests",
        labelNames: ["routeId"],
      }),
      configUrlsCount: register.gauge({
        name: "lodestar_eth1_http_client_config_urls_count",
        help: "eth1 JsonHttpClient - static config urls count",
      }),
    },

    executionEnginerHttpClient: {
      requestTime: register.histogram<{routeId: string}>({
        name: "lodestar_execution_engine_http_client_request_time_seconds",
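On the bucket choice in the histograms above: a histogram only resolves latencies to its bucket boundaries, so boundaries are packed where diagnosis matters most, around 1 second. A standalone prom-client sketch with a hypothetical metric name:

  import {Histogram} from "prom-client";

  // Boundaries cluster around 1s; anything above 15s falls into the implicit +Inf bucket
  const requestTime = new Histogram({
    name: "example_http_client_request_time_seconds",
    help: "Roundtrip request times by route",
    labelNames: ["routeId"],
    buckets: [0.1, 0.5, 1, 2, 5, 15],
  });

  requestTime.observe({routeId: "eth_getLogs"}, 0.73); // falls into the (0.5, 1] bucket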
@@ -2,7 +2,7 @@ import EventEmitter from "node:events";
import {ResponseIncoming, ResponseOutgoing} from "@lodestar/reqresp";
import {AsyncIterableEventBus, IteratorEvent, RequestEvent} from "../../util/asyncIterableToEvents.js";
import {StrictEventEmitterSingleArg} from "../../util/strictEvents.js";
import {EventDirection} from "../../util/workerEvents.js";
import {EventDirection} from "../events.js";
import {IncomingRequestArgs, OutgoingRequestArgs} from "../reqresp/types.js";

export enum ReqRespBridgeEvent {
@@ -3,7 +3,6 @@ import {PeerId, TopicValidatorResult} from "@libp2p/interface";
import {CustodyIndex, Status} from "@lodestar/types";
import {PeerIdStr} from "../util/peerId.js";
import {StrictEventEmitterSingleArg} from "../util/strictEvents.js";
import {EventDirection} from "../util/workerEvents.js";
import {PendingGossipsubMessage} from "./processor/types.js";
import {RequestTypedContainer} from "./reqresp/ReqRespBeaconNode.js";

@@ -38,6 +37,13 @@ export type NetworkEventData = {
  };
};

export enum EventDirection {
  workerToMain,
  mainToWorker,
  /** Event not emitted through worker boundary */
  none,
}

export const networkEventDirection: Record<NetworkEvent, EventDirection> = {
  [NetworkEvent.peerConnected]: EventDirection.workerToMain,
  [NetworkEvent.peerDisconnected]: EventDirection.workerToMain,
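The EventDirection enum moved inline above pairs with the networkEventDirection table to decide which events cross the worker boundary. A minimal sketch of how such a table can be consumed (assumed shape, not the actual bridge code):

  enum Direction {workerToMain, mainToWorker, none}

  const table: Record<string, Direction> = {
    peerConnected: Direction.workerToMain,
    peerDisconnected: Direction.workerToMain,
  };

  // Re-emit on the main-thread bus only the events declared worker-to-main
  function shouldForwardToMain(event: string): boolean {
    return table[event] === Direction.workerToMain;
  }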
@@ -2,10 +2,11 @@ import {setMaxListeners} from "node:events";
import {PrivateKey} from "@libp2p/interface";
import {Registry} from "prom-client";
import {hasher} from "@chainsafe/persistent-merkle-tree";
import {PubkeyIndexMap} from "@chainsafe/pubkey-index-map";
import {BeaconApiMethods} from "@lodestar/api/beacon/server";
import {BeaconConfig} from "@lodestar/config";
import type {LoggerNode} from "@lodestar/logger/node";
import {BeaconStateAllForks} from "@lodestar/state-transition";
import {CachedBeaconStateAllForks, Index2PubkeyCache} from "@lodestar/state-transition";
import {phase0} from "@lodestar/types";
import {sleep} from "@lodestar/utils";
import {ProcessShutdownCallback} from "@lodestar/validator";
@@ -13,7 +14,6 @@ import {BeaconRestApiServer, getApi} from "../api/index.js";
import {BeaconChain, IBeaconChain, initBeaconMetrics} from "../chain/index.js";
import {ValidatorMonitor, createValidatorMonitor} from "../chain/validatorMonitor.js";
import {IBeaconDb} from "../db/index.js";
import {initializeEth1ForBlockProduction} from "../eth1/index.js";
import {initializeExecutionBuilder, initializeExecutionEngine} from "../execution/index.js";
import {HttpMetricsServer, Metrics, createMetrics, getHttpMetricsServer} from "../metrics/index.js";
import {MonitoringService} from "../monitoring/index.js";
@@ -46,13 +46,15 @@ export type BeaconNodeModules = {
export type BeaconNodeInitModules = {
  opts: IBeaconNodeOptions;
  config: BeaconConfig;
  pubkey2index: PubkeyIndexMap;
  index2pubkey: Index2PubkeyCache;
  db: IBeaconDb;
  logger: LoggerNode;
  processShutdownCallback: ProcessShutdownCallback;
  privateKey: PrivateKey;
  dataDir: string;
  peerStoreDir?: string;
  anchorState: BeaconStateAllForks;
  anchorState: CachedBeaconStateAllForks;
  isAnchorStateFinalized: boolean;
  wsCheckpoint?: phase0.Checkpoint;
  metricsRegistries?: Registry[];
@@ -68,7 +70,6 @@ enum LoggerModule {
  api = "api",
  backfill = "backfill",
  chain = "chain",
  eth1 = "eth1",
  execution = "execution",
  metrics = "metrics",
  monitoring = "monitoring",
@@ -148,6 +149,8 @@ export class BeaconNode {
  static async init<T extends BeaconNode = BeaconNode>({
    opts,
    config,
    pubkey2index,
    index2pubkey,
    db,
    logger,
    processShutdownCallback,
@@ -199,6 +202,17 @@ export class BeaconNode {
    // TODO: Should this call be awaited?
    await db.pruneHotDb();

    // Delete deprecated eth1 data to free up disk space for users
    logger.debug("Deleting deprecated eth1 data from database");
    const startTime = Date.now();
    db.deleteDeprecatedEth1Data()
      .then(() => {
        logger.debug("Deleted deprecated eth1 data", {durationMs: Date.now() - startTime});
      })
      .catch((e) => {
        logger.error("Failed to delete deprecated eth1 data", {}, e);
      });

    const monitoring = opts.monitoring.endpoint
      ? new MonitoringService(
          "beacon",
@@ -211,6 +225,8 @@ export class BeaconNode {
      privateKey,
      config,
      clock,
      pubkey2index,
      index2pubkey,
      dataDir,
      db,
      dbName: opts.db.name,
@@ -220,13 +236,6 @@ export class BeaconNode {
      validatorMonitor,
      anchorState,
      isAnchorStateFinalized,
      eth1: initializeEth1ForBlockProduction(opts.eth1, {
        config,
        db,
        metrics,
        logger: logger.child({module: LoggerModule.eth1}),
        signal,
      }),
      executionEngine: initializeExecutionEngine(opts.executionEngine, {
        metrics,
        signal,
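The deletion above is deliberately not awaited: startup continues while the potentially large eth1 dataset is purged in the background, and the trailing .catch keeps a failure from surfacing as an unhandled rejection. The same pattern in isolation, with hypothetical names:

  // Fire-and-forget cleanup: callers continue immediately; errors are logged, never thrown
  function startBackgroundCleanup(cleanup: () => Promise<void>, log: (msg: string) => void): void {
    const startTime = Date.now();
    cleanup()
      .then(() => log(`cleanup done in ${Date.now() - startTime}ms`))
      .catch((e: Error) => log(`cleanup failed: ${e.message}`));
  }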
@@ -36,7 +36,6 @@ export async function runNodeNotifier(modules: NodeNotifierModules): Promise<void> {
  const {network, chain, sync, config, logger, signal} = modules;

  const headSlotTimeSeries = new TimeSeries({maxPoints: 10});
  const tdTimeSeries = new TimeSeries({maxPoints: 50});

  const SLOTS_PER_SYNC_COMMITTEE_PERIOD = SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD;
  let hasLowPeerCount = false;
@@ -87,21 +86,6 @@ export async function runNodeNotifier(modules: NodeNotifierModules): Promise<void> {
  const executionInfo = getHeadExecutionInfo(config, clockEpoch, headState, headInfo);
  const finalizedCheckpointRow = `finalized: ${prettyBytes(finalizedRoot)}:${finalizedEpoch}`;

  // Log TD progress on a separate line to not clutter the regular status update.
  // This line will only exist between BELLATRIX_FORK_EPOCH and TTD, a window of some days / weeks max.
  // Notifier log lines must be kept at a reasonable max width otherwise it's very hard to read
  const tdProgress = chain.eth1.getTDProgress();
  if (tdProgress !== null && !tdProgress.ttdHit) {
    tdTimeSeries.addPoint(tdProgress.tdDiffScaled, tdProgress.timestamp);

    const timestampTDD = tdTimeSeries.computeY0Point();
    // It is possible to get a TTD estimate with some error at imminent merge
    const secToTTD = Math.max(Math.floor(timestampTDD - Date.now() / 1000), 0);
    const timeLeft = Number.isFinite(secToTTD) ? prettyTimeDiffSec(secToTTD) : "?";

    logger.info(`TTD in ${timeLeft} current TD ${tdProgress.td} / ${tdProgress.ttd}`);
  }

  let nodeState: string[];
  switch (sync.state) {
    case SyncState.SyncingFinalized:
@@ -2,7 +2,6 @@ import {ApiOptions, defaultApiOptions} from "../api/options.js";
import {ArchiveMode, DEFAULT_ARCHIVE_MODE, IChainOptions, defaultChainOptions} from "../chain/options.js";
import {ValidatorMonitorOpts, defaultValidatorMonitorOpts} from "../chain/validatorMonitor.js";
import {DatabaseOptions, defaultDbOptions} from "../db/options.js";
import {Eth1Options, defaultEth1Options} from "../eth1/options.js";
import {
  ExecutionBuilderOpts,
  ExecutionEngineOpts,
@@ -26,7 +25,6 @@ export interface IBeaconNodeOptions {
  api: ApiOptions;
  chain: IChainOptions;
  db: DatabaseOptions;
  eth1: Eth1Options;
  executionEngine: ExecutionEngineOpts;
  executionBuilder: ExecutionBuilderOpts;
  metrics: MetricsOptions;
@@ -40,7 +38,6 @@ export const defaultOptions: IBeaconNodeOptions = {
  api: defaultApiOptions,
  chain: defaultChainOptions,
  db: defaultDbOptions,
  eth1: defaultEth1Options,
  executionEngine: defaultExecutionEngineOpts,
  executionBuilder: defaultExecutionBuilderOpts,
  metrics: defaultMetricsOptions,
Some files were not shown because too many files have changed in this diff