Mirror of https://github.com/ChainSafe/lodestar.git (synced 2026-01-09 15:48:08 -05:00)
chore: v1.34.0 release (#8344)
@@ -1,10 +1,10 @@
 # We use these images during sim and e2e tests
 # This is the last version which supports pre/post merge chains in the same network
 # All newer versions only work with post merge chains
-GETH_DOCKER_IMAGE=ethereum/client-go:v1.13.14
+GETH_DOCKER_IMAGE=ethereum/client-go:v1.16.2
 # Use either image or local binary for the testing
 GETH_BINARY_DIR=
-LIGHTHOUSE_DOCKER_IMAGE=sigp/lighthouse:v7.0.1
+LIGHTHOUSE_DOCKER_IMAGE=ethpandaops/lighthouse:unstable-d235f2c

 # We can't upgrade nethermind further due to genesis hash mismatch with the geth
 # https://github.com/NethermindEth/nethermind/issues/6683
.github/workflows/publish-nextfork.yml · 152 lines · vendored · new file
@@ -0,0 +1,152 @@
name: Publish nextfork release

# only one per github sha can be run
concurrency:
  group: cd-publish-nextfork

on:
  push:
    branches:
      - peerDAS # Nextfork branch

env:
  NEXT_FORK: peerDAS

jobs:
  npm:
    name: Publish to NPM Registry
    runs-on: buildjet-4vcpu-ubuntu-2204
    steps:
      # <common-build> - Uses YAML anchors in the future
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - uses: actions/setup-node@v4
        with:
          node-version: 22
          registry-url: "https://registry.npmjs.org"
          check-latest: true
          cache: yarn
      - name: Node.js version
        id: node
        run: echo "v8CppApiVersion=$(node --print "process.versions.modules")" >> $GITHUB_OUTPUT
      - name: Restore dependencies
        uses: actions/cache@master
        id: cache-deps
        with:
          path: |
            node_modules
            packages/*/node_modules
          key: ${{ runner.os }}-${{ steps.node.outputs.v8CppApiVersion }}-${{ hashFiles('**/yarn.lock', '**/package.json') }}
      - name: Install & build
        if: steps.cache-deps.outputs.cache-hit != 'true'
        run: yarn install --frozen-lockfile && yarn build
      - name: Build
        run: yarn build
        if: steps.cache-deps.outputs.cache-hit == 'true'
      # </common-build>
      - name: Get version
        id: version
        run: |
          PACKAGE_VERSION=$(node -p "require('./packages/cli/package.json').version")
          NEXT_VERSION=$(npx --yes semver --increment minor $PACKAGE_VERSION)
          export VERSION=${NEXT_VERSION}-${NEXT_FORK}.${GITHUB_SHA:0:10}
          echo "version=$VERSION" >> $GITHUB_OUTPUT
          echo PACKAGE_VERSION $PACKAGE_VERSION GITHUB_SHA $GITHUB_SHA VERSION $VERSION

      - name: Change and commit version
        # Write version before publishing so it's picked up by `lerna publish from-package`.
        # It must also be committed to ensure a clean git tree, otherwise `lerna publish` errors.
        # This "temp" commit doesn't change the actual release commit, which is captured above.
        # git-data is also correct, since it's generated at build time, before `lerna version` runs.
        run: |
          node_modules/.bin/lerna version ${{ steps.version.outputs.version }} \
            --force-publish \
            --exact \
            --yes \
            --no-git-tag-version

          git config user.name 'temp'
          git config user.email 'temp@github.com'
          git commit -am "${{ steps.version.outputs.version }}"

      - name: Publish to npm registry
        # Note: before https://github.com/ChainSafe/lodestar/commit/28e2c74cf0f1bede8b09c8c9fec26f54b367e3fd
        # we used the `lerna publish --canary` option. However, since we must now publish versions on branches,
        # i.e. the v0.35.x branch, lerna fails to detect the latest version and publishes canary versions as
        # `0.34.0-dev.173+28e2c74cf0` instead of `0.36.0-dev.4+28e2c74cf0`, which creates confusion.
        #
        # --no-git-reset:
        #   Do not delete code version artifacts so the next step can pick up the version
        #
        # --dist-tag next:
        #   Make this dev version installable with `@next`
        #
        # --preid dev:
        #   Tag version with `dev` instead of `alpha`
        #
        # --force-publish:
        #   lerna doesn't want to publish anything otherwise - "lerna success No changed packages
        #   to publish"
        #
        # --exact:
        #   lerna will link the dependencies of monorepo packages without the ^ operator, as npm
        #   is apparently bad at resolving ^ dependencies of the canary versions. For e.g.
        #   @chainsafe/lodestar@^0.34.0-dev.4 resolves to => 0.34.0
        #
        # NOTE: Using --preid dev.$(git rev-parse --short=7 HEAD) results in `0.24.3-dev.3ddb91d.0+3ddb91d`
        run: |
          node_modules/.bin/lerna publish from-package \
            --yes \
            --no-verify-access \
            --dist-tag next \
            --no-git-reset \
            --force-publish \
            --exact
        env:
          NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
    outputs:
      version: ${{ steps.version.outputs.version }}

  docker:
    name: Publish to Docker Hub
    runs-on: buildjet-4vcpu-ubuntu-2204
    needs: npm
    steps:
      - uses: actions/checkout@v4
      # https://github.com/docker/setup-qemu-action
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      # https://github.com/docker/setup-buildx-action
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Build and push lodestar
        run: >
          docker buildx build . --push
          --tag chainsafe/lodestar:nextfork
          --platform linux/amd64,linux/arm64
          --build-arg COMMIT=$(git rev-parse HEAD)

      - run: docker run chainsafe/lodestar:nextfork --help
      # Display history to know byte size of each layer
      # Image is available only because of the previous `docker run` command
      - run: docker image history chainsafe/lodestar:nextfork

      - name: Build and push custom Grafana
        run: >
          docker buildx build ./docker/grafana/ --push
          --file ./docker/grafana/Dockerfile
          --build-context dashboards=./dashboards
          --tag chainsafe/lodestar-grafana:nextfork
          --platform linux/amd64,linux/arm64

      - name: Build and push custom Prometheus
        run: >
          docker buildx build ./docker/prometheus/ --push
          --file ./docker/prometheus/Dockerfile
          --tag chainsafe/lodestar-prometheus:nextfork
          --platform linux/amd64,linux/arm64
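For reference, the `Get version` step above composes a prerelease version from the current package version, the fork name, and the first 10 characters of the commit SHA. A minimal TypeScript sketch of the same computation (the helper name is ours, not part of the workflow):

```ts
// Hypothetical helper mirroring the shell logic in the "Get version" step.
function nextforkVersion(packageVersion: string, nextFork: string, gitSha: string): string {
  // `npx semver --increment minor 1.34.0` -> "1.35.0"
  const [major, minor] = packageVersion.split(".").map(Number);
  const nextVersion = `${major}.${minor + 1}.0`;
  // VERSION=${NEXT_VERSION}-${NEXT_FORK}.${GITHUB_SHA:0:10}
  return `${nextVersion}-${nextFork}.${gitSha.slice(0, 10)}`;
}

// e.g. "1.35.0-peerDAS.28e2c74cf0"
console.log(nextforkVersion("1.34.0", "peerDAS", "28e2c74cf0f1bede8b09c8c9fec26f54b367e3fd"));
```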
biome.jsonc · 83 lines
@@ -9,14 +9,24 @@
   "vcs": {
     "defaultBranch": "unstable"
   },
+  "assist": {
+    "actions": {
+      "source": {
+        "organizeImports": { "level": "off", "options": { "identifierOrder": "lexicographic" } },
+        "useSortedKeys": { "level": "off", "options": { "sortOrder": "lexicographic" } }
+      }
+    }
+  },
   "files": {
-    "include": ["packages/*/src/**/*.ts", "packages/*/test/**/*.ts", "configs/**/*.ts", "./vitest.config.ts"]
-  },
-  "formatter": {
-    "ignore": ["**/lib", "**/.nyc_output", "./packages/*/spec-tests", "**/node_modules", "./packages/*/node_modules/**"]
-  },
-  "organizeImports": {
-    "enabled": true
+    "includes": [
+      "**/packages/**/*/src/**/*.ts",
+      "**/packages/**/*/test/**/*.ts",
+      "**/configs/**/*.ts",
+      "vitest.config.ts",
+      "!**/lib",
+      "!packages/**/*/spec-tests",
+      "!packages/**/*/node_modules/"
+    ]
   },
   "linter": {
     "rules": {
@@ -24,13 +34,11 @@
         "useImportExtensions": {
           "level": "error",
           "options": {
-            "suggestedExtensions": {
-              "ts": {
-                "module": "js",
-                "component": "jsx"
-              }
-            }
+            "forceJsExtensions": false
           }
         },
+        "useParseIntRadix": {
+          "level": "off"
+        }
       },
       "performance": {
@@ -38,14 +46,14 @@
         "noDelete": "off"
       },
       "style": {
-        // The code usage looks suspicious so it should be enabled in a separate PR
-        "noCommaOperator": "off",
         // Will be enabled in a separate PR
         "useArrayLiterals": "off",
         // There are a lot of places we mutate params, should be fixed in an independent PR.
         "noParameterAssign": "off",
         "noRestrictedGlobals": {
           "level": "error",
           "options": {
-            "deniedGlobals": ["fetch"]
+            "deniedGlobals": { "fetch": "Please use 'fetch' from '@lodestar/api' instead." }
           }
         },
         // In some cases the enums are initialized with values of other enums
@@ -143,19 +151,31 @@
         }
       },
       "suspicious": {
+        // Will be enabled in separate PR
+        "useIterableCallbackReturn": "off",
         // There are a lot of empty code blocks, should be enabled and cleaned up separately.
-        "noEmptyBlockStatements": "off"
+        "noEmptyBlockStatements": "off",
+        // We are using `Object.prototype.hasOwnProperty` a lot because the compile target is set prior to ES2022
+        "noPrototypeBuiltins": "off"
      },
+      "nursery": {
+        // Need to enable this rule with an exception for anonymous functions
+        "useExplicitType": "off"
+      },
       "complexity": {
+        // Should be done in a separate PR
+        "useIndexOf": "off",
+        // Should be done in a separate PR
+        "useDateNow": "off",
+        // The code usage looks suspicious so it should be enabled in a separate PR
+        "noCommaOperator": "off"
       }
     }
   },
   "overrides": [
     // Code using console output
     {
-      "include": ["packages/cli/src/", "packages/test-utils/src", "packages/flare/src"],
+      "includes": ["**/packages/cli/src/**", "**/packages/test-utils/src/**", "**/packages/flare/src/**"],
       "linter": {
         "rules": {
           "suspicious": {
@@ -166,7 +186,7 @@
     },
     // All test files
     {
-      "include": ["**/test/**/*.ts", "packages/spec-test-util/src"],
+      "includes": ["**/packages/spec-test-util/src/**"],
       "linter": {
         "rules": {
           "complexity": {
@@ -185,20 +205,17 @@
       }
     },
     {
-      "include": [
-        // These files are using mixed cases e.g. `engine_newPayloadV4`
-        // It's a mix of snake_case and camelCase, which can't be validated by biome
-        "packages/beacon-node/src/db/buckets.ts",
-        "packages/beacon-node/src/execution/engine/mock.ts",
-        "packages/beacon-node/src/execution/engine/types.ts",
-        "packages/beacon-node/src/eth1/provider/eth1Provider.ts",
-        "packages/validator/src/buckets.ts",
-        "packages/prover/src/types.ts",
-        "prover/src/utils/process.ts",
-        "prover/src/verified_requests/**/*.ts",
-        "packages/types/src/utils/**/*.ts",
-        // This file is using snake_case function names
-        "packages/beacon-node/test/spec/bls/bls.ts"
+      "includes": [
+        "**/packages/beacon-node/src/db/buckets.ts",
+        "**/packages/beacon-node/src/execution/engine/mock.ts",
+        "**/packages/beacon-node/src/execution/engine/types.ts",
+        "**/packages/beacon-node/src/eth1/provider/eth1Provider.ts",
+        "**/packages/validator/src/buckets.ts",
+        "**/packages/prover/src/types.ts",
+        "**/prover/src/utils/process.ts",
+        "**/prover/src/verified_requests/**/*.ts",
+        "**/packages/types/src/utils/**/*.ts",
+        "**/packages/beacon-node/test/spec/bls/bls.ts"
       ],
       "linter": {
         "rules": {
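Note that the `deniedGlobals` migration above is not only a syntax change: the object form attaches a custom diagnostic message to each denied global. A minimal sketch of what the rule flags, assuming the message configured above:

```ts
// Denied by noRestrictedGlobals; Biome reports the configured message:
// "Please use 'fetch' from '@lodestar/api' instead."
const res = await fetch("http://localhost:9596/eth/v1/node/health");

// The suggested replacement, per that message:
// import {fetch} from "@lodestar/api";
```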
@@ -952,6 +952,168 @@
        "x": 0,
        "y": 25
      },
      "id": 572,
      "options": {
        "legend": {
          "calcs": [],
          "displayMode": "list",
          "placement": "bottom",
          "showLegend": true
        },
        "tooltip": {
          "mode": "single",
          "sort": "none"
        }
      },
      "targets": [
        {
          "datasource": {
            "type": "prometheus",
            "uid": "${DS_PROMETHEUS}"
          },
          "editorMode": "code",
          "expr": "sum(rate(beacon_block_production_consensus_block_value_sum[$rate_interval]))\n/\nsum(rate(beacon_block_production_consensus_block_value_count[$rate_interval]))",
          "instant": false,
          "legendFormat": "value",
          "range": true,
          "refId": "A"
        }
      ],
      "title": "Consensus block value average",
      "type": "timeseries"
    },
    {
      "datasource": {
        "type": "prometheus",
        "uid": "${DS_PROMETHEUS}"
      },
      "fieldConfig": {
        "defaults": {
          "custom": {
            "hideFrom": {
              "legend": false,
              "tooltip": false,
              "viz": false
            },
            "scaleDistribution": {
              "type": "linear"
            }
          }
        },
        "overrides": []
      },
      "gridPos": {
        "h": 8,
        "w": 12,
        "x": 12,
        "y": 25
      },
      "id": 573,
      "options": {
        "calculate": false,
        "cellGap": 1,
        "color": {
          "exponent": 0.5,
          "fill": "dark-orange",
          "mode": "scheme",
          "reverse": false,
          "scale": "exponential",
          "scheme": "Magma",
          "steps": 64
        },
        "exemplars": {
          "color": "rgba(255,0,255,0.7)"
        },
        "filterValues": {
          "le": 1e-9
        },
        "legend": {
          "show": true
        },
        "rowsFrame": {
          "layout": "auto"
        },
        "tooltip": {
          "mode": "single",
          "showColorScale": false,
          "yHistogram": false
        },
        "yAxis": {
          "axisPlacement": "left",
          "reverse": false
        }
      },
      "pluginVersion": "10.4.1",
      "targets": [
        {
          "datasource": {
            "type": "prometheus",
            "uid": "${DS_PROMETHEUS}"
          },
          "editorMode": "code",
          "expr": "rate(beacon_block_production_consensus_block_value_bucket[$rate_interval])",
          "format": "heatmap",
          "instant": false,
          "legendFormat": "__auto",
          "range": true,
          "refId": "A"
        }
      ],
      "title": "Consensus block value histogram",
      "type": "heatmap"
    },
    {
      "datasource": {
        "type": "prometheus",
        "uid": "${DS_PROMETHEUS}"
      },
      "fieldConfig": {
        "defaults": {
          "color": {
            "mode": "palette-classic"
          },
          "custom": {
            "axisBorderShow": false,
            "axisCenteredZero": false,
            "axisColorMode": "text",
            "axisLabel": "",
            "axisPlacement": "auto",
            "barAlignment": 0,
            "drawStyle": "line",
            "fillOpacity": 0,
            "gradientMode": "none",
            "hideFrom": {
              "legend": false,
              "tooltip": false,
              "viz": false
            },
            "insertNulls": false,
            "lineInterpolation": "linear",
            "lineWidth": 1,
            "pointSize": 5,
            "scaleDistribution": {
              "type": "linear"
            },
            "showPoints": "auto",
            "spanNulls": false,
            "stacking": {
              "group": "A",
              "mode": "none"
            },
            "thresholdsStyle": {
              "mode": "off"
            }
          },
          "mappings": []
        },
        "overrides": []
      },
      "gridPos": {
        "h": 8,
        "w": 12,
        "x": 0,
        "y": 33
      },
      "id": 511,
      "options": {
        "legend": {
@@ -1117,7 +1279,7 @@
        "h": 8,
        "w": 12,
        "x": 12,
-       "y": 25
+       "y": 33
      },
      "id": 378,
      "options": {
@@ -1203,7 +1365,7 @@
        "h": 8,
        "w": 12,
        "x": 0,
-       "y": 33
+       "y": 41
      },
      "id": 376,
      "options": {
@@ -1288,7 +1450,7 @@
        "h": 8,
        "w": 12,
        "x": 12,
-       "y": 33
+       "y": 41
      },
      "id": 532,
      "options": {
@@ -1391,7 +1553,7 @@
        "h": 7,
        "w": 12,
        "x": 0,
-       "y": 41
+       "y": 49
      },
      "id": 531,
      "options": {
@@ -1476,7 +1638,7 @@
        "h": 7,
        "w": 12,
        "x": 12,
-       "y": 41
+       "y": 49
      },
      "id": 534,
      "options": {
@@ -1527,7 +1689,7 @@
        "h": 6,
        "w": 12,
        "x": 0,
-       "y": 48
+       "y": 56
      },
      "id": 535,
      "options": {
@@ -1641,7 +1803,7 @@
        "h": 8,
        "w": 12,
        "x": 12,
-       "y": 48
+       "y": 56
      },
      "id": 537,
      "options": {
@@ -1734,7 +1896,7 @@
        "h": 8,
        "w": 12,
        "x": 0,
-       "y": 54
+       "y": 62
      },
      "id": 548,
      "options": {
@@ -1818,7 +1980,7 @@
        "h": 8,
        "w": 12,
        "x": 12,
-       "y": 56
+       "y": 64
      },
      "id": 549,
      "options": {
@@ -1858,7 +2020,7 @@
        "h": 1,
        "w": 24,
        "x": 0,
-       "y": 64
+       "y": 72
      },
      "id": 541,
      "panels": [],
@@ -1889,7 +2051,7 @@
        "h": 8,
        "w": 12,
        "x": 0,
-       "y": 65
+       "y": 73
      },
      "id": 543,
      "options": {
@@ -1968,7 +2130,7 @@
        "h": 8,
        "w": 12,
        "x": 12,
-       "y": 65
+       "y": 73
      },
      "id": 545,
      "options": {
@@ -2073,7 +2235,7 @@
        "h": 8,
        "w": 12,
        "x": 0,
-       "y": 73
+       "y": 81
      },
      "id": 539,
      "options": {
@@ -2167,7 +2329,7 @@
        "h": 8,
        "w": 12,
        "x": 12,
-       "y": 73
+       "y": 81
      },
      "id": 564,
      "options": {
@@ -2205,7 +2367,7 @@
        "h": 1,
        "w": 24,
        "x": 0,
-       "y": 81
+       "y": 89
      },
      "id": 555,
      "panels": [],
@@ -2262,7 +2424,7 @@
        "h": 8,
        "w": 12,
        "x": 0,
-       "y": 82
+       "y": 90
      },
      "id": 554,
      "options": {
@@ -2358,7 +2520,7 @@
        "h": 8,
        "w": 12,
        "x": 12,
-       "y": 82
+       "y": 90
      },
      "id": 556,
      "options": {
@@ -2453,7 +2615,7 @@
        "h": 8,
        "w": 12,
        "x": 0,
-       "y": 90
+       "y": 98
      },
      "id": 563,
      "options": {
@@ -2535,7 +2697,7 @@
        "h": 8,
        "w": 12,
        "x": 12,
-       "y": 90
+       "y": 98
      },
      "id": 560,
      "options": {
@@ -2617,7 +2779,7 @@
        "h": 8,
        "w": 12,
        "x": 0,
-       "y": 98
+       "y": 106
      },
      "id": 558,
      "options": {
@@ -2699,7 +2861,7 @@
        "h": 8,
        "w": 12,
        "x": 12,
-       "y": 98
+       "y": 106
      },
      "id": 559,
      "options": {
@@ -2781,7 +2943,7 @@
        "h": 8,
        "w": 12,
        "x": 0,
-       "y": 106
+       "y": 114
      },
      "id": 557,
      "options": {
@@ -2876,7 +3038,7 @@
        "h": 8,
        "w": 12,
        "x": 12,
-       "y": 106
+       "y": 114
      },
      "id": 562,
      "options": {
@@ -2958,7 +3120,7 @@
        "h": 8,
        "w": 12,
        "x": 0,
-       "y": 114
+       "y": 122
      },
      "id": 561,
      "options": {
@@ -2996,7 +3158,7 @@
        "h": 1,
        "w": 24,
        "x": 0,
-       "y": 122
+       "y": 130
      },
      "id": 550,
      "panels": [],
@@ -3053,7 +3215,7 @@
        "h": 8,
        "w": 12,
        "x": 0,
-       "y": 123
+       "y": 131
      },
      "id": 551,
      "options": {
@@ -3200,7 +3362,7 @@
        "h": 8,
        "w": 12,
        "x": 12,
-       "y": 123
+       "y": 131
      },
      "id": 552,
      "options": {
@@ -3282,7 +3444,7 @@
        "h": 8,
        "w": 12,
        "x": 0,
-       "y": 131
+       "y": 139
      },
      "id": 553,
      "options": {
@@ -3320,7 +3482,7 @@
        "h": 1,
        "w": 24,
        "x": 0,
-       "y": 139
+       "y": 147
      },
      "id": 565,
      "panels": [],
@@ -3377,7 +3539,7 @@
        "h": 8,
        "w": 12,
        "x": 0,
-       "y": 140
+       "y": 148
      },
      "id": 566,
      "options": {
@@ -3472,7 +3634,7 @@
        "h": 8,
        "w": 12,
        "x": 12,
-       "y": 140
+       "y": 148
      },
      "id": 571,
      "options": {
@@ -3593,7 +3755,7 @@
        "h": 8,
        "w": 12,
        "x": 0,
-       "y": 148
+       "y": 156
      },
      "id": 569,
      "options": {
@@ -3675,7 +3837,7 @@
        "h": 8,
        "w": 12,
        "x": 12,
-       "y": 148
+       "y": 156
      },
      "id": 570,
      "options": {
@@ -3757,7 +3919,7 @@
        "h": 8,
        "w": 12,
        "x": 0,
-       "y": 156
+       "y": 164
      },
      "id": 567,
      "options": {
@@ -3839,7 +4001,7 @@
        "h": 8,
        "w": 12,
        "x": 12,
-       "y": 156
+       "y": 164
      },
      "id": 568,
      "options": {
@@ -2778,6 +2778,88 @@
      "title": "Gossip Aggregate And Proof Error",
      "type": "timeseries"
    },
    {
      "datasource": {
        "type": "prometheus",
        "uid": "${DS_PROMETHEUS}"
      },
      "fieldConfig": {
        "defaults": {
          "color": {
            "mode": "palette-classic"
          },
          "custom": {
            "axisBorderShow": false,
            "axisCenteredZero": false,
            "axisColorMode": "text",
            "axisLabel": "",
            "axisPlacement": "auto",
            "barAlignment": 0,
            "drawStyle": "line",
            "fillOpacity": 0,
            "gradientMode": "none",
            "hideFrom": {
              "legend": false,
              "tooltip": false,
              "viz": false
            },
            "insertNulls": false,
            "lineInterpolation": "linear",
            "lineWidth": 1,
            "pointSize": 5,
            "scaleDistribution": {
              "type": "linear"
            },
            "showPoints": "auto",
            "spanNulls": false,
            "stacking": {
              "group": "A",
              "mode": "none"
            },
            "thresholdsStyle": {
              "mode": "off"
            }
          },
          "mappings": []
        },
        "overrides": []
      },
      "gridPos": {
        "h": 8,
        "w": 12,
        "x": 12,
        "y": 94
      },
      "id": 510,
      "options": {
        "legend": {
          "calcs": [],
          "displayMode": "list",
          "placement": "bottom",
          "showLegend": true
        },
        "tooltip": {
          "mode": "multi",
          "sort": "none"
        }
      },
      "targets": [
        {
          "datasource": {
            "type": "prometheus",
            "uid": "${DS_PROMETHEUS}"
          },
          "editorMode": "code",
          "expr": "rate(lodestar_gossip_validation_error_total{topic=\"data_column_sidecar\"}[$rate_interval])",
          "instant": false,
          "legendFormat": "{{error}}",
          "range": true,
          "refId": "A"
        }
      ],
      "title": "Gossip DataColumnSidecar Error",
      "type": "timeseries"
    },
    {
      "collapsed": false,
      "datasource": {
@@ -6163,7 +6163,6 @@
            "fillOpacity": 0,
            "gradientMode": "none",
            "hideFrom": {
-             "graph": false,
              "legend": false,
              "tooltip": false,
              "viz": false
@@ -6185,8 +6184,7 @@
              "mode": "off"
            }
          },
-         "mappings": [],
-         "unit": "none"
+         "mappings": []
        },
        "overrides": []
      },
@@ -6196,7 +6194,7 @@
        "x": 12,
        "y": 253
      },
-     "id": 606,
+     "id": 629,
      "options": {
        "legend": {
          "calcs": [],
@@ -6205,7 +6203,7 @@
          "showLegend": true
        },
        "tooltip": {
-         "mode": "single",
+         "mode": "multi",
          "sort": "none"
        }
      },
@@ -6216,15 +6214,14 @@
          "uid": "${DS_PROMETHEUS}"
        },
        "editorMode": "code",
-       "exemplar": false,
-       "expr": "60 * (\n  rate(beacon_reqresp_outgoing_request_roundtrip_time_seconds_count [$rate_interval])\n  - on(method)\n  rate(beacon_reqresp_outgoing_request_roundtrip_time_seconds_bucket{le=\"5\"} [$rate_interval])\n)",
-       "interval": "",
-       "legendFormat": "{{method}}",
+       "expr": "rate(beacon_reqresp_outgoing_requests_error_reason_total[$rate_interval])",
+       "instant": false,
+       "legendFormat": "{{reason}}",
        "range": true,
        "refId": "A"
      }
    ],
-   "title": "Outgoing request roundtrip time > 5 sec (req / min)",
+   "title": "Outgoing request error rate by reason",
    "type": "timeseries"
  },
  {
@@ -6354,7 +6351,7 @@
            }
          },
          "mappings": [],
-         "unit": "s"
+         "unit": "none"
        },
        "overrides": []
      },
@@ -6364,7 +6361,7 @@
        "x": 12,
        "y": 261
      },
-     "id": 500,
+     "id": 606,
      "options": {
        "legend": {
          "calcs": [],
@@ -6383,14 +6380,16 @@
          "type": "prometheus",
          "uid": "${DS_PROMETHEUS}"
        },
+       "editorMode": "code",
        "exemplar": false,
-       "expr": "rate(beacon_reqresp_outgoing_request_roundtrip_time_seconds_sum[$rate_interval])\n/\nrate(beacon_reqresp_outgoing_request_roundtrip_time_seconds_count[$rate_interval])",
+       "expr": "60 * (\n  rate(beacon_reqresp_outgoing_request_roundtrip_time_seconds_count [$rate_interval])\n  - on(method)\n  rate(beacon_reqresp_outgoing_request_roundtrip_time_seconds_bucket{le=\"5\"} [$rate_interval])\n)",
        "interval": "",
        "legendFormat": "{{method}}",
+       "range": true,
        "refId": "A"
      }
    ],
-   "title": "Outgoing request roundtrip time avg",
+   "title": "Outgoing request roundtrip time > 5 sec (req / min)",
    "type": "timeseries"
  },
  {
@@ -6520,7 +6519,7 @@
            }
          },
          "mappings": [],
-         "unit": "none"
+         "unit": "s"
        },
        "overrides": []
      },
@@ -6530,6 +6529,89 @@
        "x": 12,
        "y": 269
      },
      "id": 500,
      "options": {
        "legend": {
          "calcs": [],
          "displayMode": "list",
          "placement": "bottom",
          "showLegend": true
        },
        "tooltip": {
          "mode": "single",
          "sort": "none"
        }
      },
      "targets": [
        {
          "datasource": {
            "type": "prometheus",
            "uid": "${DS_PROMETHEUS}"
          },
          "exemplar": false,
          "expr": "rate(beacon_reqresp_outgoing_request_roundtrip_time_seconds_sum[$rate_interval])\n/\nrate(beacon_reqresp_outgoing_request_roundtrip_time_seconds_count[$rate_interval])",
          "interval": "",
          "legendFormat": "{{method}}",
          "refId": "A"
        }
      ],
      "title": "Outgoing request roundtrip time avg",
      "type": "timeseries"
    },
    {
      "datasource": {
        "type": "prometheus",
        "uid": "${DS_PROMETHEUS}"
      },
      "fieldConfig": {
        "defaults": {
          "color": {
            "mode": "palette-classic"
          },
          "custom": {
            "axisBorderShow": false,
            "axisCenteredZero": false,
            "axisColorMode": "text",
            "axisLabel": "",
            "axisPlacement": "auto",
            "barAlignment": 0,
            "drawStyle": "line",
            "fillOpacity": 0,
            "gradientMode": "none",
            "hideFrom": {
              "graph": false,
              "legend": false,
              "tooltip": false,
              "viz": false
            },
            "insertNulls": false,
            "lineInterpolation": "linear",
            "lineWidth": 1,
            "pointSize": 5,
            "scaleDistribution": {
              "type": "linear"
            },
            "showPoints": "auto",
            "spanNulls": false,
            "stacking": {
              "group": "A",
              "mode": "none"
            },
            "thresholdsStyle": {
              "mode": "off"
            }
          },
          "mappings": [],
          "unit": "none"
        },
        "overrides": []
      },
      "gridPos": {
        "h": 8,
        "w": 12,
        "x": 12,
        "y": 277
      },
      "id": 501,
      "options": {
        "legend": {
dashboards/lodestar_peerdas.json · 1957 lines · new file (diff suppressed because it is too large)
docs/pages/contribution/testing/kurtosis.md · 203 lines · new file
@@ -0,0 +1,203 @@
# Kurtosis Scripts for Lodestar

This directory contains scripts and configurations for running Lodestar testnets using [Kurtosis](https://www.kurtosis.com/).

## Prerequisites

1. Install Kurtosis: https://docs.kurtosis.com/install
2. Install Docker: https://docs.docker.com/get-docker/

## Quick Start

From the Lodestar root directory:

```bash
# Start a testnet (automatically builds local Docker image)
./scripts/kurtosis/run.sh start

# View running services and find service names
kurtosis enclave inspect lodestar-testnet

# List just the service names
kurtosis enclave inspect lodestar-testnet | grep -E "cl-|el-|vc-" | grep RUNNING

# Check logs of a specific service (use actual service name from above)
# Note: Service names follow pattern: cl-<number>-<client>-<execution-client>
# For Lodestar nodes, this will be cl-3-lodestar-geth, cl-4-lodestar-geth, etc.
./scripts/kurtosis/run.sh logs cl-3-lodestar-geth

# Follow logs in real-time
./scripts/kurtosis/run.sh logs cl-3-lodestar-geth --follow

# Stop and clean up the testnet
./scripts/kurtosis/run.sh stop

# Force clean up (if stop fails)
./scripts/kurtosis/run.sh clean
```

Or change to the kurtosis directory first:

```bash
cd scripts/kurtosis/

# Start a testnet
./run.sh start

# Find service names
kurtosis enclave inspect lodestar-testnet | grep RUNNING

# Check logs (replace with actual service name)
./run.sh logs cl-3-lodestar-geth

# Stop
./run.sh stop
```

**Note:** The `start` command automatically builds a fresh Docker image from your local Lodestar code before starting the testnet.

## Advanced Usage

### Custom Configuration

```bash
# Create a custom config
cp lodestar.yaml my-custom-config.yaml
# Edit my-custom-config.yaml

# Run with custom config
./run.sh start my-custom-config.yaml
```

### Port Forwarding

Access services running inside Kurtosis:

```bash
# Forward Grafana dashboard
./run.sh port-forward grafana 3000

# Forward Lodestar REST API
./run.sh port-forward cl-1-lodestar 9596
```

### Custom Enclave Name

Run multiple testnets simultaneously:

```bash
./run.sh -e testnet-1 start
./run.sh -e testnet-2 start config-2.yaml
```

## Troubleshooting

### Finding Service Names

The testnet runs multiple services. To find the correct service name for logs:

```bash
# Show all services with their names
kurtosis enclave inspect lodestar-testnet | grep -A 1 "User Services" | grep -E "cl-|el-|vc-"

# Example output:
# cl-1-lighthouse-geth   (Lighthouse consensus client)
# cl-2-lighthouse-geth   (Lighthouse consensus client)
# cl-3-lodestar-geth     (Lodestar consensus client)
# cl-4-lodestar-geth     (Lodestar consensus client)
# el-1-geth-lighthouse   (Geth execution client)
# el-2-geth-lighthouse   (Geth execution client)
```

Service naming pattern:

- `cl-` = Consensus Layer
- `el-` = Execution Layer
- `vc-` = Validator Client
- Format: `<layer>-<number>-<client>-<paired-client>`

### Check Service Status

```bash
kurtosis service inspect lodestar-testnet <service-name>
```

### Debug Failed Services

```bash
# Check logs of a failed service
kurtosis service logs lodestar-testnet <service-name>

# Get a shell inside a service container
kurtosis service shell lodestar-testnet <service-name>
```

### Clean Up Stuck Enclaves

```bash
# Force remove an enclave
kurtosis enclave rm -f lodestar-testnet

# Remove all enclaves
kurtosis clean -a
```

## Configuration Options

The configuration files use the [ethereum-package](https://github.com/ethpandaops/ethereum-package) format. Key options include:

- `participants`: Define execution and consensus layer nodes
- `network_params`: Network configuration (preset, fork epochs, etc.)
- `additional_services`: Enable monitoring, transaction spammers, etc.

See the [ethereum-package documentation](https://github.com/ethpandaops/ethereum-package) for all available options.

## Examples

### Minimal Testnet

```yaml
participants:
  - el_type: geth
    count: 1
  - cl_type: lodestar
    count: 1
    validator_count: 32

network_params:
  preset: minimal
```

### Multi-Client Testnet

```yaml
participants:
  - el_type: geth
    count: 2
  - el_type: besu
    count: 1
  - cl_type: lodestar
    count: 2
  - cl_type: lighthouse
    count: 1
    validator_count: 32

network_params:
  preset: mainnet
```

### Testnet with Custom Lodestar Flags

```yaml
participants:
  - el_type: geth
    count: 2
  - cl_type: lodestar
    cl_image: chainsafe/lodestar:latest
    count: 2
    cl_extra_params:
      - "--metrics"
      - "--metrics.port=8008"
      - "--network.subscribeAllSubnets"
    validator_count: 32
```
@@ -114,6 +114,7 @@ const sidebars: SidebarsConfig = {
       "contribution/testing/index",
       "contribution/testing/end-to-end-tests",
       "contribution/testing/integration-tests",
+      "contribution/testing/kurtosis",
       "contribution/testing/performance-tests",
       "contribution/testing/simulation-tests",
       "contribution/testing/spec-tests",
@@ -4,7 +4,7 @@
   ],
   "npmClient": "yarn",
   "useNx": true,
-  "version": "1.33.0",
+  "version": "1.34.0",
   "stream": true,
   "command": {
     "version": {
lodestar · 4 lines
@@ -4,4 +4,6 @@
 #
 # ./lodestar.sh beacon --network mainnet

-exec node --trace-deprecation --max-old-space-size=8192 ./packages/cli/bin/lodestar.js "$@"
+SCRIPT_DIR=$(cd "$(dirname -- "$0")" >/dev/null 2>&1 && pwd -P)
+
+exec node --trace-deprecation --max-old-space-size=8192 "$SCRIPT_DIR/packages/cli/bin/lodestar.js" "$@"
@@ -42,12 +42,14 @@
   },
   "devDependencies": {
     "@actions/core": "^1.11.1",
-    "@biomejs/biome": "^2.2.0",
     "@chainsafe/benchmark": "^1.2.3",
-    "@chainsafe/biomejs-config": "^0.1.2",
+    "@biomejs/biome": "^1.9.4",
+    "@chainsafe/biomejs-config": "^1.0.0",
     "@types/node": "^20.12.8",
+    "@types/react": "^19.1.12",
     "@vitest/browser": "3.0.9",
     "@vitest/coverage-v8": "3.0.9",
+    "bun-types": "^1.2.21",
     "crypto-browserify": "^3.12.0",
     "dotenv": "^16.4.5",
     "electron": "^26.2.2",
@@ -11,7 +11,7 @@
   "bugs": {
     "url": "https://github.com/ChainSafe/lodestar/issues"
   },
-  "version": "1.33.0",
+  "version": "1.34.0",
   "type": "module",
   "exports": {
     ".": {
@@ -70,12 +70,12 @@
     "check-readme": "typescript-docs-verifier"
   },
   "dependencies": {
-    "@chainsafe/persistent-merkle-tree": "^1.2.0",
-    "@chainsafe/ssz": "^1.2.1",
-    "@lodestar/config": "^1.33.0",
-    "@lodestar/params": "^1.33.0",
-    "@lodestar/types": "^1.33.0",
-    "@lodestar/utils": "^1.33.0",
+    "@chainsafe/persistent-merkle-tree": "^1.2.1",
+    "@chainsafe/ssz": "^1.2.2",
+    "@lodestar/config": "^1.34.0",
+    "@lodestar/params": "^1.34.0",
+    "@lodestar/types": "^1.34.0",
+    "@lodestar/utils": "^1.34.0",
     "eventsource": "^2.0.2",
     "qs": "^6.11.1"
   },
@@ -1,16 +1,22 @@
 import {ContainerType, ListCompositeType, ValueOf} from "@chainsafe/ssz";
 import {ChainForkConfig} from "@lodestar/config";
-import {ForkName, ForkPreBellatrix, ForkPreElectra, isForkPostBellatrix, isForkPostDeneb} from "@lodestar/params";
+import {
+  ForkName,
+  ForkPostDeneb,
+  ForkPreBellatrix,
+  ForkPreDeneb,
+  ForkPreElectra,
+  isForkPostBellatrix,
+  isForkPostDeneb,
+} from "@lodestar/params";
 import {
   BeaconBlockBody,
   RootHex,
   SignedBeaconBlock,
-  SignedBeaconBlockOrContents,
-  SignedBlindedBeaconBlock,
   SignedBlockContents,
   Slot,
   deneb,
   isSignedBlockContents,
   ssz,
   sszTypesFor,
 } from "@lodestar/types";
@@ -163,7 +169,7 @@ export type Endpoints = {
    */
   publishBlock: Endpoint<
     "POST",
-    {signedBlockOrContents: SignedBeaconBlockOrContents},
+    {signedBlockContents: SignedBlockContents},
     {body: unknown; headers: {[MetaHeader.Version]: string}},
     EmptyResponseData,
     EmptyMeta
@@ -172,7 +178,7 @@ export type Endpoints = {
   publishBlockV2: Endpoint<
     "POST",
     {
-      signedBlockOrContents: SignedBeaconBlockOrContents;
+      signedBlockContents: SignedBlockContents;
       broadcastValidation?: BroadcastValidation;
     },
     {body: unknown; headers: {[MetaHeader.Version]: string}; query: {broadcast_validation?: string}},
@@ -220,9 +226,27 @@ export type Endpoints = {
     deneb.BlobSidecars,
     ExecutionOptimisticFinalizedAndVersionMeta
   >;

+  /**
+   * Get blobs
+   * Retrieves blobs for a given block id.
+   */
+  getBlobs: Endpoint<
+    "GET",
+    BlockArgs & {
+      /**
+       * Array of versioned hashes for blobs to request for in the specified block.
+       * Returns all blobs in the block if not specified.
+       */
+      versionedHashes?: string[];
+    },
+    {params: {block_id: string}; query: {versioned_hashes?: string[]}},
+    deneb.Blobs,
+    ExecutionOptimisticAndFinalizedMeta
+  >;
 };

-// biome-ignore lint/suspicious/noExplicitAny: <explanation>
+// biome-ignore lint/suspicious/noExplicitAny: Return type has to match multiple routes and we only care about request type here
 const blockIdOnlyReq: RequestCodec<Endpoint<"GET", {blockId: BlockId}, {params: {block_id: string}}, any, any>> = {
   writeReq: ({blockId}) => ({params: {block_id: blockId.toString()}}),
   parseReq: ({params}) => ({blockId: params.block_id}),
@@ -304,16 +328,16 @@ export function getDefinitions(config: ChainForkConfig): RouteDefinitions<Endpoi
       url: "/eth/v1/beacon/blocks",
       method: "POST",
       req: {
-        writeReqJson: ({signedBlockOrContents}) => {
-          const slot = isSignedBlockContents(signedBlockOrContents)
-            ? signedBlockOrContents.signedBlock.message.slot
-            : signedBlockOrContents.message.slot;
+        writeReqJson: ({signedBlockContents}) => {
+          const slot = signedBlockContents.signedBlock.message.slot;
           const fork = config.getForkName(slot);

           return {
             body: isForkPostDeneb(fork)
-              ? sszTypesFor(fork).SignedBlockContents.toJson(signedBlockOrContents as SignedBlockContents)
-              : sszTypesFor(fork).SignedBeaconBlock.toJson(signedBlockOrContents as SignedBeaconBlock),
+              ? sszTypesFor(fork).SignedBlockContents.toJson(signedBlockContents as SignedBlockContents<ForkPostDeneb>)
+              : sszTypesFor(fork).SignedBeaconBlock.toJson(
+                  signedBlockContents.signedBlock as SignedBeaconBlock<ForkPreDeneb>
+                ),
             headers: {
               [MetaHeader.Version]: config.getForkName(slot),
             },
@@ -334,21 +358,23 @@ export function getDefinitions(config: ChainForkConfig): RouteDefinitions<Endpoi
           );
         }
         return {
-          signedBlockOrContents: isForkPostDeneb(forkName)
+          signedBlockContents: isForkPostDeneb(forkName)
             ? sszTypesFor(forkName).SignedBlockContents.fromJson(body)
-            : ssz[forkName].SignedBeaconBlock.fromJson(body),
+            : {signedBlock: ssz[forkName].SignedBeaconBlock.fromJson(body)},
         };
       },
-      writeReqSsz: ({signedBlockOrContents}) => {
-        const slot = isSignedBlockContents(signedBlockOrContents)
-          ? signedBlockOrContents.signedBlock.message.slot
-          : signedBlockOrContents.message.slot;
+      writeReqSsz: ({signedBlockContents}) => {
+        const slot = signedBlockContents.signedBlock.message.slot;
         const fork = config.getForkName(slot);

        return {
          body: isForkPostDeneb(fork)
-            ? sszTypesFor(fork).SignedBlockContents.serialize(signedBlockOrContents as SignedBlockContents)
-            : sszTypesFor(fork).SignedBeaconBlock.serialize(signedBlockOrContents as SignedBeaconBlock),
+            ? sszTypesFor(fork).SignedBlockContents.serialize(
+                signedBlockContents as SignedBlockContents<ForkPostDeneb>
+              )
+            : sszTypesFor(fork).SignedBeaconBlock.serialize(
+                signedBlockContents.signedBlock as SignedBeaconBlock<ForkPreDeneb>
+              ),
          headers: {
            [MetaHeader.Version]: config.getForkName(slot),
          },
@@ -357,9 +383,9 @@ export function getDefinitions(config: ChainForkConfig): RouteDefinitions<Endpoi
      parseReqSsz: ({body, headers}) => {
        const forkName = toForkName(fromHeaders(headers, MetaHeader.Version));
        return {
-          signedBlockOrContents: isForkPostDeneb(forkName)
+          signedBlockContents: isForkPostDeneb(forkName)
            ? sszTypesFor(forkName).SignedBlockContents.deserialize(body)
-            : ssz[forkName].SignedBeaconBlock.deserialize(body),
+            : {signedBlock: ssz[forkName].SignedBeaconBlock.deserialize(body)},
        };
      },
      schema: {
@@ -376,15 +402,15 @@ export function getDefinitions(config: ChainForkConfig): RouteDefinitions<Endpoi
      url: "/eth/v2/beacon/blocks",
      method: "POST",
      req: {
-        writeReqJson: ({signedBlockOrContents, broadcastValidation}) => {
-          const slot = isSignedBlockContents(signedBlockOrContents)
-            ? signedBlockOrContents.signedBlock.message.slot
-            : signedBlockOrContents.message.slot;
+        writeReqJson: ({signedBlockContents, broadcastValidation}) => {
+          const slot = signedBlockContents.signedBlock.message.slot;
          const fork = config.getForkName(slot);
          return {
            body: isForkPostDeneb(fork)
-              ? sszTypesFor(fork).SignedBlockContents.toJson(signedBlockOrContents as SignedBlockContents)
-              : sszTypesFor(fork).SignedBeaconBlock.toJson(signedBlockOrContents as SignedBeaconBlock),
+              ? sszTypesFor(fork).SignedBlockContents.toJson(signedBlockContents as SignedBlockContents<ForkPostDeneb>)
+              : sszTypesFor(fork).SignedBeaconBlock.toJson(
+                  signedBlockContents.signedBlock as SignedBeaconBlock<ForkPreDeneb>
+                ),
            headers: {
              [MetaHeader.Version]: fork,
            },
@@ -394,22 +420,24 @@ export function getDefinitions(config: ChainForkConfig): RouteDefinitions<Endpoi
        parseReqJson: ({body, headers, query}) => {
          const forkName = toForkName(fromHeaders(headers, MetaHeader.Version));
          return {
-            signedBlockOrContents: isForkPostDeneb(forkName)
+            signedBlockContents: isForkPostDeneb(forkName)
              ? sszTypesFor(forkName).SignedBlockContents.fromJson(body)
-              : ssz[forkName].SignedBeaconBlock.fromJson(body),
+              : {signedBlock: ssz[forkName].SignedBeaconBlock.fromJson(body)},
            broadcastValidation: query.broadcast_validation as BroadcastValidation,
          };
        },
-        writeReqSsz: ({signedBlockOrContents, broadcastValidation}) => {
-          const slot = isSignedBlockContents(signedBlockOrContents)
-            ? signedBlockOrContents.signedBlock.message.slot
-            : signedBlockOrContents.message.slot;
+        writeReqSsz: ({signedBlockContents, broadcastValidation}) => {
+          const slot = signedBlockContents.signedBlock.message.slot;
          const fork = config.getForkName(slot);

          return {
            body: isForkPostDeneb(fork)
-              ? sszTypesFor(fork).SignedBlockContents.serialize(signedBlockOrContents as SignedBlockContents)
-              : sszTypesFor(fork).SignedBeaconBlock.serialize(signedBlockOrContents as SignedBeaconBlock),
+              ? sszTypesFor(fork).SignedBlockContents.serialize(
+                  signedBlockContents as SignedBlockContents<ForkPostDeneb>
+                )
+              : sszTypesFor(fork).SignedBeaconBlock.serialize(
+                  signedBlockContents.signedBlock as SignedBeaconBlock<ForkPreDeneb>
+                ),
            headers: {
              [MetaHeader.Version]: fork,
            },
@@ -419,9 +447,9 @@ export function getDefinitions(config: ChainForkConfig): RouteDefinitions<Endpoi
        parseReqSsz: ({body, headers, query}) => {
          const forkName = toForkName(fromHeaders(headers, MetaHeader.Version));
          return {
-            signedBlockOrContents: isForkPostDeneb(forkName)
+            signedBlockContents: isForkPostDeneb(forkName)
              ? sszTypesFor(forkName).SignedBlockContents.deserialize(body)
-              : ssz[forkName].SignedBeaconBlock.deserialize(body),
+              : {signedBlock: ssz[forkName].SignedBeaconBlock.deserialize(body)},
            broadcastValidation: query.broadcast_validation as BroadcastValidation,
          };
        },
@@ -551,5 +579,24 @@ export function getDefinitions(config: ChainForkConfig): RouteDefinitions<Endpoi
        meta: ExecutionOptimisticFinalizedAndVersionCodec,
      },
    },
+    getBlobs: {
+      url: "/eth/v1/beacon/blobs/{block_id}",
+      method: "GET",
+      req: {
+        writeReq: ({blockId, versionedHashes}) => ({
+          params: {block_id: blockId.toString()},
+          query: {versioned_hashes: versionedHashes},
+        }),
+        parseReq: ({params, query}) => ({
+          blockId: params.block_id,
+          versionedHashes: query.versioned_hashes?.map((hash) => hash.toLowerCase()),
+        }),
+        schema: {params: {block_id: Schema.StringRequired}, query: {versioned_hashes: Schema.StringArray}},
+      },
+      resp: {
+        data: ssz.deneb.Blobs,
+        meta: ExecutionOptimisticAndFinalizedCodec,
+      },
+    },
  };
}
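For context, here is a minimal sketch of how the new `getBlobs` route might be called through the API client. It assumes the usual `@lodestar/api` `getClient` shape; the node URL is a placeholder:

```ts
import {getClient} from "@lodestar/api";
import {config} from "@lodestar/config/default";

const api = getClient({baseUrl: "http://localhost:9596"}, {config});

// GET /eth/v1/beacon/blobs/{block_id}?versioned_hashes=...
// Omitting versionedHashes returns all blobs in the block.
const res = await api.beacon.getBlobs({blockId: "head"});
const blobs = res.value(); // throws if the request was not successful
console.log(`head block has ${blobs.length} blobs`);
```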
@@ -77,7 +77,6 @@ export const EpochSyncCommitteeResponseType = new ContainerType(
   {
     /** All of the validator indices in the current sync committee */
     validators: ArrayOf(ssz.ValidatorIndex),
-    // TODO: This property will likely be deprecated
     /** Subcommittee slices of the current sync committee */
     validatorAggregates: ArrayOf(ArrayOf(ssz.ValidatorIndex)),
   },
@@ -334,7 +333,7 @@ export type Endpoints = {
   >;
 };

-// biome-ignore lint/suspicious/noExplicitAny: <explanation>
+// biome-ignore lint/suspicious/noExplicitAny: We need to use `any` type here
 const stateIdOnlyReq: RequestCodec<Endpoint<"GET", {stateId: StateId}, {params: {state_id: string}}, any, any>> = {
   writeReq: ({stateId}) => ({params: {state_id: stateId.toString()}}),
   parseReq: ({params}) => ({stateId: params.state_id}),
@@ -1,6 +1,6 @@
 import {ContainerType, Type, ValueOf} from "@chainsafe/ssz";
 import {ChainForkConfig} from "@lodestar/config";
-import {BeaconState, StringType, ssz} from "@lodestar/types";
+import {BeaconState, StringType, fulu, ssz} from "@lodestar/types";
 import {
   ArrayOf,
   EmptyArgs,
@@ -17,6 +17,7 @@ import {
 import {Schema} from "../../utils/schema.js";
 import {Endpoint, RouteDefinitions} from "../../utils/types.js";
 import {WireFormat} from "../../utils/wireFormat.js";
+import {BlockArgs} from "./beacon/block.js";
 import {StateArgs} from "./beacon/state.js";

 const stringType = new StringType();
@@ -133,6 +134,25 @@ export type Endpoints = {
     BeaconState,
     ExecutionOptimisticFinalizedAndVersionMeta
   >;

+  /**
+   * Get data column sidecars
+   * Retrieves data column sidecars for a given block id.
+   */
+  getDebugDataColumnSidecars: Endpoint<
+    "GET",
+    BlockArgs & {
+      /**
+       * Array of indices for data column sidecars to request for in the specified block.
+       * This endpoint will only return columns that the node is actually custodying.
+       * If not specified, returns all data column sidecars that this node is custodying in the block.
+       */
+      indices?: number[];
+    },
+    {params: {block_id: string}; query: {indices?: number[]}},
+    fulu.DataColumnSidecars,
+    ExecutionOptimisticFinalizedAndVersionMeta
+  >;
 };

 export function getDefinitions(_config: ChainForkConfig): RouteDefinitions<Endpoints> {
@@ -194,5 +214,18 @@ export function getDefinitions(_config: ChainForkConfig): RouteDefinitions<Endpo
         timeoutMs: 5 * 60 * 1000,
       },
     },
+    getDebugDataColumnSidecars: {
+      url: "/eth/v1/debug/beacon/data_column_sidecars/{block_id}",
+      method: "GET",
+      req: {
+        writeReq: ({blockId, indices}) => ({params: {block_id: blockId.toString()}, query: {indices}}),
+        parseReq: ({params, query}) => ({blockId: params.block_id, indices: query.indices}),
+        schema: {params: {block_id: Schema.StringRequired}, query: {indices: Schema.UintArray}},
+      },
+      resp: {
+        data: ssz.fulu.DataColumnSidecars,
+        meta: ExecutionOptimisticFinalizedAndVersionCodec,
+      },
+    },
   };
 }
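Along the same lines, a hedged sketch of querying the new debug route. Note the doc comment above: the node only returns columns it actually custodies, so the result can be a subset of the requested indices:

```ts
import {getClient} from "@lodestar/api";
import {config} from "@lodestar/config/default";

const api = getClient({baseUrl: "http://localhost:9596"}, {config});

// GET /eth/v1/debug/beacon/data_column_sidecars/head?indices=0,1
// Without `indices`, all custodied columns for the block are returned.
const res = await api.debug.getDebugDataColumnSidecars({blockId: "head", indices: [0, 1]});
const columns = res.value();
console.log(columns.map((c) => c.index)); // e.g. [0, 1] if both columns are custodied
```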
@@ -1,6 +1,6 @@
-import {ContainerType, ValueOf} from "@chainsafe/ssz";
+import {ContainerType, ListBasicType, ValueOf} from "@chainsafe/ssz";
 import {ChainForkConfig} from "@lodestar/config";
-import {ForkName} from "@lodestar/params";
+import {ForkName, MAX_BLOB_COMMITMENTS_PER_BLOCK} from "@lodestar/params";
 import {
   Attestation,
   AttesterSlashing,
@@ -38,6 +38,17 @@ export const blobSidecarSSE = new ContainerType(
 );
 type BlobSidecarSSE = ValueOf<typeof blobSidecarSSE>;

+export const dataColumnSidecarSSE = new ContainerType(
+  {
+    blockRoot: stringType,
+    index: ssz.ColumnIndex,
+    slot: ssz.Slot,
+    kzgCommitments: new ListBasicType(stringType, MAX_BLOB_COMMITMENTS_PER_BLOCK),
+  },
+  {typeName: "DataColumnSidecarSSE", jsonCase: "eth2"}
+);
+type DataColumnSidecarSSE = ValueOf<typeof dataColumnSidecarSSE>;
+
 export enum EventType {
   /**
    * The node has finished processing, resulting in a new head. previous_duty_dependent_root is
@@ -76,6 +87,8 @@ export enum EventType {
   payloadAttributes = "payload_attributes",
   /** The node has received a valid BlobSidecar (from P2P or API) */
   blobSidecar = "blob_sidecar",
+  /** The node has received a valid DataColumnSidecar (from P2P or API) */
+  dataColumnSidecar = "data_column_sidecar",
 }

 export const eventTypes: {[K in EventType]: K} = {
@@ -95,6 +108,7 @@ export const eventTypes: {[K in EventType]: K} = {
   [EventType.lightClientFinalityUpdate]: EventType.lightClientFinalityUpdate,
   [EventType.payloadAttributes]: EventType.payloadAttributes,
   [EventType.blobSidecar]: EventType.blobSidecar,
+  [EventType.dataColumnSidecar]: EventType.dataColumnSidecar,
 };

 export type EventData = {
@@ -143,6 +157,7 @@ export type EventData = {
   [EventType.lightClientFinalityUpdate]: {version: ForkName; data: LightClientFinalityUpdate};
   [EventType.payloadAttributes]: {version: ForkName; data: SSEPayloadAttributes};
   [EventType.blobSidecar]: BlobSidecarSSE;
+  [EventType.dataColumnSidecar]: DataColumnSidecarSSE;
 };

 export type BeaconEvent = {[K in EventType]: {type: K; message: EventData[K]}}[EventType];
@@ -296,6 +311,7 @@ export function getTypeByEvent(config: ChainForkConfig): {[K in EventType]: Type
   [EventType.contributionAndProof]: ssz.altair.SignedContributionAndProof,
   [EventType.payloadAttributes]: WithVersion((fork) => getPostBellatrixForkTypes(fork).SSEPayloadAttributes),
   [EventType.blobSidecar]: blobSidecarSSE,
+  [EventType.dataColumnSidecar]: dataColumnSidecarSSE,

   [EventType.lightClientOptimisticUpdate]: WithVersion(
     (fork) => getPostAltairForkTypes(fork).LightClientOptimisticUpdate
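A minimal sketch of consuming the new SSE topic, assuming the standard `eventstream` method on the `@lodestar/api` events client:

```ts
import {getClient, routes} from "@lodestar/api";
import {config} from "@lodestar/config/default";

const api = getClient({baseUrl: "http://localhost:9596"}, {config});
const controller = new AbortController();

// Streams `data_column_sidecar` events as the node sees valid sidecars (from P2P or API)
await api.events.eventstream({
  topics: [routes.events.EventType.dataColumnSidecar],
  signal: controller.signal,
  onEvent: (event) => {
    if (event.type === routes.events.EventType.dataColumnSidecar) {
      const {slot, index, kzgCommitments} = event.message;
      console.log(`column ${index} at slot ${slot} (${kzgCommitments.length} commitments)`);
    }
  },
});
```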
@@ -26,7 +26,7 @@ export type SyncChainDebugState = {
   status: string;
   startEpoch: number;
   peers: number;
-  // biome-ignore lint/suspicious/noExplicitAny: <explanation>
+  // biome-ignore lint/suspicious/noExplicitAny: We need to use `any` type here
   batches: any[];
 };
@@ -1,6 +1,6 @@
 import {ContainerType, ValueOf} from "@chainsafe/ssz";
 import {ChainForkConfig} from "@lodestar/config";
-import {ssz, stringType} from "@lodestar/types";
+import {fulu, ssz, stringType} from "@lodestar/types";
 import {
   ArrayOf,
   EmptyArgs,
@@ -24,6 +24,7 @@ export const NetworkIdentityType = new ContainerType(
     enr: stringType,
     p2pAddresses: ArrayOf(stringType),
     discoveryAddresses: ArrayOf(stringType),
+    // TODO Fulu: replace with `ssz.fulu.Metadata` once `custody_group_count` is more widely supported
     /** Based on Ethereum Consensus [Metadata object](https://github.com/ethereum/consensus-specs/blob/v1.1.10/specs/phase0/p2p-interface.md#metadata) */
     metadata: ssz.altair.Metadata,
   },
@@ -56,7 +57,9 @@ export const SyncingStatusType = new ContainerType(
   {jsonCase: "eth2"}
 );

-export type NetworkIdentity = ValueOf<typeof NetworkIdentityType>;
+export type NetworkIdentity = ValueOf<typeof NetworkIdentityType> & {
+  metadata: Partial<fulu.Metadata>;
+};

 export type PeerState = "disconnected" | "connecting" | "connected" | "disconnecting";
 export type PeerDirection = "inbound" | "outbound";
@@ -190,7 +193,26 @@ export function getDefinitions(_config: ChainForkConfig): RouteDefinitions<Endpo
       req: EmptyRequestCodec,
       resp: {
         onlySupport: WireFormat.json,
-        data: NetworkIdentityType,
+        // TODO Fulu: clean this up
+        data: {
+          ...JsonOnlyResponseCodec.data,
+          toJson: (data) => {
+            const json = NetworkIdentityType.toJson(data);
+            const {custodyGroupCount} = data.metadata;
+            (json as {metadata: {custody_group_count: string | undefined}}).metadata.custody_group_count =
+              custodyGroupCount !== undefined ? String(custodyGroupCount) : undefined;
+            return json;
+          },
+          fromJson: (json) => {
+            const data = NetworkIdentityType.fromJson(json);
+            const {
+              metadata: {custody_group_count},
+            } = json as {metadata: {custody_group_count: string | undefined}};
+            (data.metadata as Partial<fulu.Metadata>).custodyGroupCount =
+              custody_group_count !== undefined ? parseInt(custody_group_count) : undefined;
+            return data;
+          },
+        },
         meta: EmptyMetaCodec,
       },
     },
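For context on the `String()`/`parseInt()` pair above: eth2 JSON encodes uint64 values as decimal strings, so the custom codec has to stringify `custodyGroupCount` on the way out and parse it on the way in. An illustrative wire shape (field values are made up):

```ts
// Illustrative /eth/v1/node/identity response body after this change
const identityJson = {
  peer_id: "16Uiu2HA...", // truncated, illustrative
  metadata: {
    seq_number: "42",
    attnets: "0x0000000000000000",
    syncnets: "0x00",
    custody_group_count: "8", // uint64 serialized as a decimal string
  },
};

// Parsed back into the extended NetworkIdentity type:
const custodyGroupCount = parseInt(identityJson.metadata.custody_group_count); // 8
```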
@@ -1,11 +1,18 @@
import {ContainerType, Type, ValueOf} from "@chainsafe/ssz";
import {ChainForkConfig} from "@lodestar/config";
import {VALIDATOR_REGISTRY_LIMIT, isForkPostDeneb, isForkPostElectra} from "@lodestar/params";
import {
  ForkPostDeneb,
  ForkPreDeneb,
  VALIDATOR_REGISTRY_LIMIT,
  isForkPostDeneb,
  isForkPostElectra,
} from "@lodestar/params";
import {
  Attestation,
  BLSSignature,
  BeaconBlockOrContents,
  BeaconBlock,
  BlindedBeaconBlock,
  BlockContents,
  CommitteeIndex,
  Epoch,
  ProducedBlockSource,
@@ -27,7 +34,6 @@ import {
  EmptyResponseCodec,
  EmptyResponseData,
  JsonOnlyReq,
  WithMeta,
  WithVersion,
} from "../../utils/codecs.js";
import {getPostBellatrixForkTypes, toForkName} from "../../utils/fork.js";
@@ -320,7 +326,7 @@ export type Endpoints = {
        blinded_local?: boolean;
      };
    },
    BeaconBlockOrContents | BlindedBeaconBlock,
    BlockContents | BlindedBeaconBlock,
    ProduceBlockV3Meta
  >;

@@ -630,14 +636,43 @@ export function getDefinitions(config: ChainForkConfig): RouteDefinitions<Endpoi
        },
      },
      resp: {
        data: WithMeta(
          ({version, executionPayloadBlinded}) =>
            (executionPayloadBlinded
              ? getPostBellatrixForkTypes(version).BlindedBeaconBlock
        // The spec defines the response as `preDeneb.BeaconBlock | postDeneb.BlockContents`
        // We represent the response as `{block: preDeneb.BeaconBlock} | postDeneb.BlockContents` (aka BlockContents in our codebase)
        // Due to this discrepancy, we require a hand-written codec to handle the transformation.
        data: {
          toJson(data, {executionPayloadBlinded, version}) {
            return executionPayloadBlinded
              ? getPostBellatrixForkTypes(version).BlindedBeaconBlock.toJson(data as BlindedBeaconBlock)
              : isForkPostDeneb(version)
                ? sszTypesFor(version).BlockContents
                : ssz[version].BeaconBlock) as Type<BeaconBlockOrContents | BlindedBeaconBlock>
        ),
                ? sszTypesFor(version).BlockContents.toJson(data as BlockContents<ForkPostDeneb>)
                : (ssz[version].BeaconBlock as Type<BeaconBlock<ForkPreDeneb>>).toJson(
                    (data as BlockContents).block as BeaconBlock<ForkPreDeneb> // <- transformation
                  );
          },
          fromJson(data, {executionPayloadBlinded, version}) {
            return executionPayloadBlinded
              ? getPostBellatrixForkTypes(version).BlindedBeaconBlock.fromJson(data)
              : isForkPostDeneb(version)
                ? sszTypesFor(version).BlockContents.fromJson(data)
                : {block: ssz[version].BeaconBlock.fromJson(data)}; // <- transformation
          },
          serialize(data, {executionPayloadBlinded, version}) {
            return executionPayloadBlinded
              ? getPostBellatrixForkTypes(version).BlindedBeaconBlock.serialize(data as BlindedBeaconBlock)
              : isForkPostDeneb(version)
                ? sszTypesFor(version).BlockContents.serialize(data as BlockContents<ForkPostDeneb>)
                : (ssz[version].BeaconBlock as Type<BeaconBlock<ForkPreDeneb>>).serialize(
                    (data as BlockContents).block as BeaconBlock<ForkPreDeneb> // <- transformation
                  );
          },
          deserialize(data, {executionPayloadBlinded, version}) {
            return executionPayloadBlinded
              ? getPostBellatrixForkTypes(version).BlindedBeaconBlock.deserialize(data)
              : isForkPostDeneb(version)
                ? sszTypesFor(version).BlockContents.deserialize(data)
                : {block: ssz[version].BeaconBlock.deserialize(data)}; // <- transformation
          },
        },
        meta: {
          toJson: (meta) => ({
            ...ProduceBlockV3MetaType.toJson(meta),

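To make the wrap/unwrap concrete, a minimal sketch (editor's illustration, not part of the diff) of the pre-Deneb path, using `ssz` from `@lodestar/types`:

import {ssz} from "@lodestar/types";

// Pre-Deneb the wire format is a bare BeaconBlock; the codebase always handles {block, ...}.
const block = ssz.phase0.BeaconBlock.defaultValue();
const json = ssz.phase0.BeaconBlock.toJson(block); // unwrap on the way out
const wrapped = {block: ssz.phase0.BeaconBlock.fromJson(json)}; // wrap on the way back in
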
@@ -25,7 +25,7 @@ export function registerRoutes(
    // Enforces that we are declaring routes for every routeId in `Endpoints`
    [K in keyof Endpoints]: () => {
      // The Endpoints are enforced in each getRoutes return type
      // biome-ignore lint/suspicious/noExplicitAny: <explanation>
      // biome-ignore lint/suspicious/noExplicitAny: We need to use `any` type here
      [K2 in keyof Endpoints[K]]: FastifyRoute<any>;
    };
  } = {

@@ -20,7 +20,7 @@ import {testData as validatorTestData} from "./testData/validator.js";
// Solutions: https://stackoverflow.com/questions/46745014/alternative-for-dirname-in-node-js-when-using-es6-modules
const __dirname = path.dirname(fileURLToPath(import.meta.url));

const version = "v3.1.0";
const version = "v4.0.0-alpha.1";
const openApiFile: OpenApiFile = {
  url: `https://github.com/ethereum/beacon-APIs/releases/download/${version}/beacon-node-oapi.json`,
  filepath: path.join(__dirname, "../../../oapi-schemas/beacon-node-oapi.json"),

@@ -1,6 +1,6 @@
import {toHexString} from "@chainsafe/ssz";
import {ForkName} from "@lodestar/params";
import {ssz} from "@lodestar/types";
import {toHex} from "@lodestar/utils";
import {
  BlockHeaderResponse,
  BroadcastValidation,
@@ -13,7 +13,8 @@ const root = new Uint8Array(32).fill(1);
const randao = new Uint8Array(32).fill(1);
const balance = 32e9;
const reward = 32e9;
const pubkeyHex = toHexString(Buffer.alloc(48, 1));
const pubkeyHex = toHex(Buffer.alloc(48, 1));
const versionedHash = ssz.deneb.VersionedHash.defaultValue();

const blockHeaderResponse: BlockHeaderResponse = {
  root,
@@ -61,7 +62,7 @@ export const testData: GenericServerTestCases<Endpoints> = {
    res: {data: blockHeaderResponse, meta: {executionOptimistic: true, finalized: false}},
  },
  getBlockHeaders: {
    args: {slot: 1, parentRoot: toHexString(root)},
    args: {slot: 1, parentRoot: toHex(root)},
    res: {data: [blockHeaderResponse], meta: {executionOptimistic: true, finalized: false}},
  },
  getBlockRoot: {
@@ -69,12 +70,12 @@ export const testData: GenericServerTestCases<Endpoints> = {
    res: {data: {root}, meta: {executionOptimistic: true, finalized: false}},
  },
  publishBlock: {
    args: {signedBlockOrContents: ssz.electra.SignedBlockContents.defaultValue()},
    args: {signedBlockContents: ssz.electra.SignedBlockContents.defaultValue()},
    res: undefined,
  },
  publishBlockV2: {
    args: {
      signedBlockOrContents: ssz.electra.SignedBlockContents.defaultValue(),
      signedBlockContents: ssz.electra.SignedBlockContents.defaultValue(),
      broadcastValidation: BroadcastValidation.consensus,
    },
    res: undefined,
@@ -97,6 +98,13 @@ export const testData: GenericServerTestCases<Endpoints> = {
      meta: {executionOptimistic: true, finalized: false, version: ForkName.electra},
    },
  },
  getBlobs: {
    args: {blockId: "head", versionedHashes: [toHex(versionedHash)]},
    res: {
      data: [ssz.deneb.Blob.defaultValue()],
      meta: {executionOptimistic: true, finalized: false},
    },
  },

  // pool

@@ -76,4 +76,11 @@ export const testData: GenericServerTestCases<Endpoints> = {
      meta: {executionOptimistic: true, finalized: false, version: ForkName.electra},
    },
  },
  getDebugDataColumnSidecars: {
    args: {blockId: "head", indices: [0]},
    res: {
      data: [ssz.fulu.DataColumnSidecar.defaultValue()],
      meta: {executionOptimistic: true, finalized: false, version: ForkName.fulu},
    },
  },
};

@@ -1,6 +1,12 @@
import {ForkName} from "@lodestar/params";
import {ssz} from "@lodestar/types";
import {Endpoints, EventData, EventType, blobSidecarSSE} from "../../../../src/beacon/routes/events.js";
import {
  Endpoints,
  EventData,
  EventType,
  blobSidecarSSE,
  dataColumnSidecarSSE,
} from "../../../../src/beacon/routes/events.js";
import {GenericServerTestCases} from "../../../utils/genericServerTest.js";

const abortController = new AbortController();
@@ -229,8 +235,8 @@ export const eventTestData: EventData = {
    }),
  },
  [EventType.payloadAttributes]: {
    version: ForkName.capella,
    data: ssz.capella.SSEPayloadAttributes.fromJson({
    version: ForkName.electra,
    data: ssz.electra.SSEPayloadAttributes.fromJson({
      proposer_index: "123",
      proposal_slot: "10",
      parent_block_number: "9",
@@ -248,6 +254,7 @@ export const eventTestData: EventData = {
            amount: "15640",
          },
        ],
        parent_beacon_block_root: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
      },
    }),
  },
@@ -259,4 +266,12 @@ export const eventTestData: EventData = {
    slot: "1",
    versioned_hash: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
  }),
  [EventType.dataColumnSidecar]: dataColumnSidecarSSE.fromJson({
    block_root: "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
    index: "1",
    slot: "1",
    kzg_commitments: [
      "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505",
    ],
  }),
};

@@ -20,7 +20,7 @@ export const testData: GenericServerTestCases<Endpoints> = {
      enr: "enr",
      p2pAddresses: ["p2pAddresses"],
      discoveryAddresses: ["discoveryAddresses"],
      metadata: ssz.altair.Metadata.defaultValue(),
      metadata: ssz.fulu.Metadata.defaultValue(),
    },
  },
},

@@ -11,7 +11,7 @@
  "bugs": {
    "url": "https://github.com/ChainSafe/lodestar/issues"
  },
  "version": "1.33.0",
  "version": "1.34.0",
  "type": "module",
  "exports": {
    ".": {
@@ -94,14 +94,14 @@
  "dependencies": {
    "@chainsafe/as-sha256": "^1.2.0",
    "@chainsafe/blst": "^2.2.0",
    "@chainsafe/discv5": "^11.0.0",
    "@chainsafe/enr": "^5.0.0",
    "@chainsafe/discv5": "^11.0.3",
    "@chainsafe/enr": "^5.0.1",
    "@chainsafe/libp2p-gossipsub": "^14.1.1",
    "@chainsafe/libp2p-noise": "^16.1.0",
    "@chainsafe/persistent-merkle-tree": "^1.2.0",
    "@chainsafe/persistent-merkle-tree": "^1.2.1",
    "@chainsafe/prometheus-gc-stats": "^1.0.0",
    "@chainsafe/pubkey-index-map": "^3.0.0",
    "@chainsafe/ssz": "^1.2.1",
    "@chainsafe/ssz": "^1.2.2",
    "@chainsafe/threads": "^1.11.2",
    "@crate-crypto/node-eth-kzg": "0.8.0",
    "@ethersproject/abi": "^5.7.0",
@@ -118,21 +118,21 @@
    "@libp2p/peer-id": "^5.1.0",
    "@libp2p/prometheus-metrics": "^4.3.15",
    "@libp2p/tcp": "^10.1.8",
    "@lodestar/api": "^1.33.0",
    "@lodestar/config": "^1.33.0",
    "@lodestar/db": "^1.33.0",
    "@lodestar/fork-choice": "^1.33.0",
    "@lodestar/light-client": "^1.33.0",
    "@lodestar/logger": "^1.33.0",
    "@lodestar/params": "^1.33.0",
    "@lodestar/reqresp": "^1.33.0",
    "@lodestar/state-transition": "^1.33.0",
    "@lodestar/types": "^1.33.0",
    "@lodestar/utils": "^1.33.0",
    "@lodestar/validator": "^1.33.0",
    "@lodestar/api": "^1.34.0",
    "@lodestar/config": "^1.34.0",
    "@lodestar/db": "^1.34.0",
    "@lodestar/fork-choice": "^1.34.0",
    "@lodestar/light-client": "^1.34.0",
    "@lodestar/logger": "^1.34.0",
    "@lodestar/params": "^1.34.0",
    "@lodestar/reqresp": "^1.34.0",
    "@lodestar/state-transition": "^1.34.0",
    "@lodestar/types": "^1.34.0",
    "@lodestar/utils": "^1.34.0",
    "@lodestar/validator": "^1.34.0",
    "@multiformats/multiaddr": "^12.1.3",
    "datastore-core": "^10.0.2",
    "datastore-level": "^11.0.1",
    "datastore-level": "^11.0.3",
    "deepmerge": "^4.3.1",
    "fastify": "^5.2.1",
    "interface-datastore": "^8.3.0",
@@ -150,7 +150,6 @@
    "xxhash-wasm": "1.0.2"
  },
  "devDependencies": {
    "@types/datastore-level": "^3.0.0",
    "@types/qs": "^6.9.7",
    "@types/tmp": "^0.2.3",
    "it-drain": "^3.0.3",

@@ -1,46 +1,61 @@
import {routes} from "@lodestar/api";
import {ApiError, ApplicationMethods} from "@lodestar/api/server";
import {
  ForkName,
  ForkPostBellatrix,
  ForkPostFulu,
  NUMBER_OF_COLUMNS,
  SLOTS_PER_HISTORICAL_ROOT,
  isForkPostBellatrix,
  isForkPostDeneb,
  isForkPostElectra,
  isForkPostFulu,
} from "@lodestar/params";
import {
  computeEpochAtSlot,
  computeTimeAtSlot,
  reconstructFullBlockOrContents,
  reconstructSignedBlockContents,
  signedBeaconBlockToBlinded,
} from "@lodestar/state-transition";
import {
  ProducedBlockSource,
  SignedBeaconBlock,
  SignedBeaconBlockOrContents,
  SignedBlindedBeaconBlock,
  SignedBlockContents,
  WithOptionalBytes,
  deneb,
  isSignedBlockContents,
  fulu,
  isDenebBlockContents,
  sszTypesFor,
} from "@lodestar/types";
import {fromHex, sleep, toHex, toRootHex} from "@lodestar/utils";
import {
  BlobsSource,
  BlockInput,
  BlockInputDataBlobs,
  BlockInputAvailableData,
  BlockInputBlobs,
  BlockInputDataColumns,
  BlockInputType,
  BlockSource,
  DataColumnsSource,
  ImportBlockOpts,
  getBlockInput,
} from "../../../../chain/blocks/types.js";
import {verifyBlocksInEpoch} from "../../../../chain/blocks/verifyBlock.js";
import {BeaconChain} from "../../../../chain/chain.js";
import {BlockError, BlockErrorCode, BlockGossipError} from "../../../../chain/errors/index.js";
import {
  BlockType,
  ProduceFullBellatrix,
  ProduceFullDeneb,
  ProduceFullFulu,
} from "../../../../chain/produceBlock/index.js";
import {validateGossipBlock} from "../../../../chain/validation/block.js";
import {OpSource} from "../../../../chain/validatorMonitor.js";
import {NetworkEvent} from "../../../../network/index.js";
import {computeBlobSidecars, kzgCommitmentToVersionedHash} from "../../../../util/blobs.js";
import {getBlobSidecars, kzgCommitmentToVersionedHash, reconstructBlobs} from "../../../../util/blobs.js";
import {getDataColumnSidecarsFromBlock} from "../../../../util/dataColumns.js";
import {isOptimisticBlock} from "../../../../util/forkChoice.js";
import {kzg} from "../../../../util/kzg.js";
import {promiseAllMaybeAsync} from "../../../../util/promises.js";
import {ApiModules} from "../../types.js";
import {assertUniqueItems} from "../../utils.js";
@@ -70,25 +85,60 @@ export function getBeaconBlockApi({
  "chain" | "config" | "metrics" | "network" | "db"
>): ApplicationMethods<routes.beacon.block.Endpoints> {
  const publishBlock: ApplicationMethods<routes.beacon.block.Endpoints>["publishBlockV2"] = async (
    {signedBlockOrContents, broadcastValidation},
    {signedBlockContents, broadcastValidation},
    _context,
    opts: PublishBlockOpts = {}
  ) => {
    const seenTimestampSec = Date.now() / 1000;
    let blockForImport: BlockInput, signedBlock: SignedBeaconBlock, blobSidecars: deneb.BlobSidecars;
    const signedBlock = signedBlockContents.signedBlock;
    const slot = signedBlock.message.slot;
    const fork = config.getForkName(slot);
    const blockRoot = toRootHex(chain.config.getForkTypes(slot).BeaconBlock.hashTreeRoot(signedBlock.message));

    let blockForImport: BlockInput, blobSidecars: deneb.BlobSidecars, dataColumnSidecars: fulu.DataColumnSidecars;

    if (isDenebBlockContents(signedBlockContents)) {
      let blockData: BlockInputAvailableData;
      if (isForkPostFulu(fork)) {
        const timer = metrics?.peerDas.dataColumnSidecarComputationTime.startTimer();
        // If the block was produced by this node, we will already have computed cells
        // Otherwise, we will compute them from the blobs in this function
        const cells =
          (chain.blockProductionCache.get(blockRoot) as ProduceFullFulu)?.cells ??
          signedBlockContents.blobs.map((blob) => kzg.computeCells(blob));
        const cellsAndProofs = cells.map((rowCells, rowIndex) => ({
          cells: rowCells,
          proofs: signedBlockContents.kzgProofs.slice(rowIndex * NUMBER_OF_COLUMNS, (rowIndex + 1) * NUMBER_OF_COLUMNS),
        }));
        dataColumnSidecars = getDataColumnSidecarsFromBlock(
          config,
          signedBlock as SignedBeaconBlock<ForkPostFulu>,
          cellsAndProofs
        );
        timer?.();
        blockData = {
          fork,
          dataColumns: dataColumnSidecars,
          dataColumnsBytes: dataColumnSidecars.map(() => null),
          dataColumnsSource: DataColumnsSource.api,
        } as BlockInputDataColumns;
        blobSidecars = [];
      } else if (isForkPostDeneb(fork)) {
        blobSidecars = getBlobSidecars(config, signedBlock, signedBlockContents.blobs, signedBlockContents.kzgProofs);
        blockData = {
          fork,
          blobs: blobSidecars,
          blobsSource: BlobsSource.api,
        } as BlockInputBlobs;
        dataColumnSidecars = [];
      } else {
        throw Error(`Invalid data fork=${fork} for publish`);
      }

    if (isSignedBlockContents(signedBlockOrContents)) {
      ({signedBlock} = signedBlockOrContents);
      blobSidecars = computeBlobSidecars(config, signedBlock, signedBlockOrContents);
      const blockData = {
        fork: config.getForkName(signedBlock.message.slot),
        blobs: blobSidecars,
        blobsSource: BlobsSource.api,
      } as BlockInputDataBlobs;
      blockForImport = getBlockInput.availableData(config, signedBlock, BlockSource.api, blockData);
    } else {
      signedBlock = signedBlockOrContents;
      blobSidecars = [];
      dataColumnSidecars = [];
      blockForImport = getBlockInput.preData(config, signedBlock, BlockSource.api);
    }

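The `kzgProofs` layout this slicing assumes is one cell proof per column per blob, flattened blob by blob; a minimal sketch (editor's illustration, hypothetical sizes) of the index arithmetic:

// Proofs for blob i occupy indices [i * NUMBER_OF_COLUMNS, (i + 1) * NUMBER_OF_COLUMNS).
const NUMBER_OF_COLUMNS = 128; // mainnet preset value
const blobCount = 3;
const kzgProofs = Array.from({length: blobCount * NUMBER_OF_COLUMNS}, (_, i) => i);
const proofsForBlob1 = kzgProofs.slice(1 * NUMBER_OF_COLUMNS, 2 * NUMBER_OF_COLUMNS);
console.log(proofsForBlob1[0], proofsForBlob1.length); // 128 128
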
@@ -97,13 +147,9 @@ export function getBeaconBlockApi({
    broadcastValidation = broadcastValidation ?? routes.beacon.BroadcastValidation.gossip;
    // if block is locally produced, full or blinded, it already is 'consensus' validated as it went through
    // state transition to produce the stateRoot
    const slot = signedBlock.message.slot;
    const fork = config.getForkName(slot);
    const blockRoot = toRootHex(chain.config.getForkTypes(slot).BeaconBlock.hashTreeRoot(signedBlock.message));
    // bodyRoot should be the same as the produced block's
    const bodyRoot = toRootHex(chain.config.getForkTypes(slot).BeaconBlockBody.hashTreeRoot(signedBlock.message.body));
    const blockLocallyProduced =
      chain.producedBlockRoot.has(blockRoot) || chain.producedBlindedBlockRoot.has(blockRoot);
    const blockLocallyProduced = chain.blockProductionCache.has(blockRoot);
    const valLogMeta = {slot, blockRoot, bodyRoot, broadcastValidation, blockLocallyProduced};

    switch (broadcastValidation) {
@@ -217,14 +263,16 @@ export function getBeaconBlockApi({
    chain.logger.info("Publishing block", valLogMeta);
    const publishPromises = [
      // Send the block, regardless of whether or not it is valid. The API
      // specification is very clear that this is the desired behaviour.
      // specification is very clear that this is the desired behavior.
      //
      // i) Publish blobs and block before importing so that network can see them asap
      // ii) publish block first because
      //    a) as soon as node sees block they can start processing it while blobs arrive
      // - Publish blobs and block before importing so that network can see them asap
      // - Publish block first because
      //    a) as soon as node sees block they can start processing it while data is in transit
      //    b) getting block first allows nodes to use getBlobs from local ELs and save
      //       import latency and hopefully bandwidth
      () => network.publishBeaconBlock(signedBlock) as Promise<unknown>,
      //
      () => network.publishBeaconBlock(signedBlock),
      ...dataColumnSidecars.map((dataColumnSidecar) => () => network.publishDataColumnSidecar(dataColumnSidecar)),
      ...blobSidecars.map((blobSidecar) => () => network.publishBlobSidecar(blobSidecar)),
      () =>
        // there is no rush to persist block since we published it to gossip anyway
@@ -240,28 +288,64 @@ export function getBeaconBlockApi({
          throw e;
        }),
    ];
    await promiseAllMaybeAsync(publishPromises);
    const sentPeersArr = await promiseAllMaybeAsync<number | void>(publishPromises);

    if (chain.emitter.listenerCount(routes.events.EventType.blockGossip)) {
      chain.emitter.emit(routes.events.EventType.blockGossip, {slot, block: blockRoot});
    if (isForkPostFulu(fork)) {
      let columnsPublishedWithZeroPeers = 0;
      // sent peers per topic are logged in network.publishGossip(), here we only track metrics for it
      // starting from fulu, we have to push to 128 subnets so we need to make sure we have enough sent peers per topic
      // + 1 because we publish to beacon_block first
      for (let i = 0; i < dataColumnSidecars.length; i++) {
        // + 1 because we publish to beacon_block first
        const sentPeers = sentPeersArr[i + 1] as number;
        // sent peers could be 0 as we set `allowPublishToZeroTopicPeers=true` in network.publishDataColumnSidecar() api
        metrics?.dataColumns.sentPeersPerSubnet.observe(sentPeers);
        if (sentPeers === 0) {
          columnsPublishedWithZeroPeers++;
        }
      }
      if (columnsPublishedWithZeroPeers > 0) {
        chain.logger.warn("Published data columns to 0 peers, increased risk of reorg", {
          slot,
          blockRoot,
          columns: columnsPublishedWithZeroPeers,
        });
      }
    }

    if (
      chain.emitter.listenerCount(routes.events.EventType.blobSidecar) &&
      blockForImport.type === BlockInputType.availableData &&
      (blockForImport.blockData.fork === ForkName.deneb || blockForImport.blockData.fork === ForkName.electra)
    ) {
      const {blobs} = blockForImport.blockData;
    chain.emitter.emit(routes.events.EventType.blockGossip, {slot, block: blockRoot});

      for (const blobSidecar of blobs) {
        const {index, kzgCommitment} = blobSidecar;
        chain.emitter.emit(routes.events.EventType.blobSidecar, {
          blockRoot,
          slot,
          index,
          kzgCommitment: toHex(kzgCommitment),
          versionedHash: toHex(kzgCommitmentToVersionedHash(kzgCommitment)),
        });
    if (blockForImport.type === BlockInputType.availableData) {
      if (isForkPostFulu(blockForImport.blockData.fork)) {
        const {dataColumns} = blockForImport.blockData as BlockInputDataColumns;
        metrics?.dataColumns.bySource.inc({source: DataColumnsSource.api}, dataColumns.length);

        if (chain.emitter.listenerCount(routes.events.EventType.dataColumnSidecar)) {
          for (const dataColumnSidecar of dataColumns) {
            chain.emitter.emit(routes.events.EventType.dataColumnSidecar, {
              blockRoot,
              slot,
              index: dataColumnSidecar.index,
              kzgCommitments: dataColumnSidecar.kzgCommitments.map(toHex),
            });
          }
        }
      } else if (
        isForkPostDeneb(blockForImport.blockData.fork) &&
        chain.emitter.listenerCount(routes.events.EventType.blobSidecar)
      ) {
        const {blobs} = blockForImport.blockData as BlockInputBlobs;

        for (const blobSidecar of blobs) {
          const {index, kzgCommitment} = blobSidecar;
          chain.emitter.emit(routes.events.EventType.blobSidecar, {
            blockRoot,
            slot,
            index,
            kzgCommitment: toHex(kzgCommitment),
            versionedHash: toHex(kzgCommitmentToVersionedHash(kzgCommitment)),
          });
        }
      }
    }
  };
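To make the `i + 1` offset concrete, a minimal sketch (editor's illustration, hypothetical values) of how publish results line up with `publishPromises`:

// publishPromises order: [beacon_block, ...dataColumnSidecars, ...blobSidecars, persist],
// so the sent-peer count for data column i sits at index i + 1 in sentPeersArr.
const sentPeersArr = [25, 3, 0, 7]; // hypothetical: block to 25 peers, columns 0..2 to 3, 0, 7
const dataColumnCount = 3;
for (let i = 0; i < dataColumnCount; i++) {
  const sentPeers = sentPeersArr[i + 1]; // skip the beacon_block entry
  if (sentPeers === 0) console.log(`column ${i} published to zero peers`);
}
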
@@ -280,20 +364,20 @@ export function getBeaconBlockApi({
    const fork = config.getForkName(slot);

    // Either the payload/blobs are cached from i) engine locally or ii) they are from the builder
    //
    // executionPayload can be null or a real payload in locally produced so check for presence of root
    const executionPayload = chain.producedBlockRoot.get(blockRoot);
    if (executionPayload !== undefined) {
    const producedResult = chain.blockProductionCache.get(blockRoot);
    if (producedResult !== undefined && producedResult.type !== BlockType.Blinded) {
      const source = ProducedBlockSource.engine;
      chain.logger.debug("Reconstructing signedBlockOrContents", {slot, blockRoot, source});
      chain.logger.debug("Reconstructing the full signed block contents", {slot, blockRoot, source});

      const contents = executionPayload
        ? (chain.producedContentsCache.get(toRootHex(executionPayload.blockHash)) ?? null)
        : null;
      const signedBlockOrContents = reconstructFullBlockOrContents(signedBlindedBlock, {executionPayload, contents});
      const signedBlockContents = reconstructSignedBlockContents(
        fork,
        signedBlindedBlock,
        (producedResult as ProduceFullBellatrix).executionPayload ?? null,
        (producedResult as ProduceFullDeneb).blobsBundle ?? null
      );

      chain.logger.info("Publishing assembled block", {slot, blockRoot, source});
      return publishBlock({signedBlockOrContents}, {...context, sszBytes: null}, opts);
      return publishBlock({signedBlockContents}, {...context, sszBytes: null}, opts);
    }

    const source = ProducedBlockSource.builder;
@@ -307,9 +391,9 @@ export function getBeaconBlockApi({
    } else {
      // TODO: After fulu is live and all builders support submitBlindedBlockV2, we can safely remove
      // this code block and related functions
      chain.logger.debug("Reconstructing signedBlockOrContents", {slot, blockRoot, source});
      chain.logger.debug("Reconstructing full signed block contents", {slot, blockRoot, source});

      const signedBlockOrContents = await reconstructBuilderBlockOrContents(chain, {
      const signedBlockContents = await reconstructBuilderSignedBlockContents(chain, {
        data: signedBlindedBlock,
        bytes: context?.sszBytes,
      });
@@ -319,7 +403,7 @@ export function getBeaconBlockApi({
      //
      // see: https://github.com/ChainSafe/lodestar/issues/5404
      chain.logger.info("Publishing assembled block", {slot, blockRoot, source});
      return publishBlock({signedBlockOrContents}, {...context, sszBytes: null}, {...opts, ignoreIfKnown: true});
      return publishBlock({signedBlockContents}, {...context, sszBytes: null}, {...opts, ignoreIfKnown: true});
    }
  };

@@ -528,7 +612,13 @@ export function getBeaconBlockApi({
      assertUniqueItems(indices, "Duplicate indices provided");

      const {block, executionOptimistic, finalized} = await getBlockResponse(chain, blockId);
      const blockRoot = config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message);
      const fork = config.getForkName(block.message.slot);

      if (isForkPostFulu(fork)) {
        throw new ApiError(400, `Use getBlobs to retrieve blobs for post-fulu fork=${fork}`);
      }

      const blockRoot = sszTypesFor(fork).BeaconBlock.hashTreeRoot(block.message);

      let {blobSidecars} = (await db.blobSidecars.get(blockRoot)) ?? {};
      if (!blobSidecars) {
@@ -548,20 +638,95 @@ export function getBeaconBlockApi({
        },
      };
    },

    async getBlobs({blockId, versionedHashes}) {
      assertUniqueItems(versionedHashes, "Duplicate versioned hashes provided");

      const {block, executionOptimistic, finalized} = await getBlockResponse(chain, blockId);
      const fork = config.getForkName(block.message.slot);
      const blockRoot = sszTypesFor(fork).BeaconBlock.hashTreeRoot(block.message);

      let blobs: deneb.Blobs;

      if (isForkPostFulu(fork)) {
        const {targetCustodyGroupCount} = chain.custodyConfig;
        if (targetCustodyGroupCount < NUMBER_OF_COLUMNS / 2) {
          throw Error(
            `Custody group count of ${targetCustodyGroupCount} is not sufficient to serve blobs, must custody at least ${NUMBER_OF_COLUMNS / 2} data columns`
          );
        }

        let dataColumnSidecars = await db.dataColumnSidecar.values(blockRoot);
        if (dataColumnSidecars.length === 0) {
          dataColumnSidecars = await db.dataColumnSidecarArchive.values(block.message.slot);
        }

        if (dataColumnSidecars.length === 0) {
          throw new ApiError(
            404,
            `dataColumnSidecars not found in db for slot=${block.message.slot} root=${toRootHex(blockRoot)}`
          );
        }

        blobs = await reconstructBlobs(dataColumnSidecars);
      } else if (isForkPostDeneb(fork)) {
        let {blobSidecars} = (await db.blobSidecars.get(blockRoot)) ?? {};
        if (!blobSidecars) {
          ({blobSidecars} = (await db.blobSidecarsArchive.get(block.message.slot)) ?? {});
        }

        if (!blobSidecars) {
          throw new ApiError(
            404,
            `blobSidecars not found in db for slot=${block.message.slot} root=${toRootHex(blockRoot)}`
          );
        }

        blobs = blobSidecars.sort((a, b) => a.index - b.index).map(({blob}) => blob);
      } else {
        blobs = [];
      }

      if (blobs.length && versionedHashes?.length) {
        const kzgCommitments = (block as deneb.SignedBeaconBlock).message.body.blobKzgCommitments;

        const blockVersionedHashes = kzgCommitments.map((commitment) =>
          toHex(kzgCommitmentToVersionedHash(commitment))
        );

        const requestedIndices: number[] = [];
        for (const requestedHash of versionedHashes) {
          const index = blockVersionedHashes.findIndex((hash) => hash === requestedHash);
          if (index === -1) {
            throw new ApiError(400, `Versioned hash ${requestedHash} not found in block`);
          }
          requestedIndices.push(index);
        }

        blobs = requestedIndices.sort((a, b) => a - b).map((index) => blobs[index]);
      }

      return {
        data: blobs,
        meta: {
          executionOptimistic,
          finalized,
        },
      };
    },
  };
}

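The `NUMBER_OF_COLUMNS / 2` threshold reflects that erasure-coded blobs can only be reconstructed from at least half of the columns; a worked example (editor's illustration, hypothetical custody setting) with the mainnet value:

// With NUMBER_OF_COLUMNS = 128, reconstruction needs >= 64 columns, so a node
// custodying fewer than 64 column groups cannot serve getBlobs post-Fulu.
const NUMBER_OF_COLUMNS = 128;
const targetCustodyGroupCount = 8; // hypothetical custody configuration
const canServeBlobs = targetCustodyGroupCount >= NUMBER_OF_COLUMNS / 2;
console.log(canServeBlobs); // false -> the API throws for this node
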
async function reconstructBuilderBlockOrContents(
async function reconstructBuilderSignedBlockContents(
  chain: ApiModules["chain"],
  signedBlindedBlock: WithOptionalBytes<SignedBlindedBeaconBlock>
): Promise<SignedBeaconBlockOrContents> {
): Promise<SignedBlockContents> {
  const executionBuilder = chain.executionBuilder;
  if (!executionBuilder) {
    throw Error("executionBuilder required to publish SignedBlindedBeaconBlock");
  }

  const signedBlockOrContents = await executionBuilder.submitBlindedBlock(signedBlindedBlock);
  return signedBlockOrContents;
  return executionBuilder.submitBlindedBlock(signedBlindedBlock);
}

async function submitBlindedBlockToBuilder(

@@ -1,7 +1,7 @@
import {routes} from "@lodestar/api";
import {ApplicationMethods} from "@lodestar/api/server";
import {ForkPostElectra, ForkPreElectra, SYNC_COMMITTEE_SUBNET_SIZE, isForkPostElectra} from "@lodestar/params";
import {Attestation, Epoch, SingleAttestation, isElectraAttestation, ssz} from "@lodestar/types";
import {Attestation, Epoch, SingleAttestation, isElectraAttestation, ssz, sszTypesFor} from "@lodestar/types";
import {
  AttestationError,
  AttestationErrorCode,
@@ -96,6 +96,7 @@ export function getBeaconPoolApi({
    },

    async submitPoolAttestationsV2({signedAttestations}) {
      const fork = chain.config.getForkName(chain.clock.currentSlot);
      const seenTimestampSec = Date.now() / 1000;
      const failures: FailureList = [];
      // api attestation has high priority, we allow them to be added to pool even when it's late
@@ -106,13 +107,12 @@ export function getBeaconPoolApi({
      await Promise.all(
        signedAttestations.map(async (attestation, i) => {
          try {
            const fork = chain.config.getForkName(chain.clock.currentSlot);
            const validateFn = () => validateApiAttestation(fork, chain, {attestation, serializedData: null});
            const {slot, beaconBlockRoot} = attestation.data;
            // when a validator is configured with multiple beacon node urls, this attestation data may come from another beacon node
            // and the block hasn't been in our forkchoice since we haven't seen / processed that block
            // see https://github.com/ChainSafe/lodestar/issues/5098
            const {indexedAttestation, subnet, attDataRootHex, committeeIndex, committeeValidatorIndex, committeeSize} =
            const {indexedAttestation, subnet, attDataRootHex, committeeIndex, validatorCommitteeIndex, committeeSize} =
              await validateGossipFnRetryUnknownRoot(validateFn, network, chain, slot, beaconBlockRoot);

            if (network.shouldAggregate(subnet, slot)) {
@@ -120,7 +120,7 @@ export function getBeaconPoolApi({
                committeeIndex,
                attestation,
                attDataRootHex,
                committeeValidatorIndex,
                validatorCommitteeIndex,
                committeeSize,
                priority
              );
@@ -163,7 +163,7 @@ export function getBeaconPoolApi({
            failures.push({index: i, message: (e as Error).message});
            logger.error(`Error on submitPoolAttestations [${i}]`, logCtx, e as Error);
            if (e instanceof AttestationError && e.action === GossipAction.REJECT) {
              chain.persistInvalidSszValue(ssz.phase0.Attestation, attestation, "api_reject");
              chain.persistInvalidSszValue(sszTypesFor(fork).SingleAttestation, attestation, "api_reject");
            }
          }
        })

@@ -1,6 +1,12 @@
import {routes} from "@lodestar/api";
import {ApplicationMethods} from "@lodestar/api/server";
import {EPOCHS_PER_HISTORICAL_VECTOR, isForkPostElectra, isForkPostFulu} from "@lodestar/params";
import {
  EPOCHS_PER_HISTORICAL_VECTOR,
  SLOTS_PER_EPOCH,
  SYNC_COMMITTEE_SUBNET_SIZE,
  isForkPostElectra,
  isForkPostFulu,
} from "@lodestar/params";
import {
  BeaconStateAllForks,
  BeaconStateElectra,
@@ -12,8 +18,7 @@ import {
  getRandaoMix,
  loadState,
} from "@lodestar/state-transition";
import {getValidatorStatus} from "@lodestar/types";
import {fromHex} from "@lodestar/utils";
import {ValidatorIndex, getValidatorStatus} from "@lodestar/types";
import {ApiError} from "../../errors.js";
import {ApiModules} from "../../types.js";
import {assertUniqueItems} from "../../utils.js";
@@ -210,16 +215,13 @@ export function getBeaconStateApi({
      const headState = chain.getHeadState();
      const balances: routes.beacon.ValidatorBalance[] = [];
      for (const id of validatorIds) {
        if (typeof id === "number") {
          if (state.validators.length <= id) {
            continue;
          }
          balances.push({index: id, balance: state.balances.get(id)});
        } else {
          const index = headState.epochCtx.pubkey2index.get(fromHex(id));
          if (index != null && index <= state.validators.length) {
            balances.push({index, balance: state.balances.get(index)});
          }
        const resp = getStateValidatorIndex(id, state, headState.epochCtx.pubkey2index);

        if (resp.valid) {
          balances.push({
            index: resp.validatorIndex,
            balance: state.balances.get(resp.validatorIndex),
          });
        }
      }
      return {
@@ -252,8 +254,19 @@ export function getBeaconStateApi({
        throw new ApiError(400, `No cached state available for stateId: ${stateId}`);
      }

      const epoch = filters.epoch ?? computeEpochAtSlot(state.slot);
      const stateEpoch = computeEpochAtSlot(state.slot);
      const epoch = filters.epoch ?? stateEpoch;
      const startSlot = computeStartSlotAtEpoch(epoch);
      const endSlot = startSlot + SLOTS_PER_EPOCH - 1;

      if (Math.abs(epoch - stateEpoch) > 1) {
        throw new ApiError(400, `Epoch ${epoch} must be within one epoch of state epoch ${stateEpoch}`);
      }

      if (filters.slot !== undefined && (filters.slot < startSlot || filters.slot > endSlot)) {
        throw new ApiError(400, `Slot ${filters.slot} is not in epoch ${epoch}`);
      }

      const decisionRoot = stateCached.epochCtx.getShufflingDecisionRoot(epoch);
      const shuffling = await chain.shufflingCache.get(epoch, decisionRoot);
      if (!shuffling) {
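A worked example (editor's illustration, mainnet `SLOTS_PER_EPOCH = 32`) of the window these checks enforce:

const SLOTS_PER_EPOCH = 32;
const stateEpoch = 100;
// Accepted epochs: 99, 100, 101 (|epoch - stateEpoch| <= 1)
const epoch = 101;
const startSlot = epoch * SLOTS_PER_EPOCH; // computeStartSlotAtEpoch(101) = 3232
const endSlot = startSlot + SLOTS_PER_EPOCH - 1; // 3263
// A slot filter of 3300 would be rejected: it does not fall inside epoch 101.
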
@@ -309,12 +322,18 @@ export function getBeaconStateApi({
      }

      const syncCommitteeCache = stateCached.epochCtx.getIndexedSyncCommitteeAtEpoch(epoch ?? stateEpoch);
      const validatorIndices = new Array<ValidatorIndex>(...syncCommitteeCache.validatorIndices);

      // Subcommittee assignments of the current sync committee
      const validatorAggregates: ValidatorIndex[][] = [];
      for (let i = 0; i < validatorIndices.length; i += SYNC_COMMITTEE_SUBNET_SIZE) {
        validatorAggregates.push(validatorIndices.slice(i, i + SYNC_COMMITTEE_SUBNET_SIZE));
      }

      return {
        data: {
          validators: new Array(...syncCommitteeCache.validatorIndices),
          // TODO: This is not used by the validator and will be deprecated soon
          validatorAggregates: [],
          validators: validatorIndices,
          validatorAggregates,
        },
        meta: {executionOptimistic, finalized},
      };

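With the mainnet constants this slicing yields four subcommittees of 128; a minimal sketch (editor's illustration, hypothetical indices):

// SYNC_COMMITTEE_SIZE = 512 members, SYNC_COMMITTEE_SUBNET_SIZE = 128 per subnet
const SYNC_COMMITTEE_SUBNET_SIZE = 128;
const validatorIndices = Array.from({length: 512}, (_, i) => i);
const validatorAggregates: number[][] = [];
for (let i = 0; i < validatorIndices.length; i += SYNC_COMMITTEE_SUBNET_SIZE) {
  validatorAggregates.push(validatorIndices.slice(i, i + SYNC_COMMITTEE_SUBNET_SIZE));
}
console.log(validatorAggregates.length); // 4 subcommittees of 128
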
@@ -3,15 +3,19 @@ import {ApplicationMethods} from "@lodestar/api/server";
import {ExecutionStatus} from "@lodestar/fork-choice";
import {ZERO_HASH_HEX} from "@lodestar/params";
import {BeaconState} from "@lodestar/types";
import {toRootHex} from "@lodestar/utils";
import {isOptimisticBlock} from "../../../util/forkChoice.js";
import {getStateSlotFromBytes} from "../../../util/multifork.js";
import {getBlockResponse} from "../beacon/blocks/utils.js";
import {getStateResponseWithRegen} from "../beacon/state/utils.js";
import {ApiModules} from "../types.js";
import {assertUniqueItems} from "../utils.js";

export function getDebugApi({
  chain,
  config,
}: Pick<ApiModules, "chain" | "config">): ApplicationMethods<routes.debug.Endpoints> {
  db,
}: Pick<ApiModules, "chain" | "config" | "db">): ApplicationMethods<routes.debug.Endpoints> {
  return {
    async getDebugChainHeadsV2() {
      const heads = chain.forkChoice.getHeads();
@@ -85,5 +89,31 @@ export function getDebugApi({
        },
      };
    },

    async getDebugDataColumnSidecars({blockId, indices}) {
      assertUniqueItems(indices, "Duplicate indices provided");

      const {block, executionOptimistic, finalized} = await getBlockResponse(chain, blockId);
      const blockRoot = config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message);

      let dataColumnSidecars = await db.dataColumnSidecar.values(blockRoot);

      if (dataColumnSidecars.length === 0) {
        dataColumnSidecars = await db.dataColumnSidecarArchive.values(block.message.slot);
      }

      if (dataColumnSidecars.length === 0) {
        throw Error(`dataColumnSidecars not found in db for slot=${block.message.slot} root=${toRootHex(blockRoot)}`);
      }

      return {
        data: indices ? dataColumnSidecars.filter(({index}) => indices.includes(index)) : dataColumnSidecars,
        meta: {
          executionOptimistic,
          finalized,
          version: config.getForkName(block.message.slot),
        },
      };
    },
  };
}

@@ -10,7 +10,7 @@ export function getEventsApi({
  const onAbortFns: (() => void)[] = [];

  for (const topic of topics) {
    // biome-ignore lint/suspicious/noExplicitAny: <explanation>
    // biome-ignore lint/suspicious/noExplicitAny: We need to use `any` type here
    const handler = (data: any): void => {
      // TODO: What happens if this handler throws? Does it break the other chain.emitter listeners?

@@ -3,9 +3,8 @@ import {routes} from "@lodestar/api";
import {ApplicationMethods} from "@lodestar/api/server";
import {ExecutionStatus} from "@lodestar/fork-choice";
import {
  ForkName,
  ForkPostBellatrix,
  ForkPostDeneb,
  ForkPreDeneb,
  ForkSeq,
  GENESIS_SLOT,
  SLOTS_PER_EPOCH,
@@ -44,7 +43,6 @@ import {
  Wei,
  bellatrix,
  getValidatorStatus,
  isBlockContents,
  phase0,
  ssz,
} from "@lodestar/types";
@@ -68,14 +66,16 @@ import {
} from "../../../chain/errors/index.js";
import {ChainEvent, CheckpointHex, CommonBlockBody} from "../../../chain/index.js";
import {SCHEDULER_LOOKAHEAD_FACTOR} from "../../../chain/prepareNextSlot.js";
import {BlockType, ProduceFullDeneb} from "../../../chain/produceBlock/index.js";
import {RegenCaller} from "../../../chain/regen/index.js";
import {validateApiAggregateAndProof} from "../../../chain/validation/index.js";
import {validateSyncCommitteeGossipContributionAndProof} from "../../../chain/validation/syncCommitteeContributionAndProof.js";
import {ZERO_HASH} from "../../../constants/index.js";
import {NoBidReceived} from "../../../execution/builder/http.js";
import {BuilderStatus, NoBidReceived} from "../../../execution/builder/http.js";
import {validateGossipFnRetryUnknownRoot} from "../../../network/processor/gossipHandlers.js";
import {CommitteeSubscription} from "../../../network/subnets/index.js";
import {SyncState} from "../../../sync/index.js";
import {callInNextEventLoop} from "../../../util/eventLoop.js";
import {isOptimisticBlock} from "../../../util/forkChoice.js";
import {getDefaultGraffiti, toGraffitiBytes} from "../../../util/graffiti.js";
import {getLodestarClientVersion} from "../../../util/metadata.js";
@@ -112,17 +112,17 @@ const BLOCK_PRODUCTION_RACE_CUTOFF_MS = 2_000;
/** Overall timeout for execution and block production apis */
const BLOCK_PRODUCTION_RACE_TIMEOUT_MS = 12_000;

type ProduceBlockOrContentsRes = {executionPayloadValue: Wei; consensusBlockValue: Wei} & (
  | {data: BeaconBlock<ForkPreDeneb>; version: ForkPreDeneb}
  | {data: BlockContents; version: ForkPostDeneb}
);
type ProduceBlockContentsRes = {executionPayloadValue: Wei; consensusBlockValue: Wei} & {
  data: BlockContents;
  version: ForkName;
};
type ProduceBlindedBlockRes = {executionPayloadValue: Wei; consensusBlockValue: Wei} & {
  data: BlindedBeaconBlock;
  version: ForkPostBellatrix;
};

type ProduceFullOrBlindedBlockOrContentsRes = {executionPayloadSource: ProducedBlockSource} & (
  | (ProduceBlockOrContentsRes & {executionPayloadBlinded: false})
type ProduceBlindedBlockOrBlockContentsRes = {executionPayloadSource: ProducedBlockSource} & (
  | (ProduceBlockContentsRes & {executionPayloadBlinded: false})
  | (ProduceBlindedBlockRes & {executionPayloadBlinded: true})
);

@@ -423,10 +423,17 @@ export function getValidatorApi(

    // Error early for builder if builder flow not active
    if (!chain.executionBuilder) {
      throw Error("Execution builder not set");
      throw Error("External builder not configured");
    }
    if (!chain.executionBuilder.status) {
      throw Error("Execution builder disabled");

    switch (chain.executionBuilder.status) {
      case BuilderStatus.disabled:
        throw Error("External builder disabled due to failed status check");
      case BuilderStatus.circuitBreaker:
        throw Error("External builder circuit breaker is activated");
      case BuilderStatus.enabled:
        // continue
        break;
    }

    let timer: undefined | ((opts: {source: ProducedBlockSource}) => number);
@@ -462,7 +469,7 @@ export function getValidatorApi(
    }
  }

  async function produceEngineFullBlockOrContents(
  async function produceEngineBlockContents(
    slot: Slot,
    randaoReveal: BLSSignature,
    graffiti: Bytes32,
@@ -477,7 +484,7 @@ export function getValidatorApi(
      parentBlockRoot: Root;
      parentSlot: Slot;
    }
  ): Promise<ProduceBlockOrContentsRes & {shouldOverrideBuilder?: boolean}> {
  ): Promise<ProduceBlockContentsRes & {shouldOverrideBuilder?: boolean}> {
    const source = ProducedBlockSource.engine;
    metrics?.blockProductionRequests.inc({source});

@@ -505,24 +512,36 @@ export function getValidatorApi(
      metrics?.blockProductionNumAggregated.observe({source}, block.body.attestations.length);
      metrics?.blockProductionConsensusBlockValue.observe({source}, Number(formatWeiToEth(consensusBlockValue)));
      metrics?.blockProductionExecutionPayloadValue.observe({source}, Number(formatWeiToEth(executionPayloadValue)));

      const blockRoot = toRootHex(config.getForkTypes(slot).BeaconBlock.hashTreeRoot(block));
      logger.verbose("Produced execution block", {
        slot,
        executionPayloadValue,
        consensusBlockValue,
        root: toRootHex(config.getForkTypes(slot).BeaconBlock.hashTreeRoot(block)),
        root: blockRoot,
      });
      if (chain.opts.persistProducedBlocks) {
        void chain.persistBlock(block, "produced_engine_block");
      }
      if (isForkPostDeneb(version)) {
        const blockHash = toRootHex((block as bellatrix.BeaconBlock).body.executionPayload.blockHash);
        const contents = chain.producedContentsCache.get(blockHash);
        if (contents === undefined) {
          throw Error("contents missing in cache");
        const produceResult = chain.blockProductionCache.get(blockRoot);
        if (produceResult === undefined) {
          throw Error("production result missing in cache");
        }
        if (!isForkPostDeneb(produceResult.fork)) {
          throw Error("production result is for pre-deneb fork");
        }
        if (produceResult.type !== BlockType.Full) {
          throw Error("production result is not full block");
        }
        const blobsBundle = (produceResult as ProduceFullDeneb).blobsBundle;

        return {
          data: {block, ...contents} as BlockContents,
          data: {
            block,
            blobs: blobsBundle.blobs,
            kzgProofs: blobsBundle.proofs,
          },
          version,
          executionPayloadValue,
          consensusBlockValue,
@@ -530,7 +549,7 @@ export function getValidatorApi(
        };
      }

      return {data: block, version, executionPayloadValue, consensusBlockValue, shouldOverrideBuilder};
      return {data: {block}, version, executionPayloadValue, consensusBlockValue, shouldOverrideBuilder};
    } finally {
      if (timer) timer({source});
    }
@@ -544,7 +563,7 @@ export function getValidatorApi(
    _skipRandaoVerification?: boolean,
    builderBoostFactor?: bigint,
    {feeRecipient, builderSelection, strictFeeRecipientCheck}: routes.validator.ExtraProduceBlockOpts = {}
  ): Promise<ProduceFullOrBlindedBlockOrContentsRes> {
  ): Promise<ProduceBlindedBlockOrBlockContentsRes> {
    notWhileSyncing();
    await waitForSlot(slot); // Must never request for a future slot > currentSlot

@@ -623,7 +642,7 @@ export function getValidatorApi(
      : Promise.reject(new Error("Builder disabled"));

    const enginePromise = isEngineEnabled
      ? produceEngineFullBlockOrContents(slot, randaoReveal, graffitiBytes, {
      ? produceEngineBlockContents(slot, randaoReveal, graffitiBytes, {
          feeRecipient,
          strictFeeRecipientCheck,
          commonBlockBodyPromise,
@@ -660,32 +679,45 @@ export function getValidatorApi(
      signal: controller.signal,
    });

    logger.verbose("Producing common block body", loggerContext);
    const commonBlockBodyStartedAt = Date.now();
    // Ensure builder and engine HTTP requests are sent before starting common block body production
    // by deferring the call to the next event loop iteration, allowing pending I/O operations like
    // HTTP requests to be processed first and sent out early in the slot.
    callInNextEventLoop(() => {
      logger.verbose("Producing common block body", loggerContext);
      const commonBlockBodyStartedAt = Date.now();

    const produceCommonBlockBodyPromise = chain
      .produceCommonBlockBody({
        slot,
        parentBlockRoot,
        parentSlot,
        randaoReveal,
        graffiti: graffitiBytes,
      })
      .then((commonBlockBody) => {
        deferredCommonBlockBody.resolve(commonBlockBody);
        logger.verbose("Produced common block body", {
          ...loggerContext,
          durationMs: Date.now() - commonBlockBodyStartedAt,
        });
      })
      .catch(deferredCommonBlockBody.reject);
      chain
        .produceCommonBlockBody({
          slot,
          parentBlockRoot,
          parentSlot,
          randaoReveal,
          graffiti: graffitiBytes,
        })
        .then((commonBlockBody) => {
          deferredCommonBlockBody.resolve(commonBlockBody);
          logger.verbose("Produced common block body", {
            ...loggerContext,
            durationMs: Date.now() - commonBlockBodyStartedAt,
          });
        })
        .catch(deferredCommonBlockBody.reject);
    });

    const [[builder, engine]] = await Promise.all([blockProductionRacePromise, produceCommonBlockBodyPromise]);
    const [builder, engine] = await blockProductionRacePromise;

    if (builder.status === "pending" && engine.status === "pending") {
      throw Error("Builder and engine both failed to produce the block within timeout");
    }

    if (builder.status === "pending" && !isEngineEnabled) {
      throw Error("Builder failed to produce the block within timeout");
    }

    if (engine.status === "pending" && !isBuilderEnabled) {
      throw Error("Engine failed to produce the block within timeout");
    }

    if (isEngineEnabled) {
      if (engine.status === "rejected") {
        logger.warn(
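A minimal sketch (editor's assumption; the actual helper lives in `../../../util/eventLoop.js` and may differ) of the deferral pattern the comment above relies on:

// Defer work so pending I/O callbacks (the builder/engine HTTP requests above)
// get a chance to run and flush their sockets before CPU-heavy body production starts.
function callInNextEventLoop(fn: () => void): void {
  setTimeout(fn, 0); // or setImmediate(fn) in Node.js; both yield to the event loop first
}
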
@@ -804,8 +836,8 @@ export function getValidatorApi(

    if (engine.status === "fulfilled" && builder.status === "fulfilled") {
      const result = selectBlockProductionSource({
        builderBlockValue: builder.value.executionPayloadValue + builder.value.consensusBlockValue,
        engineBlockValue: engine.value.executionPayloadValue + engine.value.consensusBlockValue,
        builderExecutionPayloadValue: builder.value.executionPayloadValue,
        engineExecutionPayloadValue: engine.value.executionPayloadValue,
        builderBoostFactor,
        builderSelection,
      });
@@ -856,16 +888,8 @@ export function getValidatorApi(
      return {data, meta};
    }

    if (isBlockContents(data)) {
      const {block} = data;
      const blindedBlock = beaconBlockToBlinded(config, block as BeaconBlock<ForkPostBellatrix>);
      return {
        data: blindedBlock,
        meta: {...meta, executionPayloadBlinded: true},
      };
    }

    const blindedBlock = beaconBlockToBlinded(config, data as BeaconBlock<ForkPostBellatrix>);
    const {block} = data as BlockContents;
    const blindedBlock = beaconBlockToBlinded(config, block as BeaconBlock<ForkPostBellatrix>);
    return {
      data: blindedBlock,
      meta: {...meta, executionPayloadBlinded: true},
@@ -1186,12 +1210,12 @@ export function getValidatorApi(
      const pubkeys = getPubkeysForIndices(state.validators, indices);
      // Ensures `epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD <= current_epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD + 1`
      const syncCommitteeCache = state.epochCtx.getIndexedSyncCommitteeAtEpoch(epoch);
      const syncCommitteeValidatorIndexMap = syncCommitteeCache.validatorIndexMap;
      const validatorSyncCommitteeIndexMap = syncCommitteeCache.validatorIndexMap;

      const duties: routes.validator.SyncDuty[] = [];
      for (let i = 0, len = indices.length; i < len; i++) {
        const validatorIndex = indices[i];
        const validatorSyncCommitteeIndices = syncCommitteeValidatorIndexMap.get(validatorIndex);
        const validatorSyncCommitteeIndices = validatorSyncCommitteeIndexMap.get(validatorIndex);
        if (validatorSyncCommitteeIndices) {
          duties.push({
            pubkey: pubkeys[i],
@@ -1277,19 +1301,14 @@ export function getValidatorApi(
      // when a validator is configured with multiple beacon node urls, this attestation may come from another beacon node
      // and the block hasn't been in our forkchoice since we haven't seen / processed that block
      // see https://github.com/ChainSafe/lodestar/issues/5098
      const {indexedAttestation, committeeIndices, attDataRootHex} = await validateGossipFnRetryUnknownRoot(
        validateFn,
        network,
        chain,
        slot,
        beaconBlockRoot
      );
      const {indexedAttestation, committeeValidatorIndices, attDataRootHex} =
        await validateGossipFnRetryUnknownRoot(validateFn, network, chain, slot, beaconBlockRoot);

      const insertOutcome = chain.aggregatedAttestationPool.add(
        signedAggregateAndProof.message.aggregate,
        attDataRootHex,
        indexedAttestation.attestingIndices.length,
        committeeIndices
        committeeValidatorIndices
      );
      metrics?.opPool.aggregatedAttestationPool.apiInsertOutcome.inc({insertOutcome});

@@ -1483,7 +1502,7 @@ export function getValidatorApi(

    async registerValidator({registrations}) {
      if (!chain.executionBuilder) {
        throw Error("Execution builder not enabled");
        throw Error("External builder not configured");
      }

      // should only send active or pending validators to the builder

@@ -47,13 +47,13 @@ export function getPubkeysForIndices(

export function selectBlockProductionSource({
  builderSelection,
  engineBlockValue,
  builderBlockValue,
  engineExecutionPayloadValue,
  builderExecutionPayloadValue,
  builderBoostFactor,
}: {
  builderSelection: routes.validator.BuilderSelection;
  engineBlockValue: bigint;
  builderBlockValue: bigint;
  engineExecutionPayloadValue: bigint;
  builderExecutionPayloadValue: bigint;
  builderBoostFactor: bigint;
}): BlockSelectionResult {
  switch (builderSelection) {
@@ -71,7 +71,7 @@ export function selectBlockProductionSource({
      return {source: ProducedBlockSource.builder, reason: BuilderBlockSelectionReason.BuilderPreferred};
    }

    if (engineBlockValue >= (builderBlockValue * builderBoostFactor) / BigInt(100)) {
    if (engineExecutionPayloadValue >= (builderExecutionPayloadValue * builderBoostFactor) / BigInt(100)) {
      return {source: ProducedBlockSource.engine, reason: EngineBlockSelectionReason.BlockValue};
    }

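A worked example (editor's illustration, hypothetical wei values) of the boost-factor comparison, where `builderBoostFactor` is a percentage applied to the builder's payload value:

const engineExecutionPayloadValue = 95n;
const builderExecutionPayloadValue = 100n;
// A factor of 90 discounts the builder bid to 90%: 100n * 90n / 100n = 90n,
// so the engine's 95n wins; with the neutral factor 100 the builder's 100n would win.
const builderBoostFactor = 90n;
const useEngine = engineExecutionPayloadValue >= (builderExecutionPayloadValue * builderBoostFactor) / 100n;
console.log(useEngine); // true
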
@@ -33,7 +33,7 @@ export class ArchiveStore {
  private archiveMode: ArchiveMode;
  private jobQueue: JobItemQueue<[CheckpointWithHex], void>;

  private archiveBlobEpochs?: number;
  private archiveDataEpochs?: number;
  private readonly statesArchiverStrategy: StateArchiveStrategy;
  private readonly chain: IBeaconChain;
  private readonly db: IBeaconDb;
@@ -52,7 +52,7 @@ export class ArchiveStore {
    this.opts = opts;
    this.signal = signal;
    this.archiveMode = opts.archiveMode;
    this.archiveBlobEpochs = opts.archiveBlobEpochs;
    this.archiveDataEpochs = opts.archiveDataEpochs;

    this.jobQueue = new JobItemQueue<[CheckpointWithHex], void>(this.processFinalizedCheckpoint, {
      maxLength: PROCESS_FINALIZED_CHECKPOINT_QUEUE_LENGTH,
@@ -184,7 +184,7 @@ export class ArchiveStore {
      this.logger,
      finalized,
      this.chain.clock.currentEpoch,
      this.archiveBlobEpochs,
      this.archiveDataEpochs,
      this.chain.opts.persistOrphanedBlocks,
      this.chain.opts.persistOrphanedBlocksDir
    );

@@ -22,7 +22,7 @@ export interface StatesArchiveOpts {

export type ArchiveStoreOpts = StatesArchiveOpts & {
  disableArchiveOnCheckpoint?: boolean;
  archiveBlobEpochs?: number;
  archiveDataEpochs?: number;
  pruneHistory?: boolean;
  serveHistoricalState?: boolean;
};

@@ -55,7 +55,7 @@ export async function archiveBlocks(
logger: Logger,
finalizedCheckpoint: CheckpointHex,
currentEpoch: Epoch,
archiveBlobEpochs?: number,
archiveDataEpochs?: number,
persistOrphanedBlocks?: boolean,
persistOrphanedBlocksDir?: string
): Promise<void> {
@@ -66,6 +66,7 @@ export async function archiveBlocks(

// NOTE: The finalized block will be exactly the first block of `epoch` or previous
const finalizedPostDeneb = finalizedCheckpoint.epoch >= config.DENEB_FORK_EPOCH;
const finalizedPostFulu = finalizedCheckpoint.epoch >= config.FULU_FORK_EPOCH;

const finalizedCanonicalBlockRoots: BlockRootSlot[] = finalizedCanonicalBlocks.map((block) => ({
slot: block.slot,
@@ -81,8 +82,24 @@ export async function archiveBlocks(
});

if (finalizedPostDeneb) {
const migrate = await migrateBlobSidecarsFromHotToColdDb(config, db, finalizedCanonicalBlockRoots, currentEpoch);
logger.verbose(migrate ? "Migrated blobSidecars from hot DB to cold DB" : "Skip blobSidecars migration");
const migratedEntries = await migrateBlobSidecarsFromHotToColdDb(
config,
db,
finalizedCanonicalBlockRoots,
currentEpoch
);
logger.verbose("Migrated blobSidecars from hot DB to cold DB", {migratedEntries});
}

if (finalizedPostFulu) {
const migratedEntries = await migrateDataColumnSidecarsFromHotToColdDb(
config,
db,
logger,
finalizedCanonicalBlockRoots,
currentEpoch
);
logger.verbose("Migrated dataColumnSidecars from hot DB to cold DB", {migratedEntries});
}
}

@@ -117,16 +134,21 @@ export async function archiveBlocks(

if (finalizedPostDeneb) {
await db.blobSidecars.batchDelete(nonCanonicalBlockRoots);
logger.verbose("Deleted non canonical blobsSider from hot DB");
logger.verbose("Deleted non canonical blobSidecars from hot DB");
}

if (finalizedPostFulu) {
await db.dataColumnSidecar.deleteMany(nonCanonicalBlockRoots);
logger.verbose("Deleted non canonical dataColumnSidecars from hot DB");
}
}

// Delete expired blobs
// Keep only `[current_epoch - max(MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, archiveBlobEpochs)]
// if archiveBlobEpochs set to Infinity do not prune`
// Keep only `[current_epoch - max(MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, archiveDataEpochs)]`
// if archiveDataEpochs set to Infinity do not prune`
if (finalizedPostDeneb) {
if (archiveBlobEpochs !== Infinity) {
const blobsArchiveWindow = Math.max(config.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, archiveBlobEpochs ?? 0);
if (archiveDataEpochs !== Infinity) {
const blobsArchiveWindow = Math.max(config.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, archiveDataEpochs ?? 0);
const blobSidecarsMinEpoch = currentEpoch - blobsArchiveWindow;
if (blobSidecarsMinEpoch >= config.DENEB_FORK_EPOCH) {
const slotsToDelete = await db.blobSidecarsArchive.keys({lt: computeStartSlotAtEpoch(blobSidecarsMinEpoch)});
@@ -138,7 +160,37 @@ export async function archiveBlocks(
}
}
} else {
logger.verbose("blobSidecars pruning skipped: archiveBlobEpochs set to Infinity");
logger.verbose("blobSidecars pruning skipped: archiveDataEpochs set to Infinity");
}
}

// Delete expired data column sidecars
// Keep only `[current_epoch - max(MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS, archiveDataEpochs)]`
if (finalizedPostFulu) {
if (archiveDataEpochs !== Infinity) {
const dataColumnSidecarsArchiveWindow = Math.max(
config.MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS,
archiveDataEpochs ?? 0
);
const dataColumnSidecarsMinEpoch = currentEpoch - dataColumnSidecarsArchiveWindow;
if (dataColumnSidecarsMinEpoch >= config.FULU_FORK_EPOCH) {
const slotsToDelete = (
await db.dataColumnSidecarArchive.keys({
lt: db.dataColumnSidecarArchive.getMaxKeyRaw(computeStartSlotAtEpoch(dataColumnSidecarsMinEpoch)),
})
).map((p) => p.prefix);

if (slotsToDelete.length > 0) {
await db.dataColumnSidecarArchive.deleteMany(slotsToDelete);
logger.verbose(`dataColumnSidecars prune: batchDelete range ${slotsToDelete[0]}..${slotsToDelete.at(-1)}`);
} else {
logger.verbose(`dataColumnSidecars prune: no entries before epoch ${dataColumnSidecarsMinEpoch}`);
}
} else {
logger.verbose(`dataColumnSidecars pruning skipped: ${dataColumnSidecarsMinEpoch} is before fulu fork epoch`);
}
} else {
logger.verbose("dataColumnSidecars pruning skipped: archiveDataEpochs set to Infinity");
}
}

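The two pruning branches above share the same window arithmetic: the retention floor is the max of the spec-mandated minimum and the user-configured archiveDataEpochs, and everything before currentEpoch minus that window is prunable. A small sketch of the cutoff computation; SLOTS_PER_EPOCH and the helper name are illustrative assumptions:

// Sketch of the prune-cutoff arithmetic used above; constants are illustrative.
const SLOTS_PER_EPOCH = 32;

function pruneCutoffSlot(currentEpoch: number, minEpochsForRequests: number, archiveDataEpochs?: number): number | null {
  if (archiveDataEpochs === Infinity) return null; // never prune
  const archiveWindow = Math.max(minEpochsForRequests, archiveDataEpochs ?? 0);
  const minEpoch = currentEpoch - archiveWindow;
  // Everything strictly before the first slot of minEpoch is prunable
  return minEpoch * SLOTS_PER_EPOCH;
}

// e.g. pruneCutoffSlot(10_000, 4096) === (10_000 - 4096) * 32
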
@@ -205,15 +257,14 @@ async function migrateBlobSidecarsFromHotToColdDb(
db: IBeaconDb,
blocks: BlockRootSlot[],
currentEpoch: Epoch
): Promise<boolean> {
let result = false;

): Promise<number> {
let migratedWrappedBlobSidecars = 0;
for (let i = 0; i < blocks.length; i += BLOB_SIDECAR_BATCH_SIZE) {
const toIdx = Math.min(i + BLOB_SIDECAR_BATCH_SIZE, blocks.length);
const canonicalBlocks = blocks.slice(i, toIdx);

// processCanonicalBlocks
if (canonicalBlocks.length === 0) return false;
if (canonicalBlocks.length === 0) break;

// load Buffer instead of ssz deserialized to improve performance
const canonicalBlobSidecarsEntries: KeyValue<Slot, Uint8Array>[] = await Promise.all(
@@ -221,8 +272,11 @@ async function migrateBlobSidecarsFromHotToColdDb(
.filter((block) => {
const blockSlot = block.slot;
const blockEpoch = computeEpochAtSlot(blockSlot);
const forkSeq = config.getForkSeq(blockSlot);
return (
config.getForkSeq(blockSlot) >= ForkSeq.deneb &&
forkSeq >= ForkSeq.deneb &&
forkSeq < ForkSeq.fulu &&
// if block is out of ${config.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS}, skip this step
blockEpoch >= currentEpoch - config.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
);
})
@@ -235,20 +289,70 @@ async function migrateBlobSidecarsFromHotToColdDb(
})
);

const migrate = canonicalBlobSidecarsEntries.length > 0;

if (migrate) {
// put to blockArchive db and delete block db
await Promise.all([
db.blobSidecarsArchive.batchPutBinary(canonicalBlobSidecarsEntries),
db.blobSidecars.batchDelete(canonicalBlocks.map((block) => block.root)),
]);
}

result = result || migrate;
// put to blockArchive db and delete block db
await Promise.all([
db.blobSidecarsArchive.batchPutBinary(canonicalBlobSidecarsEntries),
db.blobSidecars.batchDelete(canonicalBlocks.map((block) => block.root)),
]);
migratedWrappedBlobSidecars += canonicalBlobSidecarsEntries.length;
}

return result;
return migratedWrappedBlobSidecars;
}

// TODO: This function can be simplified further by reducing layers of promises in a loop
async function migrateDataColumnSidecarsFromHotToColdDb(
config: ChainForkConfig,
db: IBeaconDb,
logger: Logger,
blocks: BlockRootSlot[],
currentEpoch: Epoch
): Promise<number> {
let migratedWrappedDataColumns = 0;
for (let i = 0; i < blocks.length; i += BLOB_SIDECAR_BATCH_SIZE) {
const toIdx = Math.min(i + BLOB_SIDECAR_BATCH_SIZE, blocks.length);
const canonicalBlocks = blocks.slice(i, toIdx);

// processCanonicalBlocks
if (canonicalBlocks.length === 0) break;
const promises = [];

// load Buffer instead of ssz deserialized to improve performance
for (const block of canonicalBlocks) {
const blockSlot = block.slot;
const blockEpoch = computeEpochAtSlot(blockSlot);

if (
config.getForkSeq(blockSlot) < ForkSeq.fulu ||
// if block is out of ${config.MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS}, skip this step
blockEpoch < currentEpoch - config.MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS
) {
continue;
}

const dataColumnSidecarBytes = await db.dataColumnSidecar.valuesBinary(block.root);
// there could be 0 dataColumnSidecarBytes if block has no blob
logger.verbose("migrateDataColumnSidecarsFromHotToColdDb", {
slot: block.slot,
root: toRootHex(block.root),
numSidecars: dataColumnSidecarBytes.length,
});
promises.push(
db.dataColumnSidecarArchive.putManyBinary(
block.slot,
dataColumnSidecarBytes.map((p) => ({key: p.id, value: p.value}))
)
);
migratedWrappedDataColumns += dataColumnSidecarBytes.length;
}

promises.push(db.dataColumnSidecar.deleteMany(canonicalBlocks.map((block) => block.root)));

// put to blockArchive db and delete block db
await Promise.all(promises);
}

return migratedWrappedDataColumns;
}

/**

@@ -36,4 +36,8 @@ export class BeaconProposerCache
get(proposerIndex: number): string | undefined {
return this.feeRecipientByValidatorIndex.get(proposerIndex)?.feeRecipient;
}

getValidatorIndices(): number[] {
return Array.from(this.feeRecipientByValidatorIndex.keys());
}
}

@@ -8,6 +8,7 @@ import {
NotReorgedReason,
} from "@lodestar/fork-choice";
import {
ForkName,
ForkPostAltair,
ForkPostElectra,
ForkSeq,
@@ -512,7 +513,10 @@ export async function importBlock(
// dataPromise will not end up here, but preDeneb could. In future we might also allow syncing
// out of data range blocks and import then in forkchoice although one would not be able to
// attest and propose with such head similar to optimistic sync
if (blockInput.type === BlockInputType.availableData) {
if (
blockInput.type === BlockInputType.availableData &&
(blockInput.blockData.fork === ForkName.deneb || blockInput.blockData.fork === ForkName.electra)
) {
const {blobsSource} = blockInput.blockData;
this.metrics?.importBlock.blobsBySource.inc({blobsSource});
}

@@ -1,8 +1,8 @@
import {ChainForkConfig} from "@lodestar/config";
import type {ChainForkConfig} from "@lodestar/config";
import {MaybeValidExecutionStatus} from "@lodestar/fork-choice";
import {ForkPostDeneb, ForkSeq} from "@lodestar/params";
import {type ForkPostDeneb, ForkPostFulu, ForkPreFulu, ForkSeq} from "@lodestar/params";
import {CachedBeaconStateAllForks, DataAvailabilityStatus, computeEpochAtSlot} from "@lodestar/state-transition";
import {RootHex, SignedBeaconBlock, Slot, deneb} from "@lodestar/types";
import type {ColumnIndex, RootHex, SignedBeaconBlock, Slot, deneb, fulu} from "@lodestar/types";

export enum BlockInputType {
// preData is preDeneb
@@ -21,6 +21,25 @@ export enum BlockSource {
byRoot = "req_resp_by_root",
}

export enum GossipedInputType {
block = "block",
blob = "blob",
dataColumn = "data_column",
}

interface CachedDataItem {
cacheId: number;
}
type Availability<T> = {
availabilityPromise: Promise<T>;
resolveAvailability: (data: T) => void;
};

/**
*
* Deneb Blob Format Types
*
*/
/** Enum to represent where blobs come from */
export enum BlobsSource {
gossip = "gossip",
@@ -28,31 +47,81 @@ export enum BlobsSource {
byRange = "req_resp_by_range",
byRoot = "req_resp_by_root",
}
type ForkBlobsInfo = {
fork: ForkPostDeneb & ForkPreFulu;
};
export type BlockInputBlobs = ForkBlobsInfo & {
blobs: deneb.BlobSidecars;
blobsSource: BlobsSource;
};
export type BlobsCacheMap = Map<number, deneb.BlobSidecar>;
export type CachedBlobs = CachedDataItem &
ForkBlobsInfo &
Availability<BlockInputBlobs> & {
blobsCache: BlobsCacheMap;
};

export enum GossipedInputType {
block = "block",
blob = "blob",
/**
*
* PeerDAS Column Format Types
*
*/

export enum DataColumnsSource {
gossip = "gossip",
api = "api",
engine = "engine",
byRange = "req_resp_by_range",
byRoot = "req_resp_by_root",
}
type ForkDataColumnsInfo = {
fork: ForkPostFulu;
};
type DataColumnData = {
dataColumn: fulu.DataColumnSidecar;
dataColumnBytes: Uint8Array | null;
};
export type DataColumnsCacheMap = Map<number, DataColumnData>;
export type BlockInputDataColumns = ForkDataColumnsInfo & {
// marker of that columns are to be custodied
dataColumns: fulu.DataColumnSidecars;
dataColumnsBytes: (Uint8Array | null)[];
dataColumnsSource: DataColumnsSource;
};
export type CachedDataColumns = CachedDataItem &
ForkDataColumnsInfo &
Availability<BlockInputDataColumns> & {
dataColumnsCache: DataColumnsCacheMap;
calledRecover: boolean;
};

type BlobsCacheMap = Map<number, deneb.BlobSidecar>;
/**
*
* Cross-Fork Data Types
*
*/

type ForkBlobsInfo = {fork: ForkPostDeneb};
export type BlockInputBlobs = {blobs: deneb.BlobSidecar[]; blobsSource: BlobsSource};
export type BlockInputDataBlobs = ForkBlobsInfo & BlockInputBlobs;
export type BlockInputData = BlockInputDataBlobs;
export type BlockInputAvailableData = BlockInputBlobs | BlockInputDataColumns;
export type CachedData = CachedBlobs | CachedDataColumns;

type Availability<T> = {availabilityPromise: Promise<T>; resolveAvailability: (data: T) => void};

type CachedBlobs = {blobsCache: BlobsCacheMap} & Availability<BlockInputDataBlobs>;
export type CachedData = ForkBlobsInfo & CachedBlobs;

export type BlockInput = {block: SignedBeaconBlock; source: BlockSource} & (
export type BlockInput = {
block: SignedBeaconBlock;
source: BlockSource;
} & (
| {type: BlockInputType.preData | BlockInputType.outOfRangeData}
| ({type: BlockInputType.availableData} & {blockData: BlockInputData})
| ({type: BlockInputType.availableData} & {
blockData: BlockInputAvailableData;
})
// the blobsSource here is added to BlockInputBlobs when availability is resolved
| ({type: BlockInputType.dataPromise} & {cachedData: CachedData})
| ({type: BlockInputType.dataPromise} & {
cachedData: CachedData;
})
);
export type NullBlockInput = {block: null; blockRootHex: RootHex; blockInputPromise: Promise<BlockInput>} & {
export type NullBlockInput = {
block: null;
blockRootHex: RootHex;
blockInputPromise: Promise<BlockInput>;
} & {
cachedData: CachedData;
};

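The reworked BlockInput union is discriminated on `type`, and the data side (BlockInputAvailableData, CachedData) is further discriminated on `fork`, so consumers narrow in two steps. A minimal consumer sketch, using simplified stand-ins for the real sidecar types:

// Sketch of two-step narrowing over the union shape above.
// The types are simplified stand-ins for deneb.BlobSidecars / fulu.DataColumnSidecars.
type BlobsData = {fork: "deneb" | "electra"; blobs: unknown[]};
type ColumnsData = {fork: "fulu"; dataColumns: unknown[]};
type AvailableData = BlobsData | ColumnsData;
type Input =
  | {type: "preData"}
  | {type: "availableData"; blockData: AvailableData};

function countDaUnits(input: Input): number {
  if (input.type !== "availableData") return 0; // step 1: narrow on `type`
  const {blockData} = input;
  // step 2: narrow on `fork` to pick blobs vs data columns
  return blockData.fork === "fulu" ? blockData.dataColumns.length : blockData.blobs.length;
}
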
@@ -97,7 +166,7 @@ export const getBlockInput = {
config: ChainForkConfig,
block: SignedBeaconBlock,
source: BlockSource,
blockData: BlockInputData
blockData: BlockInputAvailableData
): BlockInput {
if (config.getForkSeq(block.message.slot) < ForkSeq.deneb) {
throw Error(`Pre Deneb block slot ${block.message.slot}`);
@@ -128,7 +197,7 @@ export const getBlockInput = {
},
};

export function getBlockInputBlobs(blobsCache: BlobsCacheMap): Omit<BlockInputBlobs, "blobsSource"> {
export function getBlockInputBlobs(blobsCache: BlobsCacheMap): Omit<BlockInputBlobs, "fork" | "blobsSource"> {
const blobs = [];

for (let index = 0; index < blobsCache.size; index++) {
@@ -141,6 +210,26 @@ export function getBlockInputBlobs(blobsCache: BlobsCacheMap): Omit<BlockInputBl
return {blobs};
}

export function getBlockInputDataColumns(
dataColumnsCache: DataColumnsCacheMap,
columnIndexes: ColumnIndex[]
): Omit<BlockInputDataColumns, "fork" | "dataColumnsSource"> {
const dataColumns = [];
const dataColumnsBytes = [];

for (const index of columnIndexes) {
const dataColumnCache = dataColumnsCache.get(index);
if (dataColumnCache === undefined) {
// check if the index is correct as per the custody columns
throw Error(`Missing dataColumnCache at index=${index}`);
}
const {dataColumn: dataColumnSidecar, dataColumnBytes} = dataColumnCache;
dataColumns.push(dataColumnSidecar);
dataColumnsBytes.push(dataColumnBytes);
}
return {dataColumns, dataColumnsBytes};
}

export enum AttestationImportOpt {
Skip,
Force,

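Note the Omit<..., "fork" | "dataColumnsSource"> return shape above: the cache only knows the sidecars themselves, so the caller re-attaches the discriminants. A hypothetical call-site sketch (types simplified, names illustrative):

// Sketch of the caller's responsibility implied by the Omit<...> return type above.
type PartialColumns = {dataColumns: unknown[]; dataColumnsBytes: (Uint8Array | null)[]};

function toBlockInputDataColumns(partial: PartialColumns, fork: "fulu", source: "gossip") {
  // Re-attach the fields stripped by Omit<BlockInputDataColumns, "fork" | "dataColumnsSource">
  return {fork, ...partial, dataColumnsSource: source};
}
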
@@ -1,3 +1,4 @@
// biome-ignore lint/complexity/noUselessStringRaw: We want to have code block in string as raw
export const DENEB_BLOWFISH_BANNER = String.raw`

:-.

@@ -1,3 +1,4 @@
// biome-ignore lint/complexity/noUselessStringRaw: We want to have code block in string as raw
export const ELECTRA_GIRAFFE_BANNER = String.raw`

2048

45
packages/beacon-node/src/chain/blocks/utils/zebraBanner.ts
Normal file
@@ -0,0 +1,45 @@
// biome-ignore lint/complexity/noUselessStringRaw: We want to have code block in string as raw
export const FULU_ZEBRA_BANNER = String.raw`

@
------------ @---@
----------@--@@-@@@@ @-----@
------@@@@@@@@-@-@@@@@@@-@@@-----@--@@--
------@@@@@@@@@--@@@@@@@@@@----@@@@@@@@--@---
----@@@@@@@@@@---DA---@@@@@@@--@---@@@@@@@--@--
----@@@@@@@@@---DAT---DA---@@@@@--@-@--@@@@@@--
-------@@@@@@@@@@@@@---DAT--DAT---@@@@--@---@@@@@-
----@@@@@@@@@@@@@@@--DATA---DAT--DATAD@@-@--@--@@@@--
---@@@@@@@@@@@@@@--DAT---DATA---DAT-DATAD@@@--@@--@@@--@--
-@@@@@@@@@@@--DATA----DAT---DAT--DAT-DA-DAT@@@@--@@-@-@-@---
---@@@@@@@@@@@@---DAT----DAT---DAT-DAT-DAT-DA-@-@@-@---@-@-@--
---@@@@@@@@@@-DAT----DAT----DAT--DAT-DAT--DA-DA-@--@--@--@-@-@-@-
--@--@@@@@@@@@@@--DAT----DATA--DATA-DAT-DAT--DA-DA-@@--@--@--@-@@-@-@
---@@@@@@@-@@-DATA---DATA---DAT--DATA-DAT--DA--DA-DA--@--@-@@-@-@--@-@-@
--@@@@@@@-@-DAT--DAT---DATA--DATA-DATA-DAT--DAT-DA-DA--@-@----------@-@-@-
--------@@@@@@@@@-@--DAT--DAT--DATA--DATA--DAT--DAT-DAT-DA-DA-@--@-@-F-U-L-U---@-@-
---DAT--DATA-DATA--DAT-DA-DAT--DAT--DATA--DATA--DATA-DAT--DA-DA-D--@-@--@---------@@--@-
DATAD-DATAD-DATA--DAT-DA-DATA-DATA--DATA---DAT--DATA--DAT-D--D-DA-@@---@--@-----@@@@@--@-
DATA-DATADA-DATA-DAT-DA--DATA-DATA--DATAD--DATA--DAT--DAT-D-DA-DA-@---@--@-@-@------@@@-@-
DAT-DATADA--DATA-DA-DAT--DAT--DATA---DATA--DATA--DATA--DATA-DADA-DA--@-@--@@---@@@@---@@-@-
DA--DATADA--DATA-DA-DAT--DAT--DATA---DATA--DATAD-DATA--DAT-DADADA@-@@-@--@---@----@@@@--@-@-
D--DATADA--DATA-DA--DAT--DAT--DATA---DATA---DATA--DAT--DAT-DATADA-@@-@-@@--@@@@------@@--@@@-
---DATAD---DATA-DA-DAT---DAT---DAT---DATA---DATA--DATA-DA--DATAD-@@-@-@@--@@----@@@@---@@-@@@
--DATADA--DATA-DAT-DAT---DAT---DAT---DATAD--DATA--DATA-DA-DADADADA-@@-@@-@@---@@@@@@@----@-@@-
-DATADA--DATAD-DAT-DAT- -DAT---DAT--DATADA--DATAD--DAT-DA-D-DAD--@@@-@@@@@--@@@-------@@@@@-@@-
-DATAD--DATAD-DAT--DAT---DAT---DAT--DATADA--DATAD--DAT-D-DA-D- -@-@@@@-@@@@--@@@------@@@-@@-
DATAD--DATAD--DAT--DATA--DATA--DAT--DATADA--DATADA-DATAD-DA- --@@--@@@-@@@---@@@@@-@@@-@@-
DATA---DATA--DATA--DATA---DAT--DAT--DATADA---DATAD-DATA-D- --@@-@@--@@@@@--@@@-@@-@@
DAT---DATA---DATA--DATA---DATADATA---DATADA--DATAD-DATA- --@@--@@@---@@@@@@@@@@@-
DA---DATA---D--DA---DATA---DATADAT---DATADA--DATAD-DAT- --@@--@@---@@@@@@@@@@
D---DATA---DA--DAT---DATA---DATADAT---DATAD--DATADA-- --@@----@@@@@@@@@@@
---DATA--DATA--DATA---DATA---DATADAT--DATADA-DATAD- -@@@@@@@@@@@@@@@
--DATAD-DATA-DA-DATA---DATA---DATADAT--DATAD---D- -@@@@@@@@@@@@@-
--DATA-DATA-DAT--DATA---DATA---DATADAT--DATAD-- -@@@@@@@@@@@@-
DATAD-DATA--DATA--DATA---DATA---DATADA---DAT- -@@PEERDAS@-
DATA--DAT-DAT-DAT--DAT---DATAD----DATA-DAT- --@@@@@@-
DATA-DAT-DATA-DAT--DATA---DATAD----DAT-@- ----
DATADATADATADA-DAT---DAT---DATAD---DAT--
DATADATADATADA--DAT---DAT---DATAD---DA-

`;

@@ -1,6 +1,6 @@
import {ChainForkConfig} from "@lodestar/config";
import {ExecutionStatus, ProtoBlock} from "@lodestar/fork-choice";
import {ForkName} from "@lodestar/params";
import {ForkName, isForkPostFulu} from "@lodestar/params";
import {
CachedBeaconStateAllForks,
DataAvailabilityStatus,
@@ -18,6 +18,7 @@ import {DENEB_BLOWFISH_BANNER} from "./utils/blowfishBanner.js";
import {ELECTRA_GIRAFFE_BANNER} from "./utils/giraffeBanner.js";
import {CAPELLA_OWL_BANNER} from "./utils/ownBanner.js";
import {POS_PANDA_MERGE_TRANSITION_BANNER} from "./utils/pandaMergeTransitionBanner.js";
import {FULU_ZEBRA_BANNER} from "./utils/zebraBanner.js";
import {verifyBlocksDataAvailability} from "./verifyBlocksDataAvailability.js";
import {SegmentExecStatus, verifyBlocksExecutionPayload} from "./verifyBlocksExecutionPayloads.js";
import {verifyBlocksSignatures} from "./verifyBlocksSignatures.js";
@@ -144,12 +145,12 @@ export async function verifyBlocksInEpoch(
logOnPowBlock(this.logger, this.config, segmentExecStatus.mergeBlockFound);
}

const fromFork = this.config.getForkName(parentBlock.slot);
const toFork = this.config.getForkName(lastBlock.message.slot);
const fromForkBoundary = this.config.getForkBoundaryAtEpoch(computeEpochAtSlot(parentBlock.slot));
const toForkBoundary = this.config.getForkBoundaryAtEpoch(computeEpochAtSlot(lastBlock.message.slot));

// If transition through toFork, note won't happen if ${toFork}_EPOCH = 0, will log double on re-org
if (toFork !== fromFork) {
switch (toFork) {
if (toForkBoundary.fork !== fromForkBoundary.fork) {
switch (toForkBoundary.fork) {
case ForkName.capella:
this.logger.info(CAPELLA_OWL_BANNER);
this.logger.info("Activating withdrawals", {epoch: this.config.CAPELLA_FORK_EPOCH});
@@ -165,9 +166,25 @@ export async function verifyBlocksInEpoch(
this.logger.info("Activating maxEB", {epoch: this.config.ELECTRA_FORK_EPOCH});
break;

case ForkName.fulu:
this.logger.info(FULU_ZEBRA_BANNER);
this.logger.info("Activating peerDAS", {epoch: this.config.FULU_FORK_EPOCH});
break;

default:
}
}

if (isForkPostFulu(fromForkBoundary.fork)) {
const fromBlobParameters = this.config.getBlobParameters(fromForkBoundary.epoch);
const toBlobParameters = this.config.getBlobParameters(toForkBoundary.epoch);

if (toBlobParameters.epoch !== fromBlobParameters.epoch) {
const {epoch, maxBlobsPerBlock} = toBlobParameters;

this.logger.info("Activating BPO fork", {epoch, maxBlobsPerBlock});
}
}
}

if (segmentExecStatus.execAborted === null) {

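The new branch above logs a "BPO fork" (blob-parameters-only change) whenever the active blob schedule entry differs between the parent's epoch and the last block's epoch, even when the fork name itself is unchanged. A sketch of that boundary check over an assumed schedule shape (the shape and lookup are illustrative, not Lodestar's config API):

// Sketch of the BPO (blob-parameters-only) boundary check above.
// Assumes a non-empty schedule sorted ascending by activation epoch.
type BlobParameters = {epoch: number; maxBlobsPerBlock: number};

function getBlobParameters(schedule: BlobParameters[], epoch: number): BlobParameters {
  // Latest schedule entry whose activation epoch is <= the queried epoch
  let active = schedule[0];
  for (const entry of schedule) {
    if (entry.epoch <= epoch) active = entry;
  }
  return active;
}

function isBpoBoundary(schedule: BlobParameters[], fromEpoch: number, toEpoch: number): boolean {
  return getBlobParameters(schedule, fromEpoch).epoch !== getBlobParameters(schedule, toEpoch).epoch;
}
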
@@ -1,11 +1,22 @@
import {ChainForkConfig} from "@lodestar/config";
import {isForkPostDeneb, isForkPostFulu} from "@lodestar/params";
import {DataAvailabilityStatus, computeTimeAtSlot} from "@lodestar/state-transition";
import {UintNum64, deneb} from "@lodestar/types";
import {ErrorAborted, Logger} from "@lodestar/utils";
import {Metrics} from "../../metrics/metrics.js";
import {BlockError, BlockErrorCode} from "../errors/index.js";
import {validateBlobSidecars} from "../validation/blobSidecar.js";
import {BlobSidecarValidation, BlockInput, BlockInputType, ImportBlockOpts, getBlockInput} from "./types.js";
import {validateDataColumnsSidecars} from "../validation/dataColumnSidecar.js";
import {
BlobSidecarValidation,
BlockInput,
BlockInputAvailableData,
BlockInputBlobs,
BlockInputDataColumns,
BlockInputType,
ImportBlockOpts,
getBlockInput,
} from "./types.js";

// we can now wait for full 12 seconds because unavailable block sync will try pulling
// the blobs from the network anyway after 500ms of seeing the block
@@ -71,7 +82,7 @@ export async function verifyBlocksDataAvailability(
}

async function maybeValidateBlobs(
chain: {config: ChainForkConfig; genesisTime: UintNum64; logger: Logger},
chain: {config: ChainForkConfig; genesisTime: UintNum64; metrics: Metrics | null; logger: Logger},
blockInput: BlockInput,
signal: AbortSignal,
opts: ImportBlockOpts
@@ -93,26 +104,38 @@ async function maybeValidateBlobs(
// run full validation
const {block} = blockInput;
const blockSlot = block.message.slot;

const blobsData =
blockInput.type === BlockInputType.availableData
? blockInput.blockData
: await raceWithCutoff(chain, blockInput, blockInput.cachedData.availabilityPromise, signal);
const {blobs} = blobsData;

const {blobKzgCommitments} = (block as deneb.SignedBeaconBlock).message.body;
const beaconBlockRoot = chain.config.getForkTypes(blockSlot).BeaconBlock.hashTreeRoot(block.message);
const blockData =
blockInput.type === BlockInputType.availableData
? blockInput.blockData
: await raceWithCutoff(
chain,
blockInput,
blockInput.cachedData.availabilityPromise as Promise<BlockInputAvailableData>,
signal
);

// if the blob siddecars have been individually verified then we can skip kzg proof check
// but other checks to match blobs with block data still need to be performed
const skipProofsCheck = opts.validBlobSidecars === BlobSidecarValidation.Individual;
await validateBlobSidecars(blockSlot, beaconBlockRoot, blobKzgCommitments, blobs, {skipProofsCheck});
if (isForkPostFulu(blockData.fork)) {
const {dataColumns} = blockData as BlockInputDataColumns;
const skipProofsCheck = opts.validBlobSidecars === BlobSidecarValidation.Individual;
await validateDataColumnsSidecars(blockSlot, beaconBlockRoot, blobKzgCommitments, dataColumns, chain.metrics, {
skipProofsCheck,
});
} else if (isForkPostDeneb(blockData.fork)) {
const {blobs} = blockData as BlockInputBlobs;

// if the blob sidecars have been individually verified then we can skip kzg proof check
// but other checks to match blobs with block data still need to be performed
const skipProofsCheck = opts.validBlobSidecars === BlobSidecarValidation.Individual;
await validateBlobSidecars(blockSlot, beaconBlockRoot, blobKzgCommitments, blobs, {skipProofsCheck});
}

const availableBlockInput = getBlockInput.availableData(
chain.config,
blockInput.block,
blockInput.source,
blobsData
blockData
);
return {dataAvailabilityStatus: DataAvailabilityStatus.Available, availableBlockInput: availableBlockInput};
}

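maybeValidateBlobs now dispatches on the fork of the resolved data instead of assuming blobs: post-Fulu blocks carry data column sidecars validated against the same blobKzgCommitments, while Deneb/Electra blocks keep the blob sidecar path. A condensed, self-contained sketch of that dispatch (the validators are stand-ins, not the real imports):

// Condensed sketch of the fork dispatch above (validators are stand-ins).
type BlockData =
  | {fork: "fulu"; dataColumns: unknown[]}
  | {fork: "deneb" | "electra"; blobs: unknown[]};

async function validateDa(blockData: BlockData, skipProofsCheck: boolean): Promise<void> {
  if (blockData.fork === "fulu") {
    // PeerDAS path: columns are checked against the block's blobKzgCommitments
    await validateDataColumns(blockData.dataColumns, {skipProofsCheck});
  } else {
    // Pre-Fulu path: classic blob sidecar validation
    await validateBlobs(blockData.blobs, {skipProofsCheck});
  }
}

// Stand-in validators so the sketch is self-contained
async function validateDataColumns(_c: unknown[], _o: {skipProofsCheck: boolean}): Promise<void> {}
async function validateBlobs(_b: unknown[], _o: {skipProofsCheck: boolean}): Promise<void> {}
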
@@ -1,6 +1,8 @@
import {toRootHex} from "@lodestar/utils";
import {ForkName, isForkPostDeneb, isForkPostFulu} from "@lodestar/params";
import {fulu} from "@lodestar/types";
import {prettyPrintIndices, toHex, toRootHex} from "@lodestar/utils";
import {BeaconChain} from "../chain.js";
import {BlockInput, BlockInputType} from "./types.js";
import {BlockInput, BlockInputBlobs, BlockInputDataColumns, BlockInputType} from "./types.js";

/**
* Persists block input data to DB. This operation must be eventually completed if a block is imported to the fork-choice.
@@ -28,26 +30,60 @@ export async function writeBlockInputToDb(this: BeaconChain, blocksInput: BlockI
this.logger.debug("Persist block to hot DB", {
slot: block.message.slot,
root: blockRootHex,
inputType: blockInput.type,
});

if (blockInput.type === BlockInputType.availableData || blockInput.type === BlockInputType.dataPromise) {
const blobSidecars =
const blockData =
blockInput.type === BlockInputType.availableData
? blockInput.blockData.blobs
: // At this point of import blobs are available and can be safely awaited
(await blockInput.cachedData.availabilityPromise).blobs;
? blockInput.blockData
: await blockInput.cachedData.availabilityPromise;

// NOTE: Old blobs are pruned on archive
fnPromises.push(this.db.blobSidecars.add({blockRoot, slot: block.message.slot, blobSidecars}));
this.logger.debug("Persisted blobSidecars to hot DB", {
blobsLen: blobSidecars.length,
slot: block.message.slot,
root: blockRootHex,
});
// NOTE: Old data is pruned on archive
if (isForkPostFulu(blockData.fork)) {
const {custodyConfig} = this;
const {custodyColumns} = custodyConfig;
const blobsLen = (block.message as fulu.BeaconBlock).body.blobKzgCommitments.length;
let dataColumnsLen: number;
if (blobsLen === 0) {
dataColumnsLen = 0;
} else {
dataColumnsLen = custodyColumns.length;
}

const blockDataColumns = (blockData as BlockInputDataColumns).dataColumns;
const dataColumnSidecars = blockDataColumns.filter((dataColumnSidecar) =>
custodyColumns.includes(dataColumnSidecar.index)
);
if (dataColumnSidecars.length !== dataColumnsLen) {
throw Error(
`Invalid dataColumnSidecars=${dataColumnSidecars.length} for custody expected custodyColumnsLen=${dataColumnsLen}`
);
}

fnPromises.push(this.db.dataColumnSidecar.putMany(blockRoot, dataColumnSidecars));
this.logger.debug("Persisted dataColumnSidecars to hot DB", {
dataColumnSidecars: dataColumnSidecars.length,
slot: block.message.slot,
root: blockRootHex,
});
} else if (isForkPostDeneb(blockData.fork)) {
const blobSidecars = (blockData as BlockInputBlobs).blobs;
fnPromises.push(this.db.blobSidecars.add({blockRoot, slot: block.message.slot, blobSidecars}));
this.logger.debug("Persisted blobSidecars to hot DB", {
blobsLen: blobSidecars.length,
slot: block.message.slot,
root: blockRootHex,
});
}
}
}

await Promise.all(fnPromises);
this.logger.debug("Persisted blocksInput to db", {
blocksInput: blocksInput.length,
slots: prettyPrintIndices(blocksInput.map((blockInput) => blockInput.block.message.slot)),
});
}

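The post-Fulu persistence branch above only stores the columns the node custodies, and sanity-checks that exactly the expected number survived the filter (zero when the block carries no blobs). A small sketch of that filter-and-check step (types simplified to just the index):

// Sketch of the custody filter used above; sidecar type reduced to its index.
type ColumnSidecar = {index: number};

function selectCustodiedColumns(all: ColumnSidecar[], custodyColumns: number[], blobsLen: number): ColumnSidecar[] {
  const expected = blobsLen === 0 ? 0 : custodyColumns.length;
  const selected = all.filter((sidecar) => custodyColumns.includes(sidecar.index));
  if (selected.length !== expected) {
    // Mirrors the throw above: a mismatch means gossip/engine handed us the wrong column set
    throw Error(`Invalid dataColumnSidecars=${selected.length} expected=${expected}`);
  }
  return selected;
}
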
/**
@@ -56,17 +92,36 @@ export async function writeBlockInputToDb(this: BeaconChain, blocksInput: BlockI
export async function removeEagerlyPersistedBlockInputs(this: BeaconChain, blockInputs: BlockInput[]): Promise<void> {
const blockToRemove = [];
const blobsToRemove = [];
const dataColumnsToRemove = [];

for (const blockInput of blockInputs) {
const {block, type} = blockInput;
const blockRoot = this.config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message);
const blockRootHex = toRootHex(blockRoot);
const slot = block.message.slot;
const blockRoot = this.config.getForkTypes(slot).BeaconBlock.hashTreeRoot(block.message);
const blockRootHex = toHex(blockRoot);
if (!this.forkChoice.hasBlockHex(blockRootHex)) {
blockToRemove.push(block);

if (type === BlockInputType.availableData) {
const blobSidecars = blockInput.blockData.blobs;
blobsToRemove.push({blockRoot, slot: block.message.slot, blobSidecars});
const {blockData} = blockInput;
if (blockData.fork === ForkName.deneb || blockData.fork === ForkName.electra) {
const blobSidecars = blockData.blobs;
blobsToRemove.push({blockRoot, slot, blobSidecars});
} else {
const {custodyConfig} = this;
const {custodyColumns} = custodyConfig;
const dataColumnsLen = custodyColumns.length;
const dataColumnSidecars = (blockData as BlockInputDataColumns).dataColumns.filter((dataColumnSidecar) =>
custodyColumns.includes(dataColumnSidecar.index)
);
if (dataColumnSidecars.length !== dataColumnsLen) {
throw Error(
`Invalid dataColumnSidecars=${dataColumnSidecars.length} for custody expected custodyColumnsLen=${dataColumnsLen}`
);
}

dataColumnsToRemove.push(blockRoot);
}
}
}
}
@@ -75,5 +130,6 @@ export async function removeEagerlyPersistedBlockInputs(this: BeaconChain, block
// TODO: Batch DB operations not with Promise.all but with level db ops
this.db.block.batchRemove(blockToRemove),
this.db.blobSidecars.batchRemove(blobsToRemove),
this.db.dataColumnSidecar.deleteMany(dataColumnsToRemove),
]);
}

@@ -1,8 +1,8 @@
import path from "node:path";
import {Worker, spawn} from "@chainsafe/threads";
// `threads` library creates self global variable which breaks `timeout-abort-controller` https://github.com/jacobheun/timeout-abort-controller/issues/9
// @ts-ignore
// biome-ignore lint/suspicious/noGlobalAssign: <explanation>
// @ts-expect-error
// biome-ignore lint/suspicious/noGlobalAssign: We need the global `self` to reassign module properties later
self = undefined;
import {PublicKey} from "@chainsafe/blst";
import {ISignatureSet} from "@lodestar/state-transition";

@@ -3,7 +3,7 @@ import {PubkeyIndexMap} from "@chainsafe/pubkey-index-map";
import {CompositeTypeAny, TreeView, Type} from "@chainsafe/ssz";
import {BeaconConfig} from "@lodestar/config";
import {CheckpointWithHex, ExecutionStatus, IForkChoice, ProtoBlock, UpdateHeadOpt} from "@lodestar/fork-choice";
import {ForkSeq, GENESIS_SLOT, SLOTS_PER_EPOCH, isForkPostElectra} from "@lodestar/params";
import {EFFECTIVE_BALANCE_INCREMENT, GENESIS_SLOT, SLOTS_PER_EPOCH, isForkPostElectra} from "@lodestar/params";
import {
BeaconStateAllForks,
BeaconStateElectra,
@@ -17,6 +17,7 @@ import {
computeStartSlotAtEpoch,
createCachedBeaconState,
getEffectiveBalanceIncrementsZeroInactive,
getEffectiveBalancesFromStateBytes,
isCachedBeaconState,
processSlots,
} from "@lodestar/state-transition";
@@ -25,30 +26,31 @@ import {
BlindedBeaconBlock,
BlindedBeaconBlockBody,
Epoch,
ExecutionPayload,
Root,
RootHex,
SignedBeaconBlock,
Slot,
Status,
UintNum64,
ValidatorIndex,
Wei,
bellatrix,
deneb,
isBlindedBeaconBlock,
phase0,
} from "@lodestar/types";
import {Logger, fromHex, gweiToWei, isErrorAborted, pruneSetToMax, sleep, toRootHex} from "@lodestar/utils";
import {ProcessShutdownCallback} from "@lodestar/validator";

import {PrivateKey} from "@libp2p/interface";
import {LoggerNode} from "@lodestar/logger/node";
import {GENESIS_EPOCH, ZERO_HASH} from "../constants/index.js";
import {IBeaconDb} from "../db/index.js";
import {IEth1ForBlockProduction} from "../eth1/index.js";
import {BuilderStatus} from "../execution/builder/http.js";
import {IExecutionBuilder, IExecutionEngine} from "../execution/index.js";
import {Metrics} from "../metrics/index.js";
import {computeNodeIdFromPrivateKey} from "../network/subnets/interface.js";
import {BufferPool} from "../util/bufferPool.js";
import {Clock, ClockEvent, IClock} from "../util/clock.js";
import {CustodyConfig, getValidatorsCustodyRequirement} from "../util/dataColumns.js";
import {ensureDir, writeIfNotExist} from "../util/file.js";
import {isOptimisticBlock} from "../util/forkChoice.js";
import {SerializedCache} from "../util/serializedCache.js";
@@ -60,14 +62,7 @@ import {BlockInput} from "./blocks/types.js";
import {BlsMultiThreadWorkerPool, BlsSingleThreadVerifier, IBlsVerifier} from "./bls/index.js";
import {ChainEvent, ChainEventEmitter} from "./emitter.js";
import {ForkchoiceCaller, initializeForkChoice} from "./forkChoice/index.js";
import {
BlockHash,
CommonBlockBody,
FindHeadFnName,
IBeaconChain,
ProposerPreparationData,
StateGetOpts,
} from "./interface.js";
import {CommonBlockBody, FindHeadFnName, IBeaconChain, ProposerPreparationData, StateGetOpts} from "./interface.js";
import {LightClientServer} from "./lightClient/index.js";
import {
AggregatedAttestationPool,
@@ -79,7 +74,7 @@ import {
import {IChainOptions} from "./options.js";
import {PrepareNextSlotScheduler} from "./prepareNextSlot.js";
import {computeNewStateRoot} from "./produceBlock/computeNewStateRoot.js";
import {AssembledBlockType, BlobsResultType, BlockType} from "./produceBlock/index.js";
import {AssembledBlockType, BlockType, ProduceResult} from "./produceBlock/index.js";
import {BlockAttributes, produceBlockBody, produceCommonBlockBody} from "./produceBlock/produceBlockBody.js";
import {QueuedStateRegenerator, RegenCaller} from "./regen/index.js";
import {ReprocessController} from "./reprocess.js";
@@ -108,11 +103,13 @@ import {PersistentCheckpointStateCache} from "./stateCache/persistentCheckpoints
import {ValidatorMonitor} from "./validatorMonitor.js";

/**
* Arbitrary constants, blobs and payloads should be consumed immediately in the same slot
* The maximum number of cached produced results to keep in memory.
*
* Arbitrary constant. Blobs and payloads should be consumed immediately in the same slot
* they are produced. A value of 1 would probably be sufficient. However it's sensible to
* allow some margin if the node overloads.
*/
const DEFAULT_MAX_CACHED_PRODUCED_ROOTS = 4;
const DEFAULT_MAX_CACHED_PRODUCED_RESULTS = 4;

export class BeaconChain implements IBeaconChain {
readonly genesisTime: UintNum64;
@@ -122,6 +119,7 @@ export class BeaconChain implements IBeaconChain {
readonly executionBuilder?: IExecutionBuilder;
// Expose config for convenience in modularized functions
readonly config: BeaconConfig;
readonly custodyConfig: CustodyConfig;
readonly logger: Logger;
readonly metrics: Metrics | null;
readonly validatorMonitor: ValidatorMonitor | null;
@@ -153,7 +151,7 @@ export class BeaconChain implements IBeaconChain {
readonly seenSyncCommitteeMessages = new SeenSyncCommitteeMessages();
readonly seenContributionAndProof: SeenContributionAndProof;
readonly seenAttestationDatas: SeenAttestationDatas;
readonly seenGossipBlockInput = new SeenGossipBlockInput();
readonly seenGossipBlockInput: SeenGossipBlockInput;
readonly seenBlockInputCache: SeenBlockInputCache;
// Seen cache for liveness checks
readonly seenBlockAttesters = new SeenBlockAttesters();
@@ -165,14 +163,14 @@ export class BeaconChain implements IBeaconChain {
readonly beaconProposerCache: BeaconProposerCache;
readonly checkpointBalancesCache: CheckpointBalancesCache;
readonly shufflingCache: ShufflingCache;
/** Map keyed by executionPayload.blockHash of the block for those blobs */
readonly producedContentsCache = new Map<BlockHash, deneb.Contents>();

// Cache payloads from the local execution so that we can send
// and get signed/published blinded versions which beacon node can
// assemble into full blocks before publishing to the network.
readonly producedBlockRoot = new Map<RootHex, ExecutionPayload | null>();
readonly producedBlindedBlockRoot = new Set<RootHex>();
/**
* Cache produced results (ExecutionPayload, DA Data) from the local execution so that we can send
* and get signed/published blinded versions which beacon node can
* assemble into full blocks before publishing to the network.
*/
readonly blockProductionCache = new Map<RootHex, ProduceResult>();

readonly blacklistedBlocks: Map<RootHex, Slot | null>;

readonly serializedCache: SerializedCache;
@@ -183,10 +181,23 @@ export class BeaconChain implements IBeaconChain {
protected readonly db: IBeaconDb;
private abortController = new AbortController();
private processShutdownCallback: ProcessShutdownCallback;
private _earliestAvailableSlot: Slot;

get earliestAvailableSlot(): Slot {
return this._earliestAvailableSlot;
}

set earliestAvailableSlot(slot: Slot) {
if (this._earliestAvailableSlot !== slot) {
this._earliestAvailableSlot = slot;
this.emitter.emit(ChainEvent.updateStatus);
}
}

constructor(
opts: IChainOptions,
{
privateKey,
config,
db,
dbName,
@@ -201,6 +212,7 @@ export class BeaconChain implements IBeaconChain {
executionEngine,
executionBuilder,
}: {
privateKey: PrivateKey;
config: BeaconConfig;
db: IBeaconDb;
dbName: string;
@@ -260,6 +272,24 @@ export class BeaconChain implements IBeaconChain {
this.seenContributionAndProof = new SeenContributionAndProof(metrics);
this.seenAttestationDatas = new SeenAttestationDatas(metrics, this.opts?.attDataCacheSlotDistance);

const nodeId = computeNodeIdFromPrivateKey(privateKey);
const initialCustodyGroupCount =
opts.initialCustodyGroupCount ?? (opts.supernode ? config.NUMBER_OF_CUSTODY_GROUPS : config.CUSTODY_REQUIREMENT);
this.metrics?.peerDas.targetCustodyGroupCount.set(initialCustodyGroupCount);
this.custodyConfig = new CustodyConfig({
nodeId,
config,
initialCustodyGroupCount,
});

this.seenGossipBlockInput = new SeenGossipBlockInput(
this.custodyConfig,
this.executionEngine,
emitter,
clock,
logger
);

this.beaconProposerCache = new BeaconProposerCache(opts);
this.checkpointBalancesCache = new CheckpointBalancesCache();
this.seenBlockInputCache = new SeenBlockInputCache({
@@ -284,6 +314,7 @@ export class BeaconChain implements IBeaconChain {
pubkey2index: new PubkeyIndexMap(),
index2pubkey: [],
});
this._earliestAvailableSlot = cachedState.slot;

this.shufflingCache = cachedState.epochCtx.shufflingCache = new ShufflingCache(metrics, logger, this.opts, [
{
@@ -340,6 +371,7 @@ export class BeaconChain implements IBeaconChain {
cachedState,
opts,
this.justifiedBalancesGetter.bind(this),
metrics,
logger
);
const regen = new QueuedStateRegenerator({
@@ -706,7 +738,7 @@ export class BeaconChain implements IBeaconChain {
const proposerIndex = state.epochCtx.getBeaconProposer(slot);
const proposerPubKey = state.epochCtx.index2pubkey[proposerIndex].toBytes();

const {body, blobs, executionPayloadValue, shouldOverrideBuilder} = await produceBlockBody.call(
const {body, produceResult, executionPayloadValue, shouldOverrideBuilder} = await produceBlockBody.call(
this,
blockType,
state,
@@ -725,7 +757,7 @@ export class BeaconChain implements IBeaconChain {

// The hashtree root computed here for debug log will get cached and hence won't introduce additional delays
const bodyRoot =
blockType === BlockType.Full
produceResult.type === BlockType.Full
? this.config.getForkTypes(slot).BeaconBlockBody.hashTreeRoot(body)
: this.config
.getPostBellatrixForkTypes(slot)
@@ -747,56 +779,18 @@ export class BeaconChain implements IBeaconChain {
const {newStateRoot, proposerReward} = computeNewStateRoot(this.metrics, state, block);
block.stateRoot = newStateRoot;
const blockRoot =
blockType === BlockType.Full
produceResult.type === BlockType.Full
? this.config.getForkTypes(slot).BeaconBlock.hashTreeRoot(block)
: this.config.getPostBellatrixForkTypes(slot).BlindedBeaconBlock.hashTreeRoot(block as BlindedBeaconBlock);
const blockRootHex = toRootHex(blockRoot);

// track the produced block for consensus broadcast validations
if (blockType === BlockType.Full) {
this.logger.debug("Setting executionPayload cache for produced block", {blockRootHex, slot, blockType});
this.producedBlockRoot.set(blockRootHex, (block as bellatrix.BeaconBlock).body.executionPayload ?? null);
this.metrics?.blockProductionCaches.producedBlockRoot.set(this.producedBlockRoot.size);
} else {
this.logger.debug("Tracking the produced blinded block", {blockRootHex, slot, blockType});
this.producedBlindedBlockRoot.add(blockRootHex);
this.metrics?.blockProductionCaches.producedBlindedBlockRoot.set(this.producedBlindedBlockRoot.size);
}

// Cache for latter broadcasting
//
// blinded blobs will be fetched and added to this cache later before finally
// publishing the blinded block's full version
if (blobs.type === BlobsResultType.produced) {
// body is of full type here
const {blockHash, contents} = blobs;
this.producedContentsCache.set(blockHash, contents);
this.metrics?.blockProductionCaches.producedContentsCache.set(this.producedContentsCache.size);
}
// Track the produced block for consensus broadcast validations, later validation, etc.
this.blockProductionCache.set(blockRootHex, produceResult);
this.metrics?.blockProductionCacheSize.set(this.blockProductionCache.size);

return {block, executionPayloadValue, consensusBlockValue: gweiToWei(proposerReward), shouldOverrideBuilder};
}

/**
* https://github.com/ethereum/consensus-specs/blob/dev/specs/eip4844/validator.md#sidecar
* def get_blobs_sidecar(block: BeaconBlock, blobs: Sequence[Blob]) -> BlobSidecars:
* return BlobSidecars(
* beacon_block_root=hash_tree_root(block),
* beacon_block_slot=block.slot,
* blobs=blobs,
* kzg_aggregated_proof=compute_proof_from_blobs(blobs),
* )
*/
getContents(beaconBlock: deneb.BeaconBlock): deneb.Contents {
const blockHash = toRootHex(beaconBlock.body.executionPayload.blockHash);
const contents = this.producedContentsCache.get(blockHash);
if (!contents) {
throw Error(`No contents for executionPayload.blockHash ${blockHash}`);
}

return contents;
}

async processBlock(block: BlockInput, opts?: ImportBlockOpts): Promise<void> {
return this.blockProcessor.processBlocksJob([block], opts);
}
@@ -805,7 +799,7 @@ export class BeaconChain implements IBeaconChain {
return this.blockProcessor.processBlocksJob(blocks, opts);
}

getStatus(): phase0.Status {
getStatus(): Status {
const head = this.forkChoice.getHead();
const finalizedCheckpoint = this.forkChoice.getFinalizedCheckpoint();
const boundary = this.config.getForkBoundaryAtEpoch(this.clock.currentEpoch);
@@ -821,6 +815,7 @@ export class BeaconChain implements IBeaconChain {
// TODO: PERFORMANCE: Memoize to prevent re-computing every time
headRoot: fromHex(head.blockRoot),
headSlot: head.slot,
earliestAvailableSlot: this._earliestAvailableSlot,
};
}

@@ -1110,17 +1105,11 @@ export class BeaconChain implements IBeaconChain {
metrics.opPool.blsToExecutionChangePoolSize.set(this.opPool.blsToExecutionChangeSize);
metrics.chain.blacklistedBlocks.set(this.blacklistedBlocks.size);

const forkChoiceMetrics = this.forkChoice.getMetrics();
metrics.forkChoice.votes.set(forkChoiceMetrics.votes);
metrics.forkChoice.queuedAttestations.set(forkChoiceMetrics.queuedAttestations);
metrics.forkChoice.validatedAttestationDatas.set(forkChoiceMetrics.validatedAttestationDatas);
metrics.forkChoice.balancesLength.set(forkChoiceMetrics.balancesLength);
metrics.forkChoice.nodes.set(forkChoiceMetrics.nodes);
metrics.forkChoice.indices.set(forkChoiceMetrics.indices);
const headState = this.getHeadState();
const fork = this.config.getForkName(headState.slot);

const fork = this.config.getForkName(this.clock.currentSlot);
if (isForkPostElectra(fork)) {
const headStateElectra = this.getHeadState() as BeaconStateElectra;
const headStateElectra = headState as BeaconStateElectra;
metrics.pendingDeposits.set(headStateElectra.pendingDeposits.length);
metrics.pendingPartialWithdrawals.set(headStateElectra.pendingPartialWithdrawals.length);
metrics.pendingConsolidations.set(headStateElectra.pendingConsolidations.length);
@@ -1146,16 +1135,8 @@ export class BeaconChain implements IBeaconChain {
this.reprocessController.onSlot(slot);

// Prune old cached block production artifacts, those are only useful on their slot
pruneSetToMax(this.producedBlockRoot, this.opts.maxCachedProducedRoots ?? DEFAULT_MAX_CACHED_PRODUCED_ROOTS);
this.metrics?.blockProductionCaches.producedBlockRoot.set(this.producedBlockRoot.size);

pruneSetToMax(this.producedBlindedBlockRoot, this.opts.maxCachedProducedRoots ?? DEFAULT_MAX_CACHED_PRODUCED_ROOTS);
this.metrics?.blockProductionCaches.producedBlindedBlockRoot.set(this.producedBlindedBlockRoot.size);

if (this.config.getForkSeq(slot) >= ForkSeq.deneb) {
pruneSetToMax(this.producedContentsCache, this.opts.maxCachedProducedRoots ?? DEFAULT_MAX_CACHED_PRODUCED_ROOTS);
this.metrics?.blockProductionCaches.producedContentsCache.set(this.producedContentsCache.size);
}
pruneSetToMax(this.blockProductionCache, this.opts.maxCachedProducedRoots ?? DEFAULT_MAX_CACHED_PRODUCED_RESULTS);
this.metrics?.blockProductionCacheSize.set(this.blockProductionCache.size);

const metrics = this.metrics;
if (metrics && (slot + 1) % SLOTS_PER_EPOCH === 0) {
@@ -1202,6 +1183,9 @@ export class BeaconChain implements IBeaconChain {
this.logger.verbose("Fork choice finalized", {epoch: cp.epoch, root: cp.rootHex});
this.seenBlockProposers.prune(computeStartSlotAtEpoch(cp.epoch));

// Update validator custody to account for effective balance changes
await this.updateValidatorsCustodyRequirement(cp);

// TODO: Improve using regen here
const {blockRoot, stateRoot, slot} = this.forkChoice.getHead();
const headState = this.regen.getStateSync(stateRoot);
@@ -1220,9 +1204,74 @@ export class BeaconChain implements IBeaconChain {
}

async updateBeaconProposerData(epoch: Epoch, proposers: ProposerPreparationData[]): Promise<void> {
const previousValidatorCount = this.beaconProposerCache.getValidatorIndices().length;

for (const proposer of proposers) {
this.beaconProposerCache.add(epoch, proposer);
}

const newValidatorCount = this.beaconProposerCache.getValidatorIndices().length;

// Only update validator custody if we discovered new validators
if (newValidatorCount > previousValidatorCount) {
const finalizedCheckpoint = this.forkChoice.getFinalizedCheckpoint();
await this.updateValidatorsCustodyRequirement(finalizedCheckpoint);
}
}

private async updateValidatorsCustodyRequirement(finalizedCheckpoint: CheckpointWithHex): Promise<void> {
if (this.opts.supernode) {
// Disable dynamic custody updates for supernodes since they must maintain custody
// of all custody groups regardless of validator effective balances
return;
}

// Validators attached to the node
const validatorIndices = this.beaconProposerCache.getValidatorIndices();

// Update custody requirement based on finalized state
let effectiveBalances: number[];
const effectiveBalanceIncrements = this.checkpointBalancesCache.get(finalizedCheckpoint);
if (effectiveBalanceIncrements) {
effectiveBalances = validatorIndices.map(
(index) => (effectiveBalanceIncrements[index] ?? 0) * EFFECTIVE_BALANCE_INCREMENT
);
} else {
// If there's no cached effective balances, get the state from disk and parse them out
this.logger.debug("No cached finalized effective balances to update target custody group count", {
finalizedEpoch: finalizedCheckpoint.epoch,
finalizedRoot: finalizedCheckpoint.rootHex,
});

const stateOrBytes = (await this.getStateOrBytesByCheckpoint(finalizedCheckpoint))?.state;
if (!stateOrBytes) {
// If even the state is not available, we cannot update the custody group count
this.logger.debug("No finalized state or bytes available to update target custody group count", {
finalizedEpoch: finalizedCheckpoint.epoch,
finalizedRoot: finalizedCheckpoint.rootHex,
});
return;
}

if (stateOrBytes instanceof Uint8Array) {
effectiveBalances = getEffectiveBalancesFromStateBytes(this.config, stateOrBytes, validatorIndices);
} else {
effectiveBalances = validatorIndices.map((index) => stateOrBytes.validators.get(index).effectiveBalance ?? 0);
}
}

const targetCustodyGroupCount = getValidatorsCustodyRequirement(this.config, effectiveBalances);
// Only update if target is increased
if (targetCustodyGroupCount > this.custodyConfig.targetCustodyGroupCount) {
this.custodyConfig.updateTargetCustodyGroupCount(targetCustodyGroupCount);
this.metrics?.peerDas.targetCustodyGroupCount.set(targetCustodyGroupCount);
this.logger.verbose("Updated target custody group count", {
finalizedEpoch: finalizedCheckpoint.epoch,
validatorCount: validatorIndices.length,
targetCustodyGroupCount,
});
this.emitter.emit(ChainEvent.updateTargetCustodyGroupCount, targetCustodyGroupCount);
}
}

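updateValidatorsCustodyRequirement above only ever ratchets the target upward, derived from the summed effective balances of validators attached to the node. A sketch of a balance-driven target in that spirit; the constants here are illustrative assumptions, not Lodestar's config values:

// Sketch of a balance-driven custody target in the spirit of
// getValidatorsCustodyRequirement; all constants are assumed for illustration.
const BALANCE_PER_ADDITIONAL_CUSTODY_GROUP = 32_000_000_000; // 32 ETH in Gwei (assumed)
const VALIDATOR_CUSTODY_REQUIREMENT = 8; // assumed floor
const NUMBER_OF_CUSTODY_GROUPS = 128; // assumed ceiling

function validatorsCustodyTarget(effectiveBalancesGwei: number[]): number {
  const totalBalance = effectiveBalancesGwei.reduce((sum, b) => sum + b, 0);
  const fromBalance = Math.floor(totalBalance / BALANCE_PER_ADDITIONAL_CUSTODY_GROUP);
  // Clamp between the validator floor and the total number of groups
  return Math.min(Math.max(fromBalance, VALIDATOR_CUSTODY_REQUIREMENT), NUMBER_OF_CUSTODY_GROUPS);
}

// The chain then applies the result monotonically (only increase, never decrease),
// mirroring the `targetCustodyGroupCount > current` guard above.
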
updateBuilderStatus(clockSlot: Slot): void {
@@ -1233,7 +1282,7 @@ export class BeaconChain implements IBeaconChain {
const previousStatus = executionBuilder.status;
const shouldEnable = slotsPresent >= Math.min(faultInspectionWindow - allowedFaults, clockSlot);

executionBuilder.updateStatus(shouldEnable);
executionBuilder.updateStatus(shouldEnable ? BuilderStatus.enabled : BuilderStatus.circuitBreaker);
// The status changed we should log
const status = executionBuilder.status;
const builderLog = {
@@ -1243,9 +1292,9 @@ export class BeaconChain implements IBeaconChain {
allowedFaults,
};
if (status !== previousStatus) {
this.logger.info("Execution builder status updated", builderLog);
this.logger.info("External builder status updated", builderLog);
} else {
this.logger.verbose("Execution builder status", builderLog);
this.logger.verbose("External builder status", builderLog);
}
}
}

@@ -4,7 +4,7 @@ import {StrictEventEmitter} from "strict-event-emitter-types";
import {routes} from "@lodestar/api";
import {CheckpointWithHex} from "@lodestar/fork-choice";
import {CachedBeaconStateAllForks} from "@lodestar/state-transition";
import {phase0} from "@lodestar/types";
import {fulu, phase0} from "@lodestar/types";

/**
* Important chain events that occur during normal chain operation.
@@ -34,6 +34,19 @@ export enum ChainEvent {
* This event is guaranteed to be triggered whenever the fork choice justified checkpoint is updated. This is in response to a newly processed block.
*/
forkChoiceFinalized = "forkChoice:finalized",
/**
* This event signals that dependent services (e.g. custody sampling) should update to account for the new target group count.
*/
updateTargetCustodyGroupCount = "updateTargetCustodyGroupCount",
/**
* This event signals that data columns have been fetched from the execution engine
* and are ready to be published.
*/
publishDataColumns = "publishDataColumns",
/**
* Trigger an update of status so reqresp by peers have current earliestAvailableSlot
*/
updateStatus = "updateStatus",
}

export type HeadEventData = routes.events.EventData[routes.events.EventType.head];
@@ -47,6 +60,12 @@ export type IChainEvents = ApiEvents & {

[ChainEvent.forkChoiceJustified]: (checkpoint: CheckpointWithHex) => void;
[ChainEvent.forkChoiceFinalized]: (checkpoint: CheckpointWithHex) => void;

[ChainEvent.updateTargetCustodyGroupCount]: (targetGroupCount: number) => void;

[ChainEvent.publishDataColumns]: (sidecars: fulu.DataColumnSidecar[]) => void;

[ChainEvent.updateStatus]: () => void;
};

/**
|
||||
|
||||
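// Hedged usage sketch (not part of this diff): how a dependent service could consume
// the new events, assuming a `chain` whose emitter is typed by IChainEvents as above.
chain.emitter.on(ChainEvent.updateTargetCustodyGroupCount, (targetGroupCount: number) => {
  // e.g. re-derive sampled columns and resubscribe to the implied data column subnets
});
chain.emitter.on(ChainEvent.publishDataColumns, (sidecars: fulu.DataColumnSidecar[]) => {
  // e.g. hand recovered or execution-fetched sidecars to the gossip publisher
});
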
@@ -0,0 +1,42 @@
import {RootHex, Slot, SubnetID} from "@lodestar/types";
import {GossipActionError} from "./gossipValidation.js";

export enum DataColumnSidecarErrorCode {
  INVALID_INDEX = "DATA_COLUMN_SIDECAR_ERROR_INVALID_INDEX",
  NO_COMMITMENTS = "DATA_COLUMN_SIDECAR_ERROR_NO_COMMITMENTS",
  MISMATCHED_LENGTHS = "DATA_COLUMN_SIDECAR_ERROR_MISMATCHED_LENGTHS",
  INVALID_SUBNET = "DATA_COLUMN_SIDECAR_ERROR_INVALID_SUBNET",
  INVALID_KZG_PROOF = "DATA_COLUMN_SIDECAR_ERROR_INVALID_KZG_PROOF",

  // following errors are adapted from the block errors
  ALREADY_KNOWN = "DATA_COLUMN_SIDECAR_ERROR_ALREADY_KNOWN",
  FUTURE_SLOT = "DATA_COLUMN_SIDECAR_ERROR_FUTURE_SLOT",
  WOULD_REVERT_FINALIZED_SLOT = "DATA_COLUMN_SIDECAR_ERROR_WOULD_REVERT_FINALIZED_SLOT",
  PARENT_UNKNOWN = "DATA_COLUMN_SIDECAR_ERROR_PARENT_UNKNOWN",
  NOT_LATER_THAN_PARENT = "DATA_COLUMN_SIDECAR_ERROR_NOT_LATER_THAN_PARENT",
  PROPOSAL_SIGNATURE_INVALID = "DATA_COLUMN_SIDECAR_ERROR_PROPOSAL_SIGNATURE_INVALID",
  INCLUSION_PROOF_INVALID = "DATA_COLUMN_SIDECAR_ERROR_INCLUSION_PROOF_INVALID",
  INCORRECT_PROPOSER = "DATA_COLUMN_SIDECAR_ERROR_INCORRECT_PROPOSER",
}

export type DataColumnSidecarErrorType =
  | {code: DataColumnSidecarErrorCode.INVALID_INDEX; columnIdx: number}
  | {code: DataColumnSidecarErrorCode.NO_COMMITMENTS; columnIdx: number}
  | {
      code: DataColumnSidecarErrorCode.MISMATCHED_LENGTHS;
      columnLength: number;
      commitmentsLength: number;
      proofsLength: number;
    }
  | {code: DataColumnSidecarErrorCode.INVALID_SUBNET; columnIdx: number; gossipSubnet: SubnetID}
  | {code: DataColumnSidecarErrorCode.ALREADY_KNOWN; columnIdx: number; slot: Slot}
  | {code: DataColumnSidecarErrorCode.FUTURE_SLOT; blockSlot: Slot; currentSlot: Slot}
  | {code: DataColumnSidecarErrorCode.WOULD_REVERT_FINALIZED_SLOT; blockSlot: Slot; finalizedSlot: Slot}
  | {code: DataColumnSidecarErrorCode.PARENT_UNKNOWN; parentRoot: RootHex}
  | {code: DataColumnSidecarErrorCode.PROPOSAL_SIGNATURE_INVALID}
  | {code: DataColumnSidecarErrorCode.NOT_LATER_THAN_PARENT; parentSlot: Slot; slot: Slot}
  | {code: DataColumnSidecarErrorCode.INCLUSION_PROOF_INVALID; slot: Slot; columnIdx: number}
  | {code: DataColumnSidecarErrorCode.INVALID_KZG_PROOF; slot: Slot; columnIdx: number}
  | {code: DataColumnSidecarErrorCode.INCORRECT_PROPOSER; actualProposerIndex: number; expectedProposerIndex: number};

export class DataColumnSidecarGossipError extends GossipActionError<DataColumnSidecarErrorType> {}
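// Hedged usage sketch: gossip validation rejecting an out-of-range column index with
// the new typed error. GossipAction and NUMBER_OF_COLUMNS are assumed to be imported.
if (dataColumnSidecar.index >= NUMBER_OF_COLUMNS) {
  throw new DataColumnSidecarGossipError(GossipAction.REJECT, {
    code: DataColumnSidecarErrorCode.INVALID_INDEX,
    columnIdx: dataColumnSidecar.index,
  });
}
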
@@ -1,6 +1,7 @@
export * from "./attestationError.js";
export * from "./attesterSlashingError.js";
export * from "./blobSidecarError.js";
export * from "./dataColumnSidecarError.js";
export * from "./blockError.js";
export * from "./gossipValidation.js";
export * from "./proposerSlashingError.js";

@@ -19,6 +19,7 @@ import {Slot} from "@lodestar/types";

import {Logger, toRootHex} from "@lodestar/utils";
import {GENESIS_SLOT} from "../../constants/index.js";
import {Metrics} from "../../metrics/index.js";
import {ChainEventEmitter} from "../emitter.js";
import {ChainEvent} from "../emitter.js";

@@ -42,6 +43,7 @@ export function initializeForkChoice(
  state: CachedBeaconStateAllForks,
  opts: ForkChoiceOpts,
  justifiedBalancesGetter: JustifiedBalancesGetter,
  metrics: Metrics | null,
  logger?: Logger
): ForkChoice {
  const {blockHeader, checkpoint} = computeAnchorCheckpoint(config, state);
@@ -105,6 +107,7 @@ export function initializeForkChoice(
    },
    currentSlot
  ),
  metrics,
  opts,
  logger
);

@@ -12,17 +12,16 @@ import {
  BeaconBlock,
  BlindedBeaconBlock,
  Epoch,
  ExecutionPayload,
  Root,
  RootHex,
  SignedBeaconBlock,
  Slot,
  Status,
  UintNum64,
  ValidatorIndex,
  Wei,
  altair,
  capella,
  deneb,
  phase0,
} from "@lodestar/types";
import {Logger} from "@lodestar/utils";
@@ -31,6 +30,7 @@ import {IExecutionBuilder, IExecutionEngine} from "../execution/index.js";
import {Metrics} from "../metrics/metrics.js";
import {BufferPool} from "../util/bufferPool.js";
import {IClock} from "../util/clock.js";
import {CustodyConfig} from "../util/dataColumns.js";
import {SerializedCache} from "../util/serializedCache.js";
import {IArchiveStore} from "./archiveStore/interface.js";
import {CheckpointBalancesCache} from "./balancesCache.js";
@@ -43,7 +43,7 @@ import {LightClientServer} from "./lightClient/index.js";
import {AggregatedAttestationPool} from "./opPools/aggregatedAttestationPool.js";
import {AttestationPool, OpPool, SyncCommitteeMessagePool, SyncContributionAndProofPool} from "./opPools/index.js";
import {IChainOptions} from "./options.js";
import {AssembledBlockType, BlockAttributes, BlockType} from "./produceBlock/produceBlockBody.js";
import {AssembledBlockType, BlockAttributes, BlockType, ProduceResult} from "./produceBlock/produceBlockBody.js";
import {IStateRegenerator, RegenCaller} from "./regen/index.js";
import {ReprocessController} from "./reprocess.js";
import {AttestationsRewards} from "./rewards/attestationsRewards.js";
@@ -90,6 +90,7 @@ export interface IBeaconChain {
  readonly executionBuilder?: IExecutionBuilder;
  // Expose config for convenience in modularized functions
  readonly config: BeaconConfig;
  readonly custodyConfig: CustodyConfig;
  readonly logger: Logger;
  readonly metrics: Metrics | null;
  readonly validatorMonitor: ValidatorMonitor | null;
@@ -131,10 +132,10 @@ export interface IBeaconChain {

  readonly beaconProposerCache: BeaconProposerCache;
  readonly checkpointBalancesCache: CheckpointBalancesCache;
  readonly producedContentsCache: Map<BlockHash, deneb.Contents>;
  readonly producedBlockRoot: Map<RootHex, ExecutionPayload | null>;

  readonly blockProductionCache: Map<RootHex, ProduceResult>;

  readonly shufflingCache: ShufflingCache;
  readonly producedBlindedBlockRoot: Set<RootHex>;
  readonly blacklistedBlocks: Map<RootHex, Slot | null>;
  // Cache for serialized objects
  readonly serializedCache: SerializedCache;
@@ -196,8 +197,6 @@ export interface IBeaconChain {
    root: RootHex
  ): Promise<{block: SignedBeaconBlock; executionOptimistic: boolean; finalized: boolean} | null>;

  getContents(beaconBlock: deneb.BeaconBlock): deneb.Contents;

  produceCommonBlockBody(blockAttributes: BlockAttributes): Promise<CommonBlockBody>;
  produceBlock(blockAttributes: BlockAttributes & {commonBlockBodyPromise?: Promise<CommonBlockBody>}): Promise<{
    block: BeaconBlock;
@@ -216,7 +215,7 @@ export interface IBeaconChain {
  /** Process a chain of blocks until complete */
  processChainSegment(blocks: BlockInput[], opts?: ImportBlockOpts): Promise<void>;

  getStatus(): phase0.Status;
  getStatus(): Status;

  recomputeForkChoiceHead(caller: ForkchoiceCaller): ProtoBlock;


@@ -10,6 +10,12 @@ import {
  MAX_COMMITTEES_PER_SLOT,
  MIN_ATTESTATION_INCLUSION_DELAY,
  SLOTS_PER_EPOCH,
  TIMELY_HEAD_FLAG_INDEX,
  TIMELY_HEAD_WEIGHT,
  TIMELY_SOURCE_FLAG_INDEX,
  TIMELY_SOURCE_WEIGHT,
  TIMELY_TARGET_FLAG_INDEX,
  TIMELY_TARGET_WEIGHT,
  isForkPostDeneb,
  isForkPostElectra,
} from "@lodestar/params";
@@ -18,9 +24,11 @@ import {
  CachedBeaconStateAltair,
  CachedBeaconStatePhase0,
  EffectiveBalanceIncrements,
  RootCache,
  computeEpochAtSlot,
  computeSlotsSinceEpochStart,
  computeStartSlotAtEpoch,
  getAttestationParticipationStatus,
  getBlockRootAtSlot,
} from "@lodestar/state-transition";
import {
@@ -123,6 +131,11 @@ const MAX_ATTESTATIONS_PER_GROUP_ELECTRA = Math.min(
  MAX_ATTESTATIONS_ELECTRA
);

/** Same as https://github.com/ethereum/consensus-specs/blob/v1.5.0/specs/altair/beacon-chain.md#has_flag */
const TIMELY_SOURCE = 1 << TIMELY_SOURCE_FLAG_INDEX;
const TIMELY_TARGET = 1 << TIMELY_TARGET_FLAG_INDEX;
const TIMELY_HEAD = 1 << TIMELY_HEAD_FLAG_INDEX;

export enum ScannedSlotsTerminationReason {
  MaxConsolidationReached = "max_consolidation_reached",
  ScannedAllSlots = "scanned_all_slots",
@@ -346,6 +359,7 @@ export class AggregatedAttestationPool {
  const stateSlot = state.slot;
  const stateEpoch = state.epochCtx.epoch;
  const statePrevEpoch = stateEpoch - 1;
  const rootCache = new RootCache(state);

  const notSeenValidatorsFn = getNotSeenValidatorsFn(state);
  const validateAttestationDataFn = getValidateAttestationDataFn(forkChoice, state);
@@ -466,7 +480,22 @@ export class AggregatedAttestationPool {

    // after all committees are processed, we have a list of sameAttDataCons
    for (const consolidation of sameAttDataCons) {
      const score = consolidation.totalNewSeenEffectiveBalance / inclusionDistance;
      // Score attestations by profitability to maximize proposer reward
      const flags = getAttestationParticipationStatus(
        ForkSeq[fork],
        consolidation.attData,
        inclusionDistance,
        stateEpoch,
        rootCache
      );

      const weight =
        ((flags & TIMELY_SOURCE) === TIMELY_SOURCE ? TIMELY_SOURCE_WEIGHT : 0) +
        ((flags & TIMELY_TARGET) === TIMELY_TARGET ? TIMELY_TARGET_WEIGHT : 0) +
        ((flags & TIMELY_HEAD) === TIMELY_HEAD ? TIMELY_HEAD_WEIGHT : 0);

      const score = consolidation.totalNewSeenEffectiveBalance * weight;

      consolidations.set(consolidation, score);
      // Stop accumulating attestations once there are enough that may have good scoring
      if (consolidations.size >= MAX_ATTESTATIONS_ELECTRA * 2) {

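// Worked example of the new scoring, assuming the altair incentive constants
// (TIMELY_SOURCE_WEIGHT=14, TIMELY_TARGET_WEIGHT=26, TIMELY_HEAD_WEIGHT=14, out of
// WEIGHT_DENOMINATOR=64; these values come from the spec, not from this diff):
// an attestation whose flags earn source+target but not head gets weight 14 + 26 = 40,
// so it scores totalNewSeenEffectiveBalance * 40, while a source+target+head
// attestation scores totalNewSeenEffectiveBalance * 54. The old formula
// (balance / inclusionDistance) could not tell those two apart at equal distance.
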
@@ -111,7 +111,7 @@ export class AttestationPool {
    committeeIndex: CommitteeIndex,
    attestation: SingleAttestation,
    attDataRootHex: RootHex,
    committeeValidatorIndex: number,
    validatorCommitteeIndex: number,
    committeeSize: number,
    priority?: boolean
  ): InsertOutcome {
@@ -154,10 +154,10 @@ export class AttestationPool {
    const aggregate = aggregateByIndex.get(committeeIndex);
    if (aggregate) {
      // Aggregate by mutating the existing entry
      return aggregateAttestationInto(aggregate, attestation, committeeValidatorIndex);
      return aggregateAttestationInto(aggregate, attestation, validatorCommitteeIndex);
    }
    // Create new aggregate
    aggregateByIndex.set(committeeIndex, attestationToAggregate(attestation, committeeValidatorIndex, committeeSize));
    aggregateByIndex.set(committeeIndex, attestationToAggregate(attestation, validatorCommitteeIndex, committeeSize));
    return InsertOutcome.NewData;
  }

@@ -229,12 +229,12 @@ function aggregateAttestationInto(
  aggregate: AggregateFast,
  attestation: SingleAttestation,
  committeeValidatorIndex: number
  validatorCommitteeIndex: number
): InsertOutcome {
  let bitIndex: number | null;

  if (isElectraSingleAttestation(attestation)) {
    bitIndex = committeeValidatorIndex;
    bitIndex = validatorCommitteeIndex;
  } else {
    bitIndex = attestation.aggregationBits.getSingleTrueBit();
  }
@@ -256,13 +256,13 @@ function aggregateAttestationInto(
 */
function attestationToAggregate(
  attestation: SingleAttestation,
  committeeValidatorIndex: number,
  validatorCommitteeIndex: number,
  committeeSize: number
): AggregateFast {
  if (isElectraSingleAttestation(attestation)) {
    return {
      data: attestation.data,
      aggregationBits: BitArray.fromSingleBit(committeeSize, committeeValidatorIndex),
      aggregationBits: BitArray.fromSingleBit(committeeSize, validatorCommitteeIndex),
      committeeBits: BitArray.fromSingleBit(MAX_COMMITTEES_PER_SLOT, attestation.committeeIndex),
      signature: signatureFromBytesNoCheck(attestation.signature),
    };

@@ -39,9 +39,12 @@ export type IChainOptions = BlockProcessOpts &
  maxCachedBlobSidecars?: number;
  /** Max number of produced block roots (blinded or full) cached for broadcast validations */
  maxCachedProducedRoots?: number;
  /** Subscribe to and custody all data column sidecar subnets */
  supernode?: boolean;
  initialCustodyGroupCount?: number;
  broadcastValidationStrictness?: string;
  minSameMessageSignatureSetsToBatch: number;
  archiveBlobEpochs?: number;
  archiveDateEpochs?: number;
  nHistoricalStates?: boolean;
  nHistoricalStatesFileDataStore?: boolean;
};
@@ -113,6 +116,7 @@ export const defaultChainOptions: IChainOptions = {
  archiveMode: DEFAULT_ARCHIVE_MODE,
  pruneHistory: false,
  emitPayloadAttributes: false,
  supernode: false,
  // for gossip block validation, it's unlikely we see a reorg within 32 slots
  // for attestation validation, having this value ensures we don't have to regen states most of the time
  maxSkipSlots: 32,

@@ -13,6 +13,7 @@ import {
import {Slot} from "@lodestar/types";
import {Logger, fromHex, isErrorAborted, sleep} from "@lodestar/utils";
import {GENESIS_SLOT, ZERO_HASH_HEX} from "../constants/constants.js";
import {BuilderStatus} from "../execution/builder/http.js";
import {Metrics} from "../metrics/index.js";
import {ClockEvent} from "../util/clock.js";
import {isQueueErrorAborted} from "../util/queue/index.js";
@@ -154,7 +155,7 @@ export class PrepareNextSlotScheduler {

      // Update the builder status; if enabled, fire an API call to check it
      this.chain.updateBuilderStatus(clockSlot);
      if (this.chain.executionBuilder?.status) {
      if (this.chain.executionBuilder?.status === BuilderStatus.enabled) {
        this.chain.executionBuilder.checkStatus().catch((e) => {
          this.logger.error("Builder disabled as the check status api failed", {prepareSlot}, e as Error);
        });
@@ -190,7 +191,10 @@ export class PrepareNextSlotScheduler {
      this.computeStateHashTreeRoot(updatedPrepareState, isEpochTransition);

      // If emitPayloadAttributes is true, emit an SSE payloadAttributes event
      if (this.chain.opts.emitPayloadAttributes === true) {
      if (
        this.chain.opts.emitPayloadAttributes === true &&
        this.chain.emitter.listenerCount(routes.events.EventType.payloadAttributes)
      ) {
        const data = await getPayloadAttributesForSSE(fork as ForkPostBellatrix, this.chain, {
          prepareState: updatedPrepareState,
          prepareSlot,

@@ -1,5 +1,13 @@
import {ChainForkConfig} from "@lodestar/config";
import {ForkPostBellatrix, ForkSeq, isForkPostAltair, isForkPostBellatrix} from "@lodestar/params";
import {
  ForkName,
  ForkPostBellatrix,
  ForkPostDeneb,
  ForkPostFulu,
  ForkSeq,
  isForkPostAltair,
  isForkPostBellatrix,
} from "@lodestar/params";
import {
  CachedBeaconStateAllForks,
  CachedBeaconStateBellatrix,
@@ -18,7 +26,9 @@ import {
  BeaconBlockBody,
  BlindedBeaconBlock,
  BlindedBeaconBlockBody,
  BlobsBundle,
  Bytes32,
  ExecutionPayload,
  ExecutionPayloadHeader,
  Root,
  RootHex,
@@ -30,6 +40,7 @@ import {
  capella,
  deneb,
  electra,
  fulu,
  ssz,
  sszTypesFor,
} from "@lodestar/types";
@@ -45,9 +56,10 @@ import {
  getExpectedGasLimit,
} from "../../execution/index.js";
import {fromGraffitiBytes} from "../../util/graffiti.js";
import {kzg} from "../../util/kzg.js";
import type {BeaconChain} from "../chain.js";
import {CommonBlockBody} from "../interface.js";
import {validateBlobsAndKzgCommitments} from "./validateBlobsAndKzgCommitments.js";
import {validateBlobsAndKzgCommitments, validateCellsAndKzgCommitments} from "./validateBlobsAndKzgCommitments.js";

// Time given to the EL to generate a payload from a new payload id
const PAYLOAD_GENERATION_TIME_MS = 500;
@@ -91,16 +103,43 @@ export type AssembledBodyType<T extends BlockType> = T extends BlockType.Full
  : BlindedBeaconBlockBody;
export type AssembledBlockType<T extends BlockType> = T extends BlockType.Full ? BeaconBlock : BlindedBeaconBlock;

export enum BlobsResultType {
  preDeneb,
  produced,
  blinded,
}
export type ProduceFullFulu = {
  type: BlockType.Full;
  fork: ForkPostFulu;
  executionPayload: ExecutionPayload<ForkPostFulu>;
  blobsBundle: BlobsBundle<ForkPostFulu>;
  cells: fulu.Cell[][];
};
export type ProduceFullDeneb = {
  type: BlockType.Full;
  fork: ForkName.deneb | ForkName.electra;
  executionPayload: ExecutionPayload<ForkPostDeneb>;
  blobsBundle: BlobsBundle<ForkPostDeneb>;
};
export type ProduceFullBellatrix = {
  type: BlockType.Full;
  fork: ForkName.bellatrix | ForkName.capella;
  executionPayload: ExecutionPayload<ForkPostBellatrix>;
};
export type ProduceFullPhase0 = {
  type: BlockType.Full;
  fork: ForkName.phase0 | ForkName.altair;
};
export type ProduceBlinded = {
  type: BlockType.Blinded;
  fork: ForkName;
};

export type BlobsResult =
  | {type: BlobsResultType.preDeneb}
  | {type: BlobsResultType.produced; contents: deneb.Contents; blockHash: RootHex}
  | {type: BlobsResultType.blinded};
// The results of block production returned by `produceBlockBody`
// The types are defined separately so typecasting can be used

/** The result of local block production, everything that's not the block itself */
export type ProduceResult =
  | ProduceFullFulu
  | ProduceFullDeneb
  | ProduceFullBellatrix
  | ProduceFullPhase0
  | ProduceBlinded;

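// Hedged usage sketch (not part of this diff): consuming the new ProduceResult union
// by narrowing on the discriminants instead of typecasting.
function blobCountSketch(result: ProduceResult): number {
  if (result.type === BlockType.Blinded) return 0; // builder path, no local blobs
  switch (result.fork) {
    case ForkName.phase0:
    case ForkName.altair:
    case ForkName.bellatrix:
    case ForkName.capella:
      return 0; // pre-deneb payloads carry no blobs
    default:
      // deneb/electra and fulu variants both carry a blobsBundle
      return result.blobsBundle.blobs.length;
  }
}
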
export async function produceBlockBody<T extends BlockType>(
  this: BeaconChain,
@@ -114,7 +153,7 @@ export async function produceBlockBody<T extends BlockType>(
  }
): Promise<{
  body: AssembledBodyType<T>;
  blobs: BlobsResult;
  produceResult: ProduceResult;
  executionPayloadValue: Wei;
  shouldOverrideBuilder?: boolean;
}> {
@@ -126,16 +165,16 @@ export async function produceBlockBody<T extends BlockType>(
    proposerPubKey,
    commonBlockBodyPromise,
  } = blockAttr;
  // Type-safe for blobs variable. Translate 'null' value into 'preDeneb' enum
  // TODO: Not ideal, but better than just using null.
  // TODO: Does not guarantee that preDeneb enum goes with a preDeneb block
  let blobsResult: BlobsResult;
  let executionPayloadValue: Wei;
  let blockBody: AssembledBodyType<T>;
  // even though shouldOverrideBuilder is relevant for the engine response, for simplicity of typing
  // we just return it undefined for the builder which anyway doesn't get consumed downstream
  let shouldOverrideBuilder: boolean | undefined;
  const fork = currentState.config.getForkName(blockSlot);
  const produceResult = {
    type: blockType,
    fork,
  } as ProduceResult;

  const logMeta: Record<string, string | number | bigint> = {
    fork,
@@ -157,7 +196,7 @@ export async function produceBlockBody<T extends BlockType>(
  Object.assign(logMeta, {feeRecipientType, feeRecipient});

  if (blockType === BlockType.Blinded) {
    if (!this.executionBuilder) throw Error("Execution Builder not available");
    if (!this.executionBuilder) throw Error("External builder not configured");
    const executionBuilder = this.executionBuilder;

    const builderPromise = (async () => {
@@ -258,10 +297,7 @@ export async function produceBlockBody<T extends BlockType>(
      }

      (blockBody as deneb.BlindedBeaconBlockBody).blobKzgCommitments = blobKzgCommitments;
      blobsResult = {type: BlobsResultType.blinded};
      Object.assign(logMeta, {blobs: blobKzgCommitments.length});
    } else {
      blobsResult = {type: BlobsResultType.preDeneb};
    }

    if (ForkSeq[fork] >= ForkSeq.electra) {
@@ -356,13 +392,13 @@ export async function produceBlockBody<T extends BlockType>(

    if (engineRes.isPremerge) {
      (blockBody as BeaconBlockBody<ForkPostBellatrix>).executionPayload = engineRes.executionPayload;
      blobsResult = {type: BlobsResultType.preDeneb};
      executionPayloadValue = engineRes.executionPayloadValue;
    } else {
      const {prepType, payloadId, executionPayload, blobsBundle, executionRequests} = engineRes;
      shouldOverrideBuilder = engineRes.shouldOverrideBuilder;

      (blockBody as BeaconBlockBody<ForkPostBellatrix>).executionPayload = executionPayload;
      (produceResult as ProduceFullBellatrix).executionPayload = executionPayload;
      executionPayloadValue = engineRes.executionPayloadValue;
      Object.assign(logMeta, {transactions: executionPayload.transactions.length, shouldOverrideBuilder});

@@ -380,23 +416,36 @@ export async function produceBlockBody<T extends BlockType>(
        this.metrics?.blockPayload.emptyPayloads.inc({prepType});
      }

      if (ForkSeq[fork] >= ForkSeq.deneb) {
      if (ForkSeq[fork] >= ForkSeq.fulu) {
        if (blobsBundle === undefined) {
          throw Error(`Missing blobsBundle response from getPayload at fork=${fork}`);
        }
        // NOTE: Even though the fulu.BlobsBundle type is superficially the same as deneb.BlobsBundle, it is NOT.
        // In fulu, proofs are _cell_ proofs, vs in deneb they are _blob_ proofs.

        const cells = blobsBundle.blobs.map((blob) => kzg.computeCells(blob));
        if (this.opts.sanityCheckExecutionEngineBlobs) {
          await validateCellsAndKzgCommitments(blobsBundle.commitments, blobsBundle.proofs, cells);
        }

        (blockBody as deneb.BeaconBlockBody).blobKzgCommitments = blobsBundle.commitments;
        (produceResult as ProduceFullFulu).blobsBundle = blobsBundle;
        (produceResult as ProduceFullFulu).cells = cells;

        Object.assign(logMeta, {blobs: blobsBundle.commitments.length});
      } else if (ForkSeq[fork] >= ForkSeq.deneb) {
        if (blobsBundle === undefined) {
          throw Error(`Missing blobsBundle response from getPayload at fork=${fork}`);
        }

        if (this.opts.sanityCheckExecutionEngineBlobs) {
          validateBlobsAndKzgCommitments(executionPayload, blobsBundle);
          await validateBlobsAndKzgCommitments(blobsBundle.commitments, blobsBundle.proofs, blobsBundle.blobs);
        }

        (blockBody as deneb.BeaconBlockBody).blobKzgCommitments = blobsBundle.commitments;
        const blockHash = toRootHex(executionPayload.blockHash);
        const contents = {kzgProofs: blobsBundle.proofs, blobs: blobsBundle.blobs};
        blobsResult = {type: BlobsResultType.produced, contents, blockHash};
        (produceResult as ProduceFullDeneb).blobsBundle = blobsBundle;

        Object.assign(logMeta, {blobs: blobsBundle.commitments.length});
      } else {
        blobsResult = {type: BlobsResultType.preDeneb};
      }

      if (ForkSeq[fork] >= ForkSeq.electra) {
@@ -411,7 +460,6 @@ export async function produceBlockBody<T extends BlockType>(
    const commonBlockBody = await (commonBlockBodyPromise ??
      produceCommonBlockBody.call(this, blockType, currentState, blockAttr));
    blockBody = Object.assign({}, commonBlockBody) as AssembledBodyType<T>;
    blobsResult = {type: BlobsResultType.preDeneb};
    executionPayloadValue = BigInt(0);
  }

@@ -450,7 +498,7 @@ export async function produceBlockBody<T extends BlockType>(
  Object.assign(logMeta, {executionPayloadValue});
  this.logger.verbose("Produced beacon block body", logMeta);

  return {body: blockBody as AssembledBodyType<T>, blobs: blobsResult, executionPayloadValue, shouldOverrideBuilder};
  return {body: blockBody as AssembledBodyType<T>, produceResult, executionPayloadValue, shouldOverrideBuilder};
}

/**
@@ -560,7 +608,7 @@ async function prepareExecutionPayloadHeader(

  const parentHashRes = await getExecutionPayloadParentHash(chain, state);
  if (parentHashRes.isPremerge) {
    throw Error("Execution builder disabled pre-merge");
    throw Error("External builder disabled pre-merge");
  }

  const {parentHash} = parentHashRes;

@@ -1,16 +1,54 @@
import {ExecutionPayload} from "@lodestar/types";
import {BlobsBundle} from "../../execution/index.js";
import {CELLS_PER_EXT_BLOB} from "@lodestar/params";
import {deneb, fulu} from "@lodestar/types";
import {kzg} from "../../util/kzg.js";

/**
 * Optionally sanity-check that the KZG commitments match the versioned hashes in the transactions
 * https://github.com/ethereum/consensus-specs/blob/11a037fd9227e29ee809c9397b09f8cc3383a8c0/specs/eip4844/validator.md#blob-kzg-commitments
 */
export async function validateBlobsAndKzgCommitments(
  commitments: deneb.KZGCommitment[],
  proofs: deneb.KZGProof[],
  blobs: deneb.Blobs
): Promise<void> {
  if (blobs.length !== commitments.length) {
    throw Error(`Blobs bundle blobs len ${blobs.length} != commitments len ${commitments.length}`);
  }

export function validateBlobsAndKzgCommitments(_payload: ExecutionPayload, blobsBundle: BlobsBundle): void {
  // sanity-check that the KZG commitments match the blobs (as produced by the execution engine)
  if (blobsBundle.blobs.length !== blobsBundle.commitments.length) {
    throw Error(
      `Blobs bundle blobs len ${blobsBundle.blobs.length} != commitments len ${blobsBundle.commitments.length}`
    );
  if (proofs.length !== blobs.length) {
    throw new Error(`Invalid proofs length for BlobsBundleV1 format: expected ${blobs.length}, got ${proofs.length}`);
  }

  if (!(await kzg.asyncVerifyBlobKzgProofBatch(blobs, commitments, proofs))) {
    throw new Error("Error in verifyBlobKzgProofBatch");
  }
}

/**
 * Optionally sanity-check that the KZG commitments match the versioned hashes in the transactions
 */
export async function validateCellsAndKzgCommitments(
  commitments: deneb.KZGCommitment[],
  proofs: fulu.KZGProof[],
  cells: fulu.Cell[][]
): Promise<void> {
  if (cells.length !== commitments.length) {
    throw Error(`Blobs bundle cells len ${cells.length} != commitments len ${commitments.length}`);
  }

  const expectedProofsLength = cells.length * CELLS_PER_EXT_BLOB;
  if (proofs.length !== expectedProofsLength) {
    throw Error(
      `Invalid proofs length for BlobsBundleV2 format: expected ${expectedProofsLength}, got ${proofs.length}`
    );
  }

  const commitmentBytes = commitments.flatMap((commitment) => Array(CELLS_PER_EXT_BLOB).fill(commitment));
  const cellIndices = Array.from({length: cells.length}).flatMap(() =>
    Array.from({length: CELLS_PER_EXT_BLOB}, (_, i) => i)
  );

  if (!(await kzg.asyncVerifyCellKzgProofBatch(commitmentBytes, cellIndices, cells.flat(), proofs.flat()))) {
    throw new Error("Error in verifyCellKzgProofBatch");
  }
}

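// Worked example of the BlobsBundleV2 layout checked above, assuming
// CELLS_PER_EXT_BLOB = 128 (the fulu default, not restated in this diff): for 3 blobs
// there are 3 commitments, 3 * 128 = 384 cell proofs, and cells[i] holds the 128
// extended cells of blob i. The flattened batch verification then pairs each
// commitment with cell indices 0..127:
//   commitmentBytes = [c0 x128, c1 x128, c2 x128]
//   cellIndices     = [0..127, 0..127, 0..127]
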
@@ -20,8 +20,8 @@ export async function computeSyncCommitteeRewards(
  const preStateAltair = preState as CachedBeaconStateAltair;
  const {index2pubkey} = preStateAltair.epochCtx;

  // Bound committeeIndices in case it goes beyond SYNC_COMMITTEE_SIZE just to be safe
  const committeeIndices = preStateAltair.epochCtx.currentSyncCommitteeIndexed.validatorIndices.slice(
  // Bound syncCommitteeValidatorIndices in case it goes beyond SYNC_COMMITTEE_SIZE just to be safe
  const syncCommitteeValidatorIndices = preStateAltair.epochCtx.currentSyncCommitteeIndexed.validatorIndices.slice(
    0,
    SYNC_COMMITTEE_SIZE
  );
@@ -30,11 +30,11 @@ export async function computeSyncCommitteeRewards(

  // Use the balance of each committee member as the starting point so the penalty is capped and the balance cannot drop below 0
  const balances: Map<ValidatorIndex, BalanceRecord> = new Map();
  for (const i of committeeIndices) {
  for (const i of syncCommitteeValidatorIndices) {
    balances.set(i, {val: preStateAltair.balances.get(i)});
  }

  for (const i of committeeIndices) {
  for (const i of syncCommitteeValidatorIndices) {
    const balanceRecord = balances.get(i) as BalanceRecord;
    if (syncCommitteeBits.get(i)) {
      // Positive rewards for participants

@@ -1,31 +1,58 @@
import {toHexString} from "@chainsafe/ssz";
import {ChainForkConfig} from "@lodestar/config";
import {ForkName, isForkPostDeneb} from "@lodestar/params";
import {RootHex, SignedBeaconBlock, deneb, ssz} from "@lodestar/types";
import {pruneSetToMax, toRootHex} from "@lodestar/utils";
import {ForkName, NUMBER_OF_COLUMNS, isForkPostDeneb} from "@lodestar/params";
import {RootHex, SignedBeaconBlock, deneb, fulu, ssz} from "@lodestar/types";
import {Logger, pruneSetToMax} from "@lodestar/utils";

import {IExecutionEngine} from "../../execution/index.js";
import {Metrics} from "../../metrics/index.js";
import {IClock} from "../../util/clock.js";
import {
  CustodyConfig,
  RecoverResult,
  getDataColumnsFromExecution,
  hasSampledDataColumns,
  recoverDataColumnSidecars,
} from "../../util/dataColumns.js";
import {callInNextEventLoop} from "../../util/eventLoop.js";
import {
  BlobsSource,
  BlockInput,
  BlockInputDataBlobs,
  BlockInputBlobs,
  BlockInputDataColumns,
  BlockSource,
  CachedData,
  CachedDataColumns,
  DataColumnsSource,
  GossipedInputType,
  NullBlockInput,
  getBlockInput,
  getBlockInputBlobs,
  getBlockInputDataColumns,
} from "../blocks/types.js";
import {ChainEvent, ChainEventEmitter} from "../emitter.js";
import {DataColumnSidecarErrorCode, DataColumnSidecarGossipError} from "../errors/dataColumnSidecarError.js";
import {GossipAction} from "../errors/gossipValidation.js";

export enum BlockInputAvailabilitySource {
  GOSSIP = "gossip",
  RECOVERED = "recovered",
  UNKNOWN_SYNC = "unknown_sync",
}

type GossipedBlockInput =
  | {type: GossipedInputType.block; signedBlock: SignedBeaconBlock}
  | {type: GossipedInputType.blob; blobSidecar: deneb.BlobSidecar};
  | {type: GossipedInputType.blob; blobSidecar: deneb.BlobSidecar}
  | {
      type: GossipedInputType.dataColumn;
      dataColumnSidecar: fulu.DataColumnSidecar;
      dataColumnBytes: Uint8Array | null;
    };

type BlockInputCacheType = {
// TODO(fulu): dedup with gossipHandlers.ts
const BLOCK_AVAILABILITY_CUTOFF_MS = 3_000;

export type BlockInputCacheType = {
  fork: ForkName;
  block?: SignedBeaconBlock;
  cachedData?: CachedData;
@@ -34,6 +61,24 @@ type BlockInputCacheType = {
  resolveBlockInput: (blockInput: BlockInput) => void;
};

type GossipBlockInputResponseWithBlock = {
  blockInput: BlockInput;
  blockInputMeta:
    | {pending: GossipedInputType.blob | null; haveBlobs: number; expectedBlobs: number}
    | {pending: GossipedInputType.dataColumn | null; haveColumns: number; expectedColumns: number};
};

type BlockInputPendingBlock = {pending: GossipedInputType.block};
export type BlockInputMetaPendingBlockWithBlobs = BlockInputPendingBlock & {haveBlobs: number; expectedBlobs: null};
type BlockInputMetaPendingBlockWithColumns = BlockInputPendingBlock & {haveColumns: number; expectedColumns: null};

type GossipBlockInputResponseWithNullBlock = {
  blockInput: NullBlockInput;
  blockInputMeta: BlockInputMetaPendingBlockWithBlobs | BlockInputMetaPendingBlockWithColumns;
};

type GossipBlockInputResponse = GossipBlockInputResponseWithBlock | GossipBlockInputResponseWithNullBlock;

const MAX_GOSSIPINPUT_CACHE = 5;

/**

@@ -48,7 +93,27 @@ const MAX_GOSSIPINPUT_CACHE = 5;
 * block are seen by SeenGossipBlockInput
 */
export class SeenGossipBlockInput {
  private blockInputCache = new Map<RootHex, BlockInputCacheType>();
  private readonly blockInputCache = new Map<RootHex, BlockInputCacheType>();
  private readonly custodyConfig: CustodyConfig;
  private readonly executionEngine: IExecutionEngine;
  private readonly clock: IClock;
  private readonly emitter: ChainEventEmitter;
  private readonly logger: Logger;

  constructor(
    custodyConfig: CustodyConfig,
    executionEngine: IExecutionEngine,
    emitter: ChainEventEmitter,
    clock: IClock,
    logger: Logger
  ) {
    this.custodyConfig = custodyConfig;
    this.executionEngine = executionEngine;
    this.clock = clock;
    this.emitter = emitter;
    this.logger = logger;
  }
  globalCacheId = 0;

  prune(): void {
    pruneSetToMax(this.blockInputCache, MAX_GOSSIPINPUT_CACHE);
@@ -58,19 +123,39 @@ export class SeenGossipBlockInput {
    return this.blockInputCache.has(blockRoot);
  }

  /**
   * Intended to be used for gossip validation, specifically this check:
   * [IGNORE] The sidecar is the first sidecar for the tuple (block_header.slot, block_header.proposer_index,
   * sidecar.index) with valid header signature, sidecar inclusion proof, and kzg proof
   */
  hasDataColumnSidecar(sidecar: fulu.DataColumnSidecar) {
    const blockRoot = ssz.phase0.BeaconBlockHeader.hashTreeRoot(sidecar.signedBlockHeader.message);
    const blockRootHex = toHexString(blockRoot);

    const blockCache = this.blockInputCache.get(blockRootHex);
    if (blockCache === undefined) {
      return false;
    }
    if (blockCache.cachedData === undefined || blockCache.cachedData.fork !== ForkName.fulu) {
      return false;
    }
    const existingSidecar = blockCache.cachedData.dataColumnsCache.get(sidecar.index);
    if (!existingSidecar) {
      return false;
    }
    return (
      sidecar.signedBlockHeader.message.slot === existingSidecar.dataColumn.signedBlockHeader.message.slot &&
      sidecar.index === existingSidecar.dataColumn.index &&
      sidecar.signedBlockHeader.message.proposerIndex ===
        existingSidecar.dataColumn.signedBlockHeader.message.proposerIndex
    );
  }

  getGossipBlockInput(
    config: ChainForkConfig,
    gossipedInput: GossipedBlockInput,
    metrics: Metrics | null
  ):
    | {
        blockInput: BlockInput;
        blockInputMeta: {pending: GossipedInputType.blob | null; haveBlobs: number; expectedBlobs: number};
      }
    | {
        blockInput: NullBlockInput;
        blockInputMeta: {pending: GossipedInputType.block; haveBlobs: number; expectedBlobs: null};
      } {
  ): GossipBlockInputResponse {
    let blockHex: RootHex;
    let blockCache: BlockInputCacheType;
    let fork: ForkName;
@@ -79,24 +164,65 @@ export class SeenGossipBlockInput {
      const {signedBlock} = gossipedInput;
      fork = config.getForkName(signedBlock.message.slot);

      blockHex = toRootHex(config.getForkTypes(signedBlock.message.slot).BeaconBlock.hashTreeRoot(signedBlock.message));
      blockCache = this.blockInputCache.get(blockHex) ?? getEmptyBlockInputCacheEntry(fork);
      blockHex = toHexString(
        config.getForkTypes(signedBlock.message.slot).BeaconBlock.hashTreeRoot(signedBlock.message)
      );
      blockCache = this.blockInputCache.get(blockHex) ?? getEmptyBlockInputCacheEntry(fork, ++this.globalCacheId);

      blockCache.block = signedBlock;
    } else {
    } else if (gossipedInput.type === GossipedInputType.blob) {
      const {blobSidecar} = gossipedInput;
      const blockRoot = ssz.phase0.BeaconBlockHeader.hashTreeRoot(blobSidecar.signedBlockHeader.message);
      fork = config.getForkName(blobSidecar.signedBlockHeader.message.slot);

      blockHex = toRootHex(blockRoot);
      blockCache = this.blockInputCache.get(blockHex) ?? getEmptyBlockInputCacheEntry(fork);
      blockHex = toHexString(blockRoot);
      blockCache = this.blockInputCache.get(blockHex) ?? getEmptyBlockInputCacheEntry(fork, ++this.globalCacheId);
      if (blockCache.cachedData?.fork !== ForkName.deneb && blockCache.cachedData?.fork !== ForkName.electra) {
        throw Error(`blob data at non deneb/electra fork=${blockCache.fork}`);
      }

      // TODO: freetheblobs check if it's the same blob or a duplicate and throw/take actions
      blockCache.cachedData?.blobsCache.set(blobSidecar.index, blobSidecar);
    } else if (gossipedInput.type === GossipedInputType.dataColumn) {
      const {dataColumnSidecar, dataColumnBytes} = gossipedInput;
      const blockRoot = ssz.phase0.BeaconBlockHeader.hashTreeRoot(dataColumnSidecar.signedBlockHeader.message);
      fork = config.getForkName(dataColumnSidecar.signedBlockHeader.message.slot);

      blockHex = toHexString(blockRoot);
      blockCache = this.blockInputCache.get(blockHex) ?? getEmptyBlockInputCacheEntry(fork, ++this.globalCacheId);
      if (blockCache.cachedData?.fork !== ForkName.fulu) {
        throw Error(`data column data at non fulu fork=${blockCache.fork}`);
      }

      if (this.hasDataColumnSidecar(dataColumnSidecar)) {
        throw new DataColumnSidecarGossipError(GossipAction.IGNORE, {
          code: DataColumnSidecarErrorCode.ALREADY_KNOWN,
          slot: dataColumnSidecar.signedBlockHeader.message.slot,
          columnIdx: dataColumnSidecar.index,
        });
      }

      blockCache.cachedData?.dataColumnsCache.set(dataColumnSidecar.index, {
        dataColumn: dataColumnSidecar,
        // easily splice out the unsigned message, as the blob is a fixed-length type
        dataColumnBytes: dataColumnBytes?.slice(0, dataColumnBytes.length) ?? null,
      });
    } else {
      // helps TypeScript confirm that all input types have been exhausted
      throw Error("Invalid gossipedInput type");
    }

    if (!this.blockInputCache.has(blockHex)) {
      this.blockInputCache.set(blockHex, blockCache);
      callInNextEventLoop(() => {
        getDataColumnsFromExecution(config, this.custodyConfig, this.executionEngine, this.emitter, blockCache, metrics)
          .then((_success) => {
            // TODO: (@matthewkeil) add metrics collection point here
          })
          .catch((error) => {
            this.logger.warn("Error getting data columns from execution", {blockHex}, error);
          });
      });
    }

    const {block: signedBlock, blockInputPromise, resolveBlockInput, cachedData} = blockCache;

@@ -112,65 +238,252 @@ export class SeenGossipBlockInput {
      if (cachedData === undefined || !isForkPostDeneb(cachedData.fork)) {
        throw Error("Missing or Invalid fork cached Data for post-deneb block");
      }
      const {blobsCache, resolveAvailability} = cachedData;

      // block is available, check if all blobs have shown up
      const {slot, body} = signedBlock.message;
      const {blobKzgCommitments} = body as deneb.BeaconBlockBody;
      const blockInfo = `blockHex=${blockHex}, slot=${slot}`;
      if (cachedData.fork === ForkName.deneb || cachedData.fork === ForkName.electra) {
        const {blobsCache, resolveAvailability} = cachedData;

        if (blobKzgCommitments.length < blobsCache.size) {
          throw Error(
            `Received more blobs=${blobsCache.size} than commitments=${blobKzgCommitments.length} for ${blockInfo}`
          );
        }
        // block is available, check if all blobs have shown up
        const {slot, body} = signedBlock.message;
        const {blobKzgCommitments} = body as deneb.BeaconBlockBody;
        const blockInfo = `blockHex=${blockHex}, slot=${slot}`;

        if (blobKzgCommitments.length === blobsCache.size) {
          const allBlobs = getBlockInputBlobs(blobsCache);
          const blockData = {...allBlobs, blobsSource: BlobsSource.gossip, fork: cachedData.fork};
          resolveAvailability(blockData);
          metrics?.syncUnknownBlock.resolveAvailabilitySource.inc({source: BlockInputAvailabilitySource.GOSSIP});
          const blockInput = getBlockInput.availableData(config, signedBlock, BlockSource.gossip, blockData);
        if (blobKzgCommitments.length < blobsCache.size) {
          throw Error(
            `Received more blobs=${blobsCache.size} than commitments=${blobKzgCommitments.length} for ${blockInfo}`
          );
        }

        if (blobKzgCommitments.length === blobsCache.size) {
          const allBlobs = getBlockInputBlobs(blobsCache);
          const {blobs} = allBlobs;
          const blockData = {
            fork: cachedData.fork,
            ...allBlobs,
            blobsSource: BlobsSource.gossip,
          };
          resolveAvailability(blockData);
          metrics?.syncUnknownBlock.resolveAvailabilitySource.inc({source: BlockInputAvailabilitySource.GOSSIP});

          const blockInput = getBlockInput.availableData(config, signedBlock, BlockSource.gossip, blockData);

          resolveBlockInput(blockInput);
          return {
            blockInput,
            blockInputMeta: {pending: null, haveBlobs: blobs.length, expectedBlobs: blobKzgCommitments.length},
          };
        }

        const blockInput = getBlockInput.dataPromise(config, signedBlock, BlockSource.gossip, cachedData);

        resolveBlockInput(blockInput);
        return {
          blockInput,
          blockInputMeta: {pending: null, haveBlobs: allBlobs.blobs.length, expectedBlobs: blobKzgCommitments.length},
          blockInputMeta: {
            pending: GossipedInputType.blob,
            haveBlobs: blobsCache.size,
            expectedBlobs: blobKzgCommitments.length,
          },
        };
      }

      const blockInput = getBlockInput.dataPromise(config, signedBlock, BlockSource.gossip, cachedData);
      if (cachedData.fork === ForkName.fulu) {
        const {dataColumnsCache, resolveAvailability, calledRecover} = cachedData as CachedDataColumns;

      resolveBlockInput(blockInput);
      return {
        blockInput,
        blockInputMeta: {
          pending: GossipedInputType.blob,
          haveBlobs: blobsCache.size,
          expectedBlobs: blobKzgCommitments.length,
        },
      };
        // block is available, check if all data columns have shown up
        const {slot} = signedBlock.message;
        const blockInfo = `blockHex=${blockHex}, slot=${slot}`;

        if (NUMBER_OF_COLUMNS < dataColumnsCache.size) {
          throw Error(
            `Received more dataColumns=${dataColumnsCache.size} than columns=${NUMBER_OF_COLUMNS} for ${blockInfo}`
          );
        }

        // get the custody columns and see if we have got all the requisite columns
        const blobKzgCommitmentsLen = (signedBlock.message.body as deneb.BeaconBlockBody).blobKzgCommitments.length;
        if (blobKzgCommitmentsLen === 0) {
          const blockData: BlockInputDataColumns = {
            fork: cachedData.fork,
            dataColumns: [],
            dataColumnsBytes: [],
            dataColumnsSource: DataColumnsSource.gossip,
          };
          resolveAvailability(blockData);
          metrics?.syncUnknownBlock.resolveAvailabilitySource.inc({source: BlockInputAvailabilitySource.GOSSIP});

          const blockInput = getBlockInput.availableData(config, signedBlock, BlockSource.gossip, blockData);

          resolveBlockInput(blockInput);
          return {
            blockInput,
            blockInputMeta: {pending: null, haveColumns: 0, expectedColumns: 0},
          };
        }

        const resolveAvailabilityAndBlockInput = (source: BlockInputAvailabilitySource) => {
          const allDataColumns = getBlockInputDataColumns(dataColumnsCache, this.custodyConfig.sampledColumns);
          const blockData: BlockInputDataColumns = {
            fork: cachedData.fork,
            ...allDataColumns,
            dataColumnsSource: DataColumnsSource.gossip,
          };
          resolveAvailability(blockData);
          // TODO(das): should not use syncUnknownBlock metrics here
          metrics?.syncUnknownBlock.resolveAvailabilitySource.inc({source});
          metrics?.dataColumns.bySource.inc({source: DataColumnsSource.gossip});

          const blockInput = getBlockInput.availableData(config, signedBlock, BlockSource.gossip, blockData);
          resolveBlockInput(blockInput);
          return blockInput;
        };

        const columnCount = dataColumnsCache.size;
        if (
          // only try to recover all columns with "--supernode"
          this.custodyConfig.sampledColumns.length === NUMBER_OF_COLUMNS &&
          columnCount >= NUMBER_OF_COLUMNS / 2 &&
          columnCount < NUMBER_OF_COLUMNS &&
          !calledRecover &&
          // doing recovery right away is not efficient because it may delay data_column_sidecar validation
          this.clock.secFromSlot(slot) * 1000 >= BLOCK_AVAILABILITY_CUTOFF_MS
        ) {
          // should only be attempted once per slot
          cachedData.calledRecover = true;
          callInNextEventLoop(async () => {
            const logCtx = {
              blockHex,
              slot,
              dataColumns: dataColumnsCache.size,
            };
            const recoverResult = await recoverDataColumnSidecars(dataColumnsCache, this.clock, metrics).catch((e) => {
              this.logger.error("Error recovering data column sidecars", logCtx, e);
              return RecoverResult.Failed;
            });
            metrics?.recoverDataColumnSidecars.reconstructionResult.inc({result: recoverResult});
            switch (recoverResult) {
              case RecoverResult.SuccessResolved: {
                resolveAvailabilityAndBlockInput(BlockInputAvailabilitySource.RECOVERED);
                // Publish columns if and only if subscribed to them
                const sampledColumns = this.custodyConfig.sampledColumns.map((columnIndex) => {
                  const dataColumn = dataColumnsCache.get(columnIndex)?.dataColumn;
                  if (!dataColumn) {
                    throw Error(`After recover, missing data column for index=${columnIndex} in cache`);
                  }
                  return dataColumn;
                });

                // columns that we have already seen will be ignored through the `ignoreDuplicatePublishError` gossip option
                this.emitter.emit(ChainEvent.publishDataColumns, sampledColumns);
                this.logger.verbose("Recovered data column sidecars and resolved availability", logCtx);
                break;
              }
              case RecoverResult.SuccessLate:
                this.logger.verbose("Recovered data column sidecars but it's too late to resolve availability", logCtx);
                break;
              case RecoverResult.Failed:
                this.logger.verbose("Failed to recover data column sidecars", logCtx);
                break;
              case RecoverResult.NotAttemptedFull:
                this.logger.verbose("Did not attempt because we have full column sidecars", logCtx);
                break;
              case RecoverResult.NotAttemptedLessThanHalf:
                this.logger.verbose("Did not attempt because we have too few column sidecars", logCtx);
                break;
              default:
                break;
            }
          });
        }
        if (hasSampledDataColumns(this.custodyConfig, dataColumnsCache)) {
          const blockInput = resolveAvailabilityAndBlockInput(BlockInputAvailabilitySource.GOSSIP);
          const allDataColumns = getBlockInputDataColumns(dataColumnsCache, this.custodyConfig.sampledColumns);
          const {dataColumns} = allDataColumns;
          return {
            blockInput,
            blockInputMeta: {
              pending: null,
              haveColumns: dataColumns.length,
              expectedColumns: this.custodyConfig.sampledColumns.length,
            },
          };
        }

        const blockInput = getBlockInput.dataPromise(config, signedBlock, BlockSource.gossip, cachedData);

        resolveBlockInput(blockInput);
        return {
          blockInput,
          blockInputMeta: {
            pending: GossipedInputType.dataColumn,
            haveColumns: dataColumnsCache.size,
            expectedColumns: this.custodyConfig.sampledColumns.length,
          },
        };
      }

      throw Error(`Invalid fork=${fork}`);
    }

    // will need to wait for the block to show up
    if (cachedData === undefined) {
      throw Error("Missing cachedData for deneb+ blobs");
    }
    const {blobsCache} = cachedData;

    return {
      blockInput: {
        block: null,
        blockRootHex: blockHex,
        cachedData,
        blockInputPromise,
      },
      blockInputMeta: {pending: GossipedInputType.block, haveBlobs: blobsCache.size, expectedBlobs: null},
    };
    if (cachedData.fork === ForkName.deneb || cachedData.fork === ForkName.electra) {
      const {blobsCache} = cachedData;

      return {
        blockInput: {
          block: null,
          blockRootHex: blockHex,
          cachedData,
          blockInputPromise,
        },
        blockInputMeta: {pending: GossipedInputType.block, haveBlobs: blobsCache.size, expectedBlobs: null},
      };
    }

    if (fork === ForkName.fulu) {
      const {dataColumnsCache} = cachedData as CachedDataColumns;

      return {
        blockInput: {
          block: null,
          blockRootHex: blockHex,
          cachedData,
          blockInputPromise,
        },
        blockInputMeta: {pending: GossipedInputType.block, haveColumns: dataColumnsCache.size, expectedColumns: null},
      };
    }

    throw Error(`invalid fork=${fork} data not implemented`);

    /**
     * TODO: @matthewkeil this code was unreachable. Commented out to remove the lint error, but need to verify the condition
     * again to make sure this is not necessary before deleting it
     *
     * DO NOT DELETE until verified it can be removed
     */
    // will need to wait for the block to show up
    // if (cachedData === undefined) {
    //   throw Error("Missing cachedData for deneb+ blobs");
    // }
    // const {blobsCache} = cachedData as CachedBlobs;

    // return {
    //   blockInput: {
    //     block: null,
    //     blockRootHex: blockHex,
    //     cachedData: cachedData as CachedData,
    //     blockInputPromise,
    //   },
    //   blockInputMeta: {pending: GossipedInputType.block, haveBlobs: blobsCache.size, expectedBlobs: null},
    // };
  }
}

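// Hedged sketch of the recovery trigger above: a supernode samples all columns, and
// erasure coding lets the full set be rebuilt from any half. Recovery is attempted
// at most once, and only after BLOCK_AVAILABILITY_CUTOFF_MS, so that cheap gossip
// arrival is preferred over CPU-heavy reconstruction. Constant values are assumed.
function shouldAttemptRecoverSketch(
  sampledCount: number,
  haveCount: number,
  msIntoSlot: number,
  calledRecover: boolean
): boolean {
  const NUMBER_OF_COLUMNS = 128; // assumed spec value
  const BLOCK_AVAILABILITY_CUTOFF_MS = 3_000;
  return (
    sampledCount === NUMBER_OF_COLUMNS && // supernode only
    haveCount >= NUMBER_OF_COLUMNS / 2 && // enough columns for reconstruction
    haveCount < NUMBER_OF_COLUMNS && // nothing to do if already complete
    !calledRecover &&
    msIntoSlot >= BLOCK_AVAILABILITY_CUTOFF_MS
  );
}
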
function getEmptyBlockInputCacheEntry(fork: ForkName): BlockInputCacheType {
export function getEmptyBlockInputCacheEntry(fork: ForkName, globalCacheId: number): BlockInputCacheType {
  // Capture both the promise and its callbacks for blockInput and final availability
  // It is not spec'ed but in tests in Firefox and NodeJS the promise constructor is run immediately
  let resolveBlockInput: ((block: BlockInput) => void) | null = null;
@@ -184,16 +497,48 @@ function getEmptyBlockInputCacheEntry(fork: ForkName): BlockInputCacheType {
    return {fork, blockInputPromise, resolveBlockInput};
  }

  let resolveAvailability: ((blobs: BlockInputDataBlobs) => void) | null = null;
  const availabilityPromise = new Promise<BlockInputDataBlobs>((resolveCB) => {
    resolveAvailability = resolveCB;
  });
  if (fork === ForkName.deneb || fork === ForkName.electra) {
    let resolveAvailability: ((blobs: BlockInputBlobs) => void) | null = null;
    const availabilityPromise = new Promise<BlockInputBlobs>((resolveCB) => {
      resolveAvailability = resolveCB;
    });

  if (resolveAvailability === null) {
    throw Error("Promise Constructor was not executed immediately");
    if (resolveAvailability === null) {
      throw Error("Promise Constructor was not executed immediately");
    }

    const blobsCache = new Map();
    const cachedData: CachedData = {
      fork,
      blobsCache,
      availabilityPromise,
      resolveAvailability,
      cacheId: ++globalCacheId,
    };
    return {fork, blockInputPromise, resolveBlockInput, cachedData};
  }

  const blobsCache = new Map();
  const cachedData: CachedData = {fork, blobsCache, availabilityPromise, resolveAvailability};
  return {fork, blockInputPromise, resolveBlockInput, cachedData};
  if (fork === ForkName.fulu) {
    let resolveAvailability: ((blobs: BlockInputDataColumns) => void) | null = null;
    const availabilityPromise = new Promise<BlockInputDataColumns>((resolveCB) => {
      resolveAvailability = resolveCB;
    });

    if (resolveAvailability === null) {
      throw Error("Promise Constructor was not executed immediately");
    }

    const dataColumnsCache = new Map();
    const cachedData: CachedData = {
      fork,
      dataColumnsCache,
      availabilityPromise,
      resolveAvailability,
      cacheId: ++globalCacheId,
      calledRecover: false,
    };
    return {fork, blockInputPromise, resolveBlockInput, cachedData};
  }

  throw Error(`Invalid fork=${fork} for getEmptyBlockInputCacheEntry`);
}

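// A minimal sketch of the resolver-capture pattern used above: the Promise executor
// runs synchronously, so the resolve callback can be smuggled out and invoked later
// when the data becomes available.
function deferredSketch<T>(): {promise: Promise<T>; resolve: (value: T) => void} {
  let resolve: ((value: T) => void) | null = null;
  const promise = new Promise<T>((resolveCB) => {
    resolve = resolveCB;
  });
  if (resolve === null) throw Error("Promise constructor was not executed immediately");
  return {promise, resolve};
}
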
@@ -11,7 +11,7 @@ import {IBeaconChain} from "../index.js";
import {RegenCaller} from "../regen/index.js";
import {
  getAttestationDataSigningRoot,
  getCommitteeIndices,
  getCommitteeValidatorIndices,
  getSeenAttDataKeyFromSignedAggregateAndProof,
  getShufflingForAttestationVerification,
  verifyHeadBlockAndTargetRoot,
@@ -21,7 +21,7 @@ import {getAggregateAndProofSignatureSet, getSelectionProofSignatureSet} from ".

export type AggregateAndProofValidationResult = {
  indexedAttestation: IndexedAttestation;
  committeeIndices: Uint32Array;
  committeeValidatorIndices: Uint32Array;
  attDataRootHex: RootHex;
};

@@ -175,16 +175,16 @@ async function validateAggregateAndProof(

  // [REJECT] The committee index is within the expected range
  // -- i.e. data.index < get_committee_count_per_slot(state, data.target.epoch)
  const committeeIndices = cachedAttData
  const committeeValidatorIndices = cachedAttData
    ? cachedAttData.committeeValidatorIndices
    : getCommitteeIndices(shuffling, attSlot, attIndex);
    : getCommitteeValidatorIndices(shuffling, attSlot, attIndex);

  // [REJECT] The number of aggregation bits matches the committee size
  // -- i.e. `len(aggregation_bits) == len(get_beacon_committee(state, aggregate.data.slot, index))`.
  if (aggregate.aggregationBits.bitLen !== committeeIndices.length) {
  if (aggregate.aggregationBits.bitLen !== committeeValidatorIndices.length) {
    throw new AttestationError(GossipAction.REJECT, {code: AttestationErrorCode.WRONG_NUMBER_OF_AGGREGATION_BITS});
  }
  const attestingIndices = aggregate.aggregationBits.intersectValues(committeeIndices);
  const attestingIndices = aggregate.aggregationBits.intersectValues(committeeValidatorIndices);

  const indexedAttestation: IndexedAttestation = {
    attestingIndices,
@@ -202,13 +202,13 @@ async function validateAggregateAndProof(

  // [REJECT] aggregate_and_proof.selection_proof selects the validator as an aggregator for the slot
  // -- i.e. is_aggregator(state, aggregate.data.slot, aggregate.data.index, aggregate_and_proof.selection_proof) returns True.
  if (!isAggregatorFromCommitteeLength(committeeIndices.length, aggregateAndProof.selectionProof)) {
  if (!isAggregatorFromCommitteeLength(committeeValidatorIndices.length, aggregateAndProof.selectionProof)) {
    throw new AttestationError(GossipAction.REJECT, {code: AttestationErrorCode.INVALID_AGGREGATOR});
  }

  // [REJECT] The aggregator's validator index is within the committee
  // -- i.e. aggregate_and_proof.aggregator_index in get_beacon_committee(state, aggregate.data.slot, aggregate.data.index).
  if (!committeeIndices.includes(aggregateAndProof.aggregatorIndex)) {
  if (!committeeValidatorIndices.includes(aggregateAndProof.aggregatorIndex)) {
    throw new AttestationError(GossipAction.REJECT, {code: AttestationErrorCode.AGGREGATOR_NOT_IN_COMMITTEE});
  }

@@ -254,5 +254,5 @@ async function validateAggregateAndProof(
    false
  );

  return {indexedAttestation, committeeIndices, attDataRootHex};
  return {indexedAttestation, committeeValidatorIndices, attDataRootHex};
}

@@ -69,7 +69,7 @@ export type AttestationValidationResult = {
  subnet: SubnetID;
  attDataRootHex: RootHex;
  committeeIndex: CommitteeIndex;
  committeeValidatorIndex: number;
  validatorCommitteeIndex: number;
  committeeSize: number;
};

@@ -335,7 +335,7 @@ async function validateAttestationNoSignatureCheck(
  }

  let aggregationBits: BitArray | null = null;
  let committeeValidatorIndex: number | null = null;
  let validatorCommitteeIndex: number | null = null;
  if (!isForkPostElectra(fork)) {
    // [REJECT] The attestation is unaggregated -- that is, it has exactly one participating validator
    // (len([bit for bit in attestation.aggregation_bits if bit]) == 1, i.e. exactly 1 bit is set).
@@ -355,7 +355,7 @@ async function validateAttestationNoSignatureCheck(
        code: AttestationErrorCode.NOT_EXACTLY_ONE_AGGREGATION_BIT_SET,
      });
    }
    committeeValidatorIndex = bitIndex;
    validatorCommitteeIndex = bitIndex;
  }

  let committeeValidatorIndices: Uint32Array;
@@ -404,7 +404,7 @@ async function validateAttestationNoSignatureCheck(

    // [REJECT] The committee index is within the expected range
    // -- i.e. data.index < get_committee_count_per_slot(state, data.target.epoch)
    committeeValidatorIndices = getCommitteeIndices(shuffling, attSlot, committeeIndex);
    committeeValidatorIndices = getCommitteeValidatorIndices(shuffling, attSlot, committeeIndex);
    getSigningRoot = () => getAttestationDataSigningRoot(chain.config, attData);
    expectedSubnet = computeSubnetForSlot(shuffling, attSlot, committeeIndex);
  }
@@ -414,9 +414,9 @@ async function validateAttestationNoSignatureCheck(
  if (!isForkPostElectra(fork)) {
    // The validity of aggregation bits are already checked above
    assert.notNull(aggregationBits);
    assert.notNull(committeeValidatorIndex);
    assert.notNull(validatorCommitteeIndex);

    validatorIndex = committeeValidatorIndices[committeeValidatorIndex];
    validatorIndex = committeeValidatorIndices[validatorCommitteeIndex];
    // [REJECT] The number of aggregation bits matches the committee size
    // -- i.e. len(attestation.aggregation_bits) == len(get_beacon_committee(state, data.slot, data.index)).
    // > TODO: Is this necessary? Lighthouse does not do this check.
@@ -441,8 +441,8 @@ async function validateAttestationNoSignatureCheck(
    // [REJECT] The attester is a member of the committee -- i.e.
    // `attestation.attester_index in get_beacon_committee(state, attestation.data.slot, index)`.
    // Position of the validator in its committee
    committeeValidatorIndex = committeeValidatorIndices.indexOf(validatorIndex);
    if (committeeValidatorIndex === -1) {
    validatorCommitteeIndex = committeeValidatorIndices.indexOf(validatorIndex);
    if (validatorCommitteeIndex === -1) {
      throw new AttestationError(GossipAction.REJECT, {
        code: AttestationErrorCode.ATTESTER_NOT_IN_COMMITTEE,
      });
@@ -557,7 +557,7 @@ async function validateAttestationNoSignatureCheck(
    signatureSet,
    validatorIndex,
    committeeIndex,
    committeeValidatorIndex,
    validatorCommitteeIndex,
    committeeSize: committeeValidatorIndices.length,
  };
}
@@ -797,10 +797,10 @@ function verifyAttestationTargetRoot(headBlock: ProtoBlock, targetRoot: Root, at
}

/**
 * Get a list of indices of validators in the given committee
 * Get a list of validator indices in the given committee
 * attestationIndex - Index of the committee in shuffling.committees
 */
export function getCommitteeIndices(
export function getCommitteeValidatorIndices(
  shuffling: EpochShuffling,
  attestationSlot: Slot,
  attestationIndex: number

334
packages/beacon-node/src/chain/validation/dataColumnSidecar.ts
Normal file
@@ -0,0 +1,334 @@
import {ChainConfig} from "@lodestar/config";
import {
  KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH,
  KZG_COMMITMENTS_SUBTREE_INDEX,
  NUMBER_OF_COLUMNS,
} from "@lodestar/params";
import {Root, Slot, SubnetID, deneb, fulu, ssz} from "@lodestar/types";
import {toRootHex, verifyMerkleBranch} from "@lodestar/utils";

import {computeStartSlotAtEpoch, getBlockHeaderProposerSignatureSet} from "@lodestar/state-transition";
import {Metrics} from "../../metrics/metrics.js";
import {byteArrayEquals} from "../../util/bytes.js";
import {kzg} from "../../util/kzg.js";
import {DataColumnSidecarErrorCode, DataColumnSidecarGossipError} from "../errors/dataColumnSidecarError.js";
import {GossipAction} from "../errors/gossipValidation.js";
import {IBeaconChain} from "../interface.js";
import {RegenCaller} from "../regen/interface.js";

// SPEC FUNCTION
// https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.4/specs/fulu/p2p-interface.md#data_column_sidecar_subnet_id
export async function validateGossipDataColumnSidecar(
  chain: IBeaconChain,
  dataColumnSidecar: fulu.DataColumnSidecar,
  gossipSubnet: SubnetID,
  metrics: Metrics | null
): Promise<void> {
  const blockHeader = dataColumnSidecar.signedBlockHeader.message;

  // 1) [REJECT] The sidecar is valid as verified by verify_data_column_sidecar
  verifyDataColumnSidecar(dataColumnSidecar);

  // 2) [REJECT] The sidecar is for the correct subnet -- i.e. compute_subnet_for_data_column_sidecar(sidecar.index) == subnet_id
  if (computeSubnetForDataColumnSidecar(chain.config, dataColumnSidecar) !== gossipSubnet) {
    throw new DataColumnSidecarGossipError(GossipAction.REJECT, {
      code: DataColumnSidecarErrorCode.INVALID_SUBNET,
      columnIdx: dataColumnSidecar.index,
      gossipSubnet: gossipSubnet,
    });
  }

  // 3) [IGNORE] The sidecar is not from a future slot (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance)
  // -- i.e. validate that sidecar.slot <= current_slot (a client MAY queue future blocks
  // for processing at the appropriate slot).
  const currentSlotWithGossipDisparity = chain.clock.currentSlotWithGossipDisparity;
  if (currentSlotWithGossipDisparity < blockHeader.slot) {
    throw new DataColumnSidecarGossipError(GossipAction.IGNORE, {
      code: DataColumnSidecarErrorCode.FUTURE_SLOT,
      currentSlot: currentSlotWithGossipDisparity,
      blockSlot: blockHeader.slot,
    });
  }

  // 4) [IGNORE] The sidecar is from a slot greater than the latest finalized slot -- i.e. validate that
  // sidecar.slot > compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)
  const finalizedCheckpoint = chain.forkChoice.getFinalizedCheckpoint();
  const finalizedSlot = computeStartSlotAtEpoch(finalizedCheckpoint.epoch);
  if (blockHeader.slot <= finalizedSlot) {
    throw new DataColumnSidecarGossipError(GossipAction.IGNORE, {
      code: DataColumnSidecarErrorCode.WOULD_REVERT_FINALIZED_SLOT,
      blockSlot: blockHeader.slot,
      finalizedSlot,
    });
  }

  // 6) [IGNORE] The sidecar's block's parent (defined by block_header.parent_root) has been seen (via gossip
  // or non-gossip sources)
  const parentRoot = toRootHex(blockHeader.parentRoot);
  const parentBlock = chain.forkChoice.getBlockHex(parentRoot);
  if (parentBlock === null) {
    // If fork choice does *not* consider the parent to be a descendant of the finalized block,
    // then there are two more cases:
    //
    // 1. We have the parent stored in our database. Because fork-choice has confirmed the
    //    parent is *not* in our post-finalization DAG, all other blocks must be either
    //    pre-finalization or conflicting with finalization.
    // 2. The parent is unknown to us, we probably want to download it since it might actually
    //    descend from the finalized root.
    // (Non-Lighthouse): Since we prune all blocks non-descendant from finalized, checking the `db.block` database won't be useful to guard
    // against known bad fork blocks, so we throw PARENT_UNKNOWN for cases (1) and (2)
    throw new DataColumnSidecarGossipError(GossipAction.IGNORE, {
      code: DataColumnSidecarErrorCode.PARENT_UNKNOWN,
      parentRoot,
    });
  }

  // 8) [REJECT] The sidecar is from a higher slot than the sidecar's block's parent
  if (parentBlock.slot >= blockHeader.slot) {
    throw new DataColumnSidecarGossipError(GossipAction.REJECT, {
      code: DataColumnSidecarErrorCode.NOT_LATER_THAN_PARENT,
      parentSlot: parentBlock.slot,
      slot: blockHeader.slot,
    });
  }

  // getBlockSlotState also checks for whether the current finalized checkpoint is an ancestor of the block.
  // As a result, we throw an IGNORE (whereas the spec says we should REJECT for this scenario).
  // This is something we should change in the future to make the code airtight to the spec.
  // 7) [REJECT] The sidecar's block's parent passes validation.
  const blockState = await chain.regen
    .getBlockSlotState(parentRoot, blockHeader.slot, {dontTransferCache: true}, RegenCaller.validateGossipBlock)
    .catch(() => {
      throw new DataColumnSidecarGossipError(GossipAction.IGNORE, {
        code: DataColumnSidecarErrorCode.PARENT_UNKNOWN,
        parentRoot,
      });
    });

  // 13) [REJECT] The sidecar is proposed by the expected proposer_index for the block's slot in the context of the current
  // shuffling (defined by block_header.parent_root/block_header.slot). If the proposer_index cannot
  // immediately be verified against the expected shuffling, the sidecar MAY be queued for later processing
  // while proposers for the block's branch are calculated -- in such a case do not REJECT, instead IGNORE
  // this message.
  const proposerIndex = blockHeader.proposerIndex;
  const expectedProposerIndex = blockState.epochCtx.getBeaconProposer(blockHeader.slot);

  if (proposerIndex !== expectedProposerIndex) {
    throw new DataColumnSidecarGossipError(GossipAction.REJECT, {
      code: DataColumnSidecarErrorCode.INCORRECT_PROPOSER,
      actualProposerIndex: proposerIndex,
      expectedProposerIndex,
    });
  }

  // 5) [REJECT] The proposer signature of sidecar.signed_block_header, is valid with respect to the block_header.proposer_index pubkey.
  const signatureSet = getBlockHeaderProposerSignatureSet(blockState, dataColumnSidecar.signedBlockHeader);
  // Don't batch so verification is not delayed
  if (
    !(await chain.bls.verifySignatureSets([signatureSet], {
      verifyOnMainThread: blockHeader.slot > chain.forkChoice.getHead().slot,
    }))
  ) {
    throw new DataColumnSidecarGossipError(GossipAction.REJECT, {
      code: DataColumnSidecarErrorCode.PROPOSAL_SIGNATURE_INVALID,
    });
  }

  // 9) [REJECT] The current finalized_checkpoint is an ancestor of the sidecar's block
  // -- i.e. get_checkpoint_block(store, block_header.parent_root, store.finalized_checkpoint.epoch)
  // == store.finalized_checkpoint.root
  // Handled by 7)

  // 10) [REJECT] The sidecar's kzg_commitments field inclusion proof is valid as verified by
  // verify_data_column_sidecar_inclusion_proof
  // TODO: Can cache result on (commitments, proof, header) in the future
  const timer = metrics?.peerDas.dataColumnSidecarInclusionProofVerificationTime.startTimer();
  const valid = verifyDataColumnSidecarInclusionProof(dataColumnSidecar);
  timer?.();

  if (!valid) {
    throw new DataColumnSidecarGossipError(GossipAction.REJECT, {
      code: DataColumnSidecarErrorCode.INCLUSION_PROOF_INVALID,
      slot: dataColumnSidecar.signedBlockHeader.message.slot,
      columnIdx: dataColumnSidecar.index,
    });
  }

  const kzgProofTimer = metrics?.peerDas.dataColumnSidecarKzgProofsVerificationTime.startTimer();
  // 11) [REJECT] The sidecar's column data is valid as verified by verify_data_column_sidecar_kzg_proofs
  try {
    await verifyDataColumnSidecarKzgProofs(
      dataColumnSidecar.kzgCommitments,
      Array.from({length: dataColumnSidecar.column.length}, () => dataColumnSidecar.index),
      dataColumnSidecar.column,
      dataColumnSidecar.kzgProofs
    );
  } catch {
    throw new DataColumnSidecarGossipError(GossipAction.REJECT, {
      code: DataColumnSidecarErrorCode.INVALID_KZG_PROOF,
      slot: blockHeader.slot,
      columnIdx: dataColumnSidecar.index,
    });
  } finally {
    kzgProofTimer?.();
  }

  // 12) [IGNORE] The sidecar is the first sidecar for the tuple (block_header.slot, block_header.proposer_index,
  // sidecar.index) with valid header signature, sidecar inclusion proof, and kzg proof
  // -- Handled in seenGossipBlockInput
}

export async function validateDataColumnsSidecars(
  blockSlot: Slot,
  blockRoot: Root,
  blockKzgCommitments: deneb.BlobKzgCommitments,
  dataColumnSidecars: fulu.DataColumnSidecars,
  metrics: Metrics | null,
  opts: {skipProofsCheck: boolean} = {skipProofsCheck: false}
): Promise<void> {
  // Skip verification if there are no data columns
  if (dataColumnSidecars.length === 0) {
    return;
  }

  const commitmentBytes: Uint8Array[] = [];
  const cellIndices: number[] = [];
  const cells: Uint8Array[] = [];
  const proofBytes: Uint8Array[] = [];

  for (let sidecarsIndex = 0; sidecarsIndex < dataColumnSidecars.length; sidecarsIndex++) {
    const columnSidecar = dataColumnSidecars[sidecarsIndex];
    const {index: columnIndex, column, kzgCommitments, kzgProofs} = columnSidecar;
    const columnBlockHeader = columnSidecar.signedBlockHeader.message;
    const columnBlockRoot = ssz.phase0.BeaconBlockHeader.hashTreeRoot(columnBlockHeader);
    if (
      columnBlockHeader.slot !== blockSlot ||
      !byteArrayEquals(columnBlockRoot, blockRoot) ||
      kzgCommitments.length === 0 ||
      blockKzgCommitments.length === 0 ||
      blockKzgCommitments.length !== kzgCommitments.length ||
      blockKzgCommitments
        .map((commitment, i) => byteArrayEquals(commitment, kzgCommitments[i]))
        .filter((result) => result === false).length
    ) {
      throw new Error(
        `Invalid data column sidecar slot=${columnBlockHeader.slot} columnBlockRoot=${toRootHex(columnBlockRoot)} columnIndex=${columnIndex} for the block blockRoot=${toRootHex(blockRoot)} slot=${blockSlot} sidecarsIndex=${sidecarsIndex} kzgCommitments=${kzgCommitments.length} blockKzgCommitments=${blockKzgCommitments.length}`
      );
    }

    if (columnIndex >= NUMBER_OF_COLUMNS) {
      throw new Error(
        `Invalid data sidecar columnIndex=${columnIndex} in slot=${blockSlot} blockRoot=${toRootHex(blockRoot)} sidecarsIndex=${sidecarsIndex}`
      );
    }

    if (column.length !== kzgCommitments.length || column.length !== kzgProofs.length) {
      throw new Error(
        `Invalid data sidecar array lengths for columnIndex=${columnIndex} in slot=${blockSlot} blockRoot=${toRootHex(blockRoot)}`
      );
    }

    commitmentBytes.push(...kzgCommitments);
    cellIndices.push(...Array.from({length: column.length}, () => columnIndex));
    cells.push(...column);
    proofBytes.push(...kzgProofs);
  }

  if (opts.skipProofsCheck) {
    return;
  }

  let valid: boolean;
  try {
    const timer = metrics?.peerDas.kzgVerificationDataColumnBatchTime.startTimer();
    valid = await kzg.asyncVerifyCellKzgProofBatch(commitmentBytes, cellIndices, cells, proofBytes);
    timer?.();
  } catch (err) {
    (err as Error).message =
      `Error in verifyCellKzgProofBatch for slot=${blockSlot} blockRoot=${toRootHex(blockRoot)} commitmentBytes=${commitmentBytes.length} cellIndices=${cellIndices.length} cells=${cells.length} proofBytes=${proofBytes.length}`;
    throw err;
  }

  if (!valid) {
    throw new Error(`Invalid data column sidecars in slot=${blockSlot} blockRoot=${toRootHex(blockRoot)}`);
  }
}

/**
 * SPEC FUNCTION
 * https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.4/specs/fulu/p2p-interface.md#verify_data_column_sidecar
 */
export function verifyDataColumnSidecar(dataColumnSidecar: fulu.DataColumnSidecar): void {
  if (dataColumnSidecar.index >= NUMBER_OF_COLUMNS) {
    throw new DataColumnSidecarGossipError(GossipAction.REJECT, {
      code: DataColumnSidecarErrorCode.INVALID_INDEX,
      columnIdx: dataColumnSidecar.index,
    });
  }

  if (dataColumnSidecar.kzgCommitments.length === 0) {
    throw new DataColumnSidecarGossipError(GossipAction.REJECT, {
      code: DataColumnSidecarErrorCode.NO_COMMITMENTS,
      columnIdx: dataColumnSidecar.index,
    });
  }

  if (
    dataColumnSidecar.column.length !== dataColumnSidecar.kzgCommitments.length ||
    dataColumnSidecar.column.length !== dataColumnSidecar.kzgProofs.length
  ) {
    throw new DataColumnSidecarGossipError(GossipAction.REJECT, {
      code: DataColumnSidecarErrorCode.MISMATCHED_LENGTHS,
      columnLength: dataColumnSidecar.column.length,
      commitmentsLength: dataColumnSidecar.kzgCommitments.length,
      proofsLength: dataColumnSidecar.kzgProofs.length,
    });
  }
}

/**
 * SPEC FUNCTION
 * https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.4/specs/fulu/p2p-interface.md#verify_data_column_sidecar_kzg_proofs
 */
export async function verifyDataColumnSidecarKzgProofs(
  commitments: Uint8Array[],
  cellIndices: number[],
  cells: Uint8Array[],
  proofs: Uint8Array[]
): Promise<void> {
  let valid: boolean;
  try {
    valid = await kzg.asyncVerifyCellKzgProofBatch(commitments, cellIndices, cells, proofs);
  } catch (e) {
    (e as Error).message = `Error on asyncVerifyCellKzgProofBatch: ${(e as Error).message}`;
    throw e;
  }
  if (!valid) {
    throw Error("Invalid verifyCellKzgProofBatch");
  }
}

/**
 * SPEC FUNCTION
 * https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.4/specs/fulu/p2p-interface.md#verify_data_column_sidecar_inclusion_proof
 */
export function verifyDataColumnSidecarInclusionProof(dataColumnSidecar: fulu.DataColumnSidecar): boolean {
  return verifyMerkleBranch(
    ssz.deneb.BlobKzgCommitments.hashTreeRoot(dataColumnSidecar.kzgCommitments),
    dataColumnSidecar.kzgCommitmentsInclusionProof,
    KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH,
    KZG_COMMITMENTS_SUBTREE_INDEX,
    dataColumnSidecar.signedBlockHeader.message.bodyRoot
  );
}

/**
 * SPEC FUNCTION
 * https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.4/specs/fulu/p2p-interface.md#compute_subnet_for_data_column_sidecar
 */
export function computeSubnetForDataColumnSidecar(
  config: ChainConfig,
  columnSidecar: fulu.DataColumnSidecar
): SubnetID {
  return columnSidecar.index % config.DATA_COLUMN_SIDECAR_SUBNET_COUNT;
}
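
To make the subnet mapping above concrete, here is a worked example; the subnet count below is a hypothetical stand-in for the real value from ChainConfig:

// Worked example of compute_subnet_for_data_column_sidecar (assumed subnet count of 128)
const DATA_COLUMN_SIDECAR_SUBNET_COUNT = 128;
const columnIndex = 130;
const subnet = columnIndex % DATA_COLUMN_SIDECAR_SUBNET_COUNT; // 130 % 128 = 2
console.log(subnet); // 2
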
@@ -114,6 +114,9 @@ function getContributionIndices(

  const syncCommittee = state.epochCtx.getIndexedSyncCommittee(contribution.slot);
  // The bits in contribution.aggregationBits select validatorIndexes in the subcommittee starting at startIndex
  const subcommitteeIndices = syncCommittee.validatorIndices.slice(startIndex, startIndex + SYNC_COMMITTEE_SUBNET_SIZE);
  return contribution.aggregationBits.intersectValues(subcommitteeIndices);
  const subcommitteeValidatorIndices = syncCommittee.validatorIndices.slice(
    startIndex,
    startIndex + SYNC_COMMITTEE_SUBNET_SIZE
  );
  return contribution.aggregationBits.intersectValues(subcommitteeValidatorIndices);
}

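The rename above only reformats the slice call; the intersectValues semantics are unchanged: keep the values whose corresponding aggregation bit is set. A minimal sketch of that semantic as a standalone helper (illustrative, not the real BitArray API):

function intersectValues<T>(bits: boolean[], values: T[]): T[] {
  const out: T[] = [];
  for (let i = 0; i < values.length; i++) {
    // A set bit at position i selects values[i]
    if (bits[i]) out.push(values[i]);
  }
  return out;
}

// e.g. bits [1,0,1] over validator indices [10, 11, 12] select [10, 12]
console.log(intersectValues([true, false, true], [10, 11, 12])); // [10, 12]
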
@@ -12,6 +12,8 @@ import {
  BlockArchiveRepository,
  BlockRepository,
  CheckpointHeaderRepository,
  DataColumnSidecarArchiveRepository,
  DataColumnSidecarRepository,
  DepositDataRootRepository,
  DepositEventRepository,
  Eth1DataRepository,
@@ -34,6 +36,8 @@ export class BeaconDb implements IBeaconDb {

  blobSidecars: BlobSidecarsRepository;
  blobSidecarsArchive: BlobSidecarsArchiveRepository;
  dataColumnSidecar: DataColumnSidecarRepository;
  dataColumnSidecarArchive: DataColumnSidecarArchiveRepository;

  stateArchive: StateArchiveRepository;
  checkpointState: CheckpointStateRepository;
@@ -67,6 +71,8 @@ export class BeaconDb implements IBeaconDb {

    this.blobSidecars = new BlobSidecarsRepository(config, db);
    this.blobSidecarsArchive = new BlobSidecarsArchiveRepository(config, db);
    this.dataColumnSidecar = new DataColumnSidecarRepository(config, db);
    this.dataColumnSidecarArchive = new DataColumnSidecarArchiveRepository(config, db);

    this.stateArchive = new StateArchiveRepository(config, db);
    this.checkpointState = new CheckpointStateRepository(config, db);

@@ -61,6 +61,9 @@ export enum Bucket {
  // 54 was for bestPartialLightClientUpdate, allocate a fresh one
  // lightClient_bestLightClientUpdate = 55, // SyncPeriod -> LightClientUpdate // DEPRECATED on v1.5.0
  lightClient_bestLightClientUpdate = 56, // SyncPeriod -> [Slot, LightClientUpdate]

  allForks_dataColumnSidecars = 57, // FULU BeaconBlockRoot -> DataColumnSidecars
  allForks_dataColumnSidecarsArchive = 58, // FULU BeaconBlockSlot -> DataColumnSidecars
}

export function getBucketNameByValue<T extends Bucket>(enumValue: T): keyof typeof Bucket {

@@ -10,6 +10,8 @@ import {
  BlockArchiveRepository,
  BlockRepository,
  CheckpointHeaderRepository,
  DataColumnSidecarArchiveRepository,
  DataColumnSidecarRepository,
  DepositDataRootRepository,
  DepositEventRepository,
  Eth1DataRepository,
@@ -34,6 +36,8 @@ export interface IBeaconDb {

  blobSidecars: BlobSidecarsRepository;
  blobSidecarsArchive: BlobSidecarsArchiveRepository;
  dataColumnSidecar: DataColumnSidecarRepository;
  dataColumnSidecarArchive: DataColumnSidecarArchiveRepository;

  // finalized states
  stateArchive: StateArchiveRepository;

@@ -0,0 +1,47 @@
import {ChainForkConfig} from "@lodestar/config";
import {Db, PrefixedRepository} from "@lodestar/db";
import {NUMBER_OF_COLUMNS} from "@lodestar/params";
import {ColumnIndex, Root, fulu, ssz} from "@lodestar/types";
import {bytesToInt, intToBytes} from "@lodestar/utils";
import {Bucket, getBucketNameByValue} from "../buckets.js";

type BlockRoot = Root;

/**
 * DataColumnSidecarsRepository
 * Used to store `unfinalized` DataColumnSidecars
 *
 * Indexed data by `blockRoot` + `columnIndex`
 */
export class DataColumnSidecarRepository extends PrefixedRepository<BlockRoot, ColumnIndex, fulu.DataColumnSidecar> {
  constructor(config: ChainForkConfig, db: Db) {
    const bucket = Bucket.allForks_dataColumnSidecars;
    super(config, db, bucket, ssz.fulu.DataColumnSidecar, getBucketNameByValue(bucket));
  }

  /**
   * Id is the sidecar's column index
   */
  getId(value: fulu.DataColumnSidecar): ColumnIndex {
    return value.index;
  }

  encodeKeyRaw(prefix: BlockRoot, id: ColumnIndex): Uint8Array {
    return Buffer.concat([prefix, intToBytes(id, 4)]);
  }

  decodeKeyRaw(raw: Uint8Array): {prefix: BlockRoot; id: ColumnIndex} {
    return {
      prefix: raw.slice(0, 32) as BlockRoot,
      id: bytesToInt(raw.slice(32, 36)) as ColumnIndex,
    };
  }

  getMaxKeyRaw(prefix: BlockRoot): Uint8Array {
    return Buffer.concat([prefix, intToBytes(NUMBER_OF_COLUMNS, 4)]);
  }

  getMinKeyRaw(prefix: BlockRoot): Uint8Array {
    return Buffer.concat([prefix, intToBytes(0, 4)]);
  }
}
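
The key layout above is what makes per-block range scans cheap: a fixed 32-byte block-root prefix followed by a 4-byte column index, so all columns of one block are contiguous in the database. A sketch of the roundtrip, assuming the intToBytes/bytesToInt helpers from @lodestar/utils with their default endianness:

// Illustrative roundtrip of the prefix + 4-byte-index key layout
const blockRoot = new Uint8Array(32).fill(0xab); // stand-in block root
const key = Buffer.concat([blockRoot, intToBytes(7, 4)]); // 36 bytes total
const decoded = {prefix: key.slice(0, 32), id: bytesToInt(key.slice(32, 36))};
console.log(decoded.id); // 7
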
@@ -0,0 +1,45 @@
import {ChainForkConfig} from "@lodestar/config";
import {Db, PrefixedRepository} from "@lodestar/db";
import {NUMBER_OF_COLUMNS} from "@lodestar/params";
import {ColumnIndex, Slot, fulu, ssz} from "@lodestar/types";
import {bytesToInt, intToBytes} from "@lodestar/utils";
import {Bucket, getBucketNameByValue} from "../buckets.js";

/**
 * DataColumnSidecarsRepository
 * Used to store `finalized` DataColumnSidecars
 *
 * Indexed data by `slot` + `columnIndex`
 */
export class DataColumnSidecarArchiveRepository extends PrefixedRepository<Slot, ColumnIndex, fulu.DataColumnSidecar> {
  constructor(config: ChainForkConfig, db: Db) {
    const bucket = Bucket.allForks_dataColumnSidecarsArchive;
    super(config, db, bucket, ssz.fulu.DataColumnSidecar, getBucketNameByValue(bucket));
  }

  /**
   * Id is the sidecar's column index
   */
  getId(value: fulu.DataColumnSidecar): ColumnIndex {
    return value.index;
  }

  encodeKeyRaw(prefix: Slot, id: ColumnIndex): Uint8Array {
    return Buffer.concat([intToBytes(prefix, 4), intToBytes(id, 4)]);
  }

  decodeKeyRaw(raw: Uint8Array): {prefix: Slot; id: ColumnIndex} {
    return {
      prefix: bytesToInt(raw.slice(0, 4)) as Slot,
      id: bytesToInt(raw.slice(4, 8)) as ColumnIndex,
    };
  }

  getMaxKeyRaw(prefix: Slot): Uint8Array {
    return Buffer.concat([intToBytes(prefix, 4), intToBytes(NUMBER_OF_COLUMNS, 4)]);
  }

  getMinKeyRaw(prefix: Slot): Uint8Array {
    return Buffer.concat([intToBytes(prefix, 4), intToBytes(0, 4)]);
  }
}
@@ -1,5 +1,7 @@
export {BlobSidecarsRepository} from "./blobSidecars.js";
export {BlobSidecarsArchiveRepository} from "./blobSidecarsArchive.js";
export {DataColumnSidecarRepository} from "./dataColumnSidecar.js";
export {DataColumnSidecarArchiveRepository} from "./dataColumnSidecarArchive.js";

export {BlockRepository} from "./block.js";
export {BlockArchiveRepository} from "./blockArchive.js";

@@ -10,7 +10,7 @@ import {getRootIndexKey, storeRootIndex} from "./stateArchiveIndex.js";
export class StateArchiveRepository extends Repository<Slot, BeaconStateAllForks> {
  constructor(config: ChainForkConfig, db: Db) {
    // Pick some type but won't be used. Cast to any because no type can match `BeaconStateAllForks`
    // biome-ignore lint/suspicious/noExplicitAny: <explanation>
    // biome-ignore lint/suspicious/noExplicitAny: We need to use `any` type here
    const type = ssz.phase0.BeaconState as any;
    const bucket = Bucket.allForks_stateArchive;
    super(config, db, bucket, type, getBucketNameByValue(bucket));

@@ -3,14 +3,14 @@ import {ApiClient as BuilderApi, getClient} from "@lodestar/api/builder";
import {ChainForkConfig} from "@lodestar/config";
import {Logger} from "@lodestar/logger";
import {ForkPostBellatrix, SLOTS_PER_EPOCH} from "@lodestar/params";
import {parseExecutionPayloadAndBlobsBundle, reconstructFullBlockOrContents} from "@lodestar/state-transition";
import {parseExecutionPayloadAndBlobsBundle, reconstructSignedBlockContents} from "@lodestar/state-transition";
import {
  BLSPubkey,
  Epoch,
  ExecutionPayloadHeader,
  Root,
  SignedBeaconBlockOrContents,
  SignedBlindedBeaconBlock,
  SignedBlockContents,
  Slot,
  Wei,
  WithOptionalBytes,
@@ -42,6 +42,23 @@ export const defaultExecutionBuilderHttpOpts: ExecutionBuilderHttpOpts = {
  timeout: 12000,
};

export enum BuilderStatus {
  /**
   * Builder is enabled and operational
   */
  enabled = "enabled",
  /**
   * Builder is disabled due to failed status check
   */
  disabled = "disabled",
  /**
   * Circuit breaker condition that is triggered when the node determines the chain is unhealthy.
   * When the circuit breaker is fired, proposers **MUST** not utilize the external builder
   * network and exclusively build locally.
   */
  circuitBreaker = "circuit_breaker",
}

/**
 * Expected error if builder does not provide a bid. Most of the time, this
 * is due to `min-bid` setting on the mev-boost side but in rare cases could
@@ -71,7 +88,7 @@ export class ExecutionBuilderHttp implements IExecutionBuilder {
  readonly registrations: ValidatorRegistrationCache;
  readonly issueLocalFcUWithFeeRecipient?: string;
  // Builder needs to be explicitly enabled using updateStatus
  status = false;
  status = BuilderStatus.disabled;
  faultInspectionWindow: number;
  allowedFaults: number;

@@ -109,7 +126,7 @@ export class ExecutionBuilderHttp implements IExecutionBuilder {
   * Beacon clients select randomized values from the following ranges when initializing
   * the circuit breaker (so at boot time and once for each unique boot).
   *
   * ALLOWED_FAULTS: between 1 and SLOTS_PER_EPOCH // 2
   * ALLOWED_FAULTS: between 1 and SLOTS_PER_EPOCH // 4
   * FAULT_INSPECTION_WINDOW: between SLOTS_PER_EPOCH and 2 * SLOTS_PER_EPOCH
   *
   */
@@ -117,23 +134,25 @@ export class ExecutionBuilderHttp implements IExecutionBuilder {
      opts.faultInspectionWindow ?? SLOTS_PER_EPOCH + Math.floor(Math.random() * SLOTS_PER_EPOCH),
      SLOTS_PER_EPOCH
    );
    // allowedFaults should be < faultInspectionWindow, limiting them to faultInspectionWindow/2
    // allowedFaults should be < faultInspectionWindow, limiting them to faultInspectionWindow/4
    this.allowedFaults = Math.min(
      opts.allowedFaults ?? Math.floor(this.faultInspectionWindow / 2),
      Math.floor(this.faultInspectionWindow / 2)
      opts.allowedFaults ?? Math.floor(this.faultInspectionWindow / 4),
      Math.floor(this.faultInspectionWindow / 4)
    );
  }
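
For a sense of the tightened bounds, take mainnet's SLOTS_PER_EPOCH = 32; the draw below is one possible outcome of the randomized window:

// Worked example of the circuit-breaker bounds, assuming SLOTS_PER_EPOCH = 32
const SLOTS_PER_EPOCH = 32;
// window drawn from [32, 64), e.g. 48, floored at SLOTS_PER_EPOCH
const faultInspectionWindow = Math.max(
  SLOTS_PER_EPOCH + Math.floor(Math.random() * SLOTS_PER_EPOCH),
  SLOTS_PER_EPOCH
);
// previously capped at window / 2; now at window / 4,
// e.g. a 48-slot window tolerates at most 12 missed slots before tripping
const allowedFaults = Math.floor(faultInspectionWindow / 4);
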

  updateStatus(shouldEnable: boolean): void {
    this.status = shouldEnable;
  updateStatus(status: BuilderStatus): void {
    this.status = status;
  }

  async checkStatus(): Promise<void> {
    try {
      (await this.api.status()).assertOk();
    } catch (e) {
      // Disable if the status was enabled
      this.status = false;
      if (this.status === BuilderStatus.enabled) {
        // Disable if the status was enabled
        this.status = BuilderStatus.disabled;
      }
      throw e;
    }
  }
@@ -182,7 +201,7 @@ export class ExecutionBuilderHttp implements IExecutionBuilder {

  async submitBlindedBlock(
    signedBlindedBlock: WithOptionalBytes<SignedBlindedBeaconBlock>
  ): Promise<SignedBeaconBlockOrContents> {
  ): Promise<SignedBlockContents> {
    const res = await this.api.submitBlindedBlock(
      {signedBlindedBlock},
      {retries: 2, requestWireFormat: this.sszSupported ? WireFormat.ssz : WireFormat.json}
@@ -195,8 +214,8 @@ export class ExecutionBuilderHttp implements IExecutionBuilder {
    // invalid signature, but there is no recourse to this anyway so let's just proceed; we will
    // probably need diagnosis if this block turns out to be invalid because of some bug
    //
    const contents = blobsBundle ? {blobs: blobsBundle.blobs, kzgProofs: blobsBundle.proofs} : null;
    return reconstructFullBlockOrContents(signedBlindedBlock.data, {executionPayload, contents});
    const fork = this.config.getForkName(signedBlindedBlock.data.message.slot);
    return reconstructSignedBlockContents(fork, signedBlindedBlock.data, executionPayload, blobsBundle);
  }

  async submitBlindedBlockNoResponse(signedBlindedBlock: WithOptionalBytes<SignedBlindedBeaconBlock>): Promise<void> {

@@ -4,8 +4,8 @@ import {
  Epoch,
  ExecutionPayloadHeader,
  Root,
  SignedBeaconBlockOrContents,
  SignedBlindedBeaconBlock,
  SignedBlockContents,
  Slot,
  Wei,
  WithOptionalBytes,
@@ -14,6 +14,7 @@ import {
  electra,
} from "@lodestar/types";
import {ValidatorRegistration} from "./cache.js";
import {BuilderStatus} from "./http.js";

export interface IExecutionBuilder {
  /**
@@ -22,13 +23,13 @@ export interface IExecutionBuilder {
   * fetch
   */
  readonly issueLocalFcUWithFeeRecipient?: string;
  status: boolean;
  status: BuilderStatus;
  /** Window to inspect missed slots for enabling/disabling builder circuit breaker */
  faultInspectionWindow: number;
  /** Number of missed slots allowed in the faultInspectionWindow for builder circuit */
  allowedFaults: number;

  updateStatus(shouldEnable: boolean): void;
  updateStatus(status: BuilderStatus): void;
  checkStatus(): Promise<void>;
  registerValidator(epoch: Epoch, registrations: bellatrix.SignedValidatorRegistrationV1[]): Promise<void>;
  getValidatorRegistration(pubkey: BLSPubkey): ValidatorRegistration | undefined;
@@ -43,8 +44,6 @@ export interface IExecutionBuilder {
    blobKzgCommitments?: deneb.BlobKzgCommitments;
    executionRequests?: electra.ExecutionRequests;
  }>;
  submitBlindedBlock(
    signedBlindedBlock: WithOptionalBytes<SignedBlindedBeaconBlock>
  ): Promise<SignedBeaconBlockOrContents>;
  submitBlindedBlock(signedBlindedBlock: WithOptionalBytes<SignedBlindedBeaconBlock>): Promise<SignedBlockContents>;
  submitBlindedBlockNoResponse(signedBlindedBlock: WithOptionalBytes<SignedBlindedBeaconBlock>): Promise<void>;
}

@@ -1,7 +1,8 @@
import {Logger} from "@lodestar/logger";
import {ForkName, ForkSeq, SLOTS_PER_EPOCH} from "@lodestar/params";
import {ExecutionPayload, ExecutionRequests, Root, RootHex, Wei} from "@lodestar/types";
import {ForkName, ForkPostFulu, ForkPreFulu, ForkSeq, SLOTS_PER_EPOCH, isForkPostFulu} from "@lodestar/params";
import {BlobsBundle, ExecutionPayload, ExecutionRequests, Root, RootHex, Wei} from "@lodestar/types";
import {BlobAndProof} from "@lodestar/types/deneb";
import {BlobAndProofV2} from "@lodestar/types/fulu";
import {strip0xPrefix} from "@lodestar/utils";
import {
  ErrorJsonRpcResponse,
@@ -9,7 +10,6 @@ import {
  IJsonRpcHttpClient,
  JsonRpcHttpClientEvent,
  ReqOpts,
  parseJsonRpcErrorCode,
} from "../../eth1/provider/jsonRpcHttpClient.js";
import {bytesToData, numToQuantity} from "../../eth1/provider/utils.js";
import {Metrics} from "../../metrics/index.js";
@@ -17,7 +17,6 @@ import {EPOCHS_PER_BATCH} from "../../sync/constants.js";
import {getLodestarClientVersion} from "../../util/metadata.js";
import {JobItemQueue} from "../../util/queue/index.js";
import {
  BlobsBundle,
  ClientCode,
  ClientVersion,
  ExecutePayloadResponse,
@@ -35,6 +34,7 @@ import {
  ExecutionPayloadBody,
  assertReqSizeLimit,
  deserializeBlobAndProofs,
  deserializeBlobAndProofsV2,
  deserializeExecutionPayloadBody,
  parseExecutionPayload,
  serializeBeaconBlockRoot,
@@ -99,10 +99,22 @@ export const defaultExecutionEngineHttpOpts: ExecutionEngineHttpOpts = {
 */
const QUEUE_MAX_LENGTH = EPOCHS_PER_BATCH * SLOTS_PER_EPOCH * 2;

/**
 * Maximum number of version hashes that can be sent in a getBlobs request
 * Clients must support at least 128 versionedHashes, so we avoid sending more
 * https://github.com/ethereum/execution-apis/blob/main/src/engine/cancun.md#specification-3
 */
const MAX_VERSIONED_HASHES = 128;

// Define static options once to prevent extra allocations
const notifyNewPayloadOpts: ReqOpts = {routeId: "notifyNewPayload"};
const forkchoiceUpdatedV1Opts: ReqOpts = {routeId: "forkchoiceUpdated"};
const getPayloadOpts: ReqOpts = {routeId: "getPayload"};
const getPayloadBodiesByHashOpts: ReqOpts = {routeId: "getPayloadBodiesByHash"};
const getPayloadBodiesByRangeOpts: ReqOpts = {routeId: "getPayloadBodiesByRange"};
const getBlobsV1Opts: ReqOpts = {routeId: "getBlobsV1"};
const getBlobsV2Opts: ReqOpts = {routeId: "getBlobsV2"};
const getClientVersionOpts: ReqOpts = {routeId: "getClientVersion"};

/**
 * based on Ethereum JSON-RPC API and inherits the following properties of this standard:
@@ -115,7 +127,6 @@ const getPayloadOpts: ReqOpts = {routeId: "getPayload"};
 */
export class ExecutionEngineHttp implements IExecutionEngine {
  private logger: Logger;
  private lastGetBlobsErrorTime = 0;

  // The default state is ONLINE, it will be updated to SYNCING once we receive the first payload
  // This assumption is better than the OFFLINE state, since we can't be sure if the EL is offline and being offline may trigger some notifications
@@ -415,14 +426,26 @@ export class ExecutionEngineHttp implements IExecutionEngine {
    executionRequests?: ExecutionRequests;
    shouldOverrideBuilder?: boolean;
  }> {
    const method =
      ForkSeq[fork] >= ForkSeq.electra
        ? "engine_getPayloadV4"
        : ForkSeq[fork] >= ForkSeq.deneb
          ? "engine_getPayloadV3"
          : ForkSeq[fork] >= ForkSeq.capella
            ? "engine_getPayloadV2"
            : "engine_getPayloadV1";
    let method: keyof EngineApiRpcReturnTypes;
    switch (fork) {
      case ForkName.phase0:
      case ForkName.altair:
      case ForkName.bellatrix:
        method = "engine_getPayloadV1";
        break;
      case ForkName.capella:
        method = "engine_getPayloadV2";
        break;
      case ForkName.deneb:
        method = "engine_getPayloadV3";
        break;
      case ForkName.electra:
        method = "engine_getPayloadV4";
        break;
      default:
        method = "engine_getPayloadV5";
        break;
    }
    const payloadResponse = await this.rpc.fetchWithRetries<
      EngineApiRpcReturnTypes[typeof method],
      EngineApiRpcParamTypes[typeof method]
@@ -446,7 +469,7 @@ export class ExecutionEngineHttp implements IExecutionEngine {
    const response = await this.rpc.fetchWithRetries<
      EngineApiRpcReturnTypes[typeof method],
      EngineApiRpcParamTypes[typeof method]
    >({method, params: [blockHashes]});
    >({method, params: [blockHashes]}, getPayloadBodiesByHashOpts);
    return response.map(deserializeExecutionPayloadBody);
  }

@@ -462,54 +485,40 @@ export class ExecutionEngineHttp implements IExecutionEngine {
    const response = await this.rpc.fetchWithRetries<
      EngineApiRpcReturnTypes[typeof method],
      EngineApiRpcParamTypes[typeof method]
    >({method, params: [start, count]});
    >({method, params: [start, count]}, getPayloadBodiesByRangeOpts);
    return response.map(deserializeExecutionPayloadBody);
  }

  async getBlobs(_fork: ForkName, versionedHashes: VersionedHashes): Promise<(BlobAndProof | null)[]> {
    // retry only after a day, maybe
    const GETBLOBS_RETRY_TIMEOUT = 256 * 32 * 12;
    const timeNow = Date.now() / 1000;
    const timeSinceLastFail = timeNow - this.lastGetBlobsErrorTime;
    if (timeSinceLastFail < GETBLOBS_RETRY_TIMEOUT) {
      // do not try getblobs since it might not be available
      this.logger.debug(
        `disabled engine_getBlobsV1 api call since last failed < GETBLOBS_RETRY_TIMEOUT=${GETBLOBS_RETRY_TIMEOUT}`,
        timeSinceLastFail
      );
      throw Error(
        `engine_getBlobsV1 call recently failed timeSinceLastFail=${timeSinceLastFail} < GETBLOBS_RETRY_TIMEOUT=${GETBLOBS_RETRY_TIMEOUT}`
      );
    }

    const method = "engine_getBlobsV1";
    assertReqSizeLimit(versionedHashes.length, 128);
  async getBlobs(fork: ForkPostFulu, versionedHashes: VersionedHashes): Promise<BlobAndProofV2[] | null>;
  async getBlobs(fork: ForkPreFulu, versionedHashes: VersionedHashes): Promise<(BlobAndProof | null)[]>;
  async getBlobs(
    fork: ForkName,
    versionedHashes: VersionedHashes
  ): Promise<BlobAndProofV2[] | (BlobAndProof | null)[] | null> {
    assertReqSizeLimit(versionedHashes.length, MAX_VERSIONED_HASHES);
    const versionedHashesHex = versionedHashes.map(bytesToData);
    let response = await this.rpc
      .fetchWithRetries<EngineApiRpcReturnTypes[typeof method], EngineApiRpcParamTypes[typeof method]>({
        method,
        params: [versionedHashesHex],
      })
      .catch((e) => {
        if (e instanceof ErrorJsonRpcResponse && parseJsonRpcErrorCode(e.response.error.code) === "Method not found") {
          this.lastGetBlobsErrorTime = timeNow;
          this.logger.debug("disabling engine_getBlobsV1 api call since engine responded with method not available", {
            retryTimeout: GETBLOBS_RETRY_TIMEOUT,
          });
        }
        throw e;
      });

    // handle nethermind buggy response
    // see: https://discord.com/channels/595666850260713488/1293605631785304088/1298956894274060301
    if (
      (response as unknown as {blobsAndProofs: EngineApiRpcReturnTypes[typeof method]}).blobsAndProofs !== undefined
    ) {
      response = (response as unknown as {blobsAndProofs: EngineApiRpcReturnTypes[typeof method]}).blobsAndProofs;
    if (isForkPostFulu(fork)) {
      return await this.getBlobsV2(versionedHashesHex);
    }
    return await this.getBlobsV1(versionedHashesHex);
  }

    if (response.length !== versionedHashes.length) {
      const error = `Invalid engine_getBlobsV1 response length=${response.length} versionedHashes=${versionedHashes.length}`;
  private async getBlobsV1(versionedHashesHex: string[]) {
    const response = await this.rpc.fetchWithRetries<
      EngineApiRpcReturnTypes["engine_getBlobsV1"],
      EngineApiRpcParamTypes["engine_getBlobsV1"]
    >(
      {
        method: "engine_getBlobsV1",
        params: [versionedHashesHex],
      },
      getBlobsV1Opts
    );

    const invalidLength = response.length !== versionedHashesHex.length;

    if (invalidLength) {
      const error = `Invalid engine_getBlobsV1 response length=${response.length} versionedHashes=${versionedHashesHex.length}`;
      this.logger.error(error);
      throw Error(error);
    }
@@ -517,13 +526,37 @@ export class ExecutionEngineHttp implements IExecutionEngine {
    return response.map(deserializeBlobAndProofs);
  }

  private async getBlobsV2(versionedHashesHex: string[]) {
    const response = await this.rpc.fetchWithRetries<
      EngineApiRpcReturnTypes["engine_getBlobsV2"],
      EngineApiRpcParamTypes["engine_getBlobsV2"]
    >(
      {
        method: "engine_getBlobsV2",
        params: [versionedHashesHex],
      },
      getBlobsV2Opts
    );

    // engine_getBlobsV2 does not return partial responses. It returns null if any blob is not found
    const invalidLength = !!response && response.length !== versionedHashesHex.length;

    if (invalidLength) {
      const error = `Invalid engine_getBlobsV2 response length=${response?.length ?? "null"} versionedHashes=${versionedHashesHex.length}`;
      this.logger.error(error);
      throw Error(error);
    }

    return !response ? null : response.map(deserializeBlobAndProofsV2);
  }

  private async getClientVersion(clientVersion: ClientVersion): Promise<ClientVersion[]> {
    const method = "engine_getClientVersionV1";

    const response = await this.rpc.fetchWithRetries<
      EngineApiRpcReturnTypes[typeof method],
      EngineApiRpcParamTypes[typeof method]
    >({method, params: [{...clientVersion, commit: `0x${clientVersion.commit}`}]});
    >({method, params: [{...clientVersion, commit: `0x${clientVersion.commit}`}]}, getClientVersionOpts);

    const clientVersions = response.map((cv) => {
      const code = cv.code in ClientCode ? ClientCode[cv.code as keyof typeof ClientCode] : ClientCode.XX;

@@ -1,6 +1,14 @@
import {CONSOLIDATION_REQUEST_TYPE, DEPOSIT_REQUEST_TYPE, ForkName, WITHDRAWAL_REQUEST_TYPE} from "@lodestar/params";
import {ExecutionPayload, ExecutionRequests, Root, RootHex, Wei, capella} from "@lodestar/types";
import {Blob, BlobAndProof, KZGCommitment, KZGProof} from "@lodestar/types/deneb";
import {
  CONSOLIDATION_REQUEST_TYPE,
  DEPOSIT_REQUEST_TYPE,
  ForkName,
  ForkPostFulu,
  ForkPreFulu,
  WITHDRAWAL_REQUEST_TYPE,
} from "@lodestar/params";
import {BlobsBundle, ExecutionPayload, ExecutionRequests, Root, RootHex, Wei, capella} from "@lodestar/types";
import {BlobAndProof} from "@lodestar/types/deneb";
import {BlobAndProofV2} from "@lodestar/types/fulu";

import {DATA} from "../../eth1/provider/utils.js";
import {PayloadId, PayloadIdCache, WithdrawalV1} from "./payloadIdCache.js";
@@ -99,16 +107,6 @@ export type PayloadAttributes = {
  parentBeaconBlockRoot?: Uint8Array;
};

export type BlobsBundle = {
  /**
   * Execution payload `blockHash` for the caller to sanity-check the consistency with the `engine_getPayload` call
   * https://github.com/protolambda/execution-apis/blob/bf44a8d08ab34b861ef97fa9ef5c5e7806194547/src/engine/blob-extension.md?plain=1#L49
   */
  commitments: KZGCommitment[];
  blobs: Blob[];
  proofs: KZGProof[];
};

export type ClientVersion = {
  code: ClientCode;
  name: string;
@@ -189,5 +187,6 @@ export interface IExecutionEngine {

  getPayloadBodiesByRange(fork: ForkName, start: number, count: number): Promise<(ExecutionPayloadBody | null)[]>;

  getBlobs(fork: ForkName, versionedHashes: VersionedHashes): Promise<(BlobAndProof | null)[]>;
  getBlobs(fork: ForkPostFulu, versionedHashes: VersionedHashes): Promise<BlobAndProofV2[] | null>;
  getBlobs(fork: ForkPreFulu, versionedHashes: VersionedHashes): Promise<(BlobAndProof | null)[]>;
}

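A sketch of how callers see the new getBlobs overloads: the fork argument picks the overload and narrows the return type at the call site. The function and its inputs below are illustrative assumptions, not code from the diff:

async function fetchBlobs(engine: IExecutionEngine, hashes: VersionedHashes) {
  // Pre-Fulu: per-hash results, null for any blob the EL does not have
  const v1 = await engine.getBlobs(ForkName.deneb, hashes); // (BlobAndProof | null)[]
  // Post-Fulu: all-or-nothing, null when any requested blob is missing
  const v2 = await engine.getBlobs(ForkName.fulu, hashes); // BlobAndProofV2[] | null
  return {v1, v2};
}
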
@@ -5,12 +5,14 @@ import {
|
||||
FIELD_ELEMENTS_PER_BLOB,
|
||||
ForkName,
|
||||
ForkPostBellatrix,
|
||||
ForkPostCapella,
|
||||
ForkSeq,
|
||||
} from "@lodestar/params";
|
||||
import {RootHex, bellatrix, deneb, ssz} from "@lodestar/types";
|
||||
import {fromHex, toHex} from "@lodestar/utils";
|
||||
import {ExecutionPayload, RootHex, bellatrix, deneb, ssz} from "@lodestar/types";
|
||||
import {fromHex, toHex, toRootHex} from "@lodestar/utils";
|
||||
import {ZERO_HASH_HEX} from "../../constants/index.js";
|
||||
import {quantityToNum} from "../../eth1/provider/utils.js";
|
||||
import {INTEROP_BLOCK_HASH} from "../../node/utils/interop/state.js";
|
||||
import {kzgCommitmentToVersionedHash} from "../../util/blobs.js";
|
||||
import {kzg} from "../../util/kzg.js";
|
||||
import {ClientCode, ExecutionPayloadStatus, PayloadIdCache} from "./interface.js";
|
||||
@@ -20,10 +22,12 @@ import {
|
||||
EngineApiRpcReturnTypes,
|
||||
ExecutionPayloadBodyRpc,
|
||||
ExecutionPayloadRpc,
|
||||
ExecutionRequestsRpc,
|
||||
PayloadStatus,
|
||||
deserializePayloadAttributes,
|
||||
serializeBlobsBundle,
|
||||
serializeExecutionPayload,
|
||||
serializeExecutionRequests,
|
||||
} from "./types.js";
|
||||
import {JsonRpcBackend} from "./utils.js";
|
||||
|
||||
@@ -32,10 +36,12 @@ const PRUNE_PAYLOAD_ID_AFTER_MS = 5000;
|
||||
|
||||
export type ExecutionEngineMockOpts = {
|
||||
genesisBlockHash: string;
|
||||
eth1BlockHash?: string;
|
||||
onlyPredefinedResponses?: boolean;
|
||||
capellaForkTimestamp?: number;
|
||||
denebForkTimestamp?: number;
|
||||
electraForkTimestamp?: number;
|
||||
fuluForkTimestamp?: number;
|
||||
};
|
||||
|
||||
type ExecutionBlock = {
|
||||
@@ -50,6 +56,7 @@ const TX_TYPE_EIP1559 = 2;
|
||||
type PreparedPayload = {
|
||||
executionPayload: ExecutionPayloadRpc;
|
||||
blobsBundle: BlobsBundleRpc;
|
||||
executionRequests: ExecutionRequestsRpc;
|
||||
};
|
||||
|
||||
/**
|
||||
@@ -84,6 +91,15 @@ export class ExecutionEngineMockBackend implements JsonRpcBackend {
|
||||
blockNumber: 0,
|
||||
});
|
||||
|
||||
const eth1BlockHash = opts.eth1BlockHash ?? toRootHex(INTEROP_BLOCK_HASH);
|
||||
|
||||
this.validBlocks.set(eth1BlockHash, {
|
||||
parentHash: ZERO_HASH_HEX,
|
||||
blockHash: eth1BlockHash,
|
||||
timestamp: 0,
|
||||
blockNumber: 1,
|
||||
});
|
||||
|
||||
this.handlers = {
|
||||
engine_newPayloadV1: this.notifyNewPayload.bind(this),
|
||||
engine_newPayloadV2: this.notifyNewPayload.bind(this),
|
||||
@@ -92,14 +108,16 @@ export class ExecutionEngineMockBackend implements JsonRpcBackend {
|
||||
engine_forkchoiceUpdatedV1: this.notifyForkchoiceUpdate.bind(this),
|
||||
engine_forkchoiceUpdatedV2: this.notifyForkchoiceUpdate.bind(this),
|
||||
engine_forkchoiceUpdatedV3: this.notifyForkchoiceUpdate.bind(this),
|
||||
engine_getPayloadV1: this.getPayload.bind(this),
|
||||
engine_getPayloadV2: this.getPayload.bind(this),
|
||||
engine_getPayloadV3: this.getPayload.bind(this),
|
||||
engine_getPayloadV4: this.getPayload.bind(this),
|
||||
engine_getPayloadV1: this.getPayloadV1.bind(this),
|
||||
engine_getPayloadV2: this.getPayloadV5.bind(this),
|
||||
engine_getPayloadV3: this.getPayloadV5.bind(this),
|
||||
engine_getPayloadV4: this.getPayloadV5.bind(this),
|
||||
engine_getPayloadV5: this.getPayloadV5.bind(this),
|
||||
engine_getPayloadBodiesByHashV1: this.getPayloadBodiesByHash.bind(this),
|
||||
engine_getPayloadBodiesByRangeV1: this.getPayloadBodiesByRange.bind(this),
|
||||
engine_getClientVersionV1: this.getClientVersionV1.bind(this),
|
||||
engine_getBlobsV1: this.getBlobs.bind(this),
|
||||
engine_getBlobsV2: this.getBlobsV2.bind(this),
|
||||
};
|
||||
}
|
||||
|
||||
@@ -252,7 +270,6 @@ export class ExecutionEngineMockBackend implements JsonRpcBackend {
|
||||
// IF references an unknown payload or a payload that can't be validated because requisite data is missing
|
||||
// RETURN {payloadStatus: {status: SYNCING, latestValidHash: null, validationError: null}, payloadId: null}
|
||||
//
|
||||
// > TODO: Implement
|
||||
return {
|
||||
payloadStatus: {status: ExecutionPayloadStatus.SYNCING, latestValidHash: null, validationError: null},
|
||||
payloadId: null,
|
||||
@@ -317,9 +334,21 @@ export class ExecutionEngineMockBackend implements JsonRpcBackend {
|
||||
const blobs: deneb.Blob[] = [];
|
||||
const proofs: deneb.KZGProof[] = [];
|
||||
|
||||
// if post deneb, add between 0 and 2 blob transactions
|
||||
if (ForkSeq[fork] >= ForkSeq.deneb) {
|
||||
const denebTxCount = Math.round(2 * Math.random());
|
||||
if (ForkSeq[fork] >= ForkSeq.fulu) {
|
||||
// if post fulu, add between 0 and 3 data column transactions based on slot with BlobsBundleV2
|
||||
const fuluTxCount = executionPayload.blockNumber % 4;
|
||||
for (let i = 0; i < fuluTxCount; i++) {
|
||||
const blob = generateRandomBlob();
|
||||
const commitment = kzg.blobToKzgCommitment(blob);
|
||||
const {proofs: cellProofs} = kzg.computeCellsAndKzgProofs(blob);
|
||||
executionPayload.transactions.push(transactionForKzgCommitment(commitment));
|
||||
commitments.push(commitment);
|
||||
blobs.push(blob);
|
||||
proofs.push(...cellProofs);
|
||||
}
|
||||
} else if (ForkSeq[fork] >= ForkSeq.deneb && ForkSeq[fork] < ForkSeq.fulu) {
|
||||
// if post deneb, add between 0 and 2 blob transactions
|
||||
const denebTxCount = executionPayload.blockNumber % 3;
|
||||
for (let i = 0; i < denebTxCount; i++) {
|
||||
const blob = generateRandomBlob();
|
||||
const commitment = kzg.blobToKzgCommitment(blob);
|
||||
@@ -331,6 +360,10 @@ export class ExecutionEngineMockBackend implements JsonRpcBackend {
|
||||
}
|
||||
}
|
||||
|
||||
if (ForkSeq[fork] >= ForkSeq.capella) {
(executionPayload as ExecutionPayload<ForkPostCapella>).withdrawals = ssz.capella.Withdrawals.defaultValue();
}

this.preparingPayloads.set(payloadId, {
executionPayload: serializeExecutionPayload(fork, executionPayload),
blobsBundle: serializeBlobsBundle({
@@ -338,6 +371,11 @@ export class ExecutionEngineMockBackend implements JsonRpcBackend {
blobs,
proofs,
}),
executionRequests: serializeExecutionRequests({
deposits: ssz.electra.DepositRequests.defaultValue(),
withdrawals: ssz.electra.WithdrawalRequests.defaultValue(),
consolidations: ssz.electra.ConsolidationRequests.defaultValue(),
}),
});

// IF the payload is deemed VALID and the build process has begun
@@ -364,9 +402,15 @@ export class ExecutionEngineMockBackend implements JsonRpcBackend {
* 2. The call MUST be responded with 5: Unavailable payload error if the building process identified by the payloadId doesn't exist.
* 3. Client software MAY stop the corresponding building process after serving this call.
*/
private getPayload(
private getPayloadV1(
payloadId: EngineApiRpcParamTypes["engine_getPayloadV1"][0]
): EngineApiRpcReturnTypes["engine_getPayloadV1"] {
return this.getPayloadV5(payloadId).executionPayload;
}

private getPayloadV5(
payloadId: EngineApiRpcParamTypes["engine_getPayloadV5"][0]
): EngineApiRpcReturnTypes["engine_getPayloadV5"] {
// 1. Given the payloadId client software MUST return the most recent version of the payload that is available in
// the corresponding build process at the time of receiving the call.
const payloadIdNbr = Number(payloadId);
@@ -389,7 +433,12 @@ export class ExecutionEngineMockBackend implements JsonRpcBackend {
}
this.payloadsForDeletion.set(payloadIdNbr, now);

return payload.executionPayload;
return {
executionPayload: payload.executionPayload,
blockValue: String(1e9),
blobsBundle: payload.blobsBundle,
executionRequests: payload.executionRequests,
};
}

private getClientVersionV1(
@@ -404,10 +453,17 @@ export class ExecutionEngineMockBackend implements JsonRpcBackend {
return versionedHashes.map((_vh) => null);
}

private getBlobsV2(
_versionedHashes: EngineApiRpcParamTypes["engine_getBlobsV2"][0]
): EngineApiRpcReturnTypes["engine_getBlobsV2"] {
return null;
}

private timestampToFork(timestamp: number): ForkPostBellatrix {
if (timestamp > (this.opts.electraForkTimestamp ?? Infinity)) return ForkName.electra;
if (timestamp > (this.opts.denebForkTimestamp ?? Infinity)) return ForkName.deneb;
if (timestamp > (this.opts.capellaForkTimestamp ?? Infinity)) return ForkName.capella;
if (timestamp >= (this.opts.fuluForkTimestamp ?? Infinity)) return ForkName.fulu;
if (timestamp >= (this.opts.electraForkTimestamp ?? Infinity)) return ForkName.electra;
if (timestamp >= (this.opts.denebForkTimestamp ?? Infinity)) return ForkName.deneb;
if (timestamp >= (this.opts.capellaForkTimestamp ?? Infinity)) return ForkName.capella;
return ForkName.bellatrix;
}
}

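The fork lookup above switched from exclusive `>` to inclusive `>=` comparisons and checks newer forks first, so a payload stamped exactly at a fork's activation time now resolves to that fork. A worked example with hypothetical timestamps:

    // Illustration only, with made-up fork times:
    //   capellaForkTimestamp = 100, denebForkTimestamp = 200, fuluForkTimestamp undefined
    // timestampToFork(200) -> deneb     (the boundary is now inclusive)
    // timestampToFork(199) -> capella
    // timestampToFork(99)  -> bellatrix (no threshold reached)
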
@@ -8,8 +8,20 @@ import {
ForkSeq,
WITHDRAWAL_REQUEST_TYPE,
} from "@lodestar/params";
import {ExecutionPayload, ExecutionRequests, Root, Wei, bellatrix, capella, deneb, electra, ssz} from "@lodestar/types";
import {
BlobsBundle,
ExecutionPayload,
ExecutionRequests,
Root,
Wei,
bellatrix,
capella,
deneb,
electra,
ssz,
} from "@lodestar/types";
import {BlobAndProof} from "@lodestar/types/deneb";
import {BlobAndProofV2} from "@lodestar/types/fulu";

import {
DATA,
@@ -21,7 +33,6 @@ import {
quantityToNum,
} from "../../eth1/provider/utils.js";
import {
BlobsBundle,
ExecutionPayloadStatus,
ExecutionRequestType,
PayloadAttributes,
@@ -62,6 +73,7 @@ export type EngineApiRpcParamTypes = {
engine_getPayloadV2: [QUANTITY];
engine_getPayloadV3: [QUANTITY];
engine_getPayloadV4: [QUANTITY];
engine_getPayloadV5: [QUANTITY];

/**
* 1. Array of DATA - Array of block_hash field values of the ExecutionPayload structure
@@ -80,6 +92,7 @@ export type EngineApiRpcParamTypes = {
engine_getClientVersionV1: [ClientVersionRpc];

engine_getBlobsV1: [DATA[]];
engine_getBlobsV2: [DATA[]];
};

export type PayloadStatus = {
@@ -116,6 +129,7 @@ export type EngineApiRpcReturnTypes = {
engine_getPayloadV2: ExecutionPayloadResponse;
engine_getPayloadV3: ExecutionPayloadResponse;
engine_getPayloadV4: ExecutionPayloadResponse;
engine_getPayloadV5: ExecutionPayloadResponse;

engine_getPayloadBodiesByHashV1: (ExecutionPayloadBodyRpc | null)[];

@@ -124,6 +138,7 @@ export type EngineApiRpcReturnTypes = {
engine_getClientVersionV1: ClientVersionRpc[];

engine_getBlobsV1: (BlobAndProofRpc | null)[];
engine_getBlobsV2: BlobAndProofV2Rpc[] | null;
};

type ExecutionPayloadRpcWithValue = {
@@ -134,7 +149,7 @@ type ExecutionPayloadRpcWithValue = {
executionRequests?: ExecutionRequestsRpc;
shouldOverrideBuilder?: boolean;
};
type ExecutionPayloadResponse = ExecutionPayloadRpc | ExecutionPayloadRpcWithValue;
type ExecutionPayloadResponse = ExecutionPayloadRpcWithValue;

export type ExecutionPayloadBodyRpc = {
transactions: DATA[];
@@ -191,6 +206,11 @@ export type BlobAndProofRpc = {
proof: DATA;
};

export type BlobAndProofV2Rpc = {
blob: DATA;
proofs: DATA[];
};

export type VersionedHashesRpc = DATA[];

export type PayloadAttributesRpc = {
@@ -262,13 +282,15 @@ export function serializeVersionedHashes(vHashes: VersionedHashes): VersionedHas
return vHashes.map(bytesToData);
}

export function hasPayloadValue(response: ExecutionPayloadResponse): response is ExecutionPayloadRpcWithValue {
export function hasPayloadValue(
response: ExecutionPayloadResponse | ExecutionPayloadRpc
): response is ExecutionPayloadRpcWithValue {
return (response as ExecutionPayloadRpcWithValue).blockValue !== undefined;
}

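With every getPayload response now typed as ExecutionPayloadRpcWithValue, the widened guard mainly protects call sites that may still see a bare ExecutionPayloadRpc. A minimal sketch of how the narrowing is used (the helper name is illustrative):

    // Sketch: read blockValue only after the type guard has narrowed the union.
    function payloadValueOrZero(response: ExecutionPayloadResponse | ExecutionPayloadRpc): string {
      return hasPayloadValue(response) ? response.blockValue : "0x0";
    }
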
export function parseExecutionPayload(
fork: ForkName,
response: ExecutionPayloadResponse
response: ExecutionPayloadResponse | ExecutionPayloadRpc
): {
executionPayload: ExecutionPayload;
executionPayloadValue: Wei;
@@ -390,7 +412,7 @@ export function serializeBlobsBundle(data: BlobsBundle): BlobsBundleRpc {
return {
commitments: data.commitments.map((kzg) => bytesToData(kzg)),
blobs: data.blobs.map((blob) => bytesToData(blob)),
proofs: data.blobs.map((proof) => bytesToData(proof)),
proofs: data.proofs.map((proof) => bytesToData(proof)),
};
}

@@ -563,6 +585,13 @@ export function deserializeBlobAndProofs(data: BlobAndProofRpc | null): BlobAndP
: null;
}

export function deserializeBlobAndProofsV2(data: BlobAndProofV2Rpc): BlobAndProofV2 {
return {
blob: dataToBytes(data.blob, BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB),
proofs: data.proofs.map((proof) => dataToBytes(proof, 48)),
};
}

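serializeBlobsBundle previously built `proofs` by mapping over `data.blobs`; the fix above maps over `data.proofs`, which matters post-Fulu where each blob carries many cell proofs. A sketch of the invariant the fix restores, assuming 48-byte KZG proofs as in the deserializer:

    // Sketch: proof count must survive the RPC roundtrip independently of blob count.
    const rpc = serializeBlobsBundle(bundle); // bundle: BlobsBundle
    // rpc.proofs.length === bundle.proofs.length, not bundle.blobs.length
    const proofsBack = rpc.proofs.map((proof) => dataToBytes(proof, 48));
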
export function assertReqSizeLimit(blockHashesReqCount: number, count: number): void {
if (blockHashesReqCount > count) {
throw new Error(`Requested blocks must not be > ${count}`);

@@ -12,7 +12,7 @@ import {isQueueErrorAborted} from "../../util/queue/errors.js";
import {ExecutionEngineState, ExecutionPayloadStatus} from "./interface.js";

export type JsonRpcBackend = {
// biome-ignore lint/suspicious/noExplicitAny: <explanation>
// biome-ignore lint/suspicious/noExplicitAny: We need to use `any` type here
readonly handlers: Record<string, (...args: any[]) => any>;
};

@@ -27,7 +27,7 @@ export class ExecutionEngineMockJsonRpcClient implements IJsonRpcHttpClient {
if (handler === undefined) {
throw Error(`Unknown method ${payload.method}`);
}
// biome-ignore lint/suspicious/noExplicitAny: <explanation>
// biome-ignore lint/suspicious/noExplicitAny: We need to use `any` type here
return handler(...(payload.params as any[])) as R;
}, payload);
}

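The mock client resolves a method name against the backend's handlers record and spreads the JSON-RPC params into it. A condensed sketch of that dispatch, assuming the {method, params} payload shape used above:

    // Sketch of the lookup-and-spread dispatch (queueing and error mapping elided).
    function dispatch<R>(backend: JsonRpcBackend, method: string, params: unknown[]): R {
      const handler = backend.handlers[method];
      if (handler === undefined) throw Error(`Unknown method ${method}`);
      return handler(...params) as R;
    }
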
@@ -1,3 +1,4 @@
import {ForkChoiceMetrics, getForkChoiceMetrics} from "@lodestar/fork-choice";
import {BeaconStateTransitionMetrics, getMetrics} from "@lodestar/state-transition";
import {Metric, Registry} from "prom-client";
import {BeaconMetrics, createBeaconMetrics} from "./metrics/beacon.js";
@@ -7,12 +8,14 @@ import {MetricsOptions} from "./options.js";
import {RegistryMetricCreator} from "./utils/registryMetricCreator.js";

export type Metrics = BeaconMetrics &
ForkChoiceMetrics &
BeaconStateTransitionMetrics &
LodestarMetrics & {register: RegistryMetricCreator; close: () => void};

export function createMetrics(opts: MetricsOptions, genesisTime: number, externalRegistries: Registry[] = []): Metrics {
const register = new RegistryMetricCreator();
const beacon = createBeaconMetrics(register);
const forkChoice = getForkChoiceMetrics(register);
const lodestar = createLodestarMetrics(register, opts.metadata, genesisTime);
const stateTransition = getMetrics(register);

@@ -31,6 +34,7 @@ export function createMetrics(opts: MetricsOptions, genesisTime: number, externa

return {
...beacon,
...forkChoice,
...lodestar,
...stateTransition,
register,

@@ -1,5 +1,3 @@
import {UpdateHeadOpt} from "@lodestar/fork-choice";
import {NotReorgedReason} from "@lodestar/fork-choice";
import {ProducedBlockSource} from "@lodestar/types";
import {BlockSelectionResult} from "../../api/impl/validator/index.js";
import {BlockProductionStep, PayloadPreparationType} from "../../chain/produceBlock/index.js";
@@ -73,68 +71,6 @@ export function createBeaconMetrics(register: RegistryMetricCreator) {

// Non-spec'ed

forkChoice: {
findHead: register.histogram<{caller: string}>({
name: "beacon_fork_choice_find_head_seconds",
help: "Time taken to find head in seconds",
buckets: [0.1, 1, 10],
labelNames: ["caller"],
}),
requests: register.gauge({
name: "beacon_fork_choice_requests_total",
help: "Count of occasions where fork choice has tried to find a head",
}),
errors: register.gauge<{entrypoint: UpdateHeadOpt}>({
name: "beacon_fork_choice_errors_total",
help: "Count of occasions where fork choice has returned an error when trying to find a head",
labelNames: ["entrypoint"],
}),
changedHead: register.gauge({
name: "beacon_fork_choice_changed_head_total",
help: "Count of occasions fork choice has found a new head",
}),
reorg: register.gauge({
name: "beacon_fork_choice_reorg_total",
help: "Count of occasions fork choice has switched to a different chain",
}),
reorgDistance: register.histogram({
name: "beacon_fork_choice_reorg_distance",
help: "Histogram of re-org distance",
// We need high resolution in the low range, since re-orgs are a rare but critical event.
// Add buckets up to 100 to capture high depth re-orgs. Above 100 things are going really bad.
buckets: [1, 2, 3, 5, 7, 10, 20, 30, 50, 100],
}),
votes: register.gauge({
name: "beacon_fork_choice_votes_count",
help: "Current count of votes in fork choice data structures",
}),
queuedAttestations: register.gauge({
name: "beacon_fork_choice_queued_attestations_count",
help: "Count of queued_attestations in fork choice per slot",
}),
validatedAttestationDatas: register.gauge({
name: "beacon_fork_choice_validated_attestation_datas_count",
help: "Current count of validatedAttestationDatas in fork choice data structures",
}),
balancesLength: register.gauge({
name: "beacon_fork_choice_balances_length",
help: "Current length of balances in fork choice data structures",
}),
nodes: register.gauge({
name: "beacon_fork_choice_nodes_count",
help: "Current count of nodes in fork choice data structures",
}),
indices: register.gauge({
name: "beacon_fork_choice_indices_count",
help: "Current count of indices in fork choice data structures",
}),
notReorgedReason: register.counter<{reason: NotReorgedReason}>({
name: "beacon_fork_choice_not_reorged_reason_total",
help: "Reason why the current head is not re-orged out",
labelNames: ["reason"],
}),
},

parentBlockDistance: register.histogram({
name: "beacon_imported_block_parent_distance",
help: "Histogram of distance to parent block of valid imported blocks",
@@ -197,21 +133,10 @@ export function createBeaconMetrics(register: RegistryMetricCreator) {
buckets: [0.001, 0.005, 0.01, 0.03, 0.05, 0.07, 0.1, 0.3, 0.5, 1],
labelNames: ["source"],
}),

blockProductionCaches: {
producedBlockRoot: register.gauge({
name: "beacon_blockroot_produced_cache_total",
help: "Count of cached produced block roots",
}),
producedBlindedBlockRoot: register.gauge({
name: "beacon_blinded_blockroot_produced_cache_total",
help: "Count of cached produced blinded block roots",
}),
producedContentsCache: register.gauge({
name: "beacon_contents_produced_cache_total",
help: "Count of cached produced blob contents",
}),
},
blockProductionCacheSize: register.gauge({
name: "beacon_block_production_cache_size",
help: "Count of cached produced results",
}),

blockPayload: {
payloadAdvancePrepTime: register.histogram({
@@ -362,6 +287,69 @@ export function createBeaconMetrics(register: RegistryMetricCreator) {
}),
},

peerDas: {
dataColumnSidecarProcessingRequests: register.counter({
name: "beacon_data_column_sidecar_processing_requests_total",
help: "Number of data column sidecars submitted for processing",
}),
dataColumnSidecarProcessingSuccesses: register.counter({
name: "beacon_data_column_sidecar_processing_successes_total",
help: "Number of data column sidecars verified for gossip",
}),
dataColumnSidecarGossipVerificationTime: register.histogram({
name: "beacon_data_column_sidecar_gossip_verification_seconds",
help: "Full runtime of data column sidecars gossip verification",
buckets: [0.025, 0.05, 0.1, 0.5, 1, 2, 5],
}),
dataColumnSidecarComputationTime: register.histogram({
name: "beacon_data_column_sidecar_computation_seconds",
help: "Time taken to compute data column sidecars, including cells and inclusion proof",
buckets: [0.1, 0.25, 0.5, 0.75, 1, 2, 5],
}),
dataColumnSidecarInclusionProofVerificationTime: register.histogram({
name: "beacon_data_column_sidecar_inclusion_proof_verification_seconds",
help: "Time taken to verify data_column sidecar inclusion proof",
buckets: [0.002, 0.004, 0.006, 0.008, 0.01, 0.05, 1, 2],
}),
dataColumnSidecarKzgProofsVerificationTime: register.histogram({
name: "beacon_data_column_sidecar_kzg_proofs_verification_seconds",
help: "Time taken to verify data_column sidecar kzg proofs",
buckets: [0.01, 0.02, 0.03, 0.04, 0.05, 0.1, 0.2, 0.5, 1],
}),
kzgVerificationDataColumnBatchTime: register.histogram({
name: "beacon_kzg_verification_data_column_batch_seconds",
help: "Runtime of batched data column kzg verification",
buckets: [0.025, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 1, 2, 5],
}),
getBlobsV2Requests: register.counter({
name: "beacon_engine_getBlobsV2_requests_total",
help: "Total number of engine_getBlobsV2 requests sent",
}),
getBlobsV2Responses: register.counter({
name: "beacon_engine_getBlobsV2_responses_total",
help: "Total number of engine_getBlobsV2 successful responses received",
}),
getBlobsV2RequestDuration: register.histogram({
name: "beacon_engine_getBlobsV2_request_duration_seconds",
help: "Duration of engine_getBlobsV2 requests",
buckets: [0.01, 0.05, 0.1, 0.5, 1, 2.5, 5, 7.5],
}),
targetCustodyGroupCount: register.gauge({
name: "beacon_target_custody_group_count",
help: "Total number of custody groups within a node",
}),
reconstructedColumns: register.counter({
name: "beacon_data_availability_reconstructed_columns_total",
help: "Total count of reconstructed columns",
}),
dataColumnsReconstructionTime: register.histogram({
name: "beacon_data_availability_reconstruction_time_seconds",
help: "Time taken to reconstruct columns",
// this data comes from 20 blobs in `fusaka-devnet-1`, need to reevaluate in the future
buckets: [0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 2, 5],
}),
},

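A hypothetical usage of the engine_getBlobsV2 counters and histogram above, assuming the prom-client style startTimer() helper that these histogram wrappers expose (the call site name is illustrative):

    // Illustration: count every request, time it, and count only non-null responses.
    metrics.peerDas.getBlobsV2Requests.inc();
    const stopTimer = metrics.peerDas.getBlobsV2RequestDuration.startTimer();
    const response = await executionEngine.getBlobsV2(versionedHashes); // hypothetical call site
    stopTimer();
    if (response !== null) metrics.peerDas.getBlobsV2Responses.inc();
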
// Non-spec'ed
clockSlot: register.gauge({
name: "beacon_clock_slot",

@@ -1,6 +1,7 @@
/** biome-ignore-all lint/suspicious/noTemplateCurlyInString: The metric templates requires to have `${}` in a normal string */
import {NotReorgedReason} from "@lodestar/fork-choice";
import {BlockInputSource} from "../../chain/blocks/blockInput/index.js";
import {BlobsSource, BlockSource} from "../../chain/blocks/types.js";
import {BlobsSource, BlockSource, DataColumnsSource} from "../../chain/blocks/types.js";
import {JobQueueItemType} from "../../chain/bls/index.js";
import {AttestationErrorCode, BlockErrorCode} from "../../chain/errors/index.js";
import {
@@ -21,6 +22,7 @@ import {BackfillSyncMethod} from "../../sync/backfill/backfill.js";
import {PendingBlockType} from "../../sync/index.js";
import {PeerSyncType, RangeSyncType} from "../../sync/utils/remoteSyncType.js";
import {AllocSource} from "../../util/bufferPool.js";
import {RecoverResult} from "../../util/dataColumns.js";
import {LodestarMetadata} from "../options.js";
import {RegistryMetricCreator} from "../utils/registryMetricCreator.js";

@@ -486,6 +488,16 @@ export function createLodestarMetrics(
name: "lodestar_sync_chain_highest_target_slot_completed",
help: "Highest target slot completed by a sync chain",
}),
headSyncPeers: register.gauge<{columnIndex: number}>({
name: "lodestar_sync_head_sync_peers_count",
help: "Count of head sync peers by group index",
labelNames: ["columnIndex"],
}),
finalizedSyncPeers: register.gauge<{columnIndex: number}>({
name: "lodestar_sync_finalized_sync_peers_count",
help: "Count of finalized sync peers by group index",
labelNames: ["columnIndex"],
}),
},

syncUnknownBlock: {
@@ -537,6 +549,20 @@ export function createLodestarMetrics(
help: "Total number of blocks whose data availability was resolved",
labelNames: ["source"],
}),
peerBalancer: {
peersMetaCount: register.gauge({
name: "lodestar_sync_unknown_block_peer_balancer_peers_meta_count",
help: "Count of peers meta in UnknownBlockSync peer balancer",
}),
peersActiveRequestCount: register.gauge({
name: "lodestar_sync_unknown_block_peer_balancer_peers_active_request_count",
help: "Count of peers active requests in UnknownBlockSync peer balancer",
}),
totalActiveRequests: register.gauge({
name: "lodestar_sync_unknown_block_peer_balancer_total_active_requests",
help: "Total active requests in UnknownBlockSync peer balancer",
}),
},
},

// Gossip sync committee
@@ -717,6 +743,45 @@ export function createLodestarMetrics(
buckets: [0.05, 0.1, 0.2, 0.5, 1, 1.5, 2, 4],
}),
},
recoverDataColumnSidecars: {
elapsedTimeTillReconstructed: register.histogram({
name: "lodestar_data_column_sidecar_elapsed_time_till_reconstructed_seconds",
help: "Time elapsed between block slot time and the time data column sidecar reconstructed",
buckets: [2, 4, 6, 8, 10, 12],
}),
custodyBeforeReconstruction: register.gauge({
name: "lodestar_data_columns_in_custody_before_reconstruction",
help: "Number of data columns in custody before reconstruction",
}),
reconstructionResult: register.gauge<{result: RecoverResult}>({
name: "lodestar_data_column_sidecars_reconstruction_result",
help: "Data column sidecars reconstruction result",
labelNames: ["result"],
}),
},
dataColumns: {
bySource: register.gauge<{source: DataColumnsSource}>({
name: "lodestar_data_columns_by_source",
help: "Number of received data columns by source",
labelNames: ["source"],
}),
elapsedTimeTillReceived: register.histogram<{source: DataColumnsSource}>({
name: "lodestar_data_column_elapsed_time_till_received_seconds",
help: "Time elapsed between block slot time and the time data column received",
labelNames: ["source"],
buckets: [1, 2, 3, 4, 6, 12],
}),
sentPeersPerSubnet: register.histogram({
name: "lodestar_data_column_sent_peers_per_subnet",
help: "Number of peers node sent per subnet when publishing DataColumnSidecars",
// given TARGET_GROUP_PEERS_PER_SUBNET = 4, we expect sending to 4 peers per subnet
buckets: [1, 2, 3, 4],
}),
missingCustodyColumns: register.counter({
name: "lodestar_data_columns_missing_custody_columns_count",
help: "Total number of missing columns that should be in the database but were not when requested",
}),
},
importBlock: {
persistBlockNoSerializedDataCount: register.gauge({
name: "lodestar_import_block_persist_block_no_serialized_data_count",

@@ -3,6 +3,7 @@ import {RegistryMetricCreator} from "../../metrics/utils/registryMetricCreator.j
import {Libp2pError} from "../libp2p/error.js";
import {SubnetType} from "../metadata.js";
import {DiscoveredPeerStatus, NotDialReason} from "../peers/discover.js";
import {PeerRequestedSubnetType} from "../peers/peerManager.js";
import {SubnetSource} from "../subnets/attnetsService.js";

export type NetworkCoreMetrics = ReturnType<typeof createNetworkCoreMetrics>;
@@ -32,6 +33,11 @@ export function createNetworkCoreMetrics(register: RegistryMetricCreator) {
help: "Histogram of current count of long lived attnets of connected peers",
buckets: [0, 4, 16, 32, 64],
}),
peerColumnGroupCount: register.histogram({
name: "lodestar_peer_column_group_count",
help: "Histogram of current count of column groups of connected peers",
buckets: [0, 4, 8, 16, 32, 64, 128],
}),
peerScoreByClient: register.histogram<{client: string}>({
name: "lodestar_app_peer_score",
help: "Current peer score at lodestar app side",
@@ -91,12 +97,12 @@ export function createNetworkCoreMetrics(register: RegistryMetricCreator) {
help: "Prioritization results total peers count requested to disconnect",
labelNames: ["reason"],
}),
peersRequestedSubnetsToQuery: register.gauge<{type: SubnetType}>({
peersRequestedSubnetsToQuery: register.gauge<{type: PeerRequestedSubnetType}>({
name: "lodestar_peers_requested_total_subnets_to_query",
help: "Prioritization results total subnets to query and discover peers in",
labelNames: ["type"],
}),
peersRequestedSubnetsPeerCount: register.gauge<{type: SubnetType}>({
peersRequestedSubnetsPeerCount: register.gauge<{type: PeerRequestedSubnetType}>({
name: "lodestar_peers_requested_total_subnets_peers_count",
help: "Prioritization results total peers in subnets to query and discover peers in",
labelNames: ["type"],
@@ -106,6 +112,11 @@ export function createNetworkCoreMetrics(register: RegistryMetricCreator) {
help: "network.reportPeer count by reason",
labelNames: ["reason"],
}),
peerCountPerSamplingGroup: register.gauge<{groupIndex: number}>({
name: "lodestar_peer_count_per_sampling_group",
help: "Current count of peers per sampling group",
labelNames: ["groupIndex"],
}),
peerManager: {
heartbeatDuration: register.histogram({
name: "lodestar_peer_manager_heartbeat_duration_seconds",
@@ -132,11 +143,19 @@ export function createNetworkCoreMetrics(register: RegistryMetricCreator) {
help: "Current peers to connect count from discoverPeers requests",
labelNames: ["type"],
}),
custodyGroupPeersToConnect: register.gauge({
name: "lodestar_discovery_custody_group_peers_to_connect",
help: "Current PeerDAS custodyGroup peers to connect count from discoverPeers requests",
}),
subnetsToConnect: register.gauge<{type: SubnetType}>({
name: "lodestar_discovery_subnets_to_connect",
help: "Current subnets to connect count from discoverPeers requests",
labelNames: ["type"],
}),
custodyGroupsToConnect: register.gauge({
name: "lodestar_discovery_custody_groups_to_connect",
help: "PeerDAS custodyGroups to connect count from discoverPeers requests",
}),
cachedENRsSize: register.gauge({
name: "lodestar_discovery_cached_enrs_size",
help: "Current size of the cachedENRs Set",
@@ -160,15 +179,15 @@ export function createNetworkCoreMetrics(register: RegistryMetricCreator) {
help: "Total count of status results of PeerDiscovery.onDiscovered() function",
labelNames: ["status"],
}),
dialAttempts: register.gauge({
name: "lodestar_discovery_total_dial_attempts",
help: "Total dial attempts by peer discovery",
}),
notDialReason: register.gauge<{reason: NotDialReason}>({
name: "lodestar_discovery_not_dial_reason_total_count",
help: "Total count of not dial reasons",
labelNames: ["reason"],
}),
dialAttempts: register.gauge({
name: "lodestar_discovery_total_dial_attempts",
help: "Total dial attempts by peer discovery",
}),
dialTime: register.histogram<{status: string}>({
name: "lodestar_discovery_dial_time_seconds",
help: "Time to dial peers in seconds",

@@ -1,25 +1,28 @@
import {ENR} from "@chainsafe/enr";
import {PeerScoreStatsDump} from "@chainsafe/libp2p-gossipsub/dist/src/score/peer-score.js";
import {PublishOpts} from "@chainsafe/libp2p-gossipsub/types";
import {Connection, PrivateKey} from "@libp2p/interface";
import {peerIdFromPrivateKey} from "@libp2p/peer-id";
import {routes} from "@lodestar/api";
import {BeaconConfig, ForkBoundary} from "@lodestar/config";
import type {LoggerNode} from "@lodestar/logger/node";
import {isForkPostFulu} from "@lodestar/params";
import {ResponseIncoming} from "@lodestar/reqresp";
import {Epoch, phase0, ssz, sszTypesFor} from "@lodestar/types";
import {fromHex} from "@lodestar/utils";
import {Epoch, Status, fulu, sszTypesFor} from "@lodestar/types";
import {multiaddr} from "@multiformats/multiaddr";
import {formatNodePeer} from "../../api/impl/node/utils.js";
import {RegistryMetricCreator} from "../../metrics/index.js";
import {ClockEvent, IClock} from "../../util/clock.js";
import {CustodyConfig} from "../../util/dataColumns.js";
import {PeerIdStr, peerIdFromString, peerIdToString} from "../../util/peerId.js";
import {Discv5Worker} from "../discv5/index.js";
import {NetworkEventBus} from "../events.js";
import {FORK_EPOCH_LOOKAHEAD, getActiveForkBoundaries} from "../forks.js";
import {Eth2Gossipsub, getCoreTopicsAtFork} from "../gossip/index.js";
import {getDataColumnSidecarTopics} from "../gossip/topic.js";
import {Libp2p} from "../interface.js";
import {createNodeJsLibp2p} from "../libp2p/index.js";
import {MetadataController} from "../metadata.js";
import {NetworkConfig} from "../networkConfig.js";
import {NetworkOptions} from "../options.js";
import {PeerAction, PeerRpcScoreStore, PeerScoreStats} from "../peers/index.js";
import {PeerManager} from "../peers/peerManager.js";
@@ -28,7 +31,7 @@ import {ReqRespBeaconNode} from "../reqresp/ReqRespBeaconNode.js";
import {GetReqRespHandlerFn, OutgoingRequestArgs} from "../reqresp/types.js";
import {LocalStatusCache} from "../statusCache.js";
import {AttnetsService} from "../subnets/attnetsService.js";
import {CommitteeSubscription, IAttnetsService} from "../subnets/interface.js";
import {CommitteeSubscription, IAttnetsService, computeNodeId} from "../subnets/interface.js";
import {SyncnetsService} from "../subnets/syncnetsService.js";
import {getConnectionsMap} from "../util.js";
import {NetworkCoreMetrics, createNetworkCoreMetrics} from "./metrics.js";
@@ -41,6 +44,7 @@ type Mods = {
attnetsService: IAttnetsService;
syncnetsService: SyncnetsService;
peerManager: PeerManager;
networkConfig: NetworkConfig;
peersData: PeersData;
metadata: MetadataController;
logger: LoggerNode;
@@ -62,7 +66,8 @@ export type BaseNetworkInit = {
events: NetworkEventBus;
getReqRespHandler: GetReqRespHandlerFn;
activeValidatorCount: number;
initialStatus: phase0.Status;
initialStatus: Status;
initialCustodyGroupCount: number;
};

/**
@@ -87,6 +92,7 @@ export class NetworkCore implements INetworkCore {
private readonly attnetsService: IAttnetsService;
private readonly syncnetsService: SyncnetsService;
private readonly peerManager: PeerManager;
private readonly networkConfig: NetworkConfig;
private readonly peersData: PeersData;
private readonly reqResp: ReqRespBeaconNode;
private readonly gossip: Eth2Gossipsub;
@@ -110,6 +116,7 @@ export class NetworkCore implements INetworkCore {
this.attnetsService = modules.attnetsService;
this.syncnetsService = modules.syncnetsService;
this.peerManager = modules.peerManager;
this.networkConfig = modules.networkConfig;
this.peersData = modules.peersData;
this.metadata = modules.metadata;
this.logger = modules.logger;
@@ -134,6 +141,7 @@ export class NetworkCore implements INetworkCore {
getReqRespHandler,
activeValidatorCount,
initialStatus,
initialCustodyGroupCount,
}: BaseNetworkInit): Promise<NetworkCore> {
const libp2p = await createNodeJsLibp2p(privateKey, opts, {
peerStoreDir,
@@ -143,17 +151,23 @@ export class NetworkCore implements INetworkCore {

const metrics = metricsRegistry ? createNetworkCoreMetrics(metricsRegistry) : null;
const peersData = new PeersData();
const peerRpcScores = new PeerRpcScoreStore(opts, metrics);
const peerRpcScores = new PeerRpcScoreStore(opts, metrics, logger);
const statusCache = new LocalStatusCache(initialStatus);

// Bind discv5's ENR to local metadata
// resolve circular dependency by setting `discv5` variable after the peer manager is instantiated
// biome-ignore lint/style/useConst: <explanation>
let discv5: Discv5Worker | undefined;
const onMetadataSetValue = function onMetadataSetValue(key: string, value: Uint8Array): void {
discv5?.setEnrValue(key, value).catch((e) => logger.error("error on setEnrValue", {key}, e));
};
const metadata = new MetadataController({}, {config, logger, onSetValue: onMetadataSetValue});
const peerId = peerIdFromPrivateKey(privateKey);
const nodeId = computeNodeId(peerId);
const networkConfig: NetworkConfig = {
nodeId,
config,
custodyConfig: new CustodyConfig({nodeId, config, initialCustodyGroupCount}),
};
const metadata = new MetadataController({}, {networkConfig, logger, onSetValue: onMetadataSetValue});

const reqResp = new ReqRespBeaconNode(
{
@@ -172,7 +186,7 @@ export class NetworkCore implements INetworkCore {
);

const gossip = new Eth2Gossipsub(opts, {
config,
networkConfig,
libp2p,
logger,
metricsRegister: metricsRegistry,
@@ -192,9 +206,16 @@ export class NetworkCore implements INetworkCore {
// should be called before the AttnetsService constructor so that the node subscribes to deterministic attnet topics
await gossip.start();

const enr = opts.discv5?.enr;
const nodeId = enr ? fromHex(ENR.decodeTxt(enr).nodeId) : null;
const attnetsService = new AttnetsService(config, clock, gossip, metadata, logger, metrics, nodeId, opts);
const attnetsService = new AttnetsService(
config,
clock,
gossip,
metadata,
logger,
metrics,
networkConfig.nodeId,
opts
);
const syncnetsService = new SyncnetsService(config, clock, gossip, metadata, logger, metrics, opts);

const peerManager = await PeerManager.init(
@@ -208,9 +229,9 @@ export class NetworkCore implements INetworkCore {
logger,
metrics,
clock,
config,
peerRpcScores,
events,
networkConfig,
peersData,
statusCache,
},
@@ -237,6 +258,7 @@ export class NetworkCore implements INetworkCore {
attnetsService,
syncnetsService,
peerManager,
networkConfig,
peersData,
metadata,
logger,
@@ -272,6 +294,10 @@ export class NetworkCore implements INetworkCore {
this.closed = true;
}

getNetworkConfig(): NetworkConfig {
return this.networkConfig;
}

async scrapeMetrics(): Promise<string> {
return [
(await this.metrics?.register.metrics()) ?? "",
@@ -282,12 +308,14 @@ export class NetworkCore implements INetworkCore {
.join("\n\n");
}

async updateStatus(status: phase0.Status): Promise<void> {
async updateStatus(status: Status): Promise<void> {
this.statusCache.update(status);
}

async reportPeer(peer: PeerIdStr, action: PeerAction, actionName: string): Promise<void> {
this.peerManager.reportPeer(peerIdFromString(peer), action, actionName);
}

async reStatusPeers(peers: PeerIdStr[]): Promise<void> {
this.peerManager.reStatusPeers(peers);
}
@@ -314,7 +342,7 @@ export class NetworkCore implements INetworkCore {
}

for (const boundary of getActiveForkBoundaries(this.config, this.clock.currentEpoch)) {
this.subscribeCoreTopicsAtBoundary(this.config, boundary);
this.subscribeCoreTopicsAtBoundary(this.networkConfig, boundary);
}
}

@@ -323,7 +351,7 @@ export class NetworkCore implements INetworkCore {
*/
async unsubscribeGossipCoreTopics(): Promise<void> {
for (const boundary of this.forkBoundariesByEpoch.values()) {
this.unsubscribeCoreTopicsAtBoundary(this.config, boundary);
this.unsubscribeCoreTopicsAtBoundary(this.networkConfig, boundary);
}
}

@@ -335,26 +363,65 @@ export class NetworkCore implements INetworkCore {
const peerId = peerIdFromString(data.peerId);
return this.reqResp.sendRequestWithoutEncoding(peerId, data.method, data.versions, data.requestData);
}

async publishGossip(topic: string, data: Uint8Array, opts?: PublishOpts | undefined): Promise<number> {
const {recipients} = await this.gossip.publish(topic, data, opts);
return recipients.length;
}

/**
* Handler of ChainEvent.updateTargetCustodyGroupCount event
* Updates the target custody group count in the network config and metadata.
* Also subscribes to new data_column_sidecar subnet topics for the new custody group count.
*/
async setTargetGroupCount(count: number): Promise<void> {
this.networkConfig.custodyConfig.updateTargetCustodyGroupCount(count);
this.metadata.custodyGroupCount = count;
// cannot call subscribeGossipCoreTopics() because we subscribed to core topics already
// we only need to subscribe to more data_column_sidecar topics
const dataColumnSubnetTopics = getDataColumnSidecarTopics(this.networkConfig);
const activeBoundaries = getActiveForkBoundaries(this.config, this.clock.currentEpoch);
for (const boundary of activeBoundaries) {
for (const topic of dataColumnSubnetTopics) {
// there may be existing subscriptions for old subnets; in that case gossipsub just ignores the duplicate
this.gossip.subscribeTopic({...topic, boundary});
}
}
}

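A sketch of the intended wiring for the method above: the chain raises the ChainEvent.updateTargetCustodyGroupCount event named in the doc comment, and the network core widens its subscriptions. The emitter and variable names here are assumptions, not part of the diff:

    // Hypothetical glue code: forward the chain's new target to the network core.
    chain.emitter.on(ChainEvent.updateTargetCustodyGroupCount, (count: number) => {
      network.setTargetGroupCount(count).catch((e) => logger.error("setTargetGroupCount failed", {count}, e));
    });
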
// REST API queries

async getNetworkIdentity(): Promise<routes.node.NetworkIdentity> {
// biome-ignore lint/complexity/useLiteralKeys: `discovery` is a private attribute
const enr = await this.peerManager["discovery"]?.discv5.enr();

// enr.getFullMultiaddr can counterintuitively return undefined near startup if the enr.ip or enr.ip6 is not set.
// Eventually, the enr will be updated with the correct ip after discv5 runs for a while.

// Node's addresses on which it is listening for discv5 requests.
// The example provided by the beacon-APIs shows a _full_ multiaddr, ie including the peer id, so we include it.
const discoveryAddresses = [
enr?.getLocationMultiaddr("tcp")?.toString() ?? null,
enr?.getLocationMultiaddr("udp")?.toString() ?? null,
(await enr?.getFullMultiaddr("udp"))?.toString(),
(await enr?.getFullMultiaddr("udp6"))?.toString(),
].filter((addr): addr is string => Boolean(addr));

// Node's addresses on which eth2 RPC requests are served.
const p2pAddresses = [
// It is useful to include listen multiaddrs even if they likely aren't public IPs
// This means that we will always return some multiaddrs
...this.libp2p.getMultiaddrs().map((ma) => ma.toString()),

(await enr?.getFullMultiaddr("tcp"))?.toString(),
(await enr?.getFullMultiaddr("tcp6"))?.toString(),
(await enr?.getFullMultiaddr("quic"))?.toString(),
(await enr?.getFullMultiaddr("quic6"))?.toString(),
].filter((addr): addr is string => Boolean(addr));

return {
peerId: peerIdToString(this.libp2p.peerId),
enr: enr?.encodeTxt() || "",
discoveryAddresses,
p2pAddresses: this.libp2p.getMultiaddrs().map((m) => m.toString()),
p2pAddresses,
metadata: this.metadata.json,
};
}
@@ -390,10 +457,14 @@ export class NetworkCore implements INetworkCore {
private _dumpPeer(peerIdStr: string, connections: Connection[]): routes.lodestar.LodestarNodePeer {
const peerData = this.peersData.connectedPeers.get(peerIdStr);
const fork = this.config.getForkName(this.clock.currentSlot);
if (isForkPostFulu(fork) && peerData?.status) {
(peerData.status as fulu.Status).earliestAvailableSlot =
(peerData.status as fulu.Status).earliestAvailableSlot ?? 0;
}
return {
...formatNodePeer(peerIdStr, connections),
agentVersion: peerData?.agentVersion ?? "NA",
status: peerData?.status ? ssz.phase0.Status.toJson(peerData.status) : null,
status: peerData?.status ? sszTypesFor(fork).Status.toJson(peerData.status) : null,
metadata: peerData?.metadata ? sszTypesFor(fork).Metadata.toJson(peerData.metadata) : null,
agentClient: String(peerData?.agentClient ?? "Unknown"),
lastReceivedMsgUnixTsMs: peerData?.lastReceivedMsgUnixTsMs ?? 0,
@@ -470,7 +541,7 @@ export class NetworkCore implements INetworkCore {
if (epoch === nextBoundaryEpoch - FORK_EPOCH_LOOKAHEAD) {
// Don't subscribe to new fork boundary if the node is not subscribed to any topic
if (await this.isSubscribedToGossipCoreTopics()) {
this.subscribeCoreTopicsAtBoundary(this.config, nextBoundary);
this.subscribeCoreTopicsAtBoundary(this.networkConfig, nextBoundary);
this.logger.info("Subscribing gossip topics for next fork boundary", nextBoundary);
} else {
this.logger.info("Skipping subscribing gossip topics for next fork boundary", nextBoundary);
@@ -489,7 +560,7 @@ export class NetworkCore implements INetworkCore {
// After fork boundary transition
if (epoch === nextBoundaryEpoch + FORK_EPOCH_LOOKAHEAD) {
this.logger.info("Unsubscribing gossip topics of previous fork boundary", prevBoundary);
this.unsubscribeCoreTopicsAtBoundary(this.config, prevBoundary);
this.unsubscribeCoreTopicsAtBoundary(this.networkConfig, prevBoundary);
this.attnetsService.unsubscribeSubnetsPrevBoundary(prevBoundary);
this.syncnetsService.unsubscribeSubnetsPrevBoundary(prevBoundary);
}
@@ -500,12 +571,12 @@ export class NetworkCore implements INetworkCore {
}
};

private subscribeCoreTopicsAtBoundary(config: BeaconConfig, boundary: ForkBoundary): void {
private subscribeCoreTopicsAtBoundary(networkConfig: NetworkConfig, boundary: ForkBoundary): void {
if (this.forkBoundariesByEpoch.has(boundary.epoch)) return;
this.forkBoundariesByEpoch.set(boundary.epoch, boundary);
const {subscribeAllSubnets, disableLightClientServer} = this.opts;

for (const topic of getCoreTopicsAtFork(config, boundary.fork, {
for (const topic of getCoreTopicsAtFork(networkConfig, boundary.fork, {
subscribeAllSubnets,
disableLightClientServer,
})) {
@@ -513,12 +584,12 @@ export class NetworkCore implements INetworkCore {
}
}

private unsubscribeCoreTopicsAtBoundary(config: BeaconConfig, boundary: ForkBoundary): void {
private unsubscribeCoreTopicsAtBoundary(networkConfig: NetworkConfig, boundary: ForkBoundary): void {
if (!this.forkBoundariesByEpoch.has(boundary.epoch)) return;
this.forkBoundariesByEpoch.delete(boundary.epoch);
const {subscribeAllSubnets, disableLightClientServer} = this.opts;

for (const topic of getCoreTopicsAtFork(config, boundary.fork, {
for (const topic of getCoreTopicsAtFork(networkConfig, boundary.fork, {
subscribeAllSubnets,
disableLightClientServer,
})) {

@@ -104,6 +104,7 @@ const core = await NetworkCore.init({
reqRespBridgeRespCaller.getAsyncIterable({method, req, peerId: peerIdToString(peerId)}),
activeValidatorCount: workerData.activeValidatorCount,
initialStatus: workerData.initialStatus,
initialCustodyGroupCount: workerData.initialCustodyGroupCount,
});

wireEventsOnWorkerThread<NetworkEventData>(
@@ -140,6 +141,8 @@ const libp2pWorkerApi: NetworkWorkerApi = {
// sendReqRespRequest - handled via events with AsyncIterableBridgeHandler
publishGossip: (topic, data, opts) => core.publishGossip(topic, data, opts),

setTargetGroupCount: (count) => core.setTargetGroupCount(count),

// Debug

getNetworkIdentity: () => core.getNetworkIdentity(),

@@ -9,7 +9,7 @@ import {routes} from "@lodestar/api";
import {BeaconConfig, chainConfigToJson} from "@lodestar/config";
import type {LoggerNode} from "@lodestar/logger/node";
import {ResponseIncoming, ResponseOutgoing} from "@lodestar/reqresp";
import {phase0} from "@lodestar/types";
import {Status} from "@lodestar/types";
import {Metrics} from "../../metrics/index.js";
import {AsyncIterableBridgeCaller, AsyncIterableBridgeHandler} from "../../util/asyncIterableToEvents.js";
import {PeerIdStr, peerIdFromString} from "../../util/peerId.js";
@@ -37,7 +37,8 @@ export type WorkerNetworkCoreOpts = NetworkOptions & {
peerStoreDir?: string;
activeValidatorCount: number;
genesisTime: number;
initialStatus: phase0.Status;
initialStatus: Status;
initialCustodyGroupCount: number;
};

export type WorkerNetworkCoreInitModules = {
@@ -104,7 +105,15 @@ export class WorkerNetworkCore implements INetworkCore {

static async init(modules: WorkerNetworkCoreInitModules): Promise<WorkerNetworkCore> {
const {opts, config, privateKey} = modules;
const {genesisTime, peerStoreDir, activeValidatorCount, localMultiaddrs, metricsEnabled, initialStatus} = opts;
const {
genesisTime,
peerStoreDir,
activeValidatorCount,
localMultiaddrs,
metricsEnabled,
initialStatus,
initialCustodyGroupCount,
} = opts;

const workerData: NetworkWorkerData = {
opts,
@@ -116,6 +125,7 @@ export class WorkerNetworkCore implements INetworkCore {
peerStoreDir,
genesisTime,
initialStatus,
initialCustodyGroupCount,
activeValidatorCount,
loggerOpts: modules.logger.toOpts(),
};
@@ -136,7 +146,7 @@ export class WorkerNetworkCore implements INetworkCore {
resourceLimits: {maxYoungGenerationSizeMb: opts.maxYoungGenerationSizeMb},
} as ConstructorParameters<typeof Worker>[1]);

// biome-ignore lint/suspicious/noExplicitAny: <explanation>
// biome-ignore lint/suspicious/noExplicitAny: Don't know any specific interface for the spawn
const networkThreadApi = (await spawn<any>(worker, {
// A Lodestar Node may do very expensive task at start blocking the event loop and causing
// the initialization to timeout. The number below is big enough to almost disable the timeout
@@ -172,7 +182,7 @@ export class WorkerNetworkCore implements INetworkCore {
return this.getApi().scrapeMetrics();
}

updateStatus(status: phase0.Status): Promise<void> {
updateStatus(status: Status): Promise<void> {
return this.getApi().updateStatus(status);
}
reStatusPeers(peers: PeerIdStr[]): Promise<void> {
@@ -217,6 +227,12 @@ export class WorkerNetworkCore implements INetworkCore {
return this.getApi().publishGossip(topic, data, opts);
}

// Custody

setTargetGroupCount(count: number): Promise<void> {
return this.getApi().setTargetGroupCount(count);
}

// Debug

connectToPeer(peer: PeerIdStr, multiaddr: MultiaddrStr[]): Promise<void> {

@@ -4,7 +4,7 @@ import {routes} from "@lodestar/api";
import {SpecJson} from "@lodestar/config";
import {LoggerNodeOpts} from "@lodestar/logger/node";
import {ResponseIncoming} from "@lodestar/reqresp";
import {phase0} from "@lodestar/types";
import {Status} from "@lodestar/types";
import {PeerIdStr} from "../../util/peerId.js";
import {NetworkOptions} from "../options.js";
import {PeerAction, PeerScoreStats} from "../peers/index.js";
@@ -53,8 +53,9 @@ export interface INetworkCore extends INetworkCorePublic {
getConnectedPeerCount(): Promise<number>;

/** Chain must push status updates to the network core */
updateStatus(status: phase0.Status): Promise<void>;
updateStatus(status: Status): Promise<void>;

setTargetGroupCount(count: number): Promise<void>;
/** Opens stream to handle ReqResp outgoing request */
sendReqRespRequest(data: OutgoingRequestArgs): AsyncIterable<ResponseIncoming>;
/** Publish gossip message to peers */
@@ -78,7 +79,8 @@ export type NetworkWorkerData = {
genesisValidatorsRoot: Uint8Array;
genesisTime: number;
activeValidatorCount: number;
initialStatus: phase0.Status;
initialStatus: Status;
initialCustodyGroupCount: number;
privateKeyProto: Uint8Array;
localMultiaddrs: string[];
metricsEnabled: boolean;
@@ -91,7 +93,7 @@ export type NetworkWorkerData = {
*/
export type NetworkWorkerApi = INetworkCorePublic & {
// To satisfy the constraint of `ModuleThread` type
// biome-ignore lint/suspicious/noExplicitAny:
// biome-ignore lint/suspicious/noExplicitAny: Explicitly needed the `any` type here
[string: string]: (...args: any[]) => Promise<any> | any;
// Async method through worker boundary
reportPeer(peer: PeerIdStr, action: PeerAction, actionName: string): Promise<void>;
@@ -100,7 +102,9 @@ export type NetworkWorkerApi = INetworkCorePublic & {
// TODO: Duplicated methods with INetwork interface
getConnectedPeers(): Promise<PeerIdStr[]>;
getConnectedPeerCount(): Promise<number>;
updateStatus(status: phase0.Status): Promise<void>;
updateStatus(status: Status): Promise<void>;

setTargetGroupCount(count: number): Promise<void>;

// sendReqRespRequest - implemented via events
publishGossip(topic: string, data: Uint8Array, opts?: PublishOpts): Promise<number>;

@@ -1,6 +1,6 @@
import {EventEmitter} from "node:events";
import {PeerId, TopicValidatorResult} from "@libp2p/interface";
import {RootHex, phase0} from "@lodestar/types";
import {CustodyIndex, RootHex, Status} from "@lodestar/types";
import {BlockInput, NullBlockInput} from "../chain/blocks/types.js";
import {PeerIdStr} from "../util/peerId.js";
import {StrictEventEmitterSingleArg} from "../util/strictEvents.js";
@@ -27,7 +27,12 @@
}

export type NetworkEventData = {
[NetworkEvent.peerConnected]: {peer: PeerIdStr; status: phase0.Status};
[NetworkEvent.peerConnected]: {
peer: PeerIdStr;
status: Status;
custodyGroups: CustodyIndex[];
clientAgent: string;
};
[NetworkEvent.peerDisconnected]: {peer: PeerIdStr};
[NetworkEvent.reqRespRequest]: {request: RequestTypedContainer; peer: PeerId};
[NetworkEvent.unknownBlockParent]: {blockInput: BlockInput; peer: PeerIdStr};

@@ -19,6 +19,7 @@ import {GossipTopic, GossipType} from "./interface.js";
import {Eth2GossipsubMetrics, createEth2GossipsubMetrics} from "./metrics.js";
import {GossipTopicCache, getCoreTopicsAtFork, stringifyGossipTopic} from "./topic.js";

import {NetworkConfig} from "../networkConfig.js";
import {
GOSSIP_D,
GOSSIP_D_HIGH,
@@ -39,7 +40,7 @@ export type Eth2Context = {
};

export type Eth2GossipsubModules = {
config: BeaconConfig;
networkConfig: NetworkConfig;
libp2p: Libp2p;
logger: Logger;
metricsRegister: RegistryMetricCreator | null;
@@ -84,10 +85,11 @@ export class Eth2Gossipsub extends GossipSub {

constructor(opts: Eth2GossipsubOpts, modules: Eth2GossipsubModules) {
const {allowPublishToZeroPeers, gossipsubD, gossipsubDLow, gossipsubDHigh} = opts;
const gossipTopicCache = new GossipTopicCache(modules.config);
const {networkConfig, logger, metricsRegister, peersData, events} = modules;
const {config} = networkConfig;
const gossipTopicCache = new GossipTopicCache(config);

const scoreParams = computeGossipPeerScoreParams(modules);
const {config, logger, metricsRegister, peersData, events} = modules;
const scoreParams = computeGossipPeerScoreParams({config, eth2Context: modules.eth2Context});

// Gossipsub parameters defined here:
// https://github.com/ethereum/consensus-specs/blob/v1.1.10/specs/phase0/p2p-interface.md#the-gossip-domain-gossipsub
@@ -126,7 +128,7 @@ export class Eth2Gossipsub extends GossipSub {
),
metricsRegister: metricsRegister as MetricsRegister | null,
metricsTopicStrToLabel: metricsRegister
? getMetricsTopicStrToLabel(config, {disableLightClientServer: opts.disableLightClientServer ?? false})
? getMetricsTopicStrToLabel(networkConfig, {disableLightClientServer: opts.disableLightClientServer ?? false})
: undefined,
asyncValidation: true,

@@ -332,7 +334,11 @@ function attSubnetLabel(subnet: SubnetID): string {
return `0${subnet}`;
}

function getMetricsTopicStrToLabel(config: BeaconConfig, opts: {disableLightClientServer: boolean}): TopicStrToLabel {
function getMetricsTopicStrToLabel(
networkConfig: NetworkConfig,
opts: {disableLightClientServer: boolean}
): TopicStrToLabel {
const {config} = networkConfig;
const metricsTopicStrToLabel = new Map<TopicStr, TopicLabel>();
const {forkBoundariesAscendingEpochOrder} = config;

@@ -345,7 +351,7 @@ function getMetricsTopicStrToLabel(config: BeaconConfig, opts: {disableLightClie
continue;
}

const topics = getCoreTopicsAtFork(config, currentForkBoundary.fork, {
const topics = getCoreTopicsAtFork(networkConfig, currentForkBoundary.fork, {
subscribeAllSubnets: true,
disableLightClientServer: opts.disableLightClientServer,
});

@@ -13,6 +13,7 @@ import {
altair,
capella,
deneb,
fulu,
phase0,
} from "@lodestar/types";
import {Logger} from "@lodestar/utils";
@@ -25,6 +26,7 @@ import {JobItemQueue} from "../../util/queue/index.js";
export enum GossipType {
beacon_block = "beacon_block",
blob_sidecar = "blob_sidecar",
data_column_sidecar = "data_column_sidecar",
beacon_aggregate_and_proof = "beacon_aggregate_and_proof",
beacon_attestation = "beacon_attestation",
voluntary_exit = "voluntary_exit",
@@ -56,6 +58,7 @@ export interface IGossipTopic {
export type GossipTopicTypeMap = {
[GossipType.beacon_block]: {type: GossipType.beacon_block};
[GossipType.blob_sidecar]: {type: GossipType.blob_sidecar; subnet: SubnetID};
[GossipType.data_column_sidecar]: {type: GossipType.data_column_sidecar; subnet: SubnetID};
[GossipType.beacon_aggregate_and_proof]: {type: GossipType.beacon_aggregate_and_proof};
[GossipType.beacon_attestation]: {type: GossipType.beacon_attestation; subnet: SubnetID};
[GossipType.voluntary_exit]: {type: GossipType.voluntary_exit};
@@ -88,6 +91,7 @@ export type GossipTypeMap = {
[GossipType.blob_sidecar]: deneb.BlobSidecar;
[GossipType.beacon_aggregate_and_proof]: SignedAggregateAndProof;
[GossipType.beacon_attestation]: SingleAttestation;
[GossipType.data_column_sidecar]: fulu.DataColumnSidecar;
[GossipType.voluntary_exit]: phase0.SignedVoluntaryExit;
[GossipType.proposer_slashing]: phase0.ProposerSlashing;
[GossipType.attester_slashing]: AttesterSlashing;
@@ -103,6 +107,7 @@ export type GossipFnByType = {
[GossipType.blob_sidecar]: (blobSidecar: deneb.BlobSidecar) => Promise<void> | void;
[GossipType.beacon_aggregate_and_proof]: (aggregateAndProof: SignedAggregateAndProof) => Promise<void> | void;
[GossipType.beacon_attestation]: (attestation: SingleAttestation) => Promise<void> | void;
[GossipType.data_column_sidecar]: (dataColumnSidecar: fulu.DataColumnSidecar) => Promise<void> | void;
[GossipType.voluntary_exit]: (voluntaryExit: phase0.SignedVoluntaryExit) => Promise<void> | void;
[GossipType.proposer_slashing]: (proposerSlashing: phase0.ProposerSlashing) => Promise<void> | void;
[GossipType.attester_slashing]: (attesterSlashing: AttesterSlashing) => Promise<void> | void;
@@ -204,7 +209,7 @@ export type BatchGossipHandler<K extends GossipType> = (
gossipHandlerParams: GossipHandlerParamGeneric<K>[]
) => Promise<(null | GossipActionError<AttestationErrorType>)[]>;

// biome-ignore lint/suspicious/noExplicitAny: <explanation>
// biome-ignore lint/suspicious/noExplicitAny: Need the usage of `any` here to infer any type
export type ResolvedType<F extends (...args: any) => Promise<any>> = F extends (...args: any) => Promise<infer T>
? T
: never;

@@ -8,7 +8,7 @@ import {BeaconConfig} from "@lodestar/config";
|
||||
import {ATTESTATION_SUBNET_COUNT, SLOTS_PER_EPOCH, TARGET_AGGREGATORS_PER_COMMITTEE} from "@lodestar/params";
|
||||
import {computeCommitteeCount} from "@lodestar/state-transition";
|
||||
import {getActiveForkBoundaries} from "../forks.js";
|
||||
import {Eth2Context, Eth2GossipsubModules} from "./gossipsub.js";
|
||||
import {Eth2Context} from "./gossipsub.js";
|
||||
import {GossipType} from "./interface.js";
|
||||
import {stringifyGossipTopic} from "./topic.js";
|
||||
|
||||
@@ -80,7 +80,10 @@ type TopicScoreInput = {
|
||||
export function computeGossipPeerScoreParams({
|
||||
config,
|
||||
eth2Context,
|
||||
}: Pick<Eth2GossipsubModules, "config" | "eth2Context">): Partial<PeerScoreParams> {
|
||||
}: {
|
||||
config: BeaconConfig;
|
||||
eth2Context: Eth2Context;
|
||||
}): Partial<PeerScoreParams> {
|
||||
const decayIntervalMs = config.SECONDS_PER_SLOT * 1000;
|
||||
const decayToZero = 0.01;
|
||||
const epochDurationMs = config.SECONDS_PER_SLOT * SLOTS_PER_EPOCH * 1000;
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import {ChainConfig, ForkDigestContext} from "@lodestar/config";
|
||||
import {ForkDigestContext} from "@lodestar/config";
|
||||
import {
|
||||
ATTESTATION_SUBNET_COUNT,
|
||||
ForkName,
|
||||
@@ -10,6 +10,7 @@ import {
|
||||
import {Attestation, SingleAttestation, ssz, sszTypesFor} from "@lodestar/types";
|
||||
|
||||
import {GossipAction, GossipActionError, GossipErrorCode} from "../../chain/errors/gossipValidation.js";
|
||||
import {NetworkConfig} from "../networkConfig.js";
|
||||
import {DEFAULT_ENCODING} from "./constants.js";
|
||||
import {GossipEncoding, GossipTopic, GossipTopicTypeMap, GossipType, SSZTypeOfGossipTopic} from "./interface.js";
|
||||
|
||||
@@ -75,6 +76,8 @@ function stringifyGossipTopicType(topic: GossipTopic): string {
|
||||
return `${topic.type}_${topic.subnet}`;
|
||||
case GossipType.blob_sidecar:
|
||||
return `${topic.type}_${topic.subnet}`;
|
||||
case GossipType.data_column_sidecar:
|
||||
return `${topic.type}_${topic.subnet}`;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -86,6 +89,8 @@ export function getGossipSSZType(topic: GossipTopic) {
|
||||
return ssz[fork].SignedBeaconBlock;
|
||||
case GossipType.blob_sidecar:
|
||||
return ssz.deneb.BlobSidecar;
|
||||
case GossipType.data_column_sidecar:
|
||||
return ssz.fulu.DataColumnSidecar;
|
||||
case GossipType.beacon_aggregate_and_proof:
|
||||
return sszTypesFor(fork).SignedAggregateAndProof;
|
||||
case GossipType.beacon_attestation:
|
||||
@@ -205,6 +210,13 @@ export function parseGossipTopic(forkDigestContext: ForkDigestContext, topicStr:
|
||||
return {type: GossipType.blob_sidecar, subnet, boundary, encoding};
|
||||
}
|
||||
|
||||
if (gossipTypeStr.startsWith(GossipType.data_column_sidecar)) {
|
||||
const subnetStr = gossipTypeStr.slice(GossipType.data_column_sidecar.length + 1); // +1 for '_' concatenating the topic name and the subnet
|
||||
const subnet = parseInt(subnetStr, 10);
|
||||
if (Number.isNaN(subnet)) throw Error(`subnet ${subnetStr} is not a number`);
|
||||
return {type: GossipType.data_column_sidecar, subnet, boundary, encoding};
|
||||
}
|
||||
|
||||
throw Error(`Unknown gossip type ${gossipTypeStr}`);
|
||||
} catch (e) {
|
||||
(e as Error).message = `Invalid gossip topic ${topicStr}: ${(e as Error).message}`;
|
||||
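Note: a minimal standalone sketch of the subnet-suffix parsing this hunk adds, assuming the gossip topic type segment looks like "data_column_sidecar_{subnet}" (constant and error text below are illustrative, not the module's exact internals):

// TypeScript sketch
const DATA_COLUMN_SIDECAR = "data_column_sidecar";

function parseDataColumnSubnet(gossipTypeStr: string): number {
  // +1 skips the '_' joining the topic name and the subnet id
  const subnetStr = gossipTypeStr.slice(DATA_COLUMN_SIDECAR.length + 1);
  const subnet = parseInt(subnetStr, 10);
  if (Number.isNaN(subnet)) throw Error(`subnet ${subnetStr} is not a number`);
  return subnet;
}

parseDataColumnSubnet("data_column_sidecar_17"); // => 17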
@@ -216,7 +228,7 @@ export function parseGossipTopic(forkDigestContext: ForkDigestContext, topicStr:
 * De-duplicate logic to pick fork topics between subscribeCoreTopicsAtFork and unsubscribeCoreTopicsAtFork
 */
export function getCoreTopicsAtFork(
  config: ChainConfig,
  networkConfig: NetworkConfig,
  fork: ForkName,
  opts: {subscribeAllSubnets?: boolean; disableLightClientServer?: boolean}
): GossipTopicTypeMap[keyof GossipTopicTypeMap][] {
@@ -229,8 +241,14 @@ export function getCoreTopicsAtFork(
    {type: GossipType.attester_slashing},
  ];

  // After fulu also track data_column_sidecar_{index}
  if (ForkSeq[fork] >= ForkSeq.fulu) {
    topics.push(...getDataColumnSidecarTopics(networkConfig));
  }

  // After Deneb also track blob_sidecar_{subnet_id}
  if (ForkSeq[fork] >= ForkSeq.deneb) {
    const {config} = networkConfig;
    const subnetCount = isForkPostElectra(fork)
      ? config.BLOB_SIDECAR_SUBNET_COUNT_ELECTRA
      : config.BLOB_SIDECAR_SUBNET_COUNT;
@@ -268,6 +286,22 @@ export function getCoreTopicsAtFork(
  return topics;
}

/**
 * Pick data column subnets to subscribe to post-fulu.
 */
export function getDataColumnSidecarTopics(
  networkConfig: NetworkConfig
): GossipTopicTypeMap[keyof GossipTopicTypeMap][] {
  const topics: GossipTopicTypeMap[keyof GossipTopicTypeMap][] = [];

  const subnets = networkConfig.custodyConfig.sampledSubnets;
  for (const subnet of subnets) {
    topics.push({type: GossipType.data_column_sidecar, subnet});
  }

  return topics;
}
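Note: a small usage sketch of the new helper, assuming a NetworkConfig whose custodyConfig exposes this node's sampled column subnets (the subnet values below are made up):

// TypeScript sketch
const networkConfig = {
  custodyConfig: {sampledSubnets: [3, 17, 42, 101]},
} as unknown as NetworkConfig;

// One gossip topic per sampled data column subnet, e.g. data_column_sidecar_3
const topics = getDataColumnSidecarTopics(networkConfig);
// => [{type: "data_column_sidecar", subnet: 3}, {type: "data_column_sidecar", subnet: 17}, ...]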
/**
 * Validate that an `encodingStr` is a known `GossipEncoding`
 */
@@ -285,6 +319,7 @@ function parseEncodingStr(encodingStr: string): GossipEncoding {
export const gossipTopicIgnoreDuplicatePublishError: Record<GossipType, boolean> = {
  [GossipType.beacon_block]: true,
  [GossipType.blob_sidecar]: true,
  [GossipType.data_column_sidecar]: true,
  [GossipType.beacon_aggregate_and_proof]: true,
  [GossipType.beacon_attestation]: true,
  [GossipType.voluntary_exit]: true,

@@ -29,16 +29,19 @@ import {
  altair,
  capella,
  deneb,
  fulu,
  phase0,
} from "@lodestar/types";
import type {Datastore} from "interface-datastore";
import {Libp2p as ILibp2p} from "libp2p";
import {CustodyConfig} from "../util/dataColumns.js";
import {PeerIdStr} from "../util/peerId.js";
import {BlobSidecarsByRootRequest} from "../util/types.js";
import {BeaconBlocksByRootRequest, BlobSidecarsByRootRequest, DataColumnSidecarsByRootRequest} from "../util/types.js";
import {INetworkCorePublic} from "./core/types.js";
import {INetworkEventBus} from "./events.js";
import {GossipType} from "./gossip/interface.js";
import {PeerAction} from "./peers/index.js";
import {PeerSyncMeta} from "./peers/peersData.js";
import {PendingGossipsubMessage} from "./processor/types.js";

/**
@@ -51,10 +54,13 @@ import {PendingGossipsubMessage} from "./processor/types.js";
 */

export interface INetwork extends INetworkCorePublic {
  readonly peerId: PeerId;
  readonly custodyConfig: CustodyConfig;
  readonly closed: boolean;
  events: INetworkEventBus;

  getConnectedPeers(): PeerIdStr[];
  getConnectedPeerSyncMeta(peerId: PeerIdStr): PeerSyncMeta;
  getConnectedPeerCount(): number;
  isSubscribedToGossipCoreTopics(): boolean;
  reportPeer(peer: PeerIdStr, action: PeerAction, actionName: string): void;
@@ -68,16 +74,25 @@ export interface INetwork extends INetworkCorePublic {
  ): Promise<WithBytes<SignedBeaconBlock>[]>;
  sendBeaconBlocksByRoot(
    peerId: PeerIdStr,
    request: phase0.BeaconBlocksByRootRequest
    request: BeaconBlocksByRootRequest
  ): Promise<WithBytes<SignedBeaconBlock>[]>;
  sendBlobSidecarsByRange(peerId: PeerIdStr, request: deneb.BlobSidecarsByRangeRequest): Promise<deneb.BlobSidecar[]>;
  sendBlobSidecarsByRoot(peerId: PeerIdStr, request: BlobSidecarsByRootRequest): Promise<deneb.BlobSidecar[]>;
  sendDataColumnSidecarsByRange(
    peerId: PeerIdStr,
    request: fulu.DataColumnSidecarsByRangeRequest
  ): Promise<fulu.DataColumnSidecar[]>;
  sendDataColumnSidecarsByRoot(
    peerId: PeerIdStr,
    request: DataColumnSidecarsByRootRequest
  ): Promise<fulu.DataColumnSidecar[]>;

  // Gossip
  publishBeaconBlock(signedBlock: SignedBeaconBlock): Promise<number>;
  publishBlobSidecar(blobSidecar: deneb.BlobSidecar): Promise<number>;
  publishBeaconAggregateAndProof(aggregateAndProof: SignedAggregateAndProof): Promise<number>;
  publishBeaconAttestation(attestation: SingleAttestation, subnet: SubnetID): Promise<number>;
  publishDataColumnSidecar(dataColumnSideCar: fulu.DataColumnSidecar): Promise<number>;
  publishVoluntaryExit(voluntaryExit: phase0.SignedVoluntaryExit): Promise<number>;
  publishBlsToExecutionChange(blsToExecutionChange: capella.SignedBLSToExecutionChange): Promise<number>;
  publishProposerSlashing(proposerSlashing: phase0.ProposerSlashing): Promise<number>;

@@ -2,16 +2,19 @@ import {BitArray} from "@chainsafe/ssz";
import {BeaconConfig} from "@lodestar/config";
import {ForkSeq} from "@lodestar/params";
import {computeStartSlotAtEpoch} from "@lodestar/state-transition";
import {Epoch, altair, phase0, ssz} from "@lodestar/types";
import {Epoch, fulu, phase0, ssz} from "@lodestar/types";
import {Logger, toHex} from "@lodestar/utils";
import {FAR_FUTURE_EPOCH} from "../constants/index.js";
import {serializeCgc} from "../util/metadata.js";
import {getCurrentAndNextForkBoundary} from "./forks.js";
import {NetworkConfig} from "./networkConfig.js";

export enum ENRKey {
  tcp = "tcp",
  eth2 = "eth2",
  attnets = "attnets",
  syncnets = "syncnets",
  cgc = "cgc",
  nfd = "nfd",
}
export enum SubnetType {
@@ -20,11 +23,11 @@ export enum SubnetType {
}

export type MetadataOpts = {
  metadata?: altair.Metadata;
  metadata?: fulu.Metadata;
};

export type MetadataModules = {
  config: BeaconConfig;
  networkConfig: NetworkConfig;
  logger: Logger;
  onSetValue: (key: string, value: Uint8Array) => void;
};
@@ -36,15 +39,18 @@ export type MetadataModules = {
 */
export class MetadataController {
  private onSetValue: (key: string, value: Uint8Array) => void;
  private config: BeaconConfig;
  private networkConfig: NetworkConfig;
  private logger: Logger;
  private _metadata: altair.Metadata;
  private _metadata: fulu.Metadata;

  constructor(opts: MetadataOpts, modules: MetadataModules) {
    this.config = modules.config;
    this.networkConfig = modules.networkConfig;
    this.logger = modules.logger;
    this.onSetValue = modules.onSetValue;
    this._metadata = opts.metadata || ssz.altair.Metadata.defaultValue();
    this._metadata = opts.metadata ?? {
      ...ssz.fulu.Metadata.defaultValue(),
      custodyGroupCount: modules.networkConfig.custodyConfig.targetCustodyGroupCount,
    };
  }

  upstreamValues(currentEpoch: Epoch): void {
@@ -53,11 +59,16 @@ export class MetadataController {

    this.onSetValue(ENRKey.attnets, ssz.phase0.AttestationSubnets.serialize(this._metadata.attnets));

    if (this.config.getForkSeq(computeStartSlotAtEpoch(currentEpoch)) >= ForkSeq.altair) {
    const config = this.networkConfig.config;

    if (config.getForkSeq(computeStartSlotAtEpoch(currentEpoch)) >= ForkSeq.altair) {
      // Only persist syncnets if altair fork is already activated. If currentFork is altair but head is phase0
      // adding syncnets to the ENR is not a problem, we will just have a useless field for a few hours.
      this.onSetValue(ENRKey.syncnets, ssz.phase0.AttestationSubnets.serialize(this._metadata.syncnets));
    }

    // Set CGC regardless of fork. It may be useful to clients before Fulu, and will be ignored otherwise.
    this.onSetValue(ENRKey.cgc, serializeCgc(this._metadata.custodyGroupCount));
  }

  get seqNumber(): bigint {
@@ -83,8 +94,22 @@ export class MetadataController {
    this._metadata.attnets = attnets;
  }

  get custodyGroupCount(): number {
    return this._metadata.custodyGroupCount;
  }

  set custodyGroupCount(custodyGroupCount: number) {
    if (custodyGroupCount === this._metadata.custodyGroupCount) {
      return;
    }
    this.onSetValue(ENRKey.cgc, serializeCgc(custodyGroupCount));
    this.logger.debug("Updated cgc field in ENR", {custodyGroupCount});
    this._metadata.seqNumber++;
    this._metadata.custodyGroupCount = custodyGroupCount;
  }

  /** Consumers that need the phase0.Metadata type can just ignore the .syncnets property */
  get json(): altair.Metadata {
  get json(): fulu.Metadata {
    return this._metadata;
  }
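Note: the custodyGroupCount setter above only touches the ENR and seqNumber when the value actually changes; bumping seqNumber is what prompts peers to re-fetch our metadata. A hedged sketch of that contract, using the MetadataController API as it appears in this diff:

// TypeScript sketch
declare const metadata: MetadataController;
const before = metadata.seqNumber; // bigint
metadata.custodyGroupCount = metadata.custodyGroupCount; // no-op: same value, seqNumber unchanged
metadata.custodyGroupCount = 8; // writes cgc to the ENR and increments seqNumber
// metadata.seqNumber === before + 1n (assuming 8 differs from the previous value)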
@@ -101,7 +126,8 @@ export class MetadataController {
   * Current Clock implementation ensures no race conditions, epoch is correct if re-fetched
   */
  updateEth2Field(epoch: Epoch): void {
    const enrForkId = getENRForkID(this.config, epoch);
    const config = this.networkConfig.config;
    const enrForkId = getENRForkID(config, epoch);
    const {forkDigest, nextForkVersion, nextForkEpoch} = enrForkId;
    this.onSetValue(ENRKey.eth2, ssz.phase0.ENRForkID.serialize(enrForkId));
    this.logger.debug("Updated eth2 field in ENR", {
@@ -112,7 +138,7 @@ export class MetadataController {

    const nextForkDigest =
      nextForkEpoch !== FAR_FUTURE_EPOCH
        ? this.config.forkBoundary2ForkDigest(this.config.getForkBoundaryAtEpoch(nextForkEpoch))
        ? config.forkBoundary2ForkDigest(config.getForkBoundaryAtEpoch(nextForkEpoch))
        : ssz.ForkDigest.defaultValue();
    this.onSetValue(ENRKey.nfd, nextForkDigest);
    this.logger.debug("Updated nfd field in ENR", {nextForkDigest: toHex(nextForkDigest)});

@@ -5,7 +5,7 @@ import {peerIdFromPrivateKey} from "@libp2p/peer-id";
import {routes} from "@lodestar/api";
import {BeaconConfig} from "@lodestar/config";
import {LoggerNode} from "@lodestar/logger/node";
import {ForkSeq} from "@lodestar/params";
import {ForkSeq, NUMBER_OF_COLUMNS} from "@lodestar/params";
import {ResponseIncoming} from "@lodestar/reqresp";
import {computeEpochAtSlot, computeTimeAtSlot} from "@lodestar/state-transition";
import {
@@ -24,15 +24,19 @@ import {
  altair,
  capella,
  deneb,
  fulu,
  phase0,
} from "@lodestar/types";
import {sleep} from "@lodestar/utils";
import {IBeaconChain} from "../chain/index.js";
import {prettyPrintIndices, sleep} from "@lodestar/utils";
import {ChainEvent, IBeaconChain} from "../chain/index.js";
import {computeSubnetForDataColumnSidecar} from "../chain/validation/dataColumnSidecar.js";
import {IBeaconDb} from "../db/interface.js";
import {Metrics, RegistryMetricCreator} from "../metrics/index.js";
import {IClock} from "../util/clock.js";
import {CustodyConfig} from "../util/dataColumns.js";
import {PeerIdStr, peerIdToString} from "../util/peerId.js";
import {BlobSidecarsByRootRequest} from "../util/types.js";
import {promiseAllMaybeAsync} from "../util/promises.js";
import {BeaconBlocksByRootRequest, BlobSidecarsByRootRequest, DataColumnSidecarsByRootRequest} from "../util/types.js";
import {INetworkCore, NetworkCore, WorkerNetworkCore} from "./core/index.js";
import {INetworkEventBus, NetworkEvent, NetworkEventBus, NetworkEventData} from "./events.js";
import {getActiveForkBoundaries} from "./forks.js";
@@ -41,6 +45,7 @@ import {getGossipSSZType, gossipTopicIgnoreDuplicatePublishError, stringifyGossi
import {INetwork} from "./interface.js";
import {NetworkOptions} from "./options.js";
import {PeerAction, PeerScoreStats} from "./peers/index.js";
import {PeerSyncMeta} from "./peers/peersData.js";
import {AggregatorTracker} from "./processor/aggregatorTracker.js";
import {NetworkProcessor, PendingGossipsubMessage} from "./processor/index.js";
import {ReqRespMethod} from "./reqresp/index.js";
@@ -52,7 +57,7 @@ import {
} from "./reqresp/utils/collect.js";
import {collectSequentialBlocksInRange} from "./reqresp/utils/collectSequentialBlocksInRange.js";
import {CommitteeSubscription} from "./subnets/index.js";
import {isPublishToZeroPeersError} from "./util.js";
import {isPublishToZeroPeersError, prettyPrintPeerIdStr} from "./util.js";

type NetworkModules = {
  opts: NetworkOptions;
@@ -90,6 +95,7 @@ export type NetworkInitModules = {
 */
export class Network implements INetwork {
  readonly peerId: PeerId;
  readonly custodyConfig: CustodyConfig;
  // TODO: Make private
  readonly events: INetworkEventBus;

@@ -106,11 +112,12 @@ export class Network implements INetwork {
  private readonly aggregatorTracker: AggregatorTracker;

  private subscribedToCoreTopics = false;
  private connectedPeers = new Set<PeerIdStr>();
  private connectedPeersSyncMeta = new Map<PeerIdStr, Omit<PeerSyncMeta, "peerId">>();

  constructor(modules: NetworkModules) {
    this.peerId = peerIdFromPrivateKey(modules.privateKey);
    this.config = modules.config;
    this.custodyConfig = modules.chain.custodyConfig;
    this.logger = modules.logger;
    this.chain = modules.chain;
    this.clock = modules.chain.clock;
@@ -129,6 +136,9 @@ export class Network implements INetwork {
    this.chain.emitter.on(routes.events.EventType.lightClientOptimisticUpdate, ({data}) =>
      this.onLightClientOptimisticUpdate(data)
    );
    this.chain.emitter.on(ChainEvent.updateTargetCustodyGroupCount, this.onTargetGroupCountUpdated);
    this.chain.emitter.on(ChainEvent.publishDataColumns, this.onPublishDataColumns);
    this.chain.emitter.on(ChainEvent.updateStatus, this.onUpdateStatus);
  }

  static async init({
@@ -148,6 +158,7 @@ export class Network implements INetwork {

    const activeValidatorCount = chain.getHeadState().epochCtx.currentShuffling.activeIndices.length;
    const initialStatus = chain.getStatus();
    const initialCustodyGroupCount = chain.custodyConfig.targetCustodyGroupCount;

    if (opts.useWorker) {
      logger.info("running libp2p instance in worker thread");
@@ -162,6 +173,7 @@ export class Network implements INetwork {
          activeValidatorCount,
          genesisTime: chain.genesisTime,
          initialStatus,
          initialCustodyGroupCount,
        },
        config,
        privateKey,
@@ -181,6 +193,7 @@ export class Network implements INetwork {
        getReqRespHandler,
        metricsRegistry: metrics ? new RegistryMetricCreator() : null,
        initialStatus,
        initialCustodyGroupCount,
        activeValidatorCount,
      });

@@ -219,6 +232,9 @@ export class Network implements INetwork {
    this.chain.emitter.off(routes.events.EventType.head, this.onHead);
    this.chain.emitter.off(routes.events.EventType.lightClientFinalityUpdate, this.onLightClientFinalityUpdate);
    this.chain.emitter.off(routes.events.EventType.lightClientOptimisticUpdate, this.onLightClientOptimisticUpdate);
    this.chain.emitter.off(ChainEvent.updateTargetCustodyGroupCount, this.onTargetGroupCountUpdated);
    this.chain.emitter.off(ChainEvent.publishDataColumns, this.onPublishDataColumns);
    this.chain.emitter.off(ChainEvent.updateStatus, this.onUpdateStatus);
    await this.core.close();

    // Used only for sleep() statements
@@ -265,10 +281,19 @@ export class Network implements INetwork {

  // REST API queries
  getConnectedPeers(): PeerIdStr[] {
    return Array.from(this.connectedPeers.values());
    return Array.from(this.connectedPeersSyncMeta.keys());
  }

  getConnectedPeerSyncMeta(peerId: PeerIdStr): PeerSyncMeta {
    const syncMeta = this.connectedPeersSyncMeta.get(peerId);
    if (!syncMeta) {
      throw new Error(`peerId=${prettyPrintPeerIdStr(peerId)} not in connectedPeerSyncMeta`);
    }
    return {peerId, ...syncMeta};
  }

  getConnectedPeerCount(): number {
    return this.connectedPeers.size;
    return this.connectedPeersSyncMeta.size;
  }

  async getNetworkIdentity(): Promise<routes.node.NetworkIdentity> {
@@ -327,6 +352,25 @@ export class Network implements INetwork {
    });
  }

  async publishDataColumnSidecar(dataColumnSidecar: fulu.DataColumnSidecar): Promise<number> {
    const epoch = computeEpochAtSlot(dataColumnSidecar.signedBlockHeader.message.slot);
    const boundary = this.config.getForkBoundaryAtEpoch(epoch);

    const subnet = computeSubnetForDataColumnSidecar(this.config, dataColumnSidecar);
    return this.publishGossip<GossipType.data_column_sidecar>(
      {type: GossipType.data_column_sidecar, boundary, subnet},
      dataColumnSidecar,
      {
        ignoreDuplicatePublishError: true,
        // we ensure having all topic peers via the prioritizePeers() function
        // in the worst case, if there are 0 peers on the topic, the overall publish operation could still be a success
        // because a supernode will rebuild and publish missing data column sidecars for us
        // hence we want to track sent peers as 0 instead of an error
        allowPublishToZeroTopicPeers: true,
      }
    );
  }
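Note: the publish path derives the subnet from the sidecar itself. In the consensus specs the data column subnet is the column index modulo the subnet count; a sketch under that assumption (the actual helper takes the beacon config and the whole sidecar, and 128 is the assumed mainnet DATA_COLUMN_SIDECAR_SUBNET_COUNT):

// TypeScript sketch
const DATA_COLUMN_SIDECAR_SUBNET_COUNT = 128;

function computeSubnetForColumnIndex(columnIndex: number): number {
  return columnIndex % DATA_COLUMN_SIDECAR_SUBNET_COUNT;
}

computeSubnetForColumnIndex(130); // => 2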
  async publishBeaconAggregateAndProof(aggregateAndProof: SignedAggregateAndProof): Promise<number> {
    const epoch = computeEpochAtSlot(aggregateAndProof.message.aggregate.data.slot);
    const boundary = this.config.getForkBoundaryAtEpoch(epoch);
@@ -472,7 +516,7 @@ export class Network implements INetwork {
        peerId,
        ReqRespMethod.BeaconBlocksByRange,
        // Before altair, prioritize V2. After altair only request V2
        this.config.getForkSeq(this.clock.currentSlot) >= ForkSeq.altair ? [Version.V2] : [(Version.V2, Version.V1)],
        this.config.getForkSeq(this.clock.currentSlot) >= ForkSeq.altair ? [Version.V2] : [Version.V2, Version.V1],
        request
      ),
      request
@@ -481,14 +525,14 @@ export class Network implements INetwork {

  async sendBeaconBlocksByRoot(
    peerId: PeerIdStr,
    request: phase0.BeaconBlocksByRootRequest
    request: BeaconBlocksByRootRequest
  ): Promise<WithBytes<SignedBeaconBlock>[]> {
    return collectMaxResponseTypedWithBytes(
      this.sendReqRespRequest(
        peerId,
        ReqRespMethod.BeaconBlocksByRoot,
        // Before altair, prioritize V2. After altair only request V2
        this.config.getForkSeq(this.clock.currentSlot) >= ForkSeq.altair ? [Version.V2] : [(Version.V2, Version.V1)],
        this.config.getForkSeq(this.clock.currentSlot) >= ForkSeq.altair ? [Version.V2] : [Version.V2, Version.V1],
        request
      ),
      request.length,
@@ -549,6 +593,29 @@ export class Network implements INetwork {
    );
  }

  async sendDataColumnSidecarsByRange(
    peerId: PeerIdStr,
    request: fulu.DataColumnSidecarsByRangeRequest
  ): Promise<fulu.DataColumnSidecar[]> {
    return collectMaxResponseTyped(
      this.sendReqRespRequest(peerId, ReqRespMethod.DataColumnSidecarsByRange, [Version.V1], request),
      // the request's count represents slots, so the actual max count received could be slots * columns per slot
      request.count * NUMBER_OF_COLUMNS,
      responseSszTypeByMethod[ReqRespMethod.DataColumnSidecarsByRange]
    );
  }

  async sendDataColumnSidecarsByRoot(
    peerId: PeerIdStr,
    request: DataColumnSidecarsByRootRequest
  ): Promise<fulu.DataColumnSidecar[]> {
    return collectMaxResponseTyped(
      this.sendReqRespRequest(peerId, ReqRespMethod.DataColumnSidecarsByRoot, [Version.V1], request),
      request.reduce((total, {columns}) => total + columns.length, 0),
      responseSszTypeByMethod[ReqRespMethod.DataColumnSidecarsByRoot]
    );
  }
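Note: the two response caps above differ by request shape. ByRange counts slots, so the upper bound is slots times all columns; ByRoot enumerates explicit columns per block root. A sketch of both bounds (NUMBER_OF_COLUMNS = 128 under mainnet params):

// TypeScript sketch
const NUMBER_OF_COLUMNS = 128;

const byRangeMax = (request: {count: number}) => request.count * NUMBER_OF_COLUMNS;
byRangeMax({count: 4}); // => at most 512 sidecars for 4 slots

const byRootMax = (request: {columns: number[]}[]) =>
  request.reduce((total, {columns}) => total + columns.length, 0);
byRootMax([{columns: [0, 1]}, {columns: [5]}]); // => 3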
  private sendReqRespRequest<Req>(
    peerId: PeerIdStr,
    method: ReqRespMethod,
@@ -658,14 +725,38 @@ export class Network implements INetwork {
  };

  private onHead = async (): Promise<void> => {
    await this.core.updateStatus(this.chain.getStatus());
    await this.onUpdateStatus();
  };

  private onPeerConnected = (data: NetworkEventData[NetworkEvent.peerConnected]): void => {
    this.connectedPeers.add(data.peer);
    const {peer, clientAgent, custodyGroups, status} = data;
    const earliestAvailableSlot = (status as fulu.Status).earliestAvailableSlot;
    this.logger.verbose("onPeerConnected", {
      peer,
      clientAgent,
      custodyGroups: prettyPrintIndices(custodyGroups),
      earliestAvailableSlot: earliestAvailableSlot ?? "pre-fulu",
    });
    this.connectedPeersSyncMeta.set(peer, {
      client: clientAgent,
      custodyGroups,
      earliestAvailableSlot, // can be undefined pre-fulu
    });
  };

  private onPeerDisconnected = (data: NetworkEventData[NetworkEvent.peerDisconnected]): void => {
    this.connectedPeers.delete(data.peer);
    this.connectedPeersSyncMeta.delete(data.peer);
  };

  private onTargetGroupCountUpdated = (count: number): void => {
    this.core.setTargetGroupCount(count);
  };

  private onPublishDataColumns = (sidecars: fulu.DataColumnSidecar[]): Promise<number[]> => {
    return promiseAllMaybeAsync(sidecars.map((sidecar) => () => this.publishDataColumnSidecar(sidecar)));
  };

  private onUpdateStatus = async (): Promise<void> => {
    await this.core.updateStatus(this.chain.getStatus());
  };
}

12
packages/beacon-node/src/network/networkConfig.ts
Normal file
@@ -0,0 +1,12 @@
import {BeaconConfig} from "@lodestar/config";
import {CustodyConfig} from "../util/dataColumns.js";
import {NodeId} from "./subnets/interface.js";

/**
 * Store shared data for different modules in the network stack.
 */
export type NetworkConfig = {
  readonly nodeId: NodeId;
  readonly config: BeaconConfig;
  readonly custodyConfig: CustodyConfig;
};

@@ -23,6 +23,7 @@ export interface NetworkOptions
  useWorker?: boolean;
  maxYoungGenerationSizeMb?: number;
  disableLightClientServer?: boolean;
  supernode?: boolean;

  /**
   * During E2E tests observe a lot of following `missing stream`:
@@ -45,8 +46,8 @@ export interface NetworkOptions
}

export const defaultNetworkOptions: NetworkOptions = {
  maxPeers: 110, // Allow some room above targetPeers for new inbound peers
  targetPeers: 100,
  maxPeers: 210, // Allow some room above targetPeers for new inbound peers
  targetPeers: 200,
  localMultiaddrs: ["/ip4/0.0.0.0/tcp/9000"],
  bootMultiaddrs: [],
  /** disabled by default */
@@ -61,4 +62,9 @@ export const defaultNetworkOptions: NetworkOptions = {
  slotsToSubscribeBeforeAggregatorDuty: 2,
  // This will enable the light client server by default
  disableLightClientServer: false,
  // specific option for fulu
  // - this is the same as TARGET_SUBNET_PEERS
  // - for fusaka-devnets, we have 25-30 peers per subnet
  // - for public testnets or mainnet, the average number of peers per group is SAMPLES_PER_SLOT * targetPeers / NUMBER_OF_CUSTODY_GROUPS = 6.25 so this should not be an issue
  targetGroupPeers: 6,
};
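Note: the 6.25 figure in the comment corresponds to the old default of 100 target peers: SAMPLES_PER_SLOT (8) * 100 / NUMBER_OF_CUSTODY_GROUPS (128) = 6.25 peers per group on average. With the new targetPeers: 200 the same formula gives 12.5, so targetGroupPeers: 6 has even more headroom:

// TypeScript sketch
const SAMPLES_PER_SLOT = 8;
const NUMBER_OF_CUSTODY_GROUPS = 128;
const avgPeersPerGroup = (targetPeers: number) => (SAMPLES_PER_SLOT * targetPeers) / NUMBER_OF_CUSTODY_GROUPS;
avgPeersPerGroup(100); // => 6.25
avgPeersPerGroup(200); // => 12.5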
@@ -2,20 +2,25 @@ import {ENR} from "@chainsafe/enr";
import type {PeerId, PeerInfo, PrivateKey} from "@libp2p/interface";
import {BeaconConfig} from "@lodestar/config";
import {LoggerNode} from "@lodestar/logger/node";
import {ATTESTATION_SUBNET_COUNT, SYNC_COMMITTEE_SUBNET_COUNT} from "@lodestar/params";
import {SubnetID} from "@lodestar/types";
import {pruneSetToMax, sleep} from "@lodestar/utils";
import {ATTESTATION_SUBNET_COUNT, ForkSeq, SYNC_COMMITTEE_SUBNET_COUNT} from "@lodestar/params";
import {CustodyIndex, SubnetID} from "@lodestar/types";
import {pruneSetToMax, sleep, toHex} from "@lodestar/utils";
import {bytesToInt} from "@lodestar/utils";
import {Multiaddr} from "@multiformats/multiaddr";
import {IClock} from "../../util/clock.js";
import {getCustodyGroups} from "../../util/dataColumns.js";
import {NetworkCoreMetrics} from "../core/metrics.js";
import {Discv5Worker} from "../discv5/index.js";
import {LodestarDiscv5Opts} from "../discv5/types.js";
import {Libp2p} from "../interface.js";
import {getLibp2pError} from "../libp2p/error.js";
import {ENRKey, SubnetType} from "../metadata.js";
import {NetworkConfig} from "../networkConfig.js";
import {computeNodeId} from "../subnets/interface.js";
import {getConnectionsMap, prettyPrintPeerId} from "../util.js";
import {IPeerRpcScoreStore, ScoreState} from "./score/index.js";
import {deserializeEnrSubnets, zeroAttnets, zeroSyncnets} from "./utils/enrSubnetsDeserialize.js";
import {type CustodyGroupQueries} from "./utils/prioritizePeers.js";

/** Max number of cached ENRs after discovering a good peer */
const MAX_CACHED_ENRS = 100;
@@ -30,12 +35,12 @@ export type PeerDiscoveryOpts = {

export type PeerDiscoveryModules = {
  privateKey: PrivateKey;
  networkConfig: NetworkConfig;
  libp2p: Libp2p;
  clock: IClock;
  peerRpcScores: IPeerRpcScoreStore;
  metrics: NetworkCoreMetrics | null;
  logger: LoggerNode;
  config: BeaconConfig;
};

type PeerIdStr = string;
@@ -87,6 +92,8 @@ type CachedENR = {
  multiaddrTCP: Multiaddr;
  subnets: Record<SubnetType, boolean[]>;
  addedUnixMs: number;
  // custodyGroups is null for pre-fulu
  custodyGroups: number[] | null;
};

/**
@@ -96,9 +103,11 @@ type CachedENR = {
export class PeerDiscovery {
  readonly discv5: Discv5Worker;
  private libp2p: Libp2p;
  private readonly clock: IClock;
  private peerRpcScores: IPeerRpcScoreStore;
  private metrics: NetworkCoreMetrics | null;
  private logger: LoggerNode;
  private config: BeaconConfig;
  private cachedENRs = new Map<PeerIdStr, CachedENR>();
  private randomNodeQuery: QueryStatus = {code: QueryStatusCode.NotActive};
  private peersToConnect = 0;
@@ -107,18 +116,24 @@ export class PeerDiscovery {
    syncnets: new Map(),
  };

  private custodyGroupQueries: CustodyGroupQueries;

  private discv5StartMs: number;
  private discv5FirstQueryDelayMs: number;

  private connectToDiscv5BootnodesOnStart: boolean | undefined = false;

  constructor(modules: PeerDiscoveryModules, opts: PeerDiscoveryOpts, discv5: Discv5Worker) {
    const {libp2p, peerRpcScores, metrics, logger} = modules;
    const {libp2p, clock, peerRpcScores, metrics, logger, networkConfig} = modules;
    this.libp2p = libp2p;
    this.clock = clock;
    this.peerRpcScores = peerRpcScores;
    this.metrics = metrics;
    this.logger = logger;
    this.config = networkConfig.config;
    this.discv5 = discv5;
    this.custodyGroupQueries = new Map();

    this.discv5StartMs = 0;
    this.discv5StartMs = Date.now();
    this.discv5FirstQueryDelayMs = opts.discv5FirstQueryDelayMs;
@@ -149,6 +164,13 @@ export class PeerDiscovery {
    metrics.discovery.cachedENRsSize.addCollect(() => {
      metrics.discovery.cachedENRsSize.set(this.cachedENRs.size);
      metrics.discovery.peersToConnect.set(this.peersToConnect);

      // PeerDAS metrics
      const groupsToConnect = Array.from(this.custodyGroupQueries.values());
      const groupPeersToConnect = groupsToConnect.reduce((acc, elem) => acc + elem, 0);
      metrics.discovery.custodyGroupPeersToConnect.set(groupPeersToConnect);
      metrics.discovery.custodyGroupsToConnect.set(groupsToConnect.filter((elem) => elem > 0).length);

      for (const type of [SubnetType.attnets, SubnetType.syncnets]) {
        const subnetPeersToConnect = Array.from(this.subnetRequests[type].values()).reduce(
          (acc, {peersToConnect}) => acc + peersToConnect,
@@ -167,7 +189,7 @@ export class PeerDiscovery {
      privateKey: modules.privateKey,
      metrics: modules.metrics ?? undefined,
      logger: modules.logger,
      config: modules.config,
      config: modules.networkConfig.config,
      genesisTime: modules.clock.genesisTime,
    });

@@ -182,8 +204,13 @@ export class PeerDiscovery {

  /**
   * Request to find peers, both on specific subnets and in general
   * pre-fulu custodyGroupRequests is empty
   */
  discoverPeers(peersToConnect: number, subnetRequests: SubnetDiscvQueryMs[] = []): void {
  discoverPeers(
    peersToConnect: number,
    custodyGroupRequests: CustodyGroupQueries,
    subnetRequests: SubnetDiscvQueryMs[] = []
  ): void {
    const subnetsToDiscoverPeers: SubnetDiscvQueryMs[] = [];
    const cachedENRsToDial = new Map<PeerIdStr, CachedENR>();
    // Iterate in reverse to consider first the most recent ENRs
@@ -209,15 +236,44 @@ export class PeerDiscovery {

    this.peersToConnect += peersToConnect;

    // starting from PeerDAS, we need to prioritize column subnet peers first in order to have stable subnet sampling
    const groupsToDiscover = new Set<CustodyIndex>();
    let groupPeersToDiscover = 0;

    const forkSeq = this.config.getForkSeq(this.clock.currentSlot);
    if (forkSeq >= ForkSeq.fulu) {
      group: for (const [group, maxPeersToConnect] of custodyGroupRequests) {
        let cachedENRsInGroup = 0;
        for (const cachedENR of cachedENRsReverse) {
          if (cachedENR.custodyGroups?.includes(group)) {
            cachedENRsToDial.set(cachedENR.peerId.toString(), cachedENR);

            if (++cachedENRsInGroup >= maxPeersToConnect) {
              continue group;
            }
          }

          const groupPeersToConnect = Math.max(maxPeersToConnect - cachedENRsInGroup, 0);
          this.custodyGroupQueries.set(group, groupPeersToConnect);
          groupsToDiscover.add(group);
          groupPeersToDiscover += groupPeersToConnect;
        }
      }
    }

    subnet: for (const subnetRequest of subnetRequests) {
      // Get cached ENRs from the discovery service that are in the requested `subnetId`, but not connected yet
      let cachedENRsInSubnet = 0;
      for (const cachedENR of cachedENRsReverse) {
        if (cachedENR.subnets[subnetRequest.type][subnetRequest.subnet]) {
          cachedENRsToDial.set(cachedENR.peerId.toString(), cachedENR);

          if (++cachedENRsInSubnet >= subnetRequest.maxPeersToDiscover) {
            continue subnet;
      // only dial attnet/syncnet peers if subnet sampling peers are stable
      if (groupPeersToDiscover === 0) {
        for (const cachedENR of cachedENRsReverse) {
          if (cachedENR.subnets[subnetRequest.type][subnetRequest.subnet]) {
            cachedENRsToDial.set(cachedENR.peerId.toString(), cachedENR);

            if (++cachedENRsInSubnet >= subnetRequest.maxPeersToDiscover) {
              continue subnet;
            }
          }
        }
      }
@@ -265,6 +321,8 @@ export class PeerDiscovery {
      peersToConnect,
      peersAvailableToDial: cachedENRsToDial.size,
      subnetsToDiscover: subnetsToDiscoverPeers.length,
      groupsToDiscover: Array.from(groupsToDiscover).join(","),
      groupPeersToDiscover,
      shouldRunFindRandomNodeQuery,
    });
  }
@@ -317,7 +375,8 @@ export class PeerDiscovery {

    const attnets = zeroAttnets;
    const syncnets = zeroSyncnets;
    const status = this.handleDiscoveredPeer(id, multiaddrs[0], attnets, syncnets);

    const status = this.handleDiscoveredPeer(id, multiaddrs[0], attnets, syncnets, undefined);
    this.logger.debug("Discovered peer via libp2p", {peer: prettyPrintPeerId(id), status});
    this.metrics?.discovery.discoveredStatus.inc({status});
  };
@@ -340,6 +399,13 @@ export class PeerDiscovery {
    // Are these fields mandatory?
    const attnetsBytes = enr.kvs.get(ENRKey.attnets); // 64 bits
    const syncnetsBytes = enr.kvs.get(ENRKey.syncnets); // 4 bits
    const custodyGroupCountBytes = enr.kvs.get(ENRKey.cgc); // not a pre-serialized value; it's the byte representation of a number
    if (custodyGroupCountBytes === undefined) {
      this.logger.debug("peer discovered with no cgc, using default/minimum", {
        custodyRequirement: this.config.CUSTODY_REQUIREMENT,
        peer: prettyPrintPeerId(peerId),
      });
    }

    // Use faster version than ssz's implementation that leverages pre-cached.
    // Some nodes don't serialize the bitfields properly, encoding the syncnets as attnets,
@@ -347,9 +413,14 @@ export class PeerDiscovery {
    // never throw and treat too long or too short bitfields as zero-ed
    const attnets = attnetsBytes ? deserializeEnrSubnets(attnetsBytes, ATTESTATION_SUBNET_COUNT) : zeroAttnets;
    const syncnets = syncnetsBytes ? deserializeEnrSubnets(syncnetsBytes, SYNC_COMMITTEE_SUBNET_COUNT) : zeroSyncnets;
    const custodyGroupCount = custodyGroupCountBytes ? bytesToInt(custodyGroupCountBytes, "be") : undefined;
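Note: cgc is stored in the ENR as the raw big-endian bytes of a number, not an SSZ value. A minimal sketch of the decode that the code above delegates to bytesToInt(bytes, "be"):

// TypeScript sketch
function bytesToIntBE(bytes: Uint8Array): number {
  let value = 0;
  for (const byte of bytes) {
    value = value * 256 + byte; // shift left one byte, then add
  }
  return value;
}

bytesToIntBE(Uint8Array.of(0x01, 0x00)); // => 256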
    const status = this.handleDiscoveredPeer(peerId, multiaddrTCP, attnets, syncnets);
    this.logger.debug("Discovered peer via discv5", {peer: prettyPrintPeerId(peerId), status});
    const status = this.handleDiscoveredPeer(peerId, multiaddrTCP, attnets, syncnets, custodyGroupCount);
    this.logger.debug("Discovered peer via discv5", {
      peer: prettyPrintPeerId(peerId),
      status,
      cgc: custodyGroupCount,
    });
    this.metrics?.discovery.discoveredStatus.inc({status});
  };

@@ -360,8 +431,11 @@ export class PeerDiscovery {
    peerId: PeerId,
    multiaddrTCP: Multiaddr,
    attnets: boolean[],
    syncnets: boolean[]
    syncnets: boolean[],
    custodySubnetCount?: number
  ): DiscoveredPeerStatus {
    const nodeId = computeNodeId(peerId);
    this.logger.debug("handleDiscoveredPeer", {nodeId: toHex(nodeId), peerId: peerId.toString()});
    try {
      // Check if peer is not banned or disconnected
      if (this.peerRpcScores.getScoreState(peerId) !== ScoreState.Healthy) {
@@ -390,12 +464,19 @@ export class PeerDiscovery {
        return DiscoveredPeerStatus.already_dialing;
      }

      const forkSeq = this.config.getForkSeq(this.clock.currentSlot);

      // Should dial peer?
      const cachedPeer: CachedENR = {
        peerId,
        multiaddrTCP,
        subnets: {attnets, syncnets},
        addedUnixMs: Date.now(),
        // for pre-fulu, custodyGroups is null
        custodyGroups:
          forkSeq >= ForkSeq.fulu
            ? getCustodyGroups(this.config, nodeId, custodySubnetCount ?? this.config.CUSTODY_REQUIREMENT)
            : null,
      };

      // Only dial peer if necessary
@@ -416,6 +497,34 @@ export class PeerDiscovery {
  }

  private shouldDialPeer(peer: CachedENR): boolean {
    const forkSeq = this.config.getForkSeq(this.clock.currentSlot);
    if (forkSeq >= ForkSeq.fulu && peer.custodyGroups !== null) {
      // pre-fulu `this.custodyGroupQueries` is empty
      // starting from fulu, we need to make sure we have stable subnet sampling peers first
      // given SAMPLES_PER_SLOT = 8 and 100 peers, we have 800 custody columns from peers
      // with NUMBER_OF_CUSTODY_GROUPS = 128, we have 800 / 128 = 6.25 peers per column on average
      // it would not be hard to find TARGET_SUBNET_PEERS (6) peers per sampling column and TARGET_GROUP_PEERS_PER_SUBNET (4) peers per non-sampling column
      // after the first few heartbeats, we should have no more columns requested, then go with the conditions of prior forks
      let hasMatchingGroup = false;
      let custodyGroupRequestCount = 0;
      for (const [group, peersToConnect] of this.custodyGroupQueries.entries()) {
        if (peersToConnect <= 0) {
          this.custodyGroupQueries.delete(group);
        } else if (peer.custodyGroups.includes(group)) {
          this.custodyGroupQueries.set(group, Math.max(0, peersToConnect - 1));
          hasMatchingGroup = true;
          custodyGroupRequestCount += peersToConnect;
        }
      }

      // if subnet sampling peers are not stable and this peer is not in the requested columns, ignore it
      if (custodyGroupRequestCount > 0 && !hasMatchingGroup) {
        this.metrics?.discovery.notDialReason.inc({reason: NotDialReason.not_contain_requested_sampling_groups});
        return false;
      }
    }
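Note: the loop above drains custodyGroupQueries as matching peers get dialed: each dialed peer decrements the outstanding count for every group it custodies, and exhausted groups are removed on a later pass. A sketch of one accounting step under those assumptions (group ids and counts below are made up):

// TypeScript sketch
const custodyGroupQueries = new Map<number, number>([[3, 2], [17, 1]]);
const peerCustodyGroups = [3, 17, 42]; // groups this candidate peer custodies

for (const [group, peersToConnect] of custodyGroupQueries.entries()) {
  if (peersToConnect <= 0) {
    custodyGroupQueries.delete(group); // exhausted on a previous dial
  } else if (peerCustodyGroups.includes(group)) {
    custodyGroupQueries.set(group, Math.max(0, peersToConnect - 1));
  }
}
// custodyGroupQueries => Map { 3 => 1, 17 => 0 }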
    // logic up to the Deneb fork
    for (const type of [SubnetType.attnets, SubnetType.syncnets]) {
      for (const [subnet, {toUnixMs, peersToConnect}] of this.subnetRequests[type].entries()) {
        if (toUnixMs < Date.now() || peersToConnect === 0) {
@@ -441,6 +550,7 @@ export class PeerDiscovery {
      return true;
    }

    this.metrics?.discovery.notDialReason.inc({reason: NotDialReason.not_contain_requested_attnet_syncnet_subnets});
    return false;
  }

@@ -2,20 +2,22 @@ import {BitArray} from "@chainsafe/ssz";
import {Connection, PeerId, PrivateKey} from "@libp2p/interface";
import {BeaconConfig} from "@lodestar/config";
import {LoggerNode} from "@lodestar/logger/node";
import {SLOTS_PER_EPOCH, SYNC_COMMITTEE_SUBNET_COUNT} from "@lodestar/params";
import {Metadata, altair, phase0} from "@lodestar/types";
import {withTimeout} from "@lodestar/utils";
import {ForkSeq, SLOTS_PER_EPOCH, SYNC_COMMITTEE_SUBNET_COUNT} from "@lodestar/params";
import {Metadata, Status, altair, fulu, phase0} from "@lodestar/types";
import {prettyPrintIndices, toHex, withTimeout} from "@lodestar/utils";
import {GOODBYE_KNOWN_CODES, GoodByeReasonCode, Libp2pEvent} from "../../constants/index.js";
import {IClock} from "../../util/clock.js";
import {getCustodyGroups, getDataColumns} from "../../util/dataColumns.js";
import {NetworkCoreMetrics} from "../core/metrics.js";
import {LodestarDiscv5Opts} from "../discv5/types.js";
import {INetworkEventBus, NetworkEvent, NetworkEventData} from "../events.js";
import {Eth2Gossipsub} from "../gossip/gossipsub.js";
import {Libp2p} from "../interface.js";
import {SubnetType} from "../metadata.js";
import {NetworkConfig} from "../networkConfig.js";
import {ReqRespMethod} from "../reqresp/ReqRespBeaconNode.js";
import {StatusCache} from "../statusCache.js";
import {SubnetsService} from "../subnets/index.js";
import {NodeId, SubnetsService, computeNodeId} from "../subnets/index.js";
import {getConnection, getConnectionsMap, prettyPrintPeerId, prettyPrintPeerIdStr} from "../util.js";
import {ClientKind, getKnownClientFromAgentVersion} from "./client.js";
import {PeerDiscovery, SubnetDiscvQueryMs} from "./discover.js";
@@ -74,6 +76,8 @@ export type PeerManagerOpts = {
  targetPeers: number;
  /** The maximum number of peers we allow (exceptions for subnet peers) */
  maxPeers: number;
  /** Target peers per PeerDAS group */
  targetGroupPeers: number;
  /**
   * Delay the 1st query after starting discv5
   * See https://github.com/ChainSafe/lodestar/issues/3423
@@ -94,7 +98,7 @@ export type PeerManagerOpts = {
 */
export interface IReqRespBeaconNodePeerManager {
  sendPing(peerId: PeerId): Promise<phase0.Ping>;
  sendStatus(peerId: PeerId, request: phase0.Status): Promise<phase0.Status>;
  sendStatus(peerId: PeerId, request: Status): Promise<Status>;
  sendGoodbye(peerId: PeerId, request: phase0.Goodbye): Promise<void>;
  sendMetadata(peerId: PeerId): Promise<Metadata>;
}
@@ -109,15 +113,18 @@ export type PeerManagerModules = {
  attnetsService: SubnetsService;
  syncnetsService: SubnetsService;
  clock: IClock;
  config: BeaconConfig;
  peerRpcScores: IPeerRpcScoreStore;
  events: INetworkEventBus;
  networkConfig: NetworkConfig;
  peersData: PeersData;
  statusCache: StatusCache;
};

export type PeerRequestedSubnetType = SubnetType | "column";

type PeerIdStr = string;

// TODO(fulu): dedupe with network/peers/peerData.ts
enum RelevantPeerStatus {
  Unknown = "unknown",
  relevant = "relevant",
@@ -133,6 +140,7 @@ enum RelevantPeerStatus {
 * - Disconnect peers if over target peers
 */
export class PeerManager {
  private nodeId: NodeId;
  private readonly libp2p: Libp2p;
  private readonly logger: LoggerNode;
  private readonly metrics: NetworkCoreMetrics | null;
@@ -141,13 +149,14 @@ export class PeerManager {
  private readonly attnetsService: SubnetsService;
  private readonly syncnetsService: SubnetsService;
  private readonly clock: IClock;
  private readonly networkConfig: NetworkConfig;
  private readonly config: BeaconConfig;
  private readonly peerRpcScores: IPeerRpcScoreStore;
  /** If null, discovery is disabled */
  private readonly discovery: PeerDiscovery | null;
  private readonly networkEventBus: INetworkEventBus;
  private readonly statusCache: StatusCache;
  private lastStatus: phase0.Status;
  private lastStatus: Status;

  // A single map of connected peers with all necessary data to handle PINGs, STATUS, and metrics
  private connectedPeers: Map<PeerIdStr, PeerData>;
@@ -156,6 +165,7 @@ export class PeerManager {
  private intervals: NodeJS.Timeout[] = [];

  constructor(modules: PeerManagerModules, opts: PeerManagerOpts, discovery: PeerDiscovery | null) {
    const {networkConfig} = modules;
    this.libp2p = modules.libp2p;
    this.logger = modules.logger;
    this.metrics = modules.metrics;
@@ -165,12 +175,14 @@ export class PeerManager {
    this.syncnetsService = modules.syncnetsService;
    this.statusCache = modules.statusCache;
    this.clock = modules.clock;
    this.config = modules.config;
    this.networkConfig = networkConfig;
    this.config = networkConfig.config;
    this.peerRpcScores = modules.peerRpcScores;
    this.networkEventBus = modules.events;
    this.connectedPeers = modules.peersData.connectedPeers;
    this.opts = opts;
    this.discovery = discovery;
    this.nodeId = networkConfig.nodeId;

    const {metrics} = modules;
    if (metrics) {
@@ -317,12 +329,40 @@ export class PeerManager {
    // Store metadata always in case the peer updates attnets but not the sequence number
    // Trust that the peer always sends the latest metadata (From Lighthouse)
    const peerData = this.connectedPeers.get(peer.toString());
    this.logger.debug("onMetadata", {
      peer: peer.toString(),
      peerData: peerData !== undefined,
      custodyGroupCount: (metadata as Partial<fulu.Metadata>)?.custodyGroupCount,
    });
    if (peerData) {
      const oldMetadata = peerData.metadata;
      const custodyGroupCount =
        (metadata as Partial<fulu.Metadata>).custodyGroupCount ?? this.config.CUSTODY_REQUIREMENT;
      const samplingGroupCount = Math.max(this.config.SAMPLES_PER_SLOT, custodyGroupCount);
      const nodeId = peerData?.nodeId ?? computeNodeId(peer);
      const custodyGroups =
        oldMetadata == null || oldMetadata.custodyGroups == null || custodyGroupCount !== oldMetadata.custodyGroupCount
          ? getCustodyGroups(this.config, nodeId, custodyGroupCount)
          : oldMetadata.custodyGroups;
      const oldSamplingGroupCount = Math.max(this.config.SAMPLES_PER_SLOT, oldMetadata?.custodyGroupCount ?? 0);
      const samplingGroups =
        oldMetadata == null || oldMetadata.samplingGroups == null || samplingGroupCount !== oldSamplingGroupCount
          ? getCustodyGroups(this.config, nodeId, samplingGroupCount)
          : oldMetadata.samplingGroups;
      peerData.metadata = {
        seqNumber: metadata.seqNumber,
        attnets: metadata.attnets,
        syncnets: (metadata as Partial<altair.Metadata>).syncnets ?? BitArray.fromBitLen(SYNC_COMMITTEE_SUBNET_COUNT),
        custodyGroupCount:
          (metadata as Partial<fulu.Metadata>).custodyGroupCount ??
          // TODO: spec says that Clients MAY reject peers with a value less than CUSTODY_REQUIREMENT
          this.config.CUSTODY_REQUIREMENT,
        custodyGroups,
        samplingGroups,
      };
      if (oldMetadata === null || oldMetadata.custodyGroupCount !== peerData.metadata.custodyGroupCount) {
        void this.requestStatus(peer, this.statusCache.get());
      }
    }
  }
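Note: sampling groups are a superset of custody groups: the count is clamped up to SAMPLES_PER_SLOT, and both sets are derived deterministically from the peer's node id, so they only need recomputing when the advertised custodyGroupCount changes. A sketch of the count rule from the hunk above:

// TypeScript sketch
const SAMPLES_PER_SLOT = 8;

function samplingGroupCount(custodyGroupCount: number): number {
  return Math.max(SAMPLES_PER_SLOT, custodyGroupCount);
}

samplingGroupCount(4); // => 8 (minimum sampling width)
samplingGroupCount(128); // => 128 (a supernode custodies everything)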
@@ -345,7 +385,7 @@ export class PeerManager {
|
||||
/**
|
||||
* Handle a STATUS request + response (rpc handler responds with STATUS automatically)
|
||||
*/
|
||||
private onStatus(peer: PeerId, status: phase0.Status): void {
|
||||
private onStatus(peer: PeerId, status: Status): void {
|
||||
// reset the to-status timer of this peer
|
||||
const peerData = this.connectedPeers.get(peer.toString());
|
||||
if (peerData) {
|
||||
@@ -353,9 +393,16 @@ export class PeerManager {
|
||||
peerData.status = status;
|
||||
}
|
||||
|
||||
const forkName = this.config.getForkName(this.clock.currentSlot);
|
||||
|
||||
let isIrrelevant: boolean;
|
||||
try {
|
||||
const irrelevantReasonType = assertPeerRelevance(status, this.statusCache.get(), this.clock.currentSlot);
|
||||
const irrelevantReasonType = assertPeerRelevance(
|
||||
forkName,
|
||||
status,
|
||||
this.statusCache.get(),
|
||||
this.clock.currentSlot
|
||||
);
|
||||
if (irrelevantReasonType === null) {
|
||||
isIrrelevant = false;
|
||||
} else {
|
||||
@@ -389,34 +436,70 @@ export class PeerManager {
|
||||
peerData.relevantStatus = RelevantPeerStatus.relevant;
|
||||
}
|
||||
if (getConnection(this.libp2p, peer.toString())) {
|
||||
this.networkEventBus.emit(NetworkEvent.peerConnected, {peer: peer.toString(), status});
|
||||
const nodeId = peerData?.nodeId ?? computeNodeId(peer);
|
||||
// TODO(fulu): Are we sure we've run Metadata before this?
|
||||
const custodyGroupCount = peerData?.metadata?.custodyGroupCount ?? this.config.CUSTODY_REQUIREMENT;
|
||||
const custodyGroups =
|
||||
peerData?.metadata?.custodyGroups ?? getCustodyGroups(this.config, nodeId, custodyGroupCount);
|
||||
const dataColumns = getDataColumns(this.config, nodeId, custodyGroupCount);
|
||||
|
||||
const sampleSubnets = this.networkConfig.custodyConfig.sampledSubnets;
|
||||
const matchingSubnetsNum = sampleSubnets.reduce((acc, elem) => acc + (dataColumns.includes(elem) ? 1 : 0), 0);
|
||||
const hasAllColumns = matchingSubnetsNum === sampleSubnets.length;
|
||||
const clientAgent = peerData?.agentClient ?? ClientKind.Unknown;
|
||||
|
||||
this.logger.debug("onStatus", {
|
||||
nodeId: toHex(nodeId),
|
||||
myNodeId: toHex(this.nodeId),
|
||||
peerId: peer.toString(),
|
||||
custodyGroupCount,
|
||||
hasAllColumns,
|
||||
dataColumns: prettyPrintIndices(dataColumns),
|
||||
matchingSubnetsNum,
|
||||
custodyGroups: prettyPrintIndices(custodyGroups),
|
||||
mySampleSubnets: prettyPrintIndices(sampleSubnets),
|
||||
clientAgent,
|
||||
});
|
||||
|
||||
this.networkEventBus.emit(NetworkEvent.peerConnected, {
|
||||
peer: peer.toString(),
|
||||
status,
|
||||
clientAgent,
|
||||
custodyGroups,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
private async requestMetadata(peer: PeerId): Promise<void> {
|
||||
const peerIdStr = peer.toString();
|
||||
try {
|
||||
this.onMetadata(peer, await this.reqResp.sendMetadata(peer));
|
||||
} catch (_e) {
|
||||
} catch (e) {
|
||||
this.logger.verbose("invalid requestMetadata", {peer: prettyPrintPeerIdStr(peerIdStr)}, e as Error);
|
||||
// TODO: Downvote peer here or in the reqResp layer
|
||||
}
|
||||
}
|
||||
|
||||
private async requestPing(peer: PeerId): Promise<void> {
|
||||
const peerIdStr = peer.toString();
|
||||
try {
|
||||
this.onPing(peer, await this.reqResp.sendPing(peer));
|
||||
|
||||
// If peer replies a PING request also update lastReceivedMsg
|
||||
const peerData = this.connectedPeers.get(peer.toString());
|
||||
if (peerData) peerData.lastReceivedMsgUnixTsMs = Date.now();
|
||||
} catch (_e) {
|
||||
} catch (e) {
|
||||
this.logger.verbose("invalid requestPing", {peer: prettyPrintPeerIdStr(peerIdStr)}, e as Error);
|
||||
// TODO: Downvote peer here or in the reqResp layer
|
||||
}
|
||||
}
|
||||
|
||||
private async requestStatus(peer: PeerId, localStatus: phase0.Status): Promise<void> {
|
||||
private async requestStatus(peer: PeerId, localStatus: Status): Promise<void> {
|
||||
const peerIdStr = peer.toString();
|
||||
try {
|
||||
this.onStatus(peer, await this.reqResp.sendStatus(peer, localStatus));
|
||||
} catch (_e) {
|
||||
} catch (e) {
|
||||
this.logger.verbose("invalid requestStatus", {peer: prettyPrintPeerIdStr(peerIdStr)}, e as Error);
|
||||
// TODO: Failed to get peer latest status: downvote but don't disconnect
|
||||
}
|
||||
}
|
||||
@@ -467,8 +550,9 @@ export class PeerManager {
|
||||
this.clock.currentSlot - status.headSlot > STARVATION_THRESHOLD_SLOTS;
|
||||
this.lastStatus = status;
|
||||
this.metrics?.peerManager.starved.set(starved ? 1 : 0);
|
||||
const forkSeq = this.config.getForkSeq(this.clock.currentSlot);
|
||||
|
||||
const {peersToDisconnect, peersToConnect, attnetQueries, syncnetQueries} = prioritizePeers(
|
||||
const {peersToDisconnect, peersToConnect, attnetQueries, syncnetQueries, custodyGroupQueries} = prioritizePeers(
|
||||
connectedHealthyPeers.map((peer) => {
|
||||
const peerData = this.connectedPeers.get(peer.toString());
|
||||
return {
|
||||
@@ -477,19 +561,25 @@ export class PeerManager {
|
||||
status: peerData?.status ?? null,
|
||||
attnets: peerData?.metadata?.attnets ?? null,
|
||||
syncnets: peerData?.metadata?.syncnets ?? null,
|
||||
// here we care samplingGroups not custodyGroups in order to know which column subnets peers subscribe to
|
||||
samplingGroups: peerData?.metadata?.samplingGroups ?? null,
|
||||
score: this.peerRpcScores.getScore(peer),
|
||||
};
|
||||
}),
|
||||
// Collect subnets which we need peers for in the current slot
|
||||
this.attnetsService.getActiveSubnets(),
|
||||
this.syncnetsService.getActiveSubnets(),
|
||||
// ignore samplingGroups for pre-fulu forks
|
||||
forkSeq >= ForkSeq.fulu ? this.networkConfig.custodyConfig.sampleGroups : undefined,
|
||||
{
|
||||
...this.opts,
|
||||
status,
|
||||
starved,
|
||||
starvationPruneRatio: STARVATION_PRUNE_RATIO,
|
||||
starvationThresholdSlots: STARVATION_THRESHOLD_SLOTS,
|
||||
}
|
||||
},
|
||||
this.config,
|
||||
this.metrics
|
||||
);

const queriesMerged: SubnetDiscvQueryMs[] = [];
@@ -514,6 +604,11 @@ export class PeerManager {
}
}

for (const maxPeersToDiscover of custodyGroupQueries.values()) {
this.metrics?.peersRequestedSubnetsToQuery.inc({type: "column"}, 1);
this.metrics?.peersRequestedSubnetsPeerCount.inc({type: "column"}, maxPeersToDiscover);
}

// Disconnect first to free up slots before we dial new peers
for (const [reason, peers] of peersToDisconnect) {
this.metrics?.peersRequestedToDisconnect.inc({reason}, peers.length);
@@ -525,7 +620,8 @@ export class PeerManager {
if (this.discovery) {
try {
this.metrics?.peersRequestedToConnect.inc(peersToConnect);
this.discovery.discoverPeers(peersToConnect, queriesMerged);
// For PeerDAS, Lodestar implements a subnet sampling strategy, hence we need to issue columnSubnetQueries to PeerDiscovery
this.discovery.discoverPeers(peersToConnect, custodyGroupQueries, queriesMerged);
} catch (e) {
this.logger.error("Error on discoverPeers", {}, e as Error);
}
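The extra custodyGroupQueries argument lets discovery search for peers on the needed data column subnets alongside the merged attnet/syncnet queries. A rough sketch of what the consuming side of such an API might look like; all shapes here are illustrative, not Lodestar's real discovery types:

// Sketch: a discovery backend accepting both classic subnet queries and
// PeerDAS column queries. Every type here is a hypothetical stand-in.
type SubnetDiscvQuery = {subnet: number; maxPeersToDiscover: number};

class DiscoverySketch {
  discoverPeers(
    peersToConnect: number,
    columnQueries: Map<number, number>, // custody group -> peers wanted
    subnetQueries: SubnetDiscvQuery[]
  ): void {
    console.log(`dialing up to ${peersToConnect} peers`);
    for (const [group, wanted] of columnQueries) {
      console.log(`need ${wanted} peer(s) custodying column group ${group}`);
    }
    for (const q of subnetQueries) {
      console.log(`need ${q.maxPeersToDiscover} peer(s) on subnet ${q.subnet}`);
    }
  }
}

new DiscoverySketch().discoverPeers(5, new Map([[3, 2]]), [{subnet: 12, maxPeersToDiscover: 3}]);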

@@ -600,12 +696,17 @@ export class PeerManager {
*/
private onLibp2pPeerConnect = async (evt: CustomEvent<Connection>): Promise<void> => {
const {direction, status, remotePeer} = evt.detail;
this.logger.verbose("peer connected", {peer: prettyPrintPeerId(remotePeer), direction, status});
const remotePeerStr = remotePeer.toString();
const remotePeerPrettyStr = prettyPrintPeerId(remotePeer);
this.logger.verbose("peer connected", {peer: remotePeerPrettyStr, direction, status});
// NOTE: The peerConnect event is not emitted here, but after asserting peer relevance
this.metrics?.peerConnectedEvent.inc({direction, status});
// libp2p may emit a closed connection, which we don't want to handle
// see https://github.com/libp2p/js-libp2p/issues/1565
if (this.connectedPeers.has(remotePeer.toString()) || status !== "open") {

if (evt.detail.status !== "open") {
this.logger.debug("Peer disconnected before identify protocol initiated", {
peerId: remotePeerPrettyStr,
status: evt.detail.status,
});
return;
}
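Because libp2p can emit peer:connect for a connection that has already closed (the js-libp2p issue linked above), the handler now returns early unless the connection status is "open". A minimal sketch of that guard, with an illustrative event shape standing in for libp2p's Connection:

// Sketch: decide whether a peer:connect event deserves handling.
// `ConnEventDetail` is a hypothetical stand-in for libp2p's Connection.
type ConnEventDetail = {status: "open" | "closing" | "closed"; remotePeer: string};

function shouldHandleConnect(detail: ConnEventDetail, connectedPeers: Set<string>): boolean {
  // Ignore already-known peers and connections that closed before we got here
  return detail.status === "open" && !connectedPeers.has(detail.remotePeer);
}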

@@ -615,6 +716,7 @@ export class PeerManager {
// NOTE: libp2p may emit two "peer:connect" events: One for inbound, one for outbound
// If that happens, it's okay. Only the "outbound" connection triggers immediate action
const now = Date.now();
const nodeId = computeNodeId(remotePeer);
const peerData: PeerData = {
lastReceivedMsgUnixTsMs: direction === "outbound" ? 0 : now,
// If inbound, request after STATUS_INBOUND_GRACE_PERIOD
@@ -622,6 +724,7 @@ export class PeerManager {
connectedUnixTsMs: now,
relevantStatus: RelevantPeerStatus.Unknown,
direction,
nodeId,
peerId: remotePeer,
status: null,
metadata: null,
@@ -629,10 +732,10 @@ export class PeerManager {
agentClient: null,
encodingPreference: null,
};
this.connectedPeers.set(remotePeer.toString(), peerData);
this.connectedPeers.set(remotePeerStr, peerData);

if (direction === "outbound") {
//this.pingAndStatusTimeouts();
// this.pingAndStatusTimeouts();
void this.requestPing(remotePeer);
void this.requestStatus(remotePeer, this.statusCache.get());
}
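Only outbound dials trigger an immediate PING/STATUS exchange; for inbound peers, lastReceivedMsgUnixTsMs is seeded so the periodic check fires only after STATUS_INBOUND_GRACE_PERIOD, giving the dialer a chance to initiate first. A hedged sketch of that asymmetry; the constant value and function name are hypothetical:

// Sketch: outbound dials are probed immediately; inbound peers get a grace
// period so the remote dialer can go first. The threshold is hypothetical.
const STATUS_INBOUND_GRACE_PERIOD_MS = 15_000;

function shouldRequestStatus(
  direction: "inbound" | "outbound",
  connectedUnixTsMs: number,
  nowMs: number
): boolean {
  if (direction === "outbound") return true; // we dialed them: probe right away
  return nowMs - connectedUnixTsMs > STATUS_INBOUND_GRACE_PERIOD_MS;
}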

@@ -647,7 +750,14 @@ export class PeerManager {
}
})
.catch((err) => {
this.logger.debug("Error setting agentVersion for the peer", {peerId: peerData.peerId.toString()}, err);
if (evt.detail.status !== "open") {
this.logger.debug("Peer disconnected during identify protocol", {
peerId: remotePeerPrettyStr,
error: (err as Error).message,
});
} else {
this.logger.debug("Error setting agentVersion for the peer", {peerId: remotePeerPrettyStr}, err);
}
});
};
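The widened catch handler distinguishes a peer that simply dropped the connection mid-identify from a genuine identify failure, keeping the noisier message for the latter. A small sketch of that branching, with illustrative shapes:

// Sketch: pick a log message for an identify failure depending on whether
// the connection is still open. Function and types are illustrative.
function identifyErrorMessage(connStatus: "open" | "closing" | "closed", err: Error): string {
  return connStatus !== "open"
    ? `Peer disconnected during identify protocol: ${err.message}`
    : `Error setting agentVersion for the peer: ${err.message}`;
}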

@@ -697,6 +807,7 @@ export class PeerManager {
const peerIdStr = peer.toString();
try {
this.metrics?.peerGoodbyeSent.inc({reason});
this.logger.debug("initiating goodbyeAndDisconnect peer", {reason, peerId: prettyPrintPeerId(peer)});

const conn = getConnection(this.libp2p, peerIdStr);
if (conn && Date.now() - conn.timeline.open > LONG_PEER_CONNECTION_MS) {
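The hunk is truncated here, but the visible check gates behavior on connection age, presumably so that only connections older than LONG_PEER_CONNECTION_MS get the GOODBYE round-trip before disconnecting. A hedged sketch of that check; the threshold value is hypothetical:

// Sketch: only spend a GOODBYE message on connections that lived long enough
// to matter; very young connections can just be dropped. Value hypothetical.
const LONG_PEER_CONNECTION_MS = 60_000;

function shouldSendGoodbye(connOpenedAtUnixMs: number, nowMs = Date.now()): boolean {
  return nowMs - connOpenedAtUnixMs > LONG_PEER_CONNECTION_MS;
}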

@@ -755,6 +866,7 @@ export class PeerManager {

// TODO: Consider optimizing by doing observe in batch
metrics.peerLongLivedAttnets.observe(attnets ? attnets.getTrueBitIndexes().length : 0);
metrics.peerColumnGroupCount.observe(peerData?.metadata?.custodyGroupCount ?? 0);
metrics.peerScoreByClient.observe({client}, this.peerRpcScores.getScore(peerId));
metrics.peerGossipScoreByClient.observe({client}, this.peerRpcScores.getGossipScore(peerId));
metrics.peerConnectionLength.observe((now - openCnx.timeline.open) / 1000);

@@ -1,9 +1,17 @@
import {PeerId} from "@libp2p/interface";
import {Encoding} from "@lodestar/reqresp";
import {altair, phase0} from "@lodestar/types";
import {CustodyIndex, Slot, Status, fulu} from "@lodestar/types";
import {NodeId} from "../subnets/interface.js";
import {ClientKind} from "./client.js";

type PeerIdStr = string;
type Metadata = fulu.Metadata & {custodyGroups: CustodyIndex[]; samplingGroups: CustodyIndex[]};
export type PeerSyncMeta = {
peerId: PeerIdStr;
client: string;
custodyGroups: CustodyIndex[];
earliestAvailableSlot?: Slot;
};

export enum RelevantPeerStatus {
Unknown = "unknown",
@@ -18,8 +26,9 @@ export type PeerData = {
relevantStatus: RelevantPeerStatus;
direction: "inbound" | "outbound";
peerId: PeerId;
status: phase0.Status | null;
metadata: altair.Metadata | null;
nodeId: NodeId | null;
metadata: Metadata | null;
status: Status | null;
agentVersion: string | null;
agentClient: ClientKind | null;
encodingPreference: Encoding | null;
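The new Metadata alias attaches the locally derived custodyGroups and samplingGroups to the peer's wire metadata, so they are computed once and cached on PeerData rather than rederived per access. A self-contained sketch of the same shape; WireMetadata is a simplified stand-in for fulu.Metadata, whose real field set is larger:

// Sketch: extend wire metadata with derived PeerDAS group lists.
// `WireMetadata` is a simplified, hypothetical stand-in for fulu.Metadata.
type CustodyIndex = number;

type WireMetadata = {
  seqNumber: bigint;
  custodyGroupCount: number; // advertised by the peer
};

type Metadata = WireMetadata & {
  custodyGroups: CustodyIndex[]; // derived from the peer's nodeId + custodyGroupCount
  samplingGroups: CustodyIndex[]; // groups used for sampling; in practice includes the custody groups
};

const example: Metadata = {
  seqNumber: 1n,
  custodyGroupCount: 4,
  custodyGroups: [3, 17, 42, 99],
  samplingGroups: [3, 17, 42, 99, 120],
};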
Some files were not shown because too many files have changed in this diff.