Merge branch 'unstable' into mkeil/merge-unstable-to-peerDAS-june3
.devcontainer/devcontainer.json
@@ -3,9 +3,7 @@
 {
   "name": "Node.js & TypeScript",
   // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
-  // Upgrade the container to Node 22
-  // https://github.com/ChainSafe/lodestar/issues/6742
-  "image": "mcr.microsoft.com/devcontainers/typescript-node:1-20-bullseye",
+  "image": "mcr.microsoft.com/devcontainers/typescript-node:1-22-bullseye",
   "features": {
     "ghcr.io/devcontainers/features/python:1": {}
   }
22 .github/actions/setup-and-build/action.yml (vendored)
@@ -13,6 +13,8 @@ runs:
    with:
      node-version: ${{inputs.node}}
      check-latest: true
      # The hash of yarn.lock file will be used as cache key
      # So unless our dependencies change, we can reuse the cache
      cache: yarn

  - name: Node.js version
@@ -20,28 +22,26 @@ runs:
    shell: bash
    run: echo "v8CppApiVersion=$(node --print "process.versions.modules")" >> $GITHUB_OUTPUT

  - name: Create cache key
    id: build-cache
    shell: bash
    # This build cache will be reused among different jobs for the same commit
    run: echo "key=build-cache-${{ runner.os }}-${{ runner.arch }}-node-${{ inputs.node }}-${{ github.sha }}" >> $GITHUB_OUTPUT

  - name: Restore build
    uses: actions/cache/restore@v4
    id: cache-build-restore
    with:
      path: |
        node_modules
        packages/*/node_modules
        lib/
        packages/*/lib
        packages/*/.git-data.json
-     key: ${{ runner.os }}-${{ runner.arch }}-node-${{ inputs.node }}-${{ github.sha }}
+     key: ${{ steps.build-cache.outputs.key }}

  - name: Install & build
    if: steps.cache-build-restore.outputs.cache-hit != 'true'
    shell: bash
    run: yarn install --frozen-lockfile && yarn build

  - name: Build
    if: steps.cache-build-restore.outputs.cache-hit == 'true'
    shell: bash
    run: yarn build

  - name: Check Build
    shell: bash
    run: yarn check-build
@@ -58,9 +58,7 @@ runs:
    uses: actions/cache@v4
    with:
      path: |
        node_modules
        packages/*/node_modules
        lib/
        packages/*/lib
        packages/*/.git-data.json
-     key: ${{ runner.os }}-${{ runner.arch }}-node-${{ inputs.node }}-${{ github.sha }}
+     key: ${{ steps.build-cache.outputs.key }}
@@ -53,6 +53,330 @@
   ],
   "liveNow": false,
   "panels": [
+    {
+      "collapsed": false,
+      "gridPos": {
+        "h": 1,
+        "w": 24,
+        "x": 0,
+        "y": 0
+      },
+      "id": 550,
+      "panels": [],
+      "title": "Aggregated Attestation Pool",
+      "type": "row"
+    },
+    {
+      "datasource": {
+        "type": "prometheus",
+        "uid": "${DS_PROMETHEUS}"
+      },
+      "fieldConfig": {
+        "defaults": {
+          "color": {
+            "mode": "palette-classic"
+          },
+          "custom": {
+            "axisBorderShow": false,
+            "axisCenteredZero": false,
+            "axisColorMode": "text",
+            "axisLabel": "",
+            "axisPlacement": "auto",
+            "barAlignment": 0,
+            "drawStyle": "line",
+            "fillOpacity": 0,
+            "gradientMode": "none",
+            "hideFrom": {
+              "legend": false,
+              "tooltip": false,
+              "viz": false
+            },
+            "insertNulls": false,
+            "lineInterpolation": "linear",
+            "lineWidth": 1,
+            "pointSize": 5,
+            "scaleDistribution": {
+              "type": "linear"
+            },
+            "showPoints": "auto",
+            "spanNulls": false,
+            "stacking": {
+              "group": "A",
+              "mode": "none"
+            },
+            "thresholdsStyle": {
+              "mode": "off"
+            }
+          },
+          "mappings": []
+        },
+        "overrides": []
+      },
+      "gridPos": {
+        "h": 8,
+        "w": 12,
+        "x": 0,
+        "y": 1
+      },
+      "id": 551,
+      "options": {
+        "legend": {
+          "calcs": [],
+          "displayMode": "list",
+          "placement": "bottom",
+          "showLegend": true
+        },
+        "tooltip": {
+          "mode": "multi",
+          "sort": "none"
+        }
+      },
+      "targets": [
+        {
+          "datasource": {
+            "type": "prometheus",
+            "uid": "${DS_PROMETHEUS}"
+          },
+          "editorMode": "code",
+          "expr": "lodestar_oppool_aggregated_attestation_pool_size",
+          "instant": false,
+          "legendFormat": "size",
+          "range": true,
+          "refId": "A"
+        },
+        {
+          "datasource": {
+            "type": "prometheus",
+            "uid": "${DS_PROMETHEUS}"
+          },
+          "editorMode": "code",
+          "expr": "lodestar_oppool_aggregated_attestation_pool_unique_data_count",
+          "hide": false,
+          "instant": false,
+          "legendFormat": "unique_data",
+          "range": true,
+          "refId": "B"
+        },
+        {
+          "datasource": {
+            "type": "prometheus",
+            "uid": "${DS_PROMETHEUS}"
+          },
+          "editorMode": "code",
+          "expr": "lodestar_oppool_aggregated_attestation_pool_attestation_data_per_slot_total",
+          "hide": false,
+          "instant": false,
+          "legendFormat": "att_data_per_slot",
+          "range": true,
+          "refId": "C"
+        },
+        {
+          "datasource": {
+            "type": "prometheus",
+            "uid": "${DS_PROMETHEUS}"
+          },
+          "editorMode": "code",
+          "expr": "lodestar_oppool_aggregated_attestation_pool_committees_per_slot_total",
+          "hide": false,
+          "instant": false,
+          "legendFormat": "committees_per_slot",
+          "range": true,
+          "refId": "D"
+        },
+        {
+          "datasource": {
+            "type": "prometheus",
+            "uid": "${DS_PROMETHEUS}"
+          },
+          "editorMode": "code",
+          "expr": "lodestar_oppool_aggregated_attestation_pool_max_attestations_per_committee",
+          "hide": false,
+          "instant": false,
+          "legendFormat": "max_attestations_per_committee",
+          "range": true,
+          "refId": "E"
+        },
+        {
+          "datasource": {
+            "type": "prometheus",
+            "uid": "${DS_PROMETHEUS}"
+          },
+          "editorMode": "code",
+          "expr": "lodestar_oppool_aggregated_attestation_pool_attestations_per_committee",
+          "hide": false,
+          "instant": false,
+          "legendFormat": "attestations_per_committee",
+          "range": true,
+          "refId": "F"
+        }
+      ],
+      "title": "Aggregated Attestation Pool",
+      "type": "timeseries"
+    },
+    {
+      "datasource": {
+        "type": "prometheus",
+        "uid": "${DS_PROMETHEUS}"
+      },
+      "fieldConfig": {
+        "defaults": {
+          "color": {
+            "mode": "palette-classic"
+          },
+          "custom": {
+            "axisBorderShow": false,
+            "axisCenteredZero": false,
+            "axisColorMode": "text",
+            "axisLabel": "",
+            "axisPlacement": "auto",
+            "barAlignment": 0,
+            "drawStyle": "line",
+            "fillOpacity": 0,
+            "gradientMode": "none",
+            "hideFrom": {
+              "legend": false,
+              "tooltip": false,
+              "viz": false
+            },
+            "insertNulls": false,
+            "lineInterpolation": "linear",
+            "lineWidth": 1,
+            "pointSize": 5,
+            "scaleDistribution": {
+              "type": "linear"
+            },
+            "showPoints": "auto",
+            "spanNulls": false,
+            "stacking": {
+              "group": "A",
+              "mode": "none"
+            },
+            "thresholdsStyle": {
+              "mode": "off"
+            }
+          },
+          "mappings": []
+        },
+        "overrides": []
+      },
+      "gridPos": {
+        "h": 8,
+        "w": 12,
+        "x": 12,
+        "y": 1
+      },
+      "id": 552,
+      "options": {
+        "legend": {
+          "calcs": [],
+          "displayMode": "list",
+          "placement": "bottom",
+          "showLegend": true
+        },
+        "tooltip": {
+          "mode": "multi",
+          "sort": "none"
+        }
+      },
+      "targets": [
+        {
+          "datasource": {
+            "type": "prometheus",
+            "uid": "${DS_PROMETHEUS}"
+          },
+          "editorMode": "code",
+          "expr": "rate(lodestar_oppool_aggregated_attestation_pool_gossip_insert_outcome_total[$rate_interval])",
+          "instant": false,
+          "legendFormat": "{{insertOutcome}}",
+          "range": true,
+          "refId": "A"
+        }
+      ],
+      "title": "Gossip Insert Outcome",
+      "type": "timeseries"
+    },
+    {
+      "datasource": {
+        "type": "prometheus",
+        "uid": "${DS_PROMETHEUS}"
+      },
+      "fieldConfig": {
+        "defaults": {
+          "color": {
+            "mode": "palette-classic"
+          },
+          "custom": {
+            "axisBorderShow": false,
+            "axisCenteredZero": false,
+            "axisColorMode": "text",
+            "axisLabel": "",
+            "axisPlacement": "auto",
+            "barAlignment": 0,
+            "drawStyle": "line",
+            "fillOpacity": 0,
+            "gradientMode": "none",
+            "hideFrom": {
+              "legend": false,
+              "tooltip": false,
+              "viz": false
+            },
+            "insertNulls": false,
+            "lineInterpolation": "linear",
+            "lineWidth": 1,
+            "pointSize": 5,
+            "scaleDistribution": {
+              "type": "linear"
+            },
+            "showPoints": "auto",
+            "spanNulls": false,
+            "stacking": {
+              "group": "A",
+              "mode": "none"
+            },
+            "thresholdsStyle": {
+              "mode": "off"
+            }
+          },
+          "mappings": []
+        },
+        "overrides": []
+      },
+      "gridPos": {
+        "h": 8,
+        "w": 12,
+        "x": 0,
+        "y": 9
+      },
+      "id": 553,
+      "options": {
+        "legend": {
+          "calcs": [],
+          "displayMode": "list",
+          "placement": "bottom",
+          "showLegend": true
+        },
+        "tooltip": {
+          "mode": "single",
+          "sort": "none"
+        }
+      },
+      "targets": [
+        {
+          "datasource": {
+            "type": "prometheus",
+            "uid": "${DS_PROMETHEUS}"
+          },
+          "editorMode": "code",
+          "expr": "rate(lodestar_oppool_aggregated_attestation_pool_api_insert_outcome_total[$rate_interval])",
+          "instant": false,
+          "legendFormat": "{{insertOutcome}}",
+          "range": true,
+          "refId": "A"
+        }
+      ],
+      "title": "Api Insert Outcome",
+      "type": "timeseries"
+    },
     {
       "collapsed": false,
       "datasource": {
@@ -63,7 +387,7 @@
        "h": 1,
        "w": 24,
        "x": 0,
-       "y": 0
+       "y": 17
      },
      "id": 166,
      "panels": [],
@@ -130,7 +454,7 @@
        "h": 8,
        "w": 12,
        "x": 0,
-       "y": 1
+       "y": 18
      },
      "id": 546,
      "options": {
@@ -306,7 +630,7 @@
        "h": 8,
        "w": 12,
        "x": 12,
-       "y": 1
+       "y": 18
      },
      "id": 547,
      "options": {
@@ -548,7 +872,7 @@
        "h": 8,
        "w": 12,
        "x": 0,
-       "y": 9
+       "y": 26
      },
      "id": 168,
      "options": {
@@ -673,7 +997,7 @@
        "h": 8,
        "w": 12,
        "x": 12,
-       "y": 9
+       "y": 26
      },
      "id": 170,
      "options": {
@@ -757,7 +1081,7 @@
        "h": 8,
        "w": 12,
        "x": 0,
-       "y": 17
+       "y": 34
      },
      "id": 528,
      "options": {
@@ -821,7 +1145,7 @@
        "h": 8,
        "w": 12,
        "x": 12,
-       "y": 17
+       "y": 34
      },
      "heatmap": {},
      "hideZeroBuckets": false,
@@ -950,7 +1274,7 @@
        "h": 8,
        "w": 12,
        "x": 0,
-       "y": 25
+       "y": 42
      },
      "id": 511,
      "options": {
@@ -1117,7 +1441,7 @@
        "h": 8,
        "w": 12,
        "x": 12,
-       "y": 25
+       "y": 42
      },
      "id": 378,
      "options": {
@@ -1203,7 +1527,7 @@
        "h": 8,
        "w": 12,
        "x": 0,
-       "y": 33
+       "y": 50
      },
      "id": 376,
      "options": {
@@ -1288,7 +1612,7 @@
        "h": 8,
        "w": 12,
        "x": 12,
-       "y": 33
+       "y": 50
      },
      "id": 532,
      "options": {
@@ -1391,7 +1715,7 @@
        "h": 7,
        "w": 12,
        "x": 0,
-       "y": 41
+       "y": 58
      },
      "id": 531,
      "options": {
@@ -1476,7 +1800,7 @@
        "h": 7,
        "w": 12,
        "x": 12,
-       "y": 41
+       "y": 58
      },
      "id": 534,
      "options": {
@@ -1527,7 +1851,7 @@
        "h": 6,
        "w": 12,
        "x": 0,
-       "y": 48
+       "y": 65
      },
      "id": 535,
      "options": {
@@ -1641,7 +1965,7 @@
        "h": 8,
        "w": 12,
        "x": 12,
-       "y": 48
+       "y": 65
      },
      "id": 537,
      "options": {
@@ -1734,7 +2058,7 @@
        "h": 8,
        "w": 12,
        "x": 0,
-       "y": 54
+       "y": 71
      },
      "id": 548,
      "options": {
@@ -1818,7 +2142,7 @@
        "h": 8,
        "w": 12,
        "x": 12,
-       "y": 56
+       "y": 73
      },
      "id": 549,
      "options": {
@@ -1858,7 +2182,7 @@
        "h": 1,
        "w": 24,
        "x": 0,
-       "y": 64
+       "y": 81
      },
      "id": 541,
      "panels": [],
@@ -1889,7 +2213,7 @@
        "h": 8,
        "w": 12,
        "x": 0,
-       "y": 65
+       "y": 82
      },
      "id": 543,
      "options": {
@@ -1968,7 +2292,7 @@
        "h": 8,
        "w": 12,
        "x": 12,
-       "y": 65
+       "y": 82
      },
      "id": 545,
      "options": {
@@ -2073,7 +2397,7 @@
        "h": 8,
        "w": 12,
        "x": 0,
-       "y": 73
+       "y": 90
      },
      "id": 539,
      "options": {
File diff suppressed because it is too large
@@ -868,6 +868,88 @@
       "title": "Committee Subscriptions - Seconds to stable mesh",
       "type": "heatmap"
     },
+    {
+      "datasource": {
+        "type": "prometheus",
+        "uid": "${DS_PROMETHEUS}"
+      },
+      "fieldConfig": {
+        "defaults": {
+          "color": {
+            "mode": "palette-classic"
+          },
+          "custom": {
+            "axisBorderShow": false,
+            "axisCenteredZero": false,
+            "axisColorMode": "text",
+            "axisLabel": "",
+            "axisPlacement": "auto",
+            "barAlignment": 0,
+            "drawStyle": "line",
+            "fillOpacity": 0,
+            "gradientMode": "none",
+            "hideFrom": {
+              "legend": false,
+              "tooltip": false,
+              "viz": false
+            },
+            "insertNulls": false,
+            "lineInterpolation": "linear",
+            "lineWidth": 1,
+            "pointSize": 5,
+            "scaleDistribution": {
+              "type": "linear"
+            },
+            "showPoints": "auto",
+            "spanNulls": false,
+            "stacking": {
+              "group": "A",
+              "mode": "none"
+            },
+            "thresholdsStyle": {
+              "mode": "off"
+            }
+          },
+          "mappings": []
+        },
+        "overrides": []
+      },
+      "gridPos": {
+        "h": 8,
+        "w": 12,
+        "x": 12,
+        "y": 30
+      },
+      "id": 628,
+      "options": {
+        "legend": {
+          "calcs": [],
+          "displayMode": "list",
+          "placement": "bottom",
+          "showLegend": true
+        },
+        "tooltip": {
+          "mode": "multi",
+          "sort": "none"
+        }
+      },
+      "targets": [
+        {
+          "datasource": {
+            "type": "prometheus",
+            "uid": "${DS_PROMETHEUS}"
+          },
+          "editorMode": "code",
+          "expr": "lodestar_gossip_mesh_peers_by_client_count",
+          "instant": false,
+          "legendFormat": "{{client}}",
+          "range": true,
+          "refId": "A"
+        }
+      ],
+      "title": "Mesh peers by client",
+      "type": "timeseries"
+    },
     {
       "collapsed": false,
       "gridPos": {
@@ -70,8 +70,8 @@
     "check-readme": "typescript-docs-verifier"
   },
   "dependencies": {
-    "@chainsafe/persistent-merkle-tree": "^1.1.0",
-    "@chainsafe/ssz": "^1.2.0",
+    "@chainsafe/persistent-merkle-tree": "^1.2.0",
+    "@chainsafe/ssz": "^1.2.1",
     "@lodestar/config": "^1.30.0",
     "@lodestar/params": "^1.30.0",
     "@lodestar/types": "^1.30.0",
@@ -92,17 +92,17 @@
     "check-readme": "typescript-docs-verifier"
   },
   "dependencies": {
-    "@chainsafe/as-sha256": "^1.1.0",
+    "@chainsafe/as-sha256": "^1.2.0",
     "@chainsafe/blst": "^2.2.0",
     "@chainsafe/discv5": "^11.0.0",
     "@chainsafe/enr": "^5.0.0",
     "@chainsafe/libp2p-gossipsub": "^14.1.1",
     "@chainsafe/libp2p-noise": "^16.1.0",
-    "@chainsafe/persistent-merkle-tree": "^1.1.0",
+    "@chainsafe/persistent-merkle-tree": "^1.2.0",
     "@chainsafe/prometheus-gc-stats": "^1.0.0",
     "@chainsafe/pubkey-index-map": "^3.0.0",
-    "@chainsafe/ssz": "^1.2.0",
-    "@chainsafe/threads": "^1.11.1",
+    "@chainsafe/ssz": "^1.2.1",
+    "@chainsafe/threads": "^1.11.2",
     "@ethersproject/abi": "^5.7.0",
     "@fastify/bearer-auth": "^10.0.1",
     "@fastify/cors": "^10.0.1",
@@ -46,6 +46,7 @@ import {computeBlobSidecars, computeDataColumnSidecars} from "../../../../util/b
 import {isOptimisticBlock} from "../../../../util/forkChoice.js";
 import {promiseAllMaybeAsync} from "../../../../util/promises.js";
 import {ApiModules} from "../../types.js";
+import {assertUniqueItems} from "../../utils.js";
 import {getBlockResponse, toBeaconHeaderResponse} from "./utils.js";

 type PublishBlockOpts = ImportBlockOpts;
@@ -523,6 +524,8 @@ export function getBeaconBlockApi({
   },

   async getBlobSidecars({blockId, indices}) {
+    assertUniqueItems(indices, "Duplicate indices provided");
+
     const {block, executionOptimistic, finalized} = await getBlockResponse(chain, blockId);
     const blockRoot = config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message);

@@ -1,6 +1,7 @@
 import {routes} from "@lodestar/api";
 import {ApplicationMethods} from "@lodestar/api/server";
 import {ApiModules} from "../../types.js";
+import {assertUniqueItems} from "../../utils.js";
 import {getBlockResponse} from "../blocks/utils.js";

 export function getBeaconRewardsApi({
@@ -13,10 +14,14 @@ export function getBeaconRewardsApi({
     return {data, meta: {executionOptimistic, finalized}};
   },
   async getAttestationsRewards({epoch, validatorIds}) {
+    assertUniqueItems(validatorIds, "Duplicate validator IDs provided");
+
     const {rewards, executionOptimistic, finalized} = await chain.getAttestationsRewards(epoch, validatorIds);
     return {data: rewards, meta: {executionOptimistic, finalized}};
   },
   async getSyncCommitteeRewards({blockId, validatorIds}) {
+    assertUniqueItems(validatorIds, "Duplicate validator IDs provided");
+
     const {block, executionOptimistic, finalized} = await getBlockResponse(chain, blockId);
     const data = await chain.getSyncCommitteeRewards(block.message, validatorIds);
     return {data, meta: {executionOptimistic, finalized}};
@@ -15,6 +15,7 @@ import {getValidatorStatus} from "@lodestar/types";
 import {fromHex} from "@lodestar/utils";
 import {ApiError} from "../../errors.js";
 import {ApiModules} from "../../types.js";
+import {assertUniqueItems} from "../../utils.js";
 import {
   filterStateValidatorsByStatus,
   getStateResponseWithRegen,
@@ -92,6 +93,8 @@ export function getBeaconStateApi({

   const validatorResponses: routes.beacon.ValidatorResponse[] = [];
   if (validatorIds.length) {
+    assertUniqueItems(validatorIds, "Duplicate validator IDs provided");
+
     for (const id of validatorIds) {
       const resp = getStateValidatorIndex(id, state, pubkey2index);
       if (resp.valid) {
@@ -116,6 +119,8 @@ export function getBeaconStateApi({
   }

   if (statuses.length) {
+    assertUniqueItems(statuses, "Duplicate statuses provided");
+
     const validatorsByStatus = filterStateValidatorsByStatus(statuses, state, pubkey2index, currentEpoch);
     return {
       data: validatorsByStatus,
@@ -148,6 +153,8 @@ export function getBeaconStateApi({
   let validatorIdentities: routes.beacon.ValidatorIdentities;

   if (validatorIds.length) {
+    assertUniqueItems(validatorIds, "Duplicate validator IDs provided");
+
     validatorIdentities = [];
     for (const id of validatorIds) {
       const resp = getStateValidatorIndex(id, state, pubkey2index);
@@ -197,6 +204,8 @@ export function getBeaconStateApi({
   const {state, executionOptimistic, finalized} = await getState(stateId);

   if (validatorIds.length) {
+    assertUniqueItems(validatorIds, "Duplicate validator IDs provided");
+
     const headState = chain.getHeadState();
     const balances: routes.beacon.ValidatorBalance[] = [];
     for (const id of validatorIds) {

@@ -19,6 +19,13 @@ export function renderJsonSpec(config: ChainConfig): routes.config.Spec {
   const configJson = chainConfigToJson(config);
   const presetJson = presetToJson(activePreset);
   const constantsJson = specValuesToJson(specConstants);

+  // TODO Fulu: remove this check once interop issues are resolved
+  // see https://github.com/attestantio/go-eth2-client/issues/230
+  if (config.FULU_FORK_EPOCH === Infinity) {
+    delete configJson.BLOB_SCHEDULE;
+  }
+
   return {...configJson, ...presetJson, ...constantsJson};
 }

25 packages/beacon-node/src/api/impl/utils.ts (new file)
@@ -0,0 +1,25 @@
+import {ApiError} from "./errors.js";
+
+/**
+ * Ensures that the array contains unique values, and throws an ApiError
+ * otherwise.
+ * @param array - The array to check for uniqueness.
+ * @param message - The message to put in the ApiError if the array contains
+ * duplicates.
+ */
+export function assertUniqueItems(array: unknown[] | undefined, message: string): void {
+  if (!array) {
+    return;
+  }
+
+  const duplicateItems = array.reduce((partialDuplicateItems: unknown[], item, index) => {
+    if (array.indexOf(item) !== index && !partialDuplicateItems.includes(item)) {
+      return partialDuplicateItems.concat(item);
+    }
+    return partialDuplicateItems;
+  }, []);
+
+  if (duplicateItems.length) {
+    throw new ApiError(400, `${message}: ${duplicateItems.join(", ")}`);
+  }
+}
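A quick usage sketch of the new helper (the messages match the real call sites in this commit; the literal arrays are invented to show the failure mode):

import {assertUniqueItems} from "./utils.js";

// Passes: all items unique; undefined means "nothing to validate"
assertUniqueItems([0, 1, 2], "Duplicate indices provided");
assertUniqueItems(undefined, "Duplicate indices provided");

// Throws ApiError(400, "Duplicate indices provided: 1")
// Each duplicate value is reported once, even if it occurs three times
assertUniqueItems([0, 1, 1, 2, 1], "Duplicate indices provided");

Note the indexOf-based scan is quadratic, which is fine for the short ID lists these endpoints accept; a Set-based check would be the usual choice if inputs could be large.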
@@ -1312,10 +1312,12 @@ export function getValidatorApi(
         contributionAndProof,
         true // skip known participants check
       );
-      chain.syncContributionAndProofPool.add(
+      const insertOutcome = chain.syncContributionAndProofPool.add(
         contributionAndProof.message,
-        syncCommitteeParticipantIndices.length
+        syncCommitteeParticipantIndices.length,
+        true
       );
+      metrics?.opPool.syncContributionAndProofPool.apiInsertOutcome.inc({insertOutcome});
       await network.publishContributionAndProof(contributionAndProof);
     } catch (e) {
       const logCtx = {

@@ -9,7 +9,8 @@ import {
   stateTransition,
 } from "@lodestar/state-transition";
 import {IBeaconDb} from "../../../db/index.js";
-import {HistoricalStateRegenMetrics, RegenErrorType} from "./types.js";
+import {HistoricalStateRegenMetrics} from "./metrics.js";
+import {RegenErrorType} from "./types.js";

 /**
  * Populate a PubkeyIndexMap with any new entries based on a BeaconState

@@ -0,0 +1,200 @@
+import {
+  BeaconStateTransitionMetrics,
+  EpochTransitionStep,
+  StateCloneSource,
+  StateHashTreeRootSource,
+} from "@lodestar/state-transition";
+import {Gauge, Histogram} from "@lodestar/utils";
+import {RegistryMetricCreator} from "../../../metrics/index.js";
+import {QueueMetrics} from "../../../util/queue/options.js";
+import {RegenErrorType} from "./types.js";
+
+export type HistoricalStateTransitionMetrics = BeaconStateTransitionMetrics;
+
+export type HistoricalStateRegenMetrics = HistoricalStateTransitionMetrics & {
+  regenTime: Histogram;
+  loadStateTime: Histogram;
+  stateTransitionTime: Histogram;
+  stateTransitionBlocks: Histogram;
+  stateSerializationTime: Histogram;
+  regenRequestCount: Gauge;
+  regenSuccessCount: Gauge;
+  regenErrorCount: Gauge<{reason: RegenErrorType}>;
+};
+
+export function createHistoricalStateTransitionMetrics(
+  metricsRegister: RegistryMetricCreator
+): HistoricalStateTransitionMetrics {
+  return {
+    // state transition metrics
+    epochTransitionTime: metricsRegister.histogram({
+      name: "lodestar_historical_state_stfn_epoch_transition_seconds",
+      help: "Time to process a single epoch transition in seconds",
+      // Epoch transitions are 100ms on very fast clients, and average 800ms on heavy networks
+      buckets: [0.01, 0.05, 0.1, 0.2, 0.5, 0.75, 1, 1.25, 1.5, 3, 10],
+    }),
+    epochTransitionCommitTime: metricsRegister.histogram({
+      name: "lodestar_historical_state_stfn_epoch_transition_commit_seconds",
+      help: "Time to call commit after process a single epoch transition in seconds",
+      buckets: [0.01, 0.05, 0.1, 0.2, 0.5, 0.75, 1],
+    }),
+    epochTransitionStepTime: metricsRegister.histogram<{step: EpochTransitionStep}>({
+      name: "lodestar_historical_state_stfn_epoch_transition_step_seconds",
+      help: "Time to call each step of epoch transition in seconds",
+      labelNames: ["step"],
+      buckets: [0.01, 0.05, 0.1, 0.2, 0.5, 0.75, 1],
+    }),
+    processBlockTime: metricsRegister.histogram({
+      name: "lodestar_historical_state_stfn_process_block_seconds",
+      help: "Time to process a single block in seconds",
+      // TODO: Add metrics for each step
+      // Block processing can take 5-40ms, 100ms max
+      buckets: [0.005, 0.01, 0.02, 0.05, 0.1, 1],
+    }),
+    processBlockCommitTime: metricsRegister.histogram({
+      name: "lodestar_historical_state_stfn_process_block_commit_seconds",
+      help: "Time to call commit after process a single block in seconds",
+      buckets: [0.005, 0.01, 0.02, 0.05, 0.1, 1],
+    }),
+    stateHashTreeRootTime: metricsRegister.histogram<{source: StateHashTreeRootSource}>({
+      name: "lodestar_historical_state_stfn_hash_tree_root_seconds",
+      help: "Time to compute the hash tree root of a post state in seconds",
+      buckets: [0.05, 0.1, 0.2, 0.5, 1, 1.5],
+      labelNames: ["source"],
+    }),
+    numEffectiveBalanceUpdates: metricsRegister.gauge({
+      name: "lodestar_historical_state_stfn_num_effective_balance_updates_count",
+      help: "Count of effective balance updates in epoch transition",
+    }),
+    preStateBalancesNodesPopulatedMiss: metricsRegister.gauge<{source: StateCloneSource}>({
+      name: "lodestar_historical_state_stfn_balances_nodes_populated_miss_total",
+      help: "Total count state.balances nodesPopulated is false on stfn",
+      labelNames: ["source"],
+    }),
+    preStateBalancesNodesPopulatedHit: metricsRegister.gauge<{source: StateCloneSource}>({
+      name: "lodestar_historical_state_stfn_balances_nodes_populated_hit_total",
+      help: "Total count state.balances nodesPopulated is true on stfn",
+      labelNames: ["source"],
+    }),
+    preStateValidatorsNodesPopulatedMiss: metricsRegister.gauge<{source: StateCloneSource}>({
+      name: "lodestar_historical_state_stfn_validators_nodes_populated_miss_total",
+      help: "Total count state.validators nodesPopulated is false on stfn",
+      labelNames: ["source"],
+    }),
+    preStateValidatorsNodesPopulatedHit: metricsRegister.gauge<{source: StateCloneSource}>({
+      name: "lodestar_historical_state_stfn_validators_nodes_populated_hit_total",
+      help: "Total count state.validators nodesPopulated is true on stfn",
+      labelNames: ["source"],
+    }),
+    preStateClonedCount: metricsRegister.histogram({
+      name: "lodestar_historical_state_stfn_state_cloned_count",
+      help: "Histogram of cloned count per state every time state.clone() is called",
+      buckets: [1, 2, 5, 10, 50, 250],
+    }),
+    postStateBalancesNodesPopulatedHit: metricsRegister.gauge({
+      name: "lodestar_historical_state_stfn_post_state_balances_nodes_populated_hit_total",
+      help: "Total count state.validators nodesPopulated is true on stfn for post state",
+    }),
+    postStateBalancesNodesPopulatedMiss: metricsRegister.gauge({
+      name: "lodestar_historical_state_stfn_post_state_balances_nodes_populated_miss_total",
+      help: "Total count state.validators nodesPopulated is false on stfn for post state",
+    }),
+    postStateValidatorsNodesPopulatedHit: metricsRegister.gauge({
+      name: "lodestar_historical_state_stfn_post_state_validators_nodes_populated_hit_total",
+      help: "Total count state.validators nodesPopulated is true on stfn for post state",
+    }),
+    postStateValidatorsNodesPopulatedMiss: metricsRegister.gauge({
+      name: "lodestar_historical_state_stfn_post_state_validators_nodes_populated_miss_total",
+      help: "Total count state.validators nodesPopulated is false on stfn for post state",
+    }),
+    newSeenAttestersPerBlock: metricsRegister.gauge({
+      name: "lodestar_historical_state_stfn_new_seen_attesters_per_block_total",
+      help: "Count of new seen attesters in epoch transition",
+    }),
+    newSeenAttestersEffectiveBalancePerBlock: metricsRegister.gauge({
+      name: "lodestar_historical_state_stfn_new_seen_attesters_effective_balance_per_block_total",
+      help: "Total effective balance increment of new seen attesters per block",
+    }),
+    attestationsPerBlock: metricsRegister.gauge({
+      name: "lodestar_historical_state_stfn_attestations_per_block_total",
+      help: "Count of attestations per block",
+    }),
+  };
+}
+
+export function createHistoricalStateRegenMetrics(metricsRegister: RegistryMetricCreator): HistoricalStateRegenMetrics {
+  return {
+    ...createHistoricalStateTransitionMetrics(metricsRegister),
+    // historical state regen metrics
+    regenTime: metricsRegister.histogram({
+      name: "lodestar_historical_state_regen_time_seconds",
+      help: "Time to regenerate a historical state in seconds",
+      // Historical state regen can take up to 3h as of Aug 2024
+      // 5m, 10m, 30m, 1h, 3h
+      buckets: [5 * 60, 10 * 60, 30 * 60, 60 * 60, 180 * 60],
+    }),
+    loadStateTime: metricsRegister.histogram({
+      name: "lodestar_historical_state_load_nearest_state_time_seconds",
+      help: "Time to load a nearest historical state from the database in seconds",
+      // 30s, 1m, 2m, 4m
+      buckets: [30, 60, 120, 240],
+    }),
+    stateTransitionTime: metricsRegister.histogram({
+      name: "lodestar_historical_state_state_transition_time_seconds",
+      help: "Time to run state transition to regen historical state in seconds",
+      // 5m, 10m, 30m, 1h, 3h
+      buckets: [5 * 60, 10 * 60, 30 * 60, 60 * 60, 180 * 60],
+    }),
+    stateTransitionBlocks: metricsRegister.histogram({
+      name: "lodestar_historical_state_state_transition_blocks",
+      help: "Count of blocks processed during state transition to regen historical state",
+      // given archiveStateEpochFrequency=1024, it could process up to 32768 blocks
+      buckets: [10, 100, 1000, 10000, 30000],
+    }),
+    stateSerializationTime: metricsRegister.histogram({
+      name: "lodestar_historical_state_serialization_time_seconds",
+      help: "Time to serialize a historical state in seconds",
+      buckets: [0.25, 0.5, 1, 2],
+    }),
+    regenRequestCount: metricsRegister.gauge({
+      name: "lodestar_historical_state_request_count",
+      help: "Count of total historical state requests",
+    }),
+    regenSuccessCount: metricsRegister.gauge({
+      name: "lodestar_historical_state_success_count",
+      help: "Count of successful historical state regen",
+    }),
+    regenErrorCount: metricsRegister.gauge<{reason: RegenErrorType}>({
+      name: "lodestar_historical_state_error_count",
+      help: "Count of failed historical state regen",
+      labelNames: ["reason"],
+    }),
+  };
+}
+
+export function createHistoricalStateQueueMetrics(metricsRegister: RegistryMetricCreator): QueueMetrics {
+  return {
+    length: metricsRegister.gauge({
+      name: "lodestar_historical_state_queue_length",
+      help: "Count of total regen queue length",
+    }),
+    droppedJobs: metricsRegister.gauge({
+      name: "lodestar_historical_state_queue_dropped_jobs_total",
+      help: "Count of total regen queue dropped jobs",
+    }),
+    jobTime: metricsRegister.histogram({
+      name: "lodestar_historical_state_queue_job_time_seconds",
+      help: "Time to process regen queue job in seconds",
+      buckets: [0.01, 0.1, 1, 10, 100],
+    }),
+    jobWaitTime: metricsRegister.histogram({
+      name: "lodestar_historical_state_queue_job_wait_time_seconds",
+      help: "Time from job added to the regen queue to starting in seconds",
+      buckets: [0.01, 0.1, 1, 10, 100],
+    }),
+    concurrency: metricsRegister.gauge({
+      name: "lodestar_historical_state_queue_concurrency",
+      help: "Current concurrency of regen queue",
+    }),
+  };
+}
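These factories are consumed like this (this mirrors the worker.ts change later in the diff; only the standalone registry here is illustrative):

import {RegistryMetricCreator} from "../../../metrics/index.js";
import {createHistoricalStateQueueMetrics, createHistoricalStateRegenMetrics} from "./metrics.js";

const metricsRegister = new RegistryMetricCreator();

// One call each replaces the ~170 lines of inline metric definitions
// that previously lived in the worker entry point
const historicalStateRegenMetrics = createHistoricalStateRegenMetrics(metricsRegister);
const queueMetrics = createHistoricalStateQueueMetrics(metricsRegister);

historicalStateRegenMetrics.regenRequestCount.inc();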
@@ -1,8 +1,6 @@
 import {ModuleThread} from "@chainsafe/threads";
 import {BeaconConfig, SpecJson} from "@lodestar/config";
 import {LoggerNode, LoggerNodeOpts} from "@lodestar/logger/node";
-import {BeaconStateTransitionMetrics} from "@lodestar/state-transition";
-import {Gauge, Histogram} from "@lodestar/utils";
 import {Metrics} from "../../../metrics/index.js";

 export type HistoricalStateRegenInitModules = {
@@ -41,14 +39,3 @@ export enum RegenErrorType {
   invalidStateRoot = "invalid_state_root",
   blockProcessing = "block_processing",
 }
-
-export type HistoricalStateRegenMetrics = BeaconStateTransitionMetrics & {
-  regenTime: Histogram;
-  loadStateTime: Histogram;
-  stateTransitionTime: Histogram;
-  stateTransitionBlocks: Histogram;
-  stateSerializationTime: Histogram;
-  regenRequestCount: Gauge;
-  regenSuccessCount: Gauge;
-  regenErrorCount: Gauge<{reason: RegenErrorType}>;
-};

@@ -4,7 +4,6 @@ import {Transfer, expose} from "@chainsafe/threads/worker";
 import {chainConfigFromJson, createBeaconConfig} from "@lodestar/config";
 import {LevelDbController} from "@lodestar/db";
 import {getNodeLogger} from "@lodestar/logger/node";
-import {EpochTransitionStep, StateCloneSource, StateHashTreeRootSource} from "@lodestar/state-transition";
 import {BeaconDb} from "../../../db/index.js";
 import {RegistryMetricCreator, collectNodeJSMetrics} from "../../../metrics/index.js";
 import {JobFnQueue} from "../../../util/queue/fnQueue.js";
@@ -12,10 +11,10 @@ import {QueueMetrics} from "../../../util/queue/options.js";
 import {getHistoricalState} from "./getHistoricalState.js";
 import {
-  HistoricalStateRegenMetrics,
-  HistoricalStateWorkerApi,
-  HistoricalStateWorkerData,
-  RegenErrorType,
-} from "./types.js";
+  createHistoricalStateQueueMetrics,
+  createHistoricalStateRegenMetrics,
+} from "./metrics.js";
+import {HistoricalStateWorkerApi, HistoricalStateWorkerData} from "./types.js";

 // most of this setup copied from networkCoreWorker.ts

@@ -35,175 +34,13 @@ const abortController = new AbortController();
 const metricsRegister = workerData.metricsEnabled ? new RegistryMetricCreator() : null;
 let historicalStateRegenMetrics: HistoricalStateRegenMetrics | undefined;
 let queueMetrics: QueueMetrics | undefined;

 if (metricsRegister) {
   const closeMetrics = collectNodeJSMetrics(metricsRegister, "lodestar_historical_state_worker_");
   abortController.signal.addEventListener("abort", closeMetrics, {once: true});

-  historicalStateRegenMetrics = {
-    // state transition metrics
-    epochTransitionTime: metricsRegister.histogram({
-      name: "lodestar_historical_state_stfn_epoch_transition_seconds",
-      help: "Time to process a single epoch transition in seconds",
-      // Epoch transitions are 100ms on very fast clients, and average 800ms on heavy networks
-      buckets: [0.01, 0.05, 0.1, 0.2, 0.5, 0.75, 1, 1.25, 1.5, 3, 10],
-    }),
-    epochTransitionCommitTime: metricsRegister.histogram({
-      name: "lodestar_historical_state_stfn_epoch_transition_commit_seconds",
-      help: "Time to call commit after process a single epoch transition in seconds",
-      buckets: [0.01, 0.05, 0.1, 0.2, 0.5, 0.75, 1],
-    }),
-    epochTransitionStepTime: metricsRegister.histogram<{step: EpochTransitionStep}>({
-      name: "lodestar_historical_state_stfn_epoch_transition_step_seconds",
-      help: "Time to call each step of epoch transition in seconds",
-      labelNames: ["step"],
-      buckets: [0.01, 0.05, 0.1, 0.2, 0.5, 0.75, 1],
-    }),
-    processBlockTime: metricsRegister.histogram({
-      name: "lodestar_historical_state_stfn_process_block_seconds",
-      help: "Time to process a single block in seconds",
-      // TODO: Add metrics for each step
-      // Block processing can take 5-40ms, 100ms max
-      buckets: [0.005, 0.01, 0.02, 0.05, 0.1, 1],
-    }),
-    processBlockCommitTime: metricsRegister.histogram({
-      name: "lodestar_historical_state_stfn_process_block_commit_seconds",
-      help: "Time to call commit after process a single block in seconds",
-      buckets: [0.005, 0.01, 0.02, 0.05, 0.1, 1],
-    }),
-    stateHashTreeRootTime: metricsRegister.histogram<{source: StateHashTreeRootSource}>({
-      name: "lodestar_historical_state_stfn_hash_tree_root_seconds",
-      help: "Time to compute the hash tree root of a post state in seconds",
-      buckets: [0.05, 0.1, 0.2, 0.5, 1, 1.5],
-      labelNames: ["source"],
-    }),
-    numEffectiveBalanceUpdates: metricsRegister.gauge({
-      name: "lodestar_historical_state_stfn_num_effective_balance_updates_count",
-      help: "Count of effective balance updates in epoch transition",
-    }),
-    preStateBalancesNodesPopulatedMiss: metricsRegister.gauge<{source: StateCloneSource}>({
-      name: "lodestar_historical_state_stfn_balances_nodes_populated_miss_total",
-      help: "Total count state.balances nodesPopulated is false on stfn",
-      labelNames: ["source"],
-    }),
-    preStateBalancesNodesPopulatedHit: metricsRegister.gauge<{source: StateCloneSource}>({
-      name: "lodestar_historical_state_stfn_balances_nodes_populated_hit_total",
-      help: "Total count state.balances nodesPopulated is true on stfn",
-      labelNames: ["source"],
-    }),
-    preStateValidatorsNodesPopulatedMiss: metricsRegister.gauge<{source: StateCloneSource}>({
-      name: "lodestar_historical_state_stfn_validators_nodes_populated_miss_total",
-      help: "Total count state.validators nodesPopulated is false on stfn",
-      labelNames: ["source"],
-    }),
-    preStateValidatorsNodesPopulatedHit: metricsRegister.gauge<{source: StateCloneSource}>({
-      name: "lodestar_historical_state_stfn_validators_nodes_populated_hit_total",
-      help: "Total count state.validators nodesPopulated is true on stfn",
-      labelNames: ["source"],
-    }),
-    preStateClonedCount: metricsRegister.histogram({
-      name: "lodestar_historical_state_stfn_state_cloned_count",
-      help: "Histogram of cloned count per state every time state.clone() is called",
-      buckets: [1, 2, 5, 10, 50, 250],
-    }),
-    postStateBalancesNodesPopulatedHit: metricsRegister.gauge({
-      name: "lodestar_historical_state_stfn_post_state_balances_nodes_populated_hit_total",
-      help: "Total count state.validators nodesPopulated is true on stfn for post state",
-    }),
-    postStateBalancesNodesPopulatedMiss: metricsRegister.gauge({
-      name: "lodestar_historical_state_stfn_post_state_balances_nodes_populated_miss_total",
-      help: "Total count state.validators nodesPopulated is false on stfn for post state",
-    }),
-    postStateValidatorsNodesPopulatedHit: metricsRegister.gauge({
-      name: "lodestar_historical_state_stfn_post_state_validators_nodes_populated_hit_total",
-      help: "Total count state.validators nodesPopulated is true on stfn for post state",
-    }),
-    postStateValidatorsNodesPopulatedMiss: metricsRegister.gauge({
-      name: "lodestar_historical_state_stfn_post_state_validators_nodes_populated_miss_total",
-      help: "Total count state.validators nodesPopulated is false on stfn for post state",
-    }),
-    newSeenAttestersPerBlock: metricsRegister.gauge({
-      name: "lodestar_historical_state_stfn_new_seen_attesters_per_block_total",
-      help: "Count of new seen attesters in epoch transition",
-    }),
-    newSeenAttestersEffectiveBalancePerBlock: metricsRegister.gauge({
-      name: "lodestar_historical_state_stfn_new_seen_attesters_effective_balance_per_block_total",
-      help: "Total effective balance increment of new seen attesters per block",
-    }),
-    attestationsPerBlock: metricsRegister.gauge({
-      name: "lodestar_historical_state_stfn_attestations_per_block_total",
-      help: "Count of attestations per block",
-    }),
-
-    // historical state regen metrics
-    regenTime: metricsRegister.histogram({
-      name: "lodestar_historical_state_regen_time_seconds",
-      help: "Time to regenerate a historical state in seconds",
-      // Historical state regen can take up to 3h as of Aug 2024
-      // 5m, 10m, 30m, 1h, 3h
-      buckets: [5 * 60, 10 * 60, 30 * 60, 60 * 60, 180 * 60],
-    }),
-    loadStateTime: metricsRegister.histogram({
-      name: "lodestar_historical_state_load_nearest_state_time_seconds",
-      help: "Time to load a nearest historical state from the database in seconds",
-      // 30s, 1m, 2m, 4m
-      buckets: [30, 60, 120, 240],
-    }),
-    stateTransitionTime: metricsRegister.histogram({
-      name: "lodestar_historical_state_state_transition_time_seconds",
-      help: "Time to run state transition to regen historical state in seconds",
-      // 5m, 10m, 30m, 1h, 3h
-      buckets: [5 * 60, 10 * 60, 30 * 60, 60 * 60, 180 * 60],
-    }),
-    stateTransitionBlocks: metricsRegister.histogram({
-      name: "lodestar_historical_state_state_transition_blocks",
-      help: "Count of blocks processed during state transition to regen historical state",
-      // given archiveStateEpochFrequency=1024, it could process up to 32768 blocks
-      buckets: [10, 100, 1000, 10000, 30000],
-    }),
-    stateSerializationTime: metricsRegister.histogram({
-      name: "lodestar_historical_state_serialization_time_seconds",
-      help: "Time to serialize a historical state in seconds",
-      buckets: [0.25, 0.5, 1, 2],
-    }),
-    regenRequestCount: metricsRegister.gauge({
-      name: "lodestar_historical_state_request_count",
-      help: "Count of total historical state requests",
-    }),
-    regenSuccessCount: metricsRegister.gauge({
-      name: "lodestar_historical_state_success_count",
-      help: "Count of successful historical state regen",
-    }),
-    regenErrorCount: metricsRegister.gauge<{reason: RegenErrorType}>({
-      name: "lodestar_historical_state_error_count",
-      help: "Count of failed historical state regen",
-      labelNames: ["reason"],
-    }),
-  };
-
-  queueMetrics = {
-    length: metricsRegister.gauge({
-      name: "lodestar_historical_state_queue_length",
-      help: "Count of total regen queue length",
-    }),
-    droppedJobs: metricsRegister.gauge({
-      name: "lodestar_historical_state_queue_dropped_jobs_total",
-      help: "Count of total regen queue dropped jobs",
-    }),
-    jobTime: metricsRegister.histogram({
-      name: "lodestar_historical_state_queue_job_time_seconds",
-      help: "Time to process regen queue job in seconds",
-      buckets: [0.01, 0.1, 1, 10, 100],
-    }),
-    jobWaitTime: metricsRegister.histogram({
-      name: "lodestar_historical_state_queue_job_wait_time_seconds",
-      help: "Time from job added to the regen queue to starting in seconds",
-      buckets: [0.01, 0.1, 1, 10, 100],
-    }),
-    concurrency: metricsRegister.gauge({
-      name: "lodestar_historical_state_queue_concurrency",
-      help: "Current concurrency of regen queue",
-    }),
-  };
+  historicalStateRegenMetrics = createHistoricalStateRegenMetrics(metricsRegister);
+  queueMetrics = createHistoricalStateQueueMetrics(metricsRegister);
 }

 const queue = new JobFnQueue(

@@ -1,9 +1,8 @@
 import {ForkName, ForkPreDeneb} from "@lodestar/params";
 import {BlobIndex, ColumnIndex, SignedBeaconBlock, Slot, deneb, fulu} from "@lodestar/types";
-import {fromHex, prettyBytes, toHex, withTimeout} from "@lodestar/utils";
+import {fromHex, prettyBytes, toRootHex, withTimeout} from "@lodestar/utils";
 import {VersionedHashes} from "../../../execution/index.js";
 import {kzgCommitmentToVersionedHash} from "../../../util/blobs.js";
-import {byteArrayEquals} from "../../../util/bytes.js";
 import {BlockInputError, BlockInputErrorCode} from "./errors.js";
 import {
   AddBlob,
@@ -27,7 +26,18 @@ import {

 export type BlockInput = BlockInputPreData | BlockInputBlobs | BlockInputColumns;

-export function createPromise<T>(): PromiseParts<T> {
+export function isBlockInputPreDeneb(blockInput: IBlockInput): blockInput is BlockInputPreData {
+  return blockInput.type === DAType.PreData;
+}
+export function isBlockInputBlobs(blockInput: IBlockInput): blockInput is BlockInputBlobs {
+  return blockInput.type === DAType.Blobs;
+}
+
+export function isBlockInputColumns(blockInput: IBlockInput): blockInput is BlockInputColumns {
+  return blockInput.type === DAType.Columns;
+}
+
+function createPromise<T>(): PromiseParts<T> {
   let resolve!: (value: T) => void;
   let reject!: (e: Error) => void;
   const promise = new Promise<T>((_resolve, _reject) => {
@@ -69,7 +79,7 @@ abstract class AbstractBlockInput<F extends ForkName = ForkName, TData extends DAData = DAData>
 {
   abstract type: DAType;
   daOutOfRange: boolean;
-  timeCreated: number;
+  timeCreatedSec: number;

   forkName: ForkName;
   slot: Slot;
@@ -83,7 +93,7 @@ abstract class AbstractBlockInput<F extends ForkName = ForkName, TData extends DAData = DAData>

   constructor(init: BlockInputInit) {
     this.daOutOfRange = init.daOutOfRange;
-    this.timeCreated = init.timeCreated;
+    this.timeCreatedSec = init.timeCreated;
     this.forkName = init.forkName;
     this.slot = init.slot;
     this.blockRootHex = init.blockRootHex;
@@ -134,6 +144,7 @@ abstract class AbstractBlockInput<F extends ForkName = ForkName, TData extends DAData = DAData>
     return {
       blockRoot: prettyBytes(this.blockRootHex),
       slot: this.slot,
+      timeCreatedSec: this.timeCreatedSec,
     };
   }

@@ -199,7 +210,7 @@ export class BlockInputPreData extends AbstractBlockInput<ForkPreDeneb, null> {
       forkName: props.forkName,
       slot: props.block.message.slot,
       blockRootHex: props.blockRootHex,
-      parentRootHex: toHex(props.block.message.parentRoot),
+      parentRootHex: toRootHex(props.block.message.parentRoot),
     };
     const state: BlockInputPreDataState = {
       hasBlock: true,
@@ -281,7 +292,7 @@ export class BlockInputBlobs extends AbstractBlockInput<ForkBlobsDA, deneb.BlobS
       forkName: props.forkName,
       slot: props.block.message.slot,
       blockRootHex: props.blockRootHex,
-      parentRootHex: toHex(props.block.message.parentRoot),
+      parentRootHex: toRootHex(props.block.message.parentRoot),
     };
     const blockInput = new BlockInputBlobs(init, state);
     blockInput.blockPromise.resolve(props.block);
@@ -301,7 +312,7 @@ export class BlockInputBlobs extends AbstractBlockInput<ForkBlobsDA, deneb.BlobS
       timeCreated: props.seenTimestampSec,
       forkName: props.forkName,
       blockRootHex: props.blockRootHex,
-      parentRootHex: toHex(props.blobSidecar.signedBlockHeader.message.parentRoot),
+      parentRootHex: toRootHex(props.blobSidecar.signedBlockHeader.message.parentRoot),
       slot: props.blobSidecar.signedBlockHeader.message.slot,
     };
     const blockInput = new BlockInputBlobs(init, state);
@@ -318,13 +329,14 @@ export class BlockInputBlobs extends AbstractBlockInput<ForkBlobsDA, deneb.BlobS
     return {
       blockRoot: prettyBytes(this.blockRootHex),
       slot: this.slot,
+      timeCreatedSec: this.timeCreatedSec,
       expectedBlobs: this.state.hasBlock ? this.state.block.message.body.blobKzgCommitments.length : "unknown",
       receivedBlobs: this.blobsCache.size,
     };
   }

   addBlock({blockRootHex, block, source}: AddBlock<ForkBlobsDA>): void {
-    if (!this.state.hasBlock) {
+    if (this.state.hasBlock) {
       throw new BlockInputError(
         {
           code: BlockInputErrorCode.INVALID_CONSTRUCTION,
@@ -360,6 +372,7 @@ export class BlockInputBlobs extends AbstractBlockInput<ForkBlobsDA, deneb.BlobS

     this.state = {
       ...this.state,
+      hasBlock: true,
       hasAllData,
       block,
       versionedHashes: block.message.body.blobKzgCommitments.map(kzgCommitmentToVersionedHash),
@@ -486,7 +499,11 @@ export class BlockInputBlobs extends AbstractBlockInput<ForkBlobsDA, deneb.BlobS
 }

 function blockAndBlobArePaired(block: SignedBeaconBlock<ForkBlobsDA>, blobSidecar: deneb.BlobSidecar): boolean {
-  return byteArrayEquals(block.message.body.blobKzgCommitments[blobSidecar.index], blobSidecar.kzgCommitment);
+  const blockCommitment = block.message.body.blobKzgCommitments[blobSidecar.index];
+  if (!blockCommitment || !blobSidecar.kzgCommitment) {
+    return false;
+  }
+  return Buffer.compare(blockCommitment, blobSidecar.kzgCommitment) === 0;
 }

 function assertBlockAndBlobArePaired(
@@ -590,7 +607,7 @@ export class BlockInputColumns extends AbstractBlockInput<ForkColumnsDA, fulu.Da
       timeCreated: props.source.seenTimestampSec,
       forkName: props.forkName,
      blockRootHex: props.blockRootHex,
-      parentRootHex: toHex(props.block.message.parentRoot),
+      parentRootHex: toRootHex(props.block.message.parentRoot),
       slot: props.block.message.slot,
     };
     const blockInput = new BlockInputColumns(init, state, props.sampledColumns, props.custodyColumns);
@@ -616,7 +633,7 @@ export class BlockInputColumns extends AbstractBlockInput<ForkColumnsDA, fulu.Da
       timeCreated: props.seenTimestampSec,
       forkName: props.forkName,
       blockRootHex: props.blockRootHex,
-      parentRootHex: toHex(props.columnSidecar.signedBlockHeader.message.parentRoot),
+      parentRootHex: toRootHex(props.columnSidecar.signedBlockHeader.message.parentRoot),
       slot: props.columnSidecar.signedBlockHeader.message.slot,
     };
     const blockInput = new BlockInputColumns(init, state, props.sampledColumns, props.custodyColumns);
@@ -630,6 +647,7 @@ export class BlockInputColumns extends AbstractBlockInput<ForkColumnsDA, fulu.Da
     return {
       blockRoot: prettyBytes(this.blockRootHex),
      slot: this.slot,
+      timeCreatedSec: this.timeCreatedSec,
       expectedColumns:
         this.state.hasBlock && this.state.block.message.body.blobKzgCommitments.length === 0
           ? 0
@@ -778,7 +796,7 @@ function blockAndColumnArePaired(
   return (
     block.message.body.blobKzgCommitments.length === columnSidecar.kzgCommitments.length &&
     block.message.body.blobKzgCommitments.every((commitment, index) =>
-      byteArrayEquals(commitment, columnSidecar.kzgCommitments[index])
+      Buffer.compare(commitment, columnSidecar.kzgCommitments[index]) === 0
     )
   );
 }

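A small sketch of how the new type guards narrow an IBlockInput at a call site (the handler itself is hypothetical; the guards and DAType come from the hunk above):

function describeDA(blockInput: IBlockInput): string {
  if (isBlockInputPreDeneb(blockInput)) {
    return "pre-Deneb: no blob data to wait for";
  }
  if (isBlockInputBlobs(blockInput)) {
    // narrowed to BlockInputBlobs, so blob-specific state is available
    return "Deneb+: waiting on blob sidecars";
  }
  if (isBlockInputColumns(blockInput)) {
    return "Fulu+: waiting on data column sidecars";
  }
  throw new Error("unreachable: DAType variants are exhaustive");
}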
@@ -1,3 +1,4 @@
 export * from "./blockInput.js";
 export * from "./errors.js";
+export * from "./utils.js";
 export * from "./types.js";

@@ -30,6 +30,7 @@ export type PromiseParts<T> = {
 export type LogMetaBasic = {
   slot: number;
   blockRoot: string;
+  timeCreatedSec: number;
 };

 export type LogMetaBlobs = LogMetaBasic & {
@@ -103,7 +104,7 @@ export interface IBlockInput<F extends ForkName = ForkName, TData extends DAData
   /** validator activities can't be performed on out of range data */
   daOutOfRange: boolean;

-  timeCreated: number;
+  timeCreatedSec: number;
   // block header metadata
   forkName: ForkName;
   slot: Slot;

44 packages/beacon-node/src/chain/blocks/blockInput/utils.ts (new file)
@@ -0,0 +1,44 @@
+import {ChainForkConfig} from "@lodestar/config";
+import {ForkName, isForkPostDeneb} from "@lodestar/params";
+import {computeEpochAtSlot} from "@lodestar/state-transition";
+import {Epoch, Slot} from "@lodestar/types";
+import {BlobsSource, BlockSource as BlockSourceOld} from "../types.js";
+import {BlockInputSource as BlockSource} from "./types.js";
+
+export function isDaOutOfRange(
+  config: ChainForkConfig,
+  forkName: ForkName,
+  blockSlot: Slot,
+  currentEpoch: Epoch
+): boolean {
+  if (!isForkPostDeneb(forkName)) {
+    return true;
+  }
+  return computeEpochAtSlot(blockSlot) < currentEpoch - config.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS;
+}
+
+export function convertNewToOldBlockSource(source: BlockSource): BlockSourceOld {
+  switch (source) {
+    case BlockSource.api:
+      return BlockSourceOld.api;
+    case BlockSource.byRoot:
+      return BlockSourceOld.byRoot;
+    case BlockSource.byRange:
+      return BlockSourceOld.byRange;
+    default:
+      return BlockSourceOld.gossip;
+  }
+}
+
+export function convertNewToOldBlobSource(source: BlockSource): BlobsSource {
+  switch (source) {
+    case BlockSource.api:
+      return BlobsSource.api;
+    case BlockSource.byRoot:
+      return BlobsSource.byRoot;
+    case BlockSource.byRange:
+      return BlobsSource.byRange;
+    default:
+      return BlobsSource.gossip;
+  }
+}
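For intuition about isDaOutOfRange, a worked example (the numbers are hypothetical; on mainnet MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS is 4096 and SLOTS_PER_EPOCH is 32):

// With currentEpoch = 10000, blobs are retained for epochs >= 10000 - 4096 = 5904
declare const config: ChainForkConfig; // assume MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS = 4096

isDaOutOfRange(config, ForkName.capella, 9999 * 32, 10000); // true: pre-Deneb forks have no blob DA
isDaOutOfRange(config, ForkName.deneb, 5000 * 32, 10000); // true: epoch 5000 < 5904, blobs pruned
isDaOutOfRange(config, ForkName.deneb, 9999 * 32, 10000); // false: inside the retention window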
@@ -1,9 +1,10 @@
import {toHexString} from "@chainsafe/ssz";
import {BitArray, toHexString} from "@chainsafe/ssz";
import {routes} from "@lodestar/api";
import {AncestorStatus, EpochDifference, ForkChoiceError, ForkChoiceErrorCode} from "@lodestar/fork-choice";
import {
ForkName,
ForkPostAltair,
ForkPostElectra,
ForkSeq,
INTERVALS_PER_SLOT,
MAX_SEED_LOOKAHEAD,
@@ -11,13 +12,14 @@ import {
} from "@lodestar/params";
import {
CachedBeaconStateAltair,
EpochCache,
RootCache,
computeEpochAtSlot,
computeStartSlotAtEpoch,
isStateValidatorsNodesPopulated,
} from "@lodestar/state-transition";
import {BeaconBlock, altair, capella, ssz} from "@lodestar/types";
import {isErrorAborted, toRootHex} from "@lodestar/utils";
import {Attestation, BeaconBlock, altair, capella, electra, phase0, ssz} from "@lodestar/types";
import {isErrorAborted, toHex, toRootHex} from "@lodestar/utils";
import {ZERO_HASH_HEX} from "../../constants/index.js";
import {kzgCommitmentToVersionedHash} from "../../util/blobs.js";
import {callInNextEventLoop} from "../../util/eventLoop.js";
@@ -159,6 +161,8 @@ export async function importBlock(
const rootCache = new RootCache(postState);
const invalidAttestationErrorsByCode = new Map<string, {error: Error; count: number}>();

const addAttestation = fork >= ForkSeq.electra ? addAttestationPostElectra : addAttestationPreElectra;

for (const attestation of attestations) {
try {
// TODO Electra: figure out how to reuse the attesting indices computed from state transition
@@ -166,11 +170,13 @@ export async function importBlock(
const {target, beaconBlockRoot} = attestation.data;

const attDataRoot = toRootHex(ssz.phase0.AttestationData.hashTreeRoot(indexedAttestation.data));
this.seenAggregatedAttestations.add(
target.epoch,
addAttestation.call(
this,
postState.epochCtx,
target,
attDataRoot,
{aggregationBits: attestation.aggregationBits, trueBitCount: indexedAttestation.attestingIndices.length},
true
attestation as Attestation<ForkPostElectra>,
indexedAttestation
);
// Duplicated logic from fork-choice onAttestation validation logic.
// Attestations outside of this range will be dropped as Errors, so no need to import
@@ -531,3 +537,58 @@ export async function importBlock(
delaySec: this.clock.secFromSlot(blockSlot),
});
}

export function addAttestationPreElectra(
this: BeaconChain,
// added to have the same signature as addAttestationPostElectra
_: EpochCache,
target: phase0.Checkpoint,
attDataRoot: string,
attestation: Attestation,
indexedAttestation: phase0.IndexedAttestation
): void {
this.seenAggregatedAttestations.add(
target.epoch,
attestation.data.index,
attDataRoot,
{aggregationBits: attestation.aggregationBits, trueBitCount: indexedAttestation.attestingIndices.length},
true
);
}

export function addAttestationPostElectra(
this: BeaconChain,
epochCtx: EpochCache,
target: phase0.Checkpoint,
attDataRoot: string,
attestation: Attestation<ForkPostElectra>,
indexedAttestation: electra.IndexedAttestation
): void {
const committeeIndices = attestation.committeeBits.getTrueBitIndexes();
if (committeeIndices.length === 1) {
this.seenAggregatedAttestations.add(
target.epoch,
committeeIndices[0],
attDataRoot,
{aggregationBits: attestation.aggregationBits, trueBitCount: indexedAttestation.attestingIndices.length},
true
);
} else {
const committees = epochCtx.getBeaconCommittees(attestation.data.slot, committeeIndices);
const aggregationBools = attestation.aggregationBits.toBoolArray();
let offset = 0;
for (let i = 0; i < committees.length; i++) {
const committee = committees[i];
const aggregationBits = BitArray.fromBoolArray(aggregationBools.slice(offset, offset + committee.length));
const trueBitCount = aggregationBits.getTrueBitIndexes().length;
offset += committee.length;
this.seenAggregatedAttestations.add(
target.epoch,
committeeIndices[i],
attDataRoot,
{aggregationBits, trueBitCount},
true
);
}
}
}

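The multi-committee branch above recovers per-committee participation by slicing the flat aggregationBits at each committee's length. A standalone sketch of that slicing logic, where committeeLengths is a hypothetical input standing in for the sizes returned by epochCtx.getBeaconCommittees:

import {BitArray} from "@chainsafe/ssz";

// Split a post-Electra multi-committee aggregation bitfield back into one
// BitArray per committee, in the order of the set bits of committeeBits.
function splitAggregationBits(aggregationBits: BitArray, committeeLengths: number[]): BitArray[] {
  const bools = aggregationBits.toBoolArray();
  const perCommittee: BitArray[] = [];
  let offset = 0;
  for (const len of committeeLengths) {
    perCommittee.push(BitArray.fromBoolArray(bools.slice(offset, offset + len)));
    offset += len;
  }
  return perCommittee;
}
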
@@ -101,6 +101,7 @@ import {SeenGossipBlockInput} from "./seenCache/index.js";
import {SeenAggregatedAttestations} from "./seenCache/seenAggregateAndProof.js";
import {SeenAttestationDatas} from "./seenCache/seenAttestationData.js";
import {SeenBlockAttesters} from "./seenCache/seenBlockAttesters.js";
import {SeenBlockInputCache} from "./seenCache/seenBlockInput.js";
import {ShufflingCache} from "./shufflingCache.js";
import {BlockStateCacheImpl} from "./stateCache/blockStateCacheImpl.js";
import {DbCPStateDatastore} from "./stateCache/datastore/db.js";
@@ -146,7 +147,7 @@ export class BeaconChain implements IBeaconChain {
readonly attestationPool: AttestationPool;
readonly aggregatedAttestationPool: AggregatedAttestationPool;
readonly syncCommitteeMessagePool: SyncCommitteeMessagePool;
readonly syncContributionAndProofPool = new SyncContributionAndProofPool();
readonly syncContributionAndProofPool;
readonly opPool = new OpPool();

// Gossip seen cache
@@ -158,6 +159,7 @@ export class BeaconChain implements IBeaconChain {
readonly seenContributionAndProof: SeenContributionAndProof;
readonly seenAttestationDatas: SeenAttestationDatas;
readonly seenGossipBlockInput: SeenGossipBlockInput;
readonly seenBlockInputCache: SeenBlockInputCache;
// Seen cache for liveness checks
readonly seenBlockAttesters = new SeenBlockAttesters();

@@ -259,6 +261,7 @@ export class BeaconChain implements IBeaconChain {
preAggregateCutOffTime,
this.opts?.preaggregateSlotDistance
);
this.syncContributionAndProofPool = new SyncContributionAndProofPool(clock, metrics, logger);

this.seenAggregatedAttestations = new SeenAggregatedAttestations(metrics);
this.seenContributionAndProof = new SeenContributionAndProof(metrics);
@@ -269,6 +272,14 @@ export class BeaconChain implements IBeaconChain {

this.beaconProposerCache = new BeaconProposerCache(opts);
this.checkpointBalancesCache = new CheckpointBalancesCache();
this.seenBlockInputCache = new SeenBlockInputCache({
config,
clock,
chainEvents: emitter,
signal,
metrics,
logger,
});

// Restore state caches
// anchorState may already by a CachedBeaconState. If so, don't create the cache again, since deserializing all
@@ -1104,7 +1115,7 @@ export class BeaconChain implements IBeaconChain {
metrics.opPool.proposerSlashingPoolSize.set(this.opPool.proposerSlashingsSize);
metrics.opPool.voluntaryExitPoolSize.set(this.opPool.voluntaryExitsSize);
metrics.opPool.syncCommitteeMessagePoolSize.set(this.syncCommitteeMessagePool.size);
metrics.opPool.syncContributionAndProofPoolSize.set(this.syncContributionAndProofPool.size);
// syncContributionAndProofPool tracks metrics on its own
metrics.opPool.blsToExecutionChangePoolSize.set(this.opPool.blsToExecutionChangeSize);
metrics.chain.blacklistedBlocks.set(this.blacklistedBlocks.size);

@@ -1,11 +1,12 @@
import {ChainForkConfig} from "@lodestar/config";
import {ZERO_HASH} from "@lodestar/params";
import {
BeaconStateAllForks,
CachedBeaconStateAllForks,
computeEpochAtSlot,
computeStartSlotAtEpoch,
} from "@lodestar/state-transition";
import {SignedBeaconBlock} from "@lodestar/types";
import {SignedBeaconBlock, ssz} from "@lodestar/types";
import {Logger, toHex, toRootHex} from "@lodestar/utils";
import {GENESIS_SLOT} from "../constants/index.js";
import {IBeaconDb} from "../db/index.js";
@@ -40,6 +41,22 @@ export async function persistAnchorState(
): Promise<void> {
if (anchorState.slot === GENESIS_SLOT) {
const genesisBlock = createGenesisBlock(config, anchorState);
const blockRoot = config.getForkTypes(GENESIS_SLOT).BeaconBlock.hashTreeRoot(genesisBlock.message);

const latestBlockHeader = ssz.phase0.BeaconBlockHeader.clone(anchorState.latestBlockHeader);

if (ssz.Root.equals(latestBlockHeader.stateRoot, ZERO_HASH)) {
latestBlockHeader.stateRoot = anchorState.hashTreeRoot();
}

const latestBlockRoot = ssz.phase0.BeaconBlockHeader.hashTreeRoot(latestBlockHeader);

if (Buffer.compare(blockRoot, latestBlockRoot) !== 0) {
throw Error(
`Genesis block root ${toRootHex(blockRoot)} does not match genesis state latest block root ${toRootHex(latestBlockRoot)}`
);
}

await Promise.all([
db.blockArchive.add(genesisBlock),
db.block.add(genesisBlock),

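The stateRoot patch above exists because, per the consensus spec, the genesis state stores its latestBlockHeader with stateRoot = ZERO_HASH; the root is only filled in when the first block is processed. A minimal sketch of the invariant being relied on:

import {ZERO_HASH} from "@lodestar/params";
import {ssz} from "@lodestar/types";

// A default (genesis-style) header carries a zero state root until the first block fills it in,
// which is why persistAnchorState substitutes anchorState.hashTreeRoot() before comparing roots.
const header = ssz.phase0.BeaconBlockHeader.defaultValue();
console.log(ssz.Root.equals(header.stateRoot, ZERO_HASH)); // true
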
@@ -62,6 +62,7 @@ import {SeenGossipBlockInput} from "./seenCache/index.js";
import {SeenAggregatedAttestations} from "./seenCache/seenAggregateAndProof.js";
import {SeenAttestationDatas} from "./seenCache/seenAttestationData.js";
import {SeenBlockAttesters} from "./seenCache/seenBlockAttesters.js";
import {SeenBlockInputCache} from "./seenCache/seenBlockInput.js";
import {ShufflingCache} from "./shufflingCache.js";
import {ValidatorMonitor} from "./validatorMonitor.js";

@@ -126,6 +127,7 @@ export interface IBeaconChain {
readonly seenSyncCommitteeMessages: SeenSyncCommitteeMessages;
readonly seenContributionAndProof: SeenContributionAndProof;
readonly seenAttestationDatas: SeenAttestationDatas;
readonly seenBlockInputCache: SeenBlockInputCache;
readonly seenGossipBlockInput: SeenGossipBlockInput;
// Seen cache for liveness checks
readonly seenBlockAttesters: SeenBlockAttesters;

@@ -67,7 +67,22 @@ export type AttestationsConsolidation = {
*/
type GetNotSeenValidatorsFn = (epoch: Epoch, slot: Slot, committeeIndex: number) => Set<number> | null;

type ValidateAttestationDataFn = (attData: phase0.AttestationData) => boolean;
/**
* Invalid attestation data reasons, this is useful to track in metrics.
*/
export enum InvalidAttestationData {
InvalidTargetEpoch = "invalid_target_epoch",
InvalidSourceCheckPoint = "invalid_source_checkpoint",
BlockNotInForkChoice = "block_not_in_fork_choice",
CannotGetShufflingDependentRoot = "cannot_get_shuffling_dependent_root",
IncorrectDependentRoot = "incorrect_dependent_root",
}

/**
* Validate attestation data for inclusion in a block.
* Returns InvalidAttestationData if attestation data is invalid, null otherwise.
*/
type ValidateAttestationDataFn = (attData: phase0.AttestationData) => InvalidAttestationData | null;

/**
* Limit the max attestations with the same AttestationData.
@@ -272,7 +287,7 @@ export class AggregatedAttestationPool {
continue;
}

if (!validateAttestationDataFn(attestationGroup.data)) {
if (validateAttestationDataFn(attestationGroup.data) !== null) {
continue;
}

@@ -381,8 +396,11 @@ export class AggregatedAttestationPool {
continue;
}

if (!validateAttestationDataFn(allAttestationGroups[0].data)) {
this.metrics?.opPool.aggregatedAttestationPool.packedAttestations.invalidAttestationData.inc();
const invalidAttDataReason = validateAttestationDataFn(allAttestationGroups[0].data);
if (invalidAttDataReason !== null) {
this.metrics?.opPool.aggregatedAttestationPool.packedAttestations.invalidAttestationData.inc({
reason: invalidAttDataReason,
});
continue;
}

@@ -912,20 +930,22 @@ export function extractParticipationPhase0(
}

/**
* This returns a function to validate if an attestation data is compatible to a state,
* it's an optimized version of isValidAttestationData().
* Atttestation data is validated by:
* This returns a function to validate if an attestation data is compatible to a state.
*
* Attestation data is validated by:
* - Validate the source checkpoint
* - Validate shuffling using beacon block root and target epoch
*
* Here we always validate the source checkpoint, and cache beacon block root + target epoch
* to avoid running the same shuffling validation multiple times.
*
* See also: https://github.com/ChainSafe/lodestar/issues/4333
*/
export function getValidateAttestationDataFn(
forkChoice: IForkChoice,
state: CachedBeaconStateAllForks
): ValidateAttestationDataFn {
const cachedValidatedAttestationData = new Map<string, boolean>();
const cachedValidatedAttestationData = new Map<string, InvalidAttestationData | null>();
const {previousJustifiedCheckpoint, currentJustifiedCheckpoint} = state;
const stateEpoch = state.epochCtx.epoch;
return (attData: phase0.AttestationData) => {
@@ -937,75 +957,42 @@ export function getValidateAttestationDataFn(
} else if (targetEpoch === stateEpoch - 1) {
justifiedCheckpoint = previousJustifiedCheckpoint;
} else {
return false;
return InvalidAttestationData.InvalidTargetEpoch;
}

if (!ssz.phase0.Checkpoint.equals(attData.source, justifiedCheckpoint)) {
return false;
return InvalidAttestationData.InvalidSourceCheckPoint;
}

// Shuffling can't have changed if we're in the first few epochs
// Also we can't look back 2 epochs if target epoch is 1 or less
if (stateEpoch < 2 || targetEpoch < 2) {
return true;
// null means valid
return null;
}

// the isValidAttestationData does not depend on slot and index
// valid attestation data does not depend on slot and index
const beaconBlockRootHex = toRootHex(attData.beaconBlockRoot);
const cacheKey = beaconBlockRootHex + targetEpoch;
let isValid = cachedValidatedAttestationData.get(cacheKey);
if (isValid === undefined) {
isValid = isValidShuffling(forkChoice, state, beaconBlockRootHex, targetEpoch);
cachedValidatedAttestationData.set(cacheKey, isValid);
let invalidReasonOrNull = cachedValidatedAttestationData.get(cacheKey);
if (invalidReasonOrNull === undefined) {
invalidReasonOrNull = isValidShuffling(forkChoice, state, beaconBlockRootHex, targetEpoch);
cachedValidatedAttestationData.set(cacheKey, invalidReasonOrNull);
}
return isValid;
return invalidReasonOrNull;
};
}

/**
* A straight forward version to validate attestation data. We don't use it, but keep it here for reference.
* - Validate the source checkpoint
* - Since we validated attestation's signature in gossip validation function,
* we only need to validate the shuffling of attestation
* is compatible to this state.
* (see https://github.com/ChainSafe/lodestar/issues/4333)
* @returns
* Validate the shuffling of an attestation data against the current state.
* Return `null` if the shuffling is valid, otherwise return an `InvalidAttestationData` reason.
*/
export function isValidAttestationData(
forkChoice: IForkChoice,
state: CachedBeaconStateAllForks,
data: phase0.AttestationData
): boolean {
const {previousJustifiedCheckpoint, currentJustifiedCheckpoint} = state;
let justifiedCheckpoint: phase0.Checkpoint;
const stateEpoch = state.epochCtx.epoch;
const targetEpoch = data.target.epoch;

if (targetEpoch === stateEpoch) {
justifiedCheckpoint = currentJustifiedCheckpoint;
} else if (targetEpoch === stateEpoch - 1) {
justifiedCheckpoint = previousJustifiedCheckpoint;
} else {
return false;
}

if (!ssz.phase0.Checkpoint.equals(data.source, justifiedCheckpoint)) return false;

// Shuffling can't have changed if we're in the first few epochs
// Also we can't look back 2 epochs if target epoch is 1 or less
if (stateEpoch < 2 || targetEpoch < 2) {
return true;
}
const beaconBlockRootHex = toRootHex(data.beaconBlockRoot);
return isValidShuffling(forkChoice, state, beaconBlockRootHex, targetEpoch);
}

function isValidShuffling(
forkChoice: IForkChoice,
state: CachedBeaconStateAllForks,
blockRootHex: RootHex,
targetEpoch: Epoch
): boolean {
): InvalidAttestationData | null {
// Otherwise the shuffling is determined by the block at the end of the target epoch
// minus the shuffling lookahead (usually 2). We call this the "pivot".
const pivotSlot = computeStartSlotAtEpoch(targetEpoch - 1) - 1;
@@ -1018,7 +1005,7 @@ function isValidShuffling(
const beaconBlockRootHex = blockRootHex;
const beaconBlock = forkChoice.getBlockHex(beaconBlockRootHex);
if (!beaconBlock) {
throw Error(`Attestation data.beaconBlockRoot ${beaconBlockRootHex} not found in forkchoice`);
return InvalidAttestationData.BlockNotInForkChoice;
}

let attestationDependentRoot: string;
@@ -1035,7 +1022,13 @@ function isValidShuffling(
// getDependent root may throw error if the dependent root of attestation data is prior to finalized slot
// ignore this attestation data in that case since we're not sure it's compatible to the state
// see https://github.com/ChainSafe/lodestar/issues/4743
return false;
return InvalidAttestationData.CannotGetShufflingDependentRoot;
}
return attestationDependentRoot === stateDependentRoot;

if (attestationDependentRoot !== stateDependentRoot) {
return InvalidAttestationData.IncorrectDependentRoot;
}

// If the dependent root matches, then the shuffling is valid.
return null;
}

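The verdict-or-null shape plus memoization used by getValidateAttestationDataFn above generalizes to a small pattern, sketched here standalone (all names are illustrative, not from the source):

// Cache a "reason or null" verdict per key so repeated inputs with the same
// (beaconBlockRoot, targetEpoch) pair do not re-run the expensive shuffling check.
function memoizeVerdict<K, R>(compute: (key: K) => R | null): (key: K) => R | null {
  const cache = new Map<K, R | null>();
  return (key: K) => {
    let verdict = cache.get(key);
    if (verdict === undefined) {
      verdict = compute(key);
      cache.set(key, verdict);
    }
    return verdict;
  };
}

// Usage sketch: `null` means "valid"; any string reason means "invalid" and can label a metric.
const check = memoizeVerdict<string, string>((root) => (root.startsWith("0x") ? null : "bad_root"));
check("0xabc"); // null (valid), computed once and then served from the cache
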
@@ -3,7 +3,10 @@ import {BitArray} from "@chainsafe/ssz";
import {SYNC_COMMITTEE_SIZE, SYNC_COMMITTEE_SUBNET_SIZE} from "@lodestar/params";
import {G2_POINT_AT_INFINITY} from "@lodestar/state-transition";
import {Root, Slot, SubnetID, altair, ssz} from "@lodestar/types";
import {MapDef, toRootHex} from "@lodestar/utils";
import {Logger, MapDef, toRootHex} from "@lodestar/utils";
import {MAXIMUM_GOSSIP_CLOCK_DISPARITY} from "../../constants/constants.js";
import {Metrics} from "../../metrics/metrics.js";
import {IClock} from "../../util/clock.js";
import {InsertOutcome, OpPoolError, OpPoolErrorCode} from "./types.js";
import {pruneBySlot, signatureFromBytesNoCheck} from "./utils.js";

@@ -47,11 +50,16 @@ export class SyncContributionAndProofPool {

private lowestPermissibleSlot = 0;

constructor() {
constructor(
private readonly clock: IClock,
private readonly metrics: Metrics | null = null,
private logger: Logger | null = null
) {
// Param guarantee for optimizations below that merge syncSubcommitteeBits as bytes
if (SYNC_COMMITTEE_SUBNET_SIZE % 8 !== 0) {
throw Error("SYNC_COMMITTEE_SUBNET_SIZE must be multiple of 8");
}
metrics?.opPool.syncContributionAndProofPool.size.addCollect(() => this.onScrapeMetrics(metrics));
}

/** Returns current count of unique SyncContributionFast by block root and subnet */
@@ -68,7 +76,11 @@ export class SyncContributionAndProofPool {
/**
* Only call this once we pass all validation.
*/
add(contributionAndProof: altair.ContributionAndProof, syncCommitteeParticipants: number): InsertOutcome {
add(
contributionAndProof: altair.ContributionAndProof,
syncCommitteeParticipants: number,
priority?: boolean
): InsertOutcome {
const {contribution} = contributionAndProof;
const {slot, beaconBlockRoot} = contribution;
const rootHex = toRootHex(beaconBlockRoot);
@@ -78,6 +90,12 @@ export class SyncContributionAndProofPool {
return InsertOutcome.Old;
}

// Reject ContributionAndProofs of previous slots
// for api ContributionAndProofs, we allow them to be added to the pool
if (!priority && slot < this.clock.slotWithPastTolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY)) {
return InsertOutcome.Late;
}

// Limit object per slot
const bestContributionBySubnetByRoot = this.bestContributionBySubnetRootBySlot.getOrDefault(slot);
if (bestContributionBySubnetByRoot.size >= MAX_ITEMS_PER_SLOT) {
@@ -95,12 +113,25 @@ export class SyncContributionAndProofPool {
}

/**
* This is for the block factory, the same to process_sync_committee_contributions in the spec.
* This is for producing blocks, the same to process_sync_committee_contributions in the spec.
*/
getAggregate(slot: Slot, prevBlockRoot: Root): altair.SyncAggregate {
const bestContributionBySubnet = this.bestContributionBySubnetRootBySlot.get(slot)?.get(toRootHex(prevBlockRoot));
if (!bestContributionBySubnet || bestContributionBySubnet.size === 0) {
// TODO: Add metric for missing SyncAggregate
const opPoolMetrics = this.metrics?.opPool.syncContributionAndProofPool;
const bestContributionBySubnetByRoot = this.bestContributionBySubnetRootBySlot.getOrDefault(slot);
opPoolMetrics?.getAggregateRoots.set(bestContributionBySubnetByRoot.size);
const prevBlockRootHex = toRootHex(prevBlockRoot);
const bestContributionBySubnet = bestContributionBySubnetByRoot.getOrDefault(prevBlockRootHex);
opPoolMetrics?.getAggregateSubnets.set(bestContributionBySubnet.size);

if (bestContributionBySubnet.size === 0) {
opPoolMetrics?.getAggregateReturnsEmpty.inc();
// this may happen, see https://github.com/ChainSafe/lodestar/issues/7299
const availableRoots = Array.from(bestContributionBySubnetByRoot.keys()).join(",");
this.logger?.warn("SyncContributionAndProofPool.getAggregate: no contributions for root", {
slot,
root: prevBlockRootHex,
availableRoots,
});
// Must return signature as G2_POINT_AT_INFINITY when participating bits are empty
// https://github.com/ethereum/consensus-specs/blob/30f2a076377264677e27324a8c3c78c590ae5e20/specs/altair/bls.md#eth2_fast_aggregate_verify
return {
@@ -109,7 +140,7 @@ export class SyncContributionAndProofPool {
};
}

return aggregate(bestContributionBySubnet);
return aggregate(bestContributionBySubnet, this.metrics);
}

/**
@@ -121,6 +152,24 @@ export class SyncContributionAndProofPool {
pruneBySlot(this.bestContributionBySubnetRootBySlot, headSlot, SLOTS_RETAINED);
this.lowestPermissibleSlot = Math.max(headSlot - SLOTS_RETAINED, 0);
}

private onScrapeMetrics(metrics: Metrics): void {
const poolMetrics = metrics.opPool.syncContributionAndProofPool;
poolMetrics.size.set(this.size);
const previousSlot = this.clock.currentSlot - 1;
const contributionBySubnetByBlockRoot = this.bestContributionBySubnetRootBySlot.getOrDefault(previousSlot);
poolMetrics.blockRootsPerSlot.set(contributionBySubnetByBlockRoot.size);
let index = 0;
for (const contributionsBySubnet of contributionBySubnetByBlockRoot.values()) {
let participationCount = 0;
for (const contribution of contributionsBySubnet.values()) {
participationCount += contribution.numParticipants;
}
poolMetrics.subnetsByBlockRoot.set({index}, contributionsBySubnet.size);
poolMetrics.participantsByBlockRoot.set({index}, participationCount);
index++;
}
}
}

/**
@@ -163,12 +212,17 @@ export function contributionToFast(
* Aggregate best contributions of each subnet into SyncAggregate
* @returns SyncAggregate to be included in block body.
*/
export function aggregate(bestContributionBySubnet: Map<number, SyncContributionFast>): altair.SyncAggregate {
export function aggregate(
bestContributionBySubnet: Map<number, SyncContributionFast>,
metrics: Metrics | null = null
): altair.SyncAggregate {
// check for empty/undefined bestContributionBySubnet earlier
const syncCommitteeBits = BitArray.fromBitLen(SYNC_COMMITTEE_SIZE);

const signatures: Signature[] = [];
let participationCount = 0;
for (const [subnet, bestContribution] of bestContributionBySubnet.entries()) {
participationCount += bestContribution.numParticipants;
const byteOffset = subnet * SYNC_COMMITTEE_SUBNET_BYTES;

for (let i = 0; i < SYNC_COMMITTEE_SUBNET_BYTES; i++) {
@@ -177,6 +231,8 @@ export function aggregate(bestContributionBySubnet: Map<number, SyncContribution

signatures.push(signatureFromBytesNoCheck(bestContribution.syncSubcommitteeSignature));
}

metrics?.opPool.syncContributionAndProofPool.getAggregateParticipants.set(participationCount);
return {
syncCommitteeBits,
syncCommitteeSignature: aggregateSignatures(signatures).toBytes(),

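The byte-wise merge in aggregate() relies on each subnet owning a contiguous, byte-aligned slice of the sync committee bitfield, which is what the constructor's divisible-by-8 assertion guarantees. A sketch of the layout, using only the two constants the file already imports (mainnet values noted in comments):

import {SYNC_COMMITTEE_SIZE, SYNC_COMMITTEE_SUBNET_SIZE} from "@lodestar/params";

// Mainnet: SYNC_COMMITTEE_SIZE = 512, SYNC_COMMITTEE_SUBNET_SIZE = 128 -> 4 subnets of 16 bytes each.
const SYNC_COMMITTEE_SUBNET_BYTES = SYNC_COMMITTEE_SUBNET_SIZE / 8;
const subnetCount = SYNC_COMMITTEE_SIZE / SYNC_COMMITTEE_SUBNET_SIZE;
for (let subnet = 0; subnet < subnetCount; subnet++) {
  // aggregate() copies each best contribution's bytes into this fixed, non-overlapping window.
  console.log(`subnet ${subnet} owns bytes [${subnet * SYNC_COMMITTEE_SUBNET_BYTES}, ${(subnet + 1) * SYNC_COMMITTEE_SUBNET_BYTES})`);
}
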
@@ -13,7 +13,7 @@ export enum InsertOutcome {
Old = "Old",
/** The pool has reached its limit. No changes were made. */
ReachLimit = "ReachLimit",
/** Attestation comes to the pool at > 2/3 of slot. No changes were made */
/** Messages don't bring any value, for example attestations come to the pool at > 2/3 of slot. No changes were made */
Late = "Late",
/** The data is know, and the new participants have been added to the aggregated signature */
Aggregated = "Aggregated",

@@ -1,5 +1,5 @@
import {BitArray} from "@chainsafe/ssz";
import {Epoch, RootHex} from "@lodestar/types";
import {CommitteeIndex, Epoch, RootHex} from "@lodestar/types";
import {MapDef} from "@lodestar/utils";
import {Metrics} from "../../metrics/index.js";
import {isSuperSetOrEqual} from "../../util/bitArray.js";
@@ -36,15 +36,29 @@ export class SeenAggregatedAttestations {
* Array of AttestingIndices by same attestation data root by epoch.
* Note that there are at most TARGET_AGGREGATORS_PER_COMMITTEE (16) per attestation data.
* */
private readonly aggregateRootsByEpoch = new MapDef<Epoch, MapDef<RootHex, AggregationInfo[]>>(
() => new MapDef<RootHex, AggregationInfo[]>(() => [])
private readonly aggregateRootsByEpoch = new MapDef<
Epoch,
MapDef<CommitteeIndex, MapDef<RootHex, AggregationInfo[]>>
>(
() =>
new MapDef<CommitteeIndex, MapDef<RootHex, AggregationInfo[]>>(
() => new MapDef<RootHex, AggregationInfo[]>(() => [])
)
);
private lowestPermissibleEpoch: Epoch = 0;

constructor(private readonly metrics: Metrics | null) {}

isKnown(targetEpoch: Epoch, attDataRoot: RootHex, aggregationBits: BitArray): boolean {
const seenAggregationInfoArr = this.aggregateRootsByEpoch.getOrDefault(targetEpoch).getOrDefault(attDataRoot);
isKnown(
targetEpoch: Epoch,
committeeIndex: CommitteeIndex,
attDataRoot: RootHex,
aggregationBits: BitArray
): boolean {
const seenAggregationInfoArr = this.aggregateRootsByEpoch
.getOrDefault(targetEpoch)
.getOrDefault(committeeIndex)
.getOrDefault(attDataRoot);
this.metrics?.seenCache.aggregatedAttestations.isKnownCalls.inc();

for (let i = 0; i < seenAggregationInfoArr.length; i++) {
@@ -59,13 +73,22 @@ export class SeenAggregatedAttestations {
return false;
}

add(targetEpoch: Epoch, attDataRoot: RootHex, newItem: AggregationInfo, checkIsKnown: boolean): void {
add(
targetEpoch: Epoch,
committeeIndex: CommitteeIndex,
attDataRoot: RootHex,
newItem: AggregationInfo,
checkIsKnown: boolean
): void {
const {aggregationBits} = newItem;
if (checkIsKnown && this.isKnown(targetEpoch, attDataRoot, aggregationBits)) {
if (checkIsKnown && this.isKnown(targetEpoch, committeeIndex, attDataRoot, aggregationBits)) {
return;
}

const seenAggregationInfoArr = this.aggregateRootsByEpoch.getOrDefault(targetEpoch).getOrDefault(attDataRoot);
const seenAggregationInfoArr = this.aggregateRootsByEpoch
.getOrDefault(targetEpoch)
.getOrDefault(committeeIndex)
.getOrDefault(attDataRoot);
insertDesc(seenAggregationInfoArr, newItem);
}

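The change above deepens the seen-cache keying from (epoch, attDataRoot) to (epoch, committeeIndex, attDataRoot) so that post-Electra aggregates from different committees no longer collide. A minimal sketch of the nested MapDef pattern in isolation (keys and values are illustrative):

import {MapDef} from "@lodestar/utils";

// Three-level map: epoch -> committeeIndex -> attDataRoot -> array.
// getOrDefault() creates missing inner maps on demand, so lookups never return undefined.
const byEpoch = new MapDef<number, MapDef<number, MapDef<string, number[]>>>(
  () => new MapDef<number, MapDef<string, number[]>>(() => new MapDef<string, number[]>(() => []))
);
const arr = byEpoch.getOrDefault(10).getOrDefault(3).getOrDefault("0xroot");
arr.push(42); // mutates the stored array in place
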
304
packages/beacon-node/src/chain/seenCache/seenBlockInput.ts
Normal file
@@ -0,0 +1,304 @@
import {ChainForkConfig} from "@lodestar/config";
import {CheckpointWithHex} from "@lodestar/fork-choice";
import {ForkName, isForkPostDeneb} from "@lodestar/params";
import {computeStartSlotAtEpoch} from "@lodestar/state-transition";
import {RootHex, SignedBeaconBlock, Slot, deneb} from "@lodestar/types";
import {LodestarError, Logger, toRootHex} from "@lodestar/utils";
import {Metrics} from "../../metrics/metrics.js";
import {IClock} from "../../util/clock.js";
import {
BlockInputBlobs,
BlockInputPreData,
DAType,
ForkBlobsDA,
IBlockInput,
LogMetaBasic,
LogMetaBlobs,
SourceMeta,
isBlockInputBlobs,
isDaOutOfRange,
} from "../blocks/blockInput/index.js";
import {ChainEvent, ChainEventEmitter} from "../emitter.js";

const MAX_BLOCK_INPUT_CACHE_SIZE = 5;

export type SeenBlockInputCacheModules = {
config: ChainForkConfig;
clock: IClock;
chainEvents: ChainEventEmitter;
signal: AbortSignal;
// custodyConfig: CustodyConfig;
metrics: Metrics | null;
logger?: Logger;
};

export type GetByBlobOptions = {
throwErrorIfAlreadyKnown?: boolean;
};

/**
* Consumers that create BlockInputs or change types of old BlockInputs
*
* - gossipHandlers (block and blob)
* - beaconBlocksMaybeBlobsByRange
* - unavailableBeaconBlobsByRoot (beaconBlocksMaybeBlobsByRoot)
* - publishBlock in the beacon/blocks/index.ts API
* https://github.com/ChainSafe/lodestar/blob/unstable/packages/beacon-node/src/api/impl/beacon/blocks/index.ts#L62
* - maybeValidateBlobs in verifyBlocksDataAvailability (is_data_available spec function)
* https://github.com/ChainSafe/lodestar/blob/unstable/packages/beacon-node/src/chain/blocks/verifyBlocksDataAvailability.ts#L111
*
*
* Pruning management for SeenBlockInputCache
* ------------------------------------------
* There are four cases for how pruning needs to be handled
* - Normal operation following head via gossip (and/or reqresp). For this situation the consumer (process pipeline or
* caller of processBlock) will call the `prune` method to remove any processed BlockInputs from the cache. This will
* also remove any ancestors of the processed BlockInput as that will also need to have been successfully processed
* for import to work correctly
* - onFinalized event handler will help to prune any non-canonical forks once the chain finalizes. Any block-slots that
* are before the finalized checkpoint will be pruned.
* - Range-sync periods. The range process uses this cache to store and sync blocks with DA data as the chain is pulled
* from peers. We pull batches, by epoch, so 32 slots are pulled at a time and several batches are pulled concurrently.
* It is important to set the MAX_BLOCK_INPUT_CACHE_SIZE high enough to support range sync activities. Currently the
* value is set for 5 batches of 32 slots. As process block is called (similar to following head) the BlockInput and
* its ancestors will be pruned.
* - Non-Finality times. This is a bit more tricky. There can be long periods of non-finality and storing everything
* will cause OOM. The pruneToMax will help ensure a hard limit on the number of stored blocks (with DA) that are held
* in memory at any one time. The value for MAX_BLOCK_INPUT_CACHE_SIZE is set to accommodate range-sync but in
* practice this value may need to be massaged in the future if we find issues when debugging non-finality
*/

export class SeenBlockInputCache {
private readonly config: ChainForkConfig;
private readonly clock: IClock;
private readonly chainEvents: ChainEventEmitter;
private readonly signal: AbortSignal;
private readonly metrics: Metrics | null;
private readonly logger?: Logger;
private blockInputs = new Map<RootHex, IBlockInput>();

constructor({config, clock, chainEvents, signal, metrics, logger}: SeenBlockInputCacheModules) {
this.config = config;
this.clock = clock;
this.chainEvents = chainEvents;
this.signal = signal;
this.metrics = metrics;
this.logger = logger;

if (metrics) {
metrics.seenCache.blockInput.blockInputCount.addCollect(() =>
metrics.seenCache.blockInput.blockInputCount.set(this.blockInputs.size)
);
}

this.chainEvents.on(ChainEvent.forkChoiceFinalized, this.onFinalized);
this.signal.addEventListener("abort", () => {
this.chainEvents.off(ChainEvent.forkChoiceFinalized, this.onFinalized);
});
}

has(rootHex: RootHex): boolean {
return this.blockInputs.has(rootHex);
}

get(rootHex: RootHex): IBlockInput | undefined {
return this.blockInputs.get(rootHex);
}

/**
* Removes the single BlockInput from the cache
*/
remove(rootHex: RootHex): void {
this.blockInputs.delete(rootHex);
}

/**
* Removes a processed BlockInput from the cache and also removes any ancestors of processed blocks
*/
prune(rootHex: RootHex): void {
let blockInput = this.blockInputs.get(rootHex);
let parentRootHex = blockInput?.parentRootHex;
while (blockInput) {
this.blockInputs.delete(blockInput.blockRootHex);
blockInput = this.blockInputs.get(parentRootHex ?? "");
parentRootHex = blockInput?.parentRootHex;
}
this.pruneToMaxSize();
}

onFinalized = (checkpoint: CheckpointWithHex) => {
const cutoffSlot = computeStartSlotAtEpoch(checkpoint.epoch);
for (const [rootHex, blockInput] of this.blockInputs) {
if (blockInput.slot < cutoffSlot) {
this.blockInputs.delete(rootHex);
}
}
this.pruneToMaxSize();
};

getByBlock({block, source, seenTimestampSec, peerIdStr}: SourceMeta & {block: SignedBeaconBlock}): IBlockInput {
const blockRoot = this.config.getForkTypes(block.message.slot).BeaconBlock.hashTreeRoot(block.message);
const blockRootHex = toRootHex(blockRoot);

// TODO(peerDAS): Why is it necessary to static cast this here. All conditional paths result in a valid value so should be defined correctly below
let blockInput = this.blockInputs.get(blockRootHex) as IBlockInput;
if (!blockInput) {
const {forkName, daOutOfRange} = this.buildCommonProps(block.message.slot);
if (!isForkPostDeneb(forkName)) {
blockInput = BlockInputPreData.createFromBlock({
block,
blockRootHex,
daOutOfRange,
forkName,
source: {
source,
seenTimestampSec,
peerIdStr,
},
});
}
// else if (isForkPostFulu(forkName)) {
//   blockInput = new BlockInputColumns.createFromBlock({
//     block,
//     blockRootHex,
//     daOutOfRange,
//     forkName,
//     custodyColumns: this.custodyConfig.custodyColumns,
//     sampledColumns: this.custodyConfig.sampledColumns,
//     source: {
//       source,
//       seenTimestampSec,
//       peerIdStr
//     }
//   })
// }
else {
blockInput = BlockInputBlobs.createFromBlock({
block: block as SignedBeaconBlock<ForkBlobsDA>,
blockRootHex,
daOutOfRange,
forkName,
source: {
source,
seenTimestampSec,
peerIdStr,
},
});
}
this.blockInputs.set(blockInput.blockRootHex, blockInput);
}

if (!blockInput.hasBlock()) {
blockInput.addBlock({block, blockRootHex, source: {source, seenTimestampSec, peerIdStr}});
} else {
this.logger?.debug("Attempt to cache block but is already cached on BlockInput", blockInput.getLogMeta());
this.metrics?.seenCache.blockInput.duplicateBlockCount.inc({source});
}

return blockInput;
}

getByBlob(
{blobSidecar, source, seenTimestampSec, peerIdStr}: SourceMeta & {blobSidecar: deneb.BlobSidecar},
opts: GetByBlobOptions = {}
): BlockInputBlobs {
const blockRoot = this.config
.getForkTypes(blobSidecar.signedBlockHeader.message.slot)
.BeaconBlockHeader.hashTreeRoot(blobSidecar.signedBlockHeader.message);
const blockRootHex = toRootHex(blockRoot);

// TODO(peerDAS): Why is it necessary to static cast this here. All conditional paths result in a valid value so should be defined correctly below
let blockInput = this.blockInputs.get(blockRootHex) as IBlockInput;
let created = false;
if (!blockInput) {
created = true;
const {forkName, daOutOfRange} = this.buildCommonProps(blobSidecar.signedBlockHeader.message.slot);
blockInput = BlockInputBlobs.createFromBlob({
blobSidecar,
blockRootHex,
daOutOfRange,
forkName,
source,
seenTimestampSec,
peerIdStr,
});
this.metrics?.seenCache.blockInput.createdByBlob.inc();
this.blockInputs.set(blockRootHex, blockInput);
}

if (!isBlockInputBlobs(blockInput)) {
throw new SeenBlockInputCacheError(
{
code: SeenBlockInputCacheErrorCode.WRONG_BLOCK_INPUT_TYPE,
cachedType: blockInput.type,
requestedType: DAType.Blobs,
...blockInput.getLogMeta(),
},
`BlockInputType mismatch adding blobIndex=${blobSidecar.index}`
);
}

if (!blockInput.hasBlob(blobSidecar.index)) {
blockInput.addBlob({blobSidecar, blockRootHex, source, seenTimestampSec, peerIdStr});
} else if (!created) {
this.logger?.debug(
`Attempt to cache blob index #${blobSidecar.index} but is already cached on BlockInput`,
blockInput.getLogMeta()
);
this.metrics?.seenCache.blockInput.duplicateBlobCount.inc({source});
if (opts.throwErrorIfAlreadyKnown) {
throw new SeenBlockInputCacheError({
code: SeenBlockInputCacheErrorCode.GOSSIP_BLOB_ALREADY_KNOWN,
...blockInput.getLogMeta(),
});
}
}

return blockInput;
}

private buildCommonProps(slot: Slot): {
daOutOfRange: boolean;
forkName: ForkName;
} {
const forkName = this.config.getForkName(slot);
return {
forkName,
daOutOfRange: isDaOutOfRange(this.config, forkName, slot, this.clock.currentEpoch),
};
}

/**
* Use custom implementation of pruneSetToMax to allow for sorting by slot
* and deleting via key/rootHex
*/
private pruneToMaxSize() {
let itemsToDelete = this.blockInputs.size - MAX_BLOCK_INPUT_CACHE_SIZE;

if (itemsToDelete > 0) {
const sorted = [...this.blockInputs.entries()].sort((a, b) => b[1].slot - a[1].slot);
for (const [rootHex] of sorted) {
this.blockInputs.delete(rootHex);
itemsToDelete--;
if (itemsToDelete <= 0) return;
}
}
}
}

enum SeenBlockInputCacheErrorCode {
WRONG_BLOCK_INPUT_TYPE = "BLOCK_INPUT_CACHE_ERROR_WRONG_BLOCK_INPUT_TYPE",
GOSSIP_BLOB_ALREADY_KNOWN = "BLOCK_INPUT_CACHE_ERROR_GOSSIP_BLOB_ALREADY_KNOWN",
}

type SeenBlockInputCacheErrorType =
| (LogMetaBasic & {
code: SeenBlockInputCacheErrorCode.WRONG_BLOCK_INPUT_TYPE;
cachedType: DAType;
requestedType: DAType;
})
| (LogMetaBlobs & {
code: SeenBlockInputCacheErrorCode.GOSSIP_BLOB_ALREADY_KNOWN;
});

class SeenBlockInputCacheError extends LodestarError<SeenBlockInputCacheErrorType> {}
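
The prune() walk above is the interesting part of cache maintenance: deleting a processed root also deletes every cached ancestor reachable through parentRootHex, since an imported child implies its ancestors were already imported. A standalone sketch of the same walk on a plain Map:

type Node = {blockRootHex: string; parentRootHex: string};

// Delete a node and all of its cached ancestors, following parent links until a miss.
function pruneWithAncestors(cache: Map<string, Node>, rootHex: string): void {
  let node = cache.get(rootHex);
  while (node) {
    cache.delete(node.blockRootHex);
    node = cache.get(node.parentRootHex);
  }
}

// Usage: pruning "c" removes c, its parent b, and grandparent a, but not the unrelated "x".
const cache = new Map<string, Node>([
  ["a", {blockRootHex: "a", parentRootHex: "genesis"}],
  ["b", {blockRootHex: "b", parentRootHex: "a"}],
  ["c", {blockRootHex: "c", parentRootHex: "b"}],
  ["x", {blockRootHex: "x", parentRootHex: "genesis"}],
]);
pruneWithAncestors(cache, "c");
console.log([...cache.keys()]); // ["x"]
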
@@ -136,7 +136,7 @@ async function validateAggregateAndProof(
: toRootHex(ssz.phase0.AttestationData.hashTreeRoot(attData));
if (
!skipValidationKnownAttesters &&
chain.seenAggregatedAttestations.isKnown(targetEpoch, attDataRootHex, aggregationBits)
chain.seenAggregatedAttestations.isKnown(targetEpoch, attIndex, attDataRootHex, aggregationBits)
) {
throw new AttestationError(GossipAction.IGNORE, {
code: AttestationErrorCode.ATTESTERS_ALREADY_KNOWN,
@@ -248,6 +248,7 @@ async function validateAggregateAndProof(
chain.seenAggregators.add(targetEpoch, aggregatorIndex);
chain.seenAggregatedAttestations.add(
targetEpoch,
attIndex,
attDataRootHex,
{aggregationBits, trueBitCount: attestingIndices.length},
false

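The isKnown() gate above ignores an aggregate whose participation bits are already covered by a previously seen aggregate for the same (targetEpoch, committeeIndex, attDataRoot). isSuperSetOrEqual in the source is a byte-wise optimization; a naive boolean sketch of the same test:

import {BitArray} from "@chainsafe/ssz";

// A new aggregate adds nothing if some seen aggregate covers all of its set bits.
function isSuperSetOrEqualNaive(superSet: BitArray, subSet: BitArray): boolean {
  const a = superSet.toBoolArray();
  const b = subSet.toBoolArray();
  return b.every((bit, i) => !bit || a[i]);
}

const seen = BitArray.fromBoolArray([true, true, false, true]);
const incoming = BitArray.fromBoolArray([true, false, false, true]);
console.log(isSuperSetOrEqualNaive(seen, incoming)); // true -> IGNORE as ATTESTERS_ALREADY_KNOWN
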
@@ -1,7 +1,11 @@
import {BlockInputSource} from "../../chain/blocks/blockInput/index.js";
import {BlobsSource, BlockSource} from "../../chain/blocks/types.js";
import {JobQueueItemType} from "../../chain/bls/index.js";
import {AttestationErrorCode, BlockErrorCode} from "../../chain/errors/index.js";
import {ScannedSlotsTerminationReason} from "../../chain/opPools/aggregatedAttestationPool.js";
import {
type InvalidAttestationData,
ScannedSlotsTerminationReason,
} from "../../chain/opPools/aggregatedAttestationPool.js";
import {InsertOutcome} from "../../chain/opPools/types.js";
import {RegenCaller, RegenFnName} from "../../chain/regen/interface.js";
import {ReprocessStatus} from "../../chain/reprocess.js";
@@ -866,9 +870,10 @@ export function createLodestarMetrics(
name: "lodestar_oppool_aggregated_attestation_pool_packed_attestations_empty_attestation_data_total",
help: "Total number of attestation data with no group when producing packed attestation",
}),
invalidAttestationData: register.gauge({
invalidAttestationData: register.gauge<{reason: InvalidAttestationData}>({
name: "lodestar_oppool_aggregated_attestation_pool_packed_attestations_invalid_attestation_data_total",
help: "Total number of invalid attestation data when producing packed attestation",
labelNames: ["reason"],
}),
seenCommittees: register.gauge({
name: "lodestar_oppool_aggregated_attestation_pool_packed_attestations_seen_committees_total",
@@ -921,10 +926,52 @@ export function createLodestarMetrics(
help: "Total number of InsertOutcome as a result of adding a SyncCommitteeMessage to pool",
labelNames: ["insertOutcome"],
}),
syncContributionAndProofPoolSize: register.gauge({
name: "lodestar_oppool_sync_contribution_and_proof_pool_pool_size",
help: "Current size of the SyncContributionAndProofPool unique by slot subnet and block root",
}),
syncContributionAndProofPool: {
size: register.gauge({
name: "lodestar_oppool_sync_contribution_and_proof_pool_size",
help: "Current size of the SyncContributionAndProofPool unique by slot subnet and block root",
}),
gossipInsertOutcome: register.counter<{insertOutcome: InsertOutcome}>({
name: "lodestar_oppool_sync_contribution_and_proof_pool_gossip_insert_outcome_total",
help: "Total number of InsertOutcome as a result of adding a ContributionAndProof from gossip into the pool",
labelNames: ["insertOutcome"],
}),
apiInsertOutcome: register.counter<{insertOutcome: InsertOutcome}>({
name: "lodestar_oppool_sync_contribution_and_proof_pool_api_insert_outcome_total",
help: "Total number of InsertOutcome as a result of adding a ContributionAndProof from api into the pool",
labelNames: ["insertOutcome"],
}),
blockRootsPerSlot: register.gauge({
name: "lodestar_oppool_sync_contribution_and_proof_pool_block_roots_per_slot_total",
help: "Total number of block roots per slot in SyncContributionAndProofPool",
}),
subnetsByBlockRoot: register.gauge<{index: number}>({
name: "lodestar_oppool_sync_contribution_and_proof_pool_subnets_by_block_root_total",
help: "Total number of subnets per block root in SyncContributionAndProofPool",
labelNames: ["index"],
}),
participantsByBlockRoot: register.gauge<{index: number}>({
name: "lodestar_oppool_sync_contribution_and_proof_pool_participants_by_block_root_total",
help: "Total number of participants per block root in SyncContributionAndProofPool",
labelNames: ["index"],
}),
getAggregateRoots: register.gauge({
name: "lodestar_oppool_sync_contribution_and_proof_pool_get_aggregate_roots_total",
help: "Total number of block roots in SyncContributionAndProofPool.getAggregate(slot)",
}),
getAggregateSubnets: register.gauge({
name: "lodestar_oppool_sync_contribution_and_proof_pool_get_aggregate_subnets_total",
help: "Total number of subnets in SyncContributionAndProofPool.getAggregate(slot, root)",
}),
getAggregateParticipants: register.gauge({
name: "lodestar_oppool_sync_contribution_and_proof_pool_get_aggregate_participants_total",
help: "Total number of participants in SyncContributionAndProofPool.getAggregate(slot, root)",
}),
getAggregateReturnsEmpty: register.gauge({
name: "lodestar_oppool_sync_contribution_and_proof_pool_get_aggregate_returns_empty_total",
help: "Total number of empty returns in SyncContributionAndProofPool.getAggregate(slot, root)",
}),
},
},

chain: {
@@ -1200,6 +1247,30 @@ export function createLodestarMetrics(
labelNames: ["reason"],
}),
},
blockInput: {
blockInputCount: register.gauge({
name: "lodestar_seen_block_input_cache_size",
help: "Number of cached BlockInputs",
}),
duplicateBlockCount: register.gauge<{source: BlockInputSource}>({
name: "lodestar_seen_block_input_cache_duplicate_block_count",
help: "Total number of duplicate blocks that pass validation and attempt to be cached but are known",
labelNames: ["source"],
}),
duplicateBlobCount: register.gauge<{source: BlockInputSource}>({
name: "lodestar_seen_block_input_cache_duplicate_blob_count",
help: "Total number of duplicate blobs that pass validation and attempt to be cached but are known",
labelNames: ["source"],
}),
createdByBlock: register.gauge({
name: "lodestar_seen_block_input_cache_items_created_by_block",
help: "Number of BlockInputs created via a block being seen first",
}),
createdByBlob: register.gauge({
name: "lodestar_seen_block_input_cache_items_created_by_blob",
help: "Number of BlockInputs created via a blob being seen first",
}),
},
},

regenFnCallTotal: register.gauge<{entrypoint: RegenFnName; caller: RegenCaller}>({

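register.gauge<{reason: ...}> here is Lodestar's thin wrapper over prom-client; the wrapper API itself is not shown in this diff, so treat the following as an approximation. A rough prom-client equivalent of the reason-labeled metric:

import {Gauge} from "prom-client";

// Mirrors the definition above: one time series per InvalidAttestationData reason.
const invalidAttestationData = new Gauge({
  name: "lodestar_oppool_aggregated_attestation_pool_packed_attestations_invalid_attestation_data_total",
  help: "Total number of invalid attestation data when producing packed attestation",
  labelNames: ["reason"] as const,
});

invalidAttestationData.inc({reason: "invalid_target_epoch"});
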
@@ -756,9 +756,12 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand
contributionAndProof.message,
syncCommitteeParticipantIndices
);

try {
chain.syncContributionAndProofPool.add(contributionAndProof.message, syncCommitteeParticipantIndices.length);
const insertOutcome = chain.syncContributionAndProofPool.add(
contributionAndProof.message,
syncCommitteeParticipantIndices.length
);
metrics?.opPool.syncContributionAndProofPool.gossipInsertOutcome.inc({insertOutcome});
} catch (e) {
logger.error("Error adding to contributionAndProof pool", {}, e as Error);
}

@@ -161,7 +161,7 @@ export class BeaconNode {
metricsRegistries = [],
}: BeaconNodeInitModules): Promise<T> {
if (hasher.name !== "hashtree") {
throw Error(`Loaded incorrect hasher ${hasher.name}, expected hashtree`);
logger.warn(`hashtree is not supported, using hasher ${hasher.name}`);
}

const controller = new AbortController();

@@ -74,6 +74,7 @@ export function getMockedClock(): Mocked<IClock> {
},
currentSlotWithGossipDisparity: undefined,
isCurrentSlotGivenGossipDisparity: vi.fn(),
slotWithPastTolerance: vi.fn(),
secFromSlot: vi.fn(),
} as unknown as Mocked<IClock>;
}

@@ -113,58 +113,63 @@ vi.mock("../../src/chain/opPools/index.js", async (importActual) => {
vi.mock("../../src/chain/chain.js", async (importActual) => {
const mod = await importActual<typeof import("../../src/chain/chain.js")>();

const BeaconChain = vi.fn().mockImplementation(({clock, genesisTime, config}: MockedBeaconChainOptions) => {
const logger = getMockedLogger();
const BeaconChain = vi
.fn()
.mockImplementation(({clock: clockParam, genesisTime, config}: MockedBeaconChainOptions) => {
const logger = getMockedLogger();
const clock =
clockParam === "real"
? new Clock({config, genesisTime, signal: new AbortController().signal})
: getMockedClock();

return {
config,
opts: {},
genesisTime,
clock:
clock === "real" ? new Clock({config, genesisTime, signal: new AbortController().signal}) : getMockedClock(),
forkChoice: getMockedForkChoice(),
executionEngine: {
notifyForkchoiceUpdate: vi.fn(),
getPayload: vi.fn(),
getClientVersion: vi.fn(),
},
executionBuilder: {},
// @ts-expect-error
eth1: new Eth1ForBlockProduction(),
opPool: new OpPool(),
aggregatedAttestationPool: new AggregatedAttestationPool(config),
syncContributionAndProofPool: new SyncContributionAndProofPool(),
// @ts-expect-error
beaconProposerCache: new BeaconProposerCache(),
shufflingCache: new ShufflingCache(),
pubkey2index: new PubkeyIndexMap(),
index2pubkey: [],
produceCommonBlockBody: vi.fn(),
getProposerHead: vi.fn(),
produceBlock: vi.fn(),
produceBlindedBlock: vi.fn(),
getCanonicalBlockAtSlot: vi.fn(),
recomputeForkChoiceHead: vi.fn(),
predictProposerHead: vi.fn(),
getHeadStateAtCurrentEpoch: vi.fn(),
getHeadState: vi.fn(),
getStateBySlot: vi.fn(),
updateBuilderStatus: vi.fn(),
processBlock: vi.fn(),
regenStateForAttestationVerification: vi.fn(),
close: vi.fn(),
logger,
regen: new QueuedStateRegenerator({} as any),
lightClientServer: new LightClientServer({} as any, {} as any),
bls: {
verifySignatureSets: vi.fn().mockResolvedValue(true),
verifySignatureSetsSameMessage: vi.fn().mockResolvedValue([true]),
close: vi.fn().mockResolvedValue(true),
canAcceptWork: vi.fn().mockReturnValue(true),
},
emitter: new ChainEventEmitter(),
};
});
return {
config,
opts: {},
genesisTime,
clock,
forkChoice: getMockedForkChoice(),
executionEngine: {
notifyForkchoiceUpdate: vi.fn(),
getPayload: vi.fn(),
getClientVersion: vi.fn(),
},
executionBuilder: {},
// @ts-expect-error
eth1: new Eth1ForBlockProduction(),
opPool: new OpPool(),
aggregatedAttestationPool: new AggregatedAttestationPool(config),
syncContributionAndProofPool: new SyncContributionAndProofPool(clock),
// @ts-expect-error
beaconProposerCache: new BeaconProposerCache(),
shufflingCache: new ShufflingCache(),
pubkey2index: new PubkeyIndexMap(),
index2pubkey: [],
produceCommonBlockBody: vi.fn(),
getProposerHead: vi.fn(),
produceBlock: vi.fn(),
produceBlindedBlock: vi.fn(),
getCanonicalBlockAtSlot: vi.fn(),
recomputeForkChoiceHead: vi.fn(),
predictProposerHead: vi.fn(),
getHeadStateAtCurrentEpoch: vi.fn(),
getHeadState: vi.fn(),
getStateBySlot: vi.fn(),
updateBuilderStatus: vi.fn(),
processBlock: vi.fn(),
regenStateForAttestationVerification: vi.fn(),
close: vi.fn(),
logger,
regen: new QueuedStateRegenerator({} as any),
lightClientServer: new LightClientServer({} as any, {} as any),
bls: {
verifySignatureSets: vi.fn().mockResolvedValue(true),
verifySignatureSetsSameMessage: vi.fn().mockResolvedValue([true]),
close: vi.fn().mockResolvedValue(true),
canAcceptWork: vi.fn().mockReturnValue(true),
},
emitter: new ChainEventEmitter(),
};
});

return {
...mod,

@@ -28,6 +28,7 @@ describe("SeenAggregatedAttestations perf test", () => {
];

for (const {id, aggregationBits} of testCases) {
const committeeIndex = 0;
bench({
id,
beforeEach: () => {
@@ -38,13 +39,13 @@ describe("SeenAggregatedAttestations perf test", () => {
aggregationBits: toAggregationBitsSingleFalse(i),
trueBitCount: numAttestersInByte * 8 - 1,
};
seenCache.add(targetEpoch, attDataRoot, aggregationInfo, false);
seenCache.add(targetEpoch, committeeIndex, attDataRoot, aggregationInfo, false);
}

return seenCache;
},
fn: (seenCache) => {
seenCache.isKnown(targetEpoch, attDataRoot, aggregationBits);
seenCache.isKnown(targetEpoch, committeeIndex, attDataRoot, aggregationBits);
},
});
}

35
packages/beacon-node/test/unit/api/impl/utils.test.ts
Normal file
@@ -0,0 +1,35 @@
import {describe, expect, it} from "vitest";
import {ApiError} from "../../../../src/api/impl/errors.js";
import {assertUniqueItems} from "../../../../src/api/impl/utils.js";

describe("api / impl / utils", () => {
describe("assertUniqueItems", () => {
it("should not throw for undefined input", () => {
expect(() => assertUniqueItems(undefined, "test message")).not.toThrow();
});

it("should not throw for empty array", () => {
expect(() => assertUniqueItems([], "test message")).not.toThrow();
});

it("should not throw for array with unique values", () => {
expect(() => assertUniqueItems([1, 2, 3], "test message")).not.toThrow();
expect(() => assertUniqueItems(["a", "b", "c"], "test message")).not.toThrow();
expect(() => assertUniqueItems([true, false], "test message")).not.toThrow();
});

it("should throw ApiError if array contains duplicate values", () => {
expect(() => assertUniqueItems([1, 2, 1], "Duplicate values found")).toThrowError(ApiError);
});

it("should throw if array contains duplicate values and list duplicates", () => {
const errorMessage = "Duplicate values found";
const errorMessageFn = (duplicateItems: unknown[]) => `${errorMessage}: ${duplicateItems.join(", ")}`;

expect(() => assertUniqueItems([1, 2, 1], errorMessage)).toThrow(errorMessageFn([1]));
expect(() => assertUniqueItems([1, 2, 1, 2], errorMessage)).toThrow(errorMessageFn([1, 2]));
expect(() => assertUniqueItems(["a", "b", "a"], errorMessage)).toThrow(errorMessageFn(["a"]));
expect(() => assertUniqueItems([true, true], errorMessage)).toThrow(errorMessageFn([true]));
});
});
});
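
The real assertUniqueItems lives in src/api/impl/utils.ts and is not shown in this diff; the tests above pin down its contract. A hypothetical implementation consistent with them (the real helper throws ApiError rather than a bare Error):

function assertUniqueItemsSketch<T>(items: T[] | undefined, message: string): void {
  if (items === undefined || items.length === 0) return;
  const seen = new Set<T>();
  const duplicates = new Set<T>();
  for (const item of items) {
    if (seen.has(item)) duplicates.add(item);
    else seen.add(item);
  }
  if (duplicates.size > 0) {
    // The source throws ApiError (an HTTP 400) here; Error keeps this sketch dependency-free.
    throw new Error(`${message}: ${[...duplicates].join(", ")}`);
  }
}
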
187
packages/beacon-node/test/unit/chain/blocks/blockInput.test.ts
Normal file
@@ -0,0 +1,187 @@
import {createChainForkConfig, defaultChainConfig} from "@lodestar/config";
import {ForkName, ForkPostCapella, ForkPostDeneb} from "@lodestar/params";
import {computeStartSlotAtEpoch, signedBlockToSignedHeader} from "@lodestar/state-transition";
import {SignedBeaconBlock, deneb, ssz} from "@lodestar/types";
import {toRootHex} from "@lodestar/utils";
import {describe, expect, it} from "vitest";
import {
AddBlob,
AddBlock,
BlockInputBlobs,
BlockInputSource,
CreateBlockInputMeta,
ForkBlobsDA,
} from "../../../../src/chain/blocks/blockInput/index.js";

const CAPELLA_FORK_EPOCH = 0;
const DENEB_FORK_EPOCH = 1;
const ELECTRA_FORK_EPOCH = 2;
const FULU_FORK_EPOCH = 3;
const config = createChainForkConfig({
...defaultChainConfig,
CAPELLA_FORK_EPOCH,
DENEB_FORK_EPOCH,
ELECTRA_FORK_EPOCH,
FULU_FORK_EPOCH,
});

const slots: Record<ForkPostCapella, number> = {
capella: computeStartSlotAtEpoch(CAPELLA_FORK_EPOCH),
deneb: computeStartSlotAtEpoch(DENEB_FORK_EPOCH),
electra: computeStartSlotAtEpoch(ELECTRA_FORK_EPOCH),
fulu: computeStartSlotAtEpoch(FULU_FORK_EPOCH),
};

type BlockTestSet<F extends ForkPostCapella> = {
block: SignedBeaconBlock<F>;
blockRoot: Uint8Array;
rootHex: string;
};
function buildBlockTestSet<F extends ForkPostCapella = ForkPostCapella>(forkName: F): BlockTestSet<F> {
const block = ssz[forkName].SignedBeaconBlock.defaultValue();
block.message.slot = slots[forkName];
const blockRoot = ssz[forkName].BeaconBlock.hashTreeRoot(block.message as any);
const rootHex = toRootHex(blockRoot);
return {
block,
blockRoot,
rootHex,
};
}

type BlockAndBlobTestSet<F extends ForkPostDeneb = ForkPostDeneb> = BlockTestSet<F> & {
blobSidecars: deneb.BlobSidecars;
};
function buildBlockAndBlobsTestSet(forkName: ForkPostDeneb, numberOfBlobs: number): BlockAndBlobTestSet<ForkPostDeneb> {
const {block, blockRoot, rootHex} = buildBlockTestSet<ForkPostDeneb>(forkName);
const commitments = Array.from({length: numberOfBlobs}, () => Buffer.alloc(48, 0x77));
block.message.body.blobKzgCommitments = commitments;
const signedBlockHeader = signedBlockToSignedHeader(config, block);
const blobSidecars: deneb.BlobSidecars = [];
for (const kzgCommitment of commitments) {
const blobSidecar = ssz[forkName].BlobSidecar.defaultValue();
blobSidecar.index = blobSidecars.length;
blobSidecar.signedBlockHeader = signedBlockHeader;
blobSidecar.kzgCommitment = kzgCommitment;
blobSidecars.push(blobSidecar);
}

return {
block,
blockRoot,
rootHex,
blobSidecars,
};
}

const testCases: {name: string; blobCount: number; blobsBeforeBlock: number}[] = [
{
name: "no blobs",
blobCount: 0,
blobsBeforeBlock: 0,
},
{
name: "1 blob, block first",
blobCount: 1,
blobsBeforeBlock: 0,
},
{
name: "1 blob, blob first",
blobCount: 1,
blobsBeforeBlock: 1,
},
{
name: "6 blobs, block first",
blobCount: 6,
blobsBeforeBlock: 0,
},
{
name: "4 blobs, block in middle",
blobCount: 4,
blobsBeforeBlock: 2,
},
{
name: "3 blobs, block in end",
blobCount: 3,
blobsBeforeBlock: 3,
},
];

type TestCaseArray = (AddBlock<ForkBlobsDA> | AddBlob) & CreateBlockInputMeta;

describe("BlockInput", () => {
describe("Blob timing", () => {
for (const {name, blobCount, blobsBeforeBlock} of testCases) {
|
||||
it(name, () => {
|
||||
const {block, rootHex, blobSidecars} = buildBlockAndBlobsTestSet(ForkName.deneb, blobCount);
|
||||
const testArray: TestCaseArray[] = [];
|
||||
for (let i = 0; i < blobsBeforeBlock; i++) {
|
||||
const blobSidecar = blobSidecars.shift();
|
||||
if (!blobSidecar) throw new Error("must have blobSidecar to add to TestCaseArray");
|
||||
testArray.push({
|
||||
blobSidecar,
|
||||
blockRootHex: rootHex,
|
||||
daOutOfRange: false,
|
||||
forkName: ForkName.deneb,
|
||||
seenTimestampSec: Date.now(),
|
||||
source: BlockInputSource.gossip,
|
||||
} as AddBlob & CreateBlockInputMeta);
|
||||
}
|
||||
testArray.push({
|
||||
block,
|
||||
blockRootHex: rootHex,
|
||||
daOutOfRange: false,
|
||||
forkName: ForkName.deneb,
|
||||
source: {
|
||||
source: BlockInputSource.gossip,
|
||||
seenTimestampSec: Date.now(),
|
||||
},
|
||||
} as AddBlock<ForkBlobsDA> & CreateBlockInputMeta);
|
||||
for (const blobSidecar of blobSidecars) {
|
||||
testArray.push({
|
||||
blobSidecar,
|
||||
blockRootHex: rootHex,
|
||||
daOutOfRange: false,
|
||||
forkName: ForkName.deneb,
|
||||
seenTimestampSec: Date.now(),
|
||||
source: BlockInputSource.gossip,
|
||||
} as AddBlob & CreateBlockInputMeta);
|
||||
}
|
||||
|
||||
let blockInput: BlockInputBlobs;
|
||||
let testCaseEntry = testArray.shift();
|
||||
if (!testCaseEntry) throw new Error("undefined testCaseEntry state. debug unit test");
|
||||
if ("block" in testCaseEntry) {
|
||||
blockInput = BlockInputBlobs.createFromBlock(testCaseEntry);
|
||||
expect(blockInput.hasBlock()).toBeTruthy();
|
||||
expect(blockInput.hasBlob(0)).toBeFalsy();
|
||||
if (blobCount === 0) {
|
||||
expect(blockInput.hasAllData()).toBeTruthy();
|
||||
} else {
|
||||
expect(blockInput.hasAllData()).toBeFalsy();
|
||||
}
|
||||
} else {
|
||||
blockInput = BlockInputBlobs.createFromBlob(testCaseEntry as AddBlob & CreateBlockInputMeta);
|
||||
expect(blockInput.hasBlock()).toBeFalsy();
|
||||
expect(blockInput.hasBlob(0)).toBeTruthy();
|
||||
// expect falsy here because block/blobCount not known yet
|
||||
expect(blockInput.hasAllData()).toBeFalsy();
|
||||
}
|
||||
|
||||
for (testCaseEntry of testArray) {
|
||||
if ("block" in testCaseEntry) {
|
||||
expect(blockInput.hasBlock()).toBeFalsy();
|
||||
blockInput.addBlock(testCaseEntry);
|
||||
expect(blockInput.hasBlock()).toBeTruthy();
|
||||
} else {
|
||||
expect(blockInput.hasAllData()).toBeFalsy();
|
||||
expect(blockInput.hasBlob(testCaseEntry.blobSidecar.index)).toBeFalsy();
|
||||
blockInput.addBlob(testCaseEntry as AddBlob);
|
||||
expect(blockInput.hasBlob(testCaseEntry.blobSidecar.index)).toBeTruthy();
|
||||
}
|
||||
}
|
||||
expect(blockInput.hasAllData()).toBeTruthy();
|
||||
});
|
||||
}
|
||||
});
|
||||
});
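
Across all orderings, this test asserts one invariant: hasAllData() only flips to true once the block (which carries the expected commitment count) and every blob are present. A toy sketch of that state machine, not Lodestar's actual BlockInputBlobs API:

    // Toy model: the expected blob count is unknown until the block arrives
    class BlobTracker {
      private haveBlock = false;
      private expectedBlobs: number | null = null;
      private blobs = new Set<number>();

      addBlock(commitmentCount: number): void {
        this.haveBlock = true;
        this.expectedBlobs = commitmentCount;
      }
      addBlob(index: number): void {
        this.blobs.add(index);
      }
      hasBlock(): boolean {
        return this.haveBlock;
      }
      hasBlob(index: number): boolean {
        return this.blobs.has(index);
      }
      hasAllData(): boolean {
        // false while the block (and thus the expected blob count) is unknown
        return this.expectedBlobs !== null && this.blobs.size === this.expectedBlobs;
      }
    }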

@@ -3,7 +3,7 @@ import {BitArray} from "@chainsafe/ssz";
import {SYNC_COMMITTEE_SIZE, SYNC_COMMITTEE_SUBNET_COUNT} from "@lodestar/params";
import {newFilledArray} from "@lodestar/state-transition";
import {ssz} from "@lodestar/types";
import {beforeAll, beforeEach, describe, expect, it} from "vitest";
import {beforeAll, beforeEach, describe, expect, it, vi} from "vitest";
import {
SyncContributionAndProofPool,
SyncContributionFast,
@@ -12,6 +12,7 @@ import {
} from "../../../../src/chain/opPools/syncContributionAndProofPool.js";
import {InsertOutcome} from "../../../../src/chain/opPools/types.js";
import {EMPTY_SIGNATURE} from "../../../../src/constants/index.js";
import {getMockedClock} from "../../../mocks/clock.js";
import {renderBitArray} from "../../../utils/render.js";
import {VALID_BLS_SIGNATURE_RAND} from "../../../utils/typeGenerator.js";

@@ -24,9 +25,11 @@ describe("chain / opPools / SyncContributionAndProofPool", () => {
contributionAndProof.contribution.slot = slot;
contributionAndProof.contribution.beaconBlockRoot = beaconBlockRoot;
contributionAndProof.contribution.signature = VALID_BLS_SIGNATURE_RAND;
const clockStub = getMockedClock();

beforeEach(() => {
cache = new SyncContributionAndProofPool();
vi.spyOn(clockStub, "slotWithPastTolerance").mockReturnValue(slot);
cache = new SyncContributionAndProofPool(clockStub);
cache.add(contributionAndProof, syncCommitteeParticipants);
});

@@ -42,6 +45,15 @@ describe("chain / opPools / SyncContributionAndProofPool", () => {
// TODO Test it's correct. Modify the contributions above so they have 1 bit set to true
expect(aggregate.syncCommitteeBits.bitLen).toBe(512);
});

it("should reject SyncCommitteeContribution of previous slots", () => {
const newContributionAndProof = ssz.altair.ContributionAndProof.defaultValue();
// previous slot
newContributionAndProof.contribution.slot = slot - 1;
expect(cache.add(newContributionAndProof, syncCommitteeParticipants)).toEqual(InsertOutcome.Late);
// but a priority ContributionAndProof should work
expect(cache.add(newContributionAndProof, syncCommitteeParticipants, true)).toEqual(InsertOutcome.NewData);
});
});

describe("replaceIfBetter", () => {

@@ -49,6 +49,7 @@ describe("SeenAggregatedAttestations.isKnown", () => {
const aggregationBits = new BitArray(new Uint8Array(seenAttestingBits), 8);
cache.add(
targetEpoch,
0,
attDataRoot,
{aggregationBits, trueBitCount: aggregationBits.getTrueBitIndexes().length},
false
@@ -56,7 +57,7 @@ describe("SeenAggregatedAttestations.isKnown", () => {
for (const {bits, isKnown} of checkAttestingBits) {
// expect(cache.participantsKnown(subsetContribution)).to.equal(isKnown);
const toCheckAggBits = new BitArray(new Uint8Array(bits), 8);
expect(cache.isKnown(targetEpoch, attDataRoot, toCheckAggBits)).toBe(isKnown);
expect(cache.isKnown(targetEpoch, 0, attDataRoot, toCheckAggBits)).toBe(isKnown);
}
});
}

@@ -0,0 +1,484 @@
import {createChainForkConfig, defaultChainConfig} from "@lodestar/config";
import {ForkName, ForkPostCapella, ForkPostDeneb} from "@lodestar/params";
import {computeStartSlotAtEpoch, signedBlockToSignedHeader} from "@lodestar/state-transition";
import {SignedBeaconBlock, deneb, ssz} from "@lodestar/types";
import {toRootHex} from "@lodestar/utils";
import {beforeEach, describe, expect, it} from "vitest";
import {
BlockInputSource,
IBlockInput,
isBlockInputBlobs,
isBlockInputPreDeneb,
} from "../../../../src/chain/blocks/blockInput/index.js";
import {ChainEvent, ChainEventEmitter} from "../../../../src/chain/emitter.js";
import {SeenBlockInputCache} from "../../../../src/chain/seenCache/seenBlockInput.js";
import {Clock} from "../../../../src/util/clock.js";
import {testLogger} from "../../../utils/logger.js";

describe("SeenBlockInputCache", () => {
let cache: SeenBlockInputCache;
let abortController: AbortController;
let chainEvents: ChainEventEmitter;

const CAPELLA_FORK_EPOCH = 0;
const DENEB_FORK_EPOCH = 1;
const ELECTRA_FORK_EPOCH = 2;
const FULU_FORK_EPOCH = 3;
const config = createChainForkConfig({
...defaultChainConfig,
CAPELLA_FORK_EPOCH,
DENEB_FORK_EPOCH,
ELECTRA_FORK_EPOCH,
FULU_FORK_EPOCH,
});

const slots: Record<ForkPostCapella, number> = {
capella: computeStartSlotAtEpoch(CAPELLA_FORK_EPOCH),
deneb: computeStartSlotAtEpoch(DENEB_FORK_EPOCH),
electra: computeStartSlotAtEpoch(ELECTRA_FORK_EPOCH),
fulu: computeStartSlotAtEpoch(FULU_FORK_EPOCH),
};

type BlockTestSet<F extends ForkPostCapella> = {
block: SignedBeaconBlock<F>;
blockRoot: Uint8Array;
rootHex: string;
};

function buildBlockTestSet<F extends ForkPostCapella = ForkPostCapella>(forkName: F): BlockTestSet<F> {
const block = ssz[forkName].SignedBeaconBlock.defaultValue();
block.message.slot = slots[forkName];
const blockRoot = ssz[forkName].BeaconBlock.hashTreeRoot(block.message as any);
const rootHex = toRootHex(blockRoot);
return {
block,
blockRoot,
rootHex,
};
}

type ParentAndChildBlockTestSet<F extends ForkPostCapella> = {
parentBlock: SignedBeaconBlock<F>;
parentBlockRoot: Uint8Array;
parentRootHex: string;
childBlock: SignedBeaconBlock<F>;
childBlockRoot: Uint8Array;
childRootHex: string;
};
function buildParentAndChildBlockTestSet<F extends ForkPostCapella = ForkPostCapella>(
forkName: F
): ParentAndChildBlockTestSet<F> {
const {block: parentBlock, blockRoot: parentBlockRoot, rootHex: parentRootHex} = buildBlockTestSet(forkName);
const {block: childBlock, blockRoot: childBlockRoot, rootHex: childRootHex} = buildBlockTestSet(forkName);
childBlock.message.slot = parentBlock.message.slot + 1;
childBlock.message.parentRoot = parentBlockRoot;
return {
parentBlock,
parentBlockRoot,
parentRootHex,
childBlock,
childBlockRoot,
childRootHex,
};
}

type BlockAndBlobTestSet<F extends ForkPostDeneb = ForkPostDeneb> = BlockTestSet<F> & {
blobSidecar: deneb.BlobSidecar;
};
function buildBlockAndBlobTestSet(forkName: ForkPostDeneb): BlockAndBlobTestSet<ForkPostDeneb> {
const {block, blockRoot, rootHex} = buildBlockTestSet<ForkPostDeneb>(forkName);
const commitment = Buffer.alloc(48, 0x77);
block.message.body.blobKzgCommitments = [commitment];
const signedBlockHeader = signedBlockToSignedHeader(config, block);
const blobSidecar = ssz[forkName].BlobSidecar.defaultValue();
blobSidecar.signedBlockHeader = signedBlockHeader;
blobSidecar.kzgCommitment = commitment;

return {
block,
blockRoot,
rootHex,
blobSidecar,
};
}

const logger = testLogger();
beforeEach(() => {
chainEvents = new ChainEventEmitter();
abortController = new AbortController();
const signal = abortController.signal;
const genesisTime = Math.floor(Date.now() / 1000);
cache = new SeenBlockInputCache({
config,
clock: new Clock({config, genesisTime, signal}),
chainEvents,
signal,
logger,
metrics: null,
});
});
describe("has()", () => {
it("should return true if in cache", () => {
const {block, rootHex} = buildBlockTestSet(ForkName.capella);
cache.getByBlock({
block,
source: BlockInputSource.gossip,
seenTimestampSec: Date.now(),
});
expect(cache.has(rootHex)).toBeTruthy();
});
it("should return false if not in cache", () => {
const {block, blockRoot, rootHex} = buildBlockTestSet(ForkName.capella);
cache.getByBlock({
block,
source: BlockInputSource.gossip,
seenTimestampSec: Date.now(),
});
expect(cache.has(rootHex)).toBeTruthy();
blockRoot[0] = (blockRoot[0] + 1) % 255;
blockRoot[1] = (blockRoot[1] + 1) % 255;
blockRoot[2] = (blockRoot[2] + 1) % 255;
expect(cache.has(toRootHex(blockRoot))).toBeFalsy();
});
});
describe("get()", () => {
it("should return BlockInput if in cache", () => {
const {block, rootHex} = buildBlockTestSet(ForkName.capella);
const blockInput = cache.getByBlock({
block,
source: BlockInputSource.gossip,
seenTimestampSec: Date.now(),
});
expect(cache.get(rootHex)).toBe(blockInput);
});
it("should return undefined if not in cache", () => {
const {block, blockRoot, rootHex} = buildBlockTestSet(ForkName.capella);
const blockInput = cache.getByBlock({
block,
source: BlockInputSource.gossip,
seenTimestampSec: Date.now(),
});
expect(cache.get(rootHex)).toBe(blockInput);
blockRoot[0] = (blockRoot[0] + 1) % 255;
blockRoot[1] = (blockRoot[1] + 1) % 255;
blockRoot[2] = (blockRoot[2] + 1) % 255;
expect(cache.get(toRootHex(blockRoot))).toBeUndefined();
});
});
describe("remove()", () => {
it("should remove a BlockInput", () => {
const {block, rootHex} = buildBlockTestSet(ForkName.capella);
const blockInput = cache.getByBlock({
block,
source: BlockInputSource.gossip,
seenTimestampSec: Date.now(),
});
expect(cache.get(rootHex)).toBe(blockInput);
cache.remove(rootHex);
expect(cache.get(rootHex)).toBeUndefined();
});
it("should not throw an error if BlockInput not in cache", () => {
const {block, blockRoot, rootHex} = buildBlockTestSet(ForkName.capella);
const blockInput = cache.getByBlock({
block,
source: BlockInputSource.gossip,
seenTimestampSec: Date.now(),
});
expect(cache.get(rootHex)).toBe(blockInput);
blockRoot[0] = (blockRoot[0] + 1) % 255;
blockRoot[1] = (blockRoot[1] + 1) % 255;
blockRoot[2] = (blockRoot[2] + 1) % 255;
expect(() => cache.remove(toRootHex(blockRoot))).not.toThrow();
expect(cache.has(rootHex)).toBeTruthy();
});
});
describe("prune()", () => {
it("should remove a BlockInput", () => {
const {block, rootHex} = buildBlockTestSet(ForkName.capella);
const blockInput = cache.getByBlock({
block,
source: BlockInputSource.gossip,
seenTimestampSec: Date.now(),
});
expect(cache.get(rootHex)).toBe(blockInput);
cache.prune(rootHex);
expect(cache.get(rootHex)).toBeUndefined();
});
it("should remove all ancestors of a BlockInput", () => {
const {parentBlock, parentRootHex, childBlock, childRootHex} = buildParentAndChildBlockTestSet(ForkName.capella);

const parentBlockInput = cache.getByBlock({
block: parentBlock,
source: BlockInputSource.gossip,
seenTimestampSec: Date.now(),
});
expect(cache.get(parentRootHex)).toBe(parentBlockInput);

const childBlockInput = cache.getByBlock({
block: childBlock,
source: BlockInputSource.gossip,
seenTimestampSec: Date.now(),
});
expect(cache.get(childRootHex)).toBe(childBlockInput);

cache.prune(childRootHex);
expect(cache.get(childRootHex)).toBeUndefined();
expect(cache.get(parentRootHex)).toBeUndefined();
});
});
describe("onFinalized()", () => {
let childRootHex: string;
let childBlockInput: IBlockInput;
let parentRootHex: string;
let parentBlockInput: IBlockInput;
const root = Buffer.alloc(32, 0xff);
const rootHex = toRootHex(root);
beforeEach(() => {
const {
parentBlock,
parentRootHex: parentRoot,
childBlock,
childRootHex: childRoot,
} = buildParentAndChildBlockTestSet(ForkName.capella);
parentRootHex = parentRoot;
childRootHex = childRoot;

parentBlockInput = cache.getByBlock({
block: parentBlock,
source: BlockInputSource.gossip,
seenTimestampSec: Date.now(),
});
expect(cache.get(parentRootHex)).toBe(parentBlockInput);

childBlockInput = cache.getByBlock({
block: childBlock,
source: BlockInputSource.gossip,
seenTimestampSec: Date.now(),
});
expect(cache.get(childRootHex)).toBe(childBlockInput);
});
it("should remove all BlockInputs in slots before the checkpoint", () => {
chainEvents.emit(ChainEvent.forkChoiceFinalized, {
epoch: DENEB_FORK_EPOCH,
root,
rootHex,
});
expect(cache.get(childRootHex)).toBeUndefined();
expect(cache.get(parentRootHex)).toBeUndefined();
});
it("should not remove BlockInputs in slots after the checkpoint", () => {
chainEvents.emit(ChainEvent.forkChoiceFinalized, {
epoch: CAPELLA_FORK_EPOCH,
root,
rootHex,
});
expect(cache.get(childRootHex)).toBe(childBlockInput);
expect(cache.get(parentRootHex)).toBe(parentBlockInput);
});
});
describe("getByBlock()", () => {
it("should return a new BlockInput for a new block root", () => {
const {block, rootHex} = buildBlockTestSet(ForkName.capella);
expect(cache.get(rootHex)).toBeUndefined();
const blockInput = cache.getByBlock({
block,
source: BlockInputSource.gossip,
seenTimestampSec: Date.now(),
});
expect(cache.get(rootHex)).toBe(blockInput);
});
describe("should return the correct type of BlockInput for a given block root", () => {
it("should return a BlockInputPreDeneb", () => {
const {block} = buildBlockTestSet(ForkName.capella);
const blockInput = cache.getByBlock({
block,
source: BlockInputSource.gossip,
seenTimestampSec: Date.now(),
});
expect(isBlockInputPreDeneb(blockInput)).toBeTruthy();
});
it("should return a BlockInputBlobs", () => {
const {block} = buildBlockTestSet(ForkName.deneb);
const blockInput = cache.getByBlock({
block,
source: BlockInputSource.gossip,
seenTimestampSec: Date.now(),
});
expect(isBlockInputBlobs(blockInput)).toBeTruthy();
});
// TODO(fulu): need to turn this on once we have custodyConfig available with peerDAS branch
// it("should return a BlockInputColumns", () => {
//   const {block} = buildBlockTestSet(ForkName.fulu);
//   const blockInput = cache.getByBlock({
//     block,
//     source: BlockInputSource.gossip,
//     seenTimestampSec: Date.now(),
//   });
//   expect(isBlockInputColumns(blockInput)).toBeTruthy();
// });
});
it("should return the same BlockInput for an existing block root", () => {
const {block, rootHex} = buildBlockTestSet(ForkName.capella);
const blockInput1 = cache.getByBlock({
block,
source: BlockInputSource.gossip,
seenTimestampSec: Date.now(),
});
expect(cache.get(rootHex)).toBe(blockInput1);
const blockInput2 = cache.getByBlock({
block,
source: BlockInputSource.gossip,
seenTimestampSec: Date.now(),
});
expect(blockInput1).toBe(blockInput2);
});
it("should not throw for a BlockInput with an existing block", () => {
const {block, rootHex} = buildBlockTestSet(ForkName.capella);
const blockInput = cache.getByBlock({
block,
source: BlockInputSource.gossip,
seenTimestampSec: Date.now(),
});
expect(() =>
blockInput.addBlock({
block,
blockRootHex: rootHex,
source: {source: BlockInputSource.gossip, seenTimestampSec: Date.now()},
})
).toThrow();
expect(() =>
cache.getByBlock({
block,
source: BlockInputSource.gossip,
seenTimestampSec: Date.now(),
})
).not.toThrow();
});
it("should return the correct BlockInput for a BlockInput created by blob", () => {
const {block, blobSidecar} = buildBlockAndBlobTestSet(ForkName.deneb);

const blockInput1 = cache.getByBlob({
blobSidecar,
source: BlockInputSource.gossip,
seenTimestampSec: Date.now(),
});
const blockInput2 = cache.getByBlock({
block,
source: BlockInputSource.gossip,
seenTimestampSec: Date.now(),
});

expect(blockInput1).toBe(blockInput2);
});
});
describe("getByBlob()", () => {
it("should return a new BlockInput for a new block root", () => {
const {rootHex, blobSidecar} = buildBlockAndBlobTestSet(ForkName.electra);
expect(cache.get(rootHex)).toBeUndefined();
const blockInput = cache.getByBlob({
blobSidecar,
source: BlockInputSource.gossip,
seenTimestampSec: Date.now(),
});
expect(cache.get(rootHex)).toBe(blockInput);
});
it("should return the same BlockInput for an existing block root", () => {
const {rootHex, blobSidecar} = buildBlockAndBlobTestSet(ForkName.electra);

const blockInput1 = cache.getByBlob({
blobSidecar,
source: BlockInputSource.gossip,
seenTimestampSec: Date.now(),
});
expect(cache.get(rootHex)).toBe(blockInput1);
const blockInput2 = cache.getByBlob({
blobSidecar,
source: BlockInputSource.gossip,
seenTimestampSec: Date.now(),
});
expect(blockInput1).toBe(blockInput2);
});
it("should throw if attempting to add a blob to wrong type of BlockInput", () => {
const {block} = buildBlockTestSet(ForkName.capella);
const blockInput = cache.getByBlock({
block,
source: BlockInputSource.gossip,
seenTimestampSec: Date.now(),
});
expect(isBlockInputPreDeneb(blockInput)).toBeTruthy();

const {blobSidecar} = buildBlockAndBlobTestSet(ForkName.electra);
blobSidecar.signedBlockHeader = signedBlockToSignedHeader(config, block);
expect(() =>
cache.getByBlob({blobSidecar, source: BlockInputSource.gossip, seenTimestampSec: Date.now()})
).toThrow();
});
it("should add blob to an existing BlockInput", () => {
const {block, blobSidecar} = buildBlockAndBlobTestSet(ForkName.electra);

const blockInput1 = cache.getByBlock({
block,
source: BlockInputSource.gossip,
seenTimestampSec: Date.now(),
});
const blockInput2 = cache.getByBlob({
blobSidecar,
source: BlockInputSource.gossip,
seenTimestampSec: Date.now(),
});

expect(blockInput1).toBe(blockInput2);
expect(blockInput2.getBlobs()[0]).toBe(blobSidecar);
});
it("should not throw for a BlockInput with an existing blob", () => {
const {rootHex, blobSidecar} = buildBlockAndBlobTestSet(ForkName.electra);

expect(cache.get(rootHex)).toBeUndefined();
const blockInput = cache.getByBlob({
blobSidecar,
source: BlockInputSource.gossip,
seenTimestampSec: Date.now(),
});
expect(cache.get(rootHex)).toBe(blockInput);
expect(() =>
blockInput.addBlob({
blobSidecar,
source: BlockInputSource.gossip,
seenTimestampSec: Date.now(),
blockRootHex: rootHex,
})
).toThrow();
expect(() =>
cache.getByBlob({
blobSidecar,
source: BlockInputSource.gossip,
seenTimestampSec: Date.now(),
})
).not.toThrow();
});
it("should throw for an existing blob with opts.throwGossipErrorIfAlreadyKnown", () => {
const {rootHex, blobSidecar} = buildBlockAndBlobTestSet(ForkName.electra);

expect(cache.get(rootHex)).toBeUndefined();
const blockInput = cache.getByBlob(
{
blobSidecar,
source: BlockInputSource.gossip,
seenTimestampSec: Date.now(),
},
{throwErrorIfAlreadyKnown: true}
);
expect(cache.get(rootHex)).toBe(blockInput);
expect(() =>
cache.getByBlob(
{
blobSidecar,
source: BlockInputSource.gossip,
seenTimestampSec: Date.now(),
},
{throwErrorIfAlreadyKnown: true}
)
).toThrow();
});
});
});
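
A condensed view of the contract these tests exercise: whichever gossip object arrives first creates the BlockInput, and the other arrival path must resolve to the same instance. A hedged usage sketch reusing the test helpers defined above:

    const {block, blobSidecar} = buildBlockAndBlobTestSet(ForkName.deneb);
    const viaBlob = cache.getByBlob({blobSidecar, source: BlockInputSource.gossip, seenTimestampSec: Date.now()});
    const viaBlock = cache.getByBlock({block, source: BlockInputSource.gossip, seenTimestampSec: Date.now()});
    // viaBlob === viaBlock: the sidecar's signed header commits to the same block root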

@@ -73,6 +73,7 @@ describe("chain / validation / aggregateAndProof", () => {
// Register attester as already seen
chain.seenAggregatedAttestations.add(
attData.target.epoch,
attData.index,
toHexString(ssz.phase0.AttestationData.hashTreeRoot(attData)),
{aggregationBits, trueBitCount: aggregationBits.getTrueBitIndexes().length},
false

@@ -28,7 +28,7 @@ describe("gossip block validation", () => {
beforeEach(() => {
// Fill up with kzg commitments
block.body.blobKzgCommitments = Array.from(
{length: config.BLOB_SCHEDULE[0].MAX_BLOBS_PER_BLOCK},
{length: config.getMaxBlobsPerBlock(clockSlot)},
() => new Uint8Array([0])
);

@@ -29,7 +29,6 @@ export function getConfig(fork: ForkName, forkEpoch = 0): ChainForkConfig {
BELLATRIX_FORK_EPOCH: 0,
CAPELLA_FORK_EPOCH: 0,
DENEB_FORK_EPOCH: forkEpoch,
BLOB_SCHEDULE: [{EPOCH: forkEpoch, MAX_BLOBS_PER_BLOCK: 6}],
});
case ForkName.electra:
return createChainForkConfig({
@@ -38,10 +37,6 @@ export function getConfig(fork: ForkName, forkEpoch = 0): ChainForkConfig {
CAPELLA_FORK_EPOCH: 0,
DENEB_FORK_EPOCH: 0,
ELECTRA_FORK_EPOCH: forkEpoch,
BLOB_SCHEDULE: [
{EPOCH: 0, MAX_BLOBS_PER_BLOCK: 6},
{EPOCH: forkEpoch, MAX_BLOBS_PER_BLOCK: 9},
],
});
case ForkName.fulu:
return createChainForkConfig({
@@ -51,10 +46,7 @@ export function getConfig(fork: ForkName, forkEpoch = 0): ChainForkConfig {
DENEB_FORK_EPOCH: 0,
ELECTRA_FORK_EPOCH: 0,
FULU_FORK_EPOCH: forkEpoch,
BLOB_SCHEDULE: [
{EPOCH: 0, MAX_BLOBS_PER_BLOCK: 6},
{EPOCH: 0, MAX_BLOBS_PER_BLOCK: 9},
],
BLOB_SCHEDULE: [],
});
}
}

@@ -56,9 +56,9 @@
"@chainsafe/blst": "^2.2.0",
"@chainsafe/discv5": "^11.0.0",
"@chainsafe/enr": "^5.0.0",
"@chainsafe/persistent-merkle-tree": "^1.1.0",
"@chainsafe/ssz": "^1.2.0",
"@chainsafe/threads": "^1.11.1",
"@chainsafe/persistent-merkle-tree": "^1.2.0",
"@chainsafe/ssz": "^1.2.1",
"@chainsafe/threads": "^1.11.2",
"@libp2p/crypto": "^5.0.15",
"@libp2p/interface": "^2.7.0",
"@libp2p/peer-id": "^5.1.0",
@@ -74,6 +74,7 @@
"@lodestar/utils": "^1.30.0",
"@lodestar/validator": "^1.30.0",
"@multiformats/multiaddr": "^12.1.3",
"cpu-features": "^0.0.10",
"deepmerge": "^4.3.1",
"ethers": "^6.7.0",
"find-up": "^6.3.0",
@@ -88,6 +89,7 @@
},
"devDependencies": {
"@lodestar/test-utils": "^1.30.0",
"@types/cpu-features": "^0.0.3",
"@types/debug": "^4.1.7",
"@types/inquirer": "^9.0.3",
"@types/proper-lockfile": "^4.1.4",

@@ -1,9 +1,23 @@
// MUST import this file first before anything and not import any Lodestar code.
import {setHasher} from "@chainsafe/persistent-merkle-tree";
import {hasher} from "@chainsafe/persistent-merkle-tree/hasher/hashtree";
import {hasher as asSha256Hasher} from "@chainsafe/persistent-merkle-tree/hasher/as-sha256";
import {hasher as hashtreeHasher} from "@chainsafe/persistent-merkle-tree/hasher/hashtree";
import CpuFeatures from "cpu-features";

// without setting this first, persistent-merkle-tree will use noble instead
setHasher(hasher);
const cpuFeatures = CpuFeatures();
if (
cpuFeatures.arch === "x86" &&
!(
(cpuFeatures.flags.avx512f && cpuFeatures.flags.avx512vl) ||
(cpuFeatures.flags.avx2 && cpuFeatures.flags.bmi2) ||
(cpuFeatures.flags.avx && cpuFeatures.flags.sha)
)
) {
setHasher(asSha256Hasher);
} else {
setHasher(hashtreeHasher);
}

//
// ## Rationale

@@ -38,7 +38,7 @@ export async function beaconHandler(args: BeaconArgs & GlobalArgs): Promise<void
const {config, options, beaconPaths, network, version, commit, privateKey, logger} = await beaconHandlerInit(args);

if (hasher.name !== "hashtree") {
throw Error(`Loaded incorrect hasher ${hasher.name}, expected hashtree`);
logger.warn(`hashtree is not supported, using hasher ${hasher.name}`);
}

const heapSizeLimit = getHeapStatistics().heap_size_limit;

@@ -64,7 +64,7 @@
"blockchain"
],
"dependencies": {
"@chainsafe/ssz": "^1.2.0",
"@chainsafe/ssz": "^1.2.1",
"@lodestar/params": "^1.30.0",
"@lodestar/types": "^1.30.0",
"@lodestar/utils": "^1.30.0"

@@ -109,7 +109,7 @@ export const chainConfig: ChainConfig = {
MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096,
BLOB_SIDECAR_SUBNET_COUNT: 6,
MAX_BLOBS_PER_BLOCK: 6,
// MAX_REQUEST_BLOCKS_DENEB * BLOB_SCHEDULE[0].MAX_BLOBS_PER_BLOCK
// MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK
MAX_REQUEST_BLOB_SIDECARS: 768,

// Electra
@@ -119,7 +119,7 @@ export const chainConfig: ChainConfig = {
MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: 128000000000,
BLOB_SIDECAR_SUBNET_COUNT_ELECTRA: 9,
MAX_BLOBS_PER_BLOCK_ELECTRA: 9,
// MAX_REQUEST_BLOCKS_DENEB * BLOB_SCHEDULE[1].MAX_BLOBS_PER_BLOCK
// MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_ELECTRA
MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 1152,

// Fulu
@@ -133,10 +133,5 @@ export const chainConfig: ChainConfig = {

// Blob Scheduling
// ---------------------------------------------------------------
BLOB_SCHEDULE: [
// Deneb
{EPOCH: 269568, MAX_BLOBS_PER_BLOCK: 6},
// Electra
{EPOCH: 364032, MAX_BLOBS_PER_BLOCK: 9},
],
BLOB_SCHEDULE: [],
};

@@ -105,7 +105,7 @@ export const chainConfig: ChainConfig = {
MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096,
BLOB_SIDECAR_SUBNET_COUNT: 6,
MAX_BLOBS_PER_BLOCK: 6,
// MAX_REQUEST_BLOCKS_DENEB * BLOB_SCHEDULE[0].MAX_BLOBS_PER_BLOCK
// MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK
MAX_REQUEST_BLOB_SIDECARS: 768,

// Electra
@@ -115,7 +115,7 @@ export const chainConfig: ChainConfig = {
MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: 64000000000,
BLOB_SIDECAR_SUBNET_COUNT_ELECTRA: 9,
MAX_BLOBS_PER_BLOCK_ELECTRA: 9,
// MAX_REQUEST_BLOCKS_DENEB * BLOB_SCHEDULE[1].MAX_BLOBS_PER_BLOCK
// MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_ELECTRA
MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 1152,

// Fulu
@@ -129,10 +129,5 @@ export const chainConfig: ChainConfig = {

// Blob Scheduling
// ---------------------------------------------------------------
BLOB_SCHEDULE: [
// Deneb
{EPOCH: Infinity, MAX_BLOBS_PER_BLOCK: 6},
// Electra
{EPOCH: Infinity, MAX_BLOBS_PER_BLOCK: 9},
],
BLOB_SCHEDULE: [],
};

@@ -2,7 +2,7 @@ import {ACTIVE_PRESET} from "@lodestar/params";
import {defaultChainConfig} from "./default.js";
import {ChainConfig} from "./types.js";

export {chainConfigToJson, chainConfigFromJson, specValuesToJson} from "./json.js";
export {chainConfigToJson, chainConfigFromJson, specValuesToJson, deserializeBlobSchedule} from "./json.js";
export * from "./types.js";
export * from "./default.js";

@@ -1,5 +1,6 @@
import {fromHex, toHex} from "@lodestar/utils";
import {
BlobSchedule,
BlobScheduleEntry,
ChainConfig,
SpecJson,
@@ -103,45 +104,7 @@ export function serializeSpecValue(

export function deserializeSpecValue(valueStr: unknown, typeName: SpecValueTypeName, keyName: string): SpecValue {
if (typeName === "blob_schedule") {
if (!Array.isArray(valueStr)) {
throw Error(`Invalid BLOB_SCHEDULE value ${valueStr} expected array`);
}

const blobSchedule = valueStr.map((entry, i) => {
if (typeof entry !== "object" || entry === null) {
throw Error(`Invalid BLOB_SCHEDULE[${i}] entry ${entry} expected object`);
}

const out = {} as BlobScheduleEntry;

for (const key of ["EPOCH", "MAX_BLOBS_PER_BLOCK"] as Array<keyof BlobScheduleEntry>) {
const value = entry[key];

if (value === undefined) {
throw Error(`Invalid BLOB_SCHEDULE[${i}] entry ${JSON.stringify(entry)} missing ${key}`);
}

if (typeof value !== "string") {
throw Error(`Invalid BLOB_SCHEDULE[${i}].${key} value ${value} expected string`);
}

if (value === MAX_UINT64_JSON) {
out[key] = Infinity;
} else {
const parsed = parseInt(value, 10);

if (Number.isNaN(parsed)) {
throw Error(`Invalid BLOB_SCHEDULE[${i}].${key} value ${value} expected number`);
}

out[key] = parsed;
}
}

return out;
});

return blobSchedule;
return deserializeBlobSchedule(valueStr);
}

if (typeof valueStr !== "string") {
@@ -165,3 +128,45 @@ export function deserializeSpecValue(valueStr: unknown, typeName: SpecValueTypeN
return valueStr;
}
}

export function deserializeBlobSchedule(input: unknown): BlobSchedule {
if (!Array.isArray(input)) {
throw Error(`Invalid BLOB_SCHEDULE value ${input} expected array`);
}

const blobSchedule = input.map((entry, i) => {
if (typeof entry !== "object" || entry === null) {
throw Error(`Invalid BLOB_SCHEDULE[${i}] entry ${entry} expected object`);
}

const out = {} as BlobScheduleEntry;

for (const key of ["EPOCH", "MAX_BLOBS_PER_BLOCK"] as Array<keyof BlobScheduleEntry>) {
const value = entry[key];

if (value === undefined) {
throw Error(`Invalid BLOB_SCHEDULE[${i}] entry ${JSON.stringify(entry)} missing ${key}`);
}

if (typeof value !== "string") {
throw Error(`Invalid BLOB_SCHEDULE[${i}].${key} value ${value} expected string`);
}

if (value === MAX_UINT64_JSON) {
out[key] = Infinity;
} else {
const parsed = parseInt(value, 10);

if (Number.isNaN(parsed)) {
throw Error(`Invalid BLOB_SCHEDULE[${i}].${key} value ${value} expected number`);
}

out[key] = parsed;
}
}

return out;
});

return blobSchedule;
}
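
For reference, a worked example of the extracted helper (input in the string-valued spec JSON format handled above; MAX_UINT64_JSON maps to Infinity):

    deserializeBlobSchedule([
      {EPOCH: "269568", MAX_BLOBS_PER_BLOCK: "6"},
      {EPOCH: "364032", MAX_BLOBS_PER_BLOCK: "9"},
    ]);
    // -> [{EPOCH: 269568, MAX_BLOBS_PER_BLOCK: 6}, {EPOCH: 364032, MAX_BLOBS_PER_BLOCK: 9}]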

@@ -43,4 +43,7 @@ export const chiadoChainConfig: ChainConfig = {
// Fulu
FULU_FORK_VERSION: b("0x0600006f"),
FULU_FORK_EPOCH: Infinity,

// Blob Scheduling
BLOB_SCHEDULE: [],
};

@@ -51,6 +51,10 @@ const baseChainConfig: ChainConfig = {
DEPOSIT_CONTRACT_ADDRESS: b("0x4242424242424242424242424242424242424242"),

ETH1_FOLLOW_DISTANCE: 12,

// Blob Scheduling
// ---------------------------------------------------------------
BLOB_SCHEDULE: [],
};

// Reset interval (7 days) in milliseconds, based on ephemery-genesis values.env:

@@ -66,14 +66,9 @@ export const gnosisChainConfig: ChainConfig = {
MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT: 64000000000,
BLOB_SIDECAR_SUBNET_COUNT_ELECTRA: 2,
MAX_BLOBS_PER_BLOCK_ELECTRA: 2,
// MAX_REQUEST_BLOCKS_DENEB * BLOB_SCHEDULE[1].MAX_BLOBS_PER_BLOCK
// MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_ELECTRA
MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 256,

// Blob Scheduling
BLOB_SCHEDULE: [
// Deneb
{EPOCH: 889856, MAX_BLOBS_PER_BLOCK: 2},
// Electra
{EPOCH: 1337856, MAX_BLOBS_PER_BLOCK: 2},
],
BLOB_SCHEDULE: [],
};

@@ -48,4 +48,8 @@ export const holeskyChainConfig: ChainConfig = {
DEPOSIT_CHAIN_ID: 17000,
DEPOSIT_NETWORK_ID: 17000,
DEPOSIT_CONTRACT_ADDRESS: b("0x4242424242424242424242424242424242424242"),

// Blob Scheduling
// ---------------------------------------------------------------
BLOB_SCHEDULE: [],
};

@@ -48,4 +48,8 @@ export const hoodiChainConfig: ChainConfig = {
// ---------------------------------------------------------------
DEPOSIT_CHAIN_ID: 560048,
DEPOSIT_NETWORK_ID: 560048,

// Blob Scheduling
// ---------------------------------------------------------------
BLOB_SCHEDULE: [],
};

@@ -45,4 +45,8 @@ export const sepoliaChainConfig: ChainConfig = {
DEPOSIT_CHAIN_ID: 11155111,
DEPOSIT_NETWORK_ID: 11155111,
DEPOSIT_CONTRACT_ADDRESS: b("0x7f02C3E3c98b133055B8B348B2Ac625669Ed295D"),

// Blob Scheduling
// ---------------------------------------------------------------
BLOB_SCHEDULE: [],
};

@@ -139,7 +139,6 @@ export function createForkConfig(config: ChainConfig): ForkConfig {
return sszTypesFor(forkName);
},
getMaxBlobsPerBlock(epoch: Epoch): number {
// TODO Fulu: Max blobs of Deneb and Electra are hardcoded for fusaka devnet-0. Remove this for devnet-1
const fork = this.getForkInfoAtEpoch(epoch).name;

switch (fork) {
@@ -149,10 +148,6 @@ export function createForkConfig(config: ChainConfig): ForkConfig {
return config.MAX_BLOBS_PER_BLOCK;
}

if (config.BLOB_SCHEDULE.length === 0) {
throw Error("Attempt to get MAX_BLOBS_PER_BLOCK from empty BLOB_SCHEDULE");
}

// Sort by epoch in descending order to find the latest applicable value
const blobSchedule = [...config.BLOB_SCHEDULE].sort((a, b) => {
if (a.EPOCH !== b.EPOCH) {
@@ -168,7 +163,7 @@ export function createForkConfig(config: ChainConfig): ForkConfig {
}

// Only for testing. Should never reach this line on a public network.
return Math.min(...blobSchedule.map((e) => e.MAX_BLOBS_PER_BLOCK));
return config.MAX_BLOBS_PER_BLOCK_ELECTRA;
},
getMaxRequestBlobSidecars(fork: ForkName): number {
return isForkPostElectra(fork) ? config.MAX_REQUEST_BLOB_SIDECARS_ELECTRA : config.MAX_REQUEST_BLOB_SIDECARS;
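
The net effect of this hunk, illustrated with assumed config values (behavior confirmed by the unit test changes further down): an empty BLOB_SCHEDULE no longer throws, it falls back to the Electra maximum.

    // Assumes FULU_FORK_EPOCH = 0 so epoch 0 resolves past the hardcoded deneb/electra branches
    const config = createForkConfig({...chainConfig, FULU_FORK_EPOCH: 0, BLOB_SCHEDULE: []});
    config.getMaxBlobsPerBlock(0); // === config.MAX_BLOBS_PER_BLOCK_ELECTRA, no longer an Error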

@@ -7,8 +7,6 @@ describe("getMaxBlobsPerBlock", () => {

beforeAll(() => {
// Force tests to run on fulu fork
// TODO Fulu: getMaxBlobsPerBlock's result is hardcoded for deneb and electra in fusaka devnet-0. So we need to define
// defaultConfig to force tests to run on fulu to get expected result. Remove this after devnet-0.
defaultConfig = {
...chainConfig,
ALTAIR_FORK_EPOCH: 0,
@@ -20,10 +18,10 @@ describe("getMaxBlobsPerBlock", () => {
};
});

it("should throw an error if BLOB_SCHEDULE is empty", () => {
it("should return MAX_BLOBS_PER_BLOCK_ELECTRA if BLOB_SCHEDULE is empty", () => {
const config = createForkConfig({...defaultConfig, BLOB_SCHEDULE: []});

expect(() => config.getMaxBlobsPerBlock(0)).toThrowError();
expect(config.getMaxBlobsPerBlock(0)).toEqual(defaultConfig.MAX_BLOBS_PER_BLOCK_ELECTRA);
});

it("should return same value for passed epochs if there is only a single BLOB_SCHEDULE entry", () => {
@@ -64,7 +62,7 @@ describe("getMaxBlobsPerBlock", () => {
{EPOCH: 10, MAX_BLOBS_PER_BLOCK: 5},
],
});
expect(config.getMaxBlobsPerBlock(0)).toEqual(1);
expect(config.getMaxBlobsPerBlock(5)).toEqual(3);
expect(config.getMaxBlobsPerBlock(6)).toEqual(3);
expect(config.getMaxBlobsPerBlock(10)).toEqual(5);
expect(config.getMaxBlobsPerBlock(14)).toEqual(5);
@@ -72,7 +70,7 @@ describe("getMaxBlobsPerBlock", () => {
expect(config.getMaxBlobsPerBlock(16)).toEqual(1);
});

it("should return minimum value if epoch is below lowest configured BLOB_SCHEDULE epoch", () => {
it("should return MAX_BLOBS_PER_BLOCK_ELECTRA if epoch is below lowest configured BLOB_SCHEDULE epoch", () => {
const config = createForkConfig({
...defaultConfig,
BLOB_SCHEDULE: [
@@ -81,6 +79,6 @@ describe("getMaxBlobsPerBlock", () => {
{EPOCH: 15, MAX_BLOBS_PER_BLOCK: 2},
],
});
expect(config.getMaxBlobsPerBlock(0)).toEqual(2);
expect(config.getMaxBlobsPerBlock(0)).toEqual(defaultConfig.MAX_BLOBS_PER_BLOCK_ELECTRA);
});
});

@@ -35,7 +35,7 @@
"check-readme": "typescript-docs-verifier"
},
"dependencies": {
"@chainsafe/ssz": "^1.2.0",
"@chainsafe/ssz": "^1.2.1",
"@lodestar/config": "^1.30.0",
"@lodestar/utils": "^1.30.0",
"classic-level": "^1.4.1",

@@ -36,7 +36,7 @@
"check-readme": "typescript-docs-verifier"
},
"dependencies": {
"@chainsafe/ssz": "^1.2.0",
"@chainsafe/ssz": "^1.2.1",
"@lodestar/config": "^1.30.0",
"@lodestar/params": "^1.30.0",
"@lodestar/state-transition": "^1.30.0",

@@ -72,8 +72,8 @@
"dependencies": {
"@chainsafe/bls": "7.1.3",
"@chainsafe/blst": "^0.2.0",
"@chainsafe/persistent-merkle-tree": "^1.1.0",
"@chainsafe/ssz": "^1.2.0",
"@chainsafe/persistent-merkle-tree": "^1.2.0",
"@chainsafe/ssz": "^1.2.1",
"@lodestar/api": "^1.30.0",
"@lodestar/config": "^1.30.0",
"@lodestar/params": "^1.30.0",
@@ -82,7 +82,7 @@
"mitt": "^3.0.0"
},
"devDependencies": {
"@chainsafe/as-sha256": "^1.1.0",
"@chainsafe/as-sha256": "^1.2.0",
"@types/qs": "^6.9.7",
"fastify": "^5.2.1",
"qs": "^6.11.1",

@@ -69,7 +69,7 @@
"winston-transport": "^4.5.0"
},
"devDependencies": {
"@chainsafe/threads": "^1.11.1",
"@chainsafe/threads": "^1.11.2",
"@lodestar/test-utils": "^1.30.0",
"@types/triple-beam": "^1.3.2",
"triple-beam": "^1.3.0"

@@ -1,10 +1,23 @@
// MUST import this file first before anything and not import any Lodestar code.

import {setHasher} from "@chainsafe/persistent-merkle-tree";
import {hasher} from "@chainsafe/persistent-merkle-tree/hasher/hashtree";
import {hasher as asSha256Hasher} from "@chainsafe/persistent-merkle-tree/hasher/as-sha256";
import {hasher as hashtreeHasher} from "@chainsafe/persistent-merkle-tree/hasher/hashtree";
import CpuFeatures from "cpu-features";

// without setting this first, persistent-merkle-tree will use noble instead
setHasher(hasher);
const cpuFeatures = CpuFeatures();
if (
cpuFeatures.arch === "x86" &&
!(
(cpuFeatures.flags.avx512f && cpuFeatures.flags.avx512vl) ||
(cpuFeatures.flags.avx2 && cpuFeatures.flags.bmi2) ||
(cpuFeatures.flags.avx && cpuFeatures.flags.sha)
)
) {
setHasher(asSha256Hasher);
} else {
setHasher(hashtreeHasher);
}

//
// ## Rationale

@@ -58,12 +58,12 @@
},
"types": "lib/index.d.ts",
"dependencies": {
"@chainsafe/as-sha256": "^1.1.0",
"@chainsafe/as-sha256": "^1.2.0",
"@chainsafe/blst": "^2.2.0",
"@chainsafe/persistent-merkle-tree": "^1.1.0",
"@chainsafe/persistent-merkle-tree": "^1.2.0",
"@chainsafe/persistent-ts": "^1.0.0",
"@chainsafe/pubkey-index-map": "^3.0.0",
"@chainsafe/ssz": "^1.2.0",
"@chainsafe/ssz": "^1.2.1",
"@chainsafe/swap-or-not-shuffle": "^1.2.1",
"@lodestar/config": "^1.30.0",
"@lodestar/params": "^1.30.0",

@@ -68,7 +68,7 @@
},
"types": "lib/index.d.ts",
"dependencies": {
"@chainsafe/ssz": "^1.2.0",
"@chainsafe/ssz": "^1.2.1",
"@lodestar/params": "^1.30.0",
"ethereum-cryptography": "^2.0.0"
},

@@ -64,7 +64,11 @@ export function sszTypesFor<F extends ForkName, K extends keyof SSZTypesByFork[F
fork: F,
typeName?: K
): SSZTypesFor<F, K> {
return (
typeName === undefined ? typesByFork[fork] : typesByFork[fork][typeName as keyof SSZTypesByFork[F]]
) as SSZTypesFor<F, K>;
const sszTypes = typesByFork[fork];

if (sszTypes === undefined) {
throw Error(`SSZ types for fork ${fork} are not defined`);
}

return (typeName === undefined ? sszTypes : sszTypes[typeName as keyof SSZTypesByFork[F]]) as SSZTypesFor<F, K>;
}

@@ -36,7 +36,7 @@
},
"types": "lib/index.d.ts",
"dependencies": {
"@chainsafe/as-sha256": "^1.1.0",
"@chainsafe/as-sha256": "^1.2.0",
"any-signal": "^4.1.1",
"bigint-buffer": "^1.1.5",
"case": "^1.6.3",

@@ -46,7 +46,7 @@
],
"dependencies": {
"@chainsafe/blst": "^2.2.0",
"@chainsafe/ssz": "^1.2.0",
"@chainsafe/ssz": "^1.2.1",
"@lodestar/api": "^1.30.0",
"@lodestar/config": "^1.30.0",
"@lodestar/db": "^1.30.0",

@@ -1,4 +1,4 @@
import {ChainConfig, SpecJson, chainConfigToJson} from "@lodestar/config";
import {BlobScheduleEntry, ChainConfig, SpecJson, chainConfigToJson, deserializeBlobSchedule} from "@lodestar/config";
import {BeaconPreset, activePreset, presetToJson} from "@lodestar/params";

export class NotEqualParamsError extends Error {}
@@ -52,6 +52,35 @@ export function assertEqualParams(localConfig: ChainConfig, externalSpecJson: Sp
continue;
}

if (key === "BLOB_SCHEDULE") {
const localBlobSchedule = deserializeBlobSchedule(localSpecJson[key]).sort((a, b) => a.EPOCH - b.EPOCH);
const remoteBlobSchedule = deserializeBlobSchedule(externalSpecJson[key]).sort((a, b) => a.EPOCH - b.EPOCH);

if (localBlobSchedule.length !== remoteBlobSchedule.length) {
errors.push(`BLOB_SCHEDULE different length: ${localBlobSchedule.length} != ${remoteBlobSchedule.length}`);

// Skip per entry comparison
continue;
}

for (let i = 0; i < localBlobSchedule.length; i++) {
const localEntry = localBlobSchedule[i];
const remoteEntry = remoteBlobSchedule[i];

for (const entryKey of ["EPOCH", "MAX_BLOBS_PER_BLOCK"] as Array<keyof BlobScheduleEntry>) {
const localValue = String(localEntry[entryKey]);
const remoteValue = String(remoteEntry[entryKey]);

if (localValue !== remoteValue) {
errors.push(`BLOB_SCHEDULE[${i}].${entryKey} different value: ${localValue} != ${remoteValue}`);
}
}
}

// Skip generic string comparison
continue;
}

// Must compare JSON serialized specs, to ensure all strings are rendered in the same way
// Must compare as lowercase to ensure checksum addresses and names have same capitalization
const localValue = String(localSpecJson[key]).toLocaleLowerCase();

@@ -72,6 +72,7 @@ import fs from "node:fs";
* @typedef {Object} TemplatingListItem
* @property {string} name
* @property {string} query
* @property {{text: string; value: string}} current
*/

const variableNameDatasource = "DS_PROMETHEUS";
@@ -102,7 +103,7 @@ export function lintGrafanaDashboard(json) {
// Add job names to __inputs if used by dashboard
if (json.templating && json.templating.list) {
for (const item of json.templating.list) {
if (item.query === "${VAR_BEACON_JOB}") {
if (item.query === "${VAR_BEACON_JOB}" || item.query === "beacon") {
inputs.push({
description: "",
label: "Beacon node job name",
@@ -110,7 +111,11 @@ export function lintGrafanaDashboard(json) {
type: "constant",
value: "beacon",
});
} else if (item.query === "${VAR_VALIDATOR_JOB}") {
if (item.current) {
item.current.text = item.current.value = "${VAR_BEACON_JOB}";
}
item.query = "${VAR_BEACON_JOB}";
} else if (item.query === "${VAR_VALIDATOR_JOB}" || item.query === "validator") {
inputs.push({
description: "",
label: "Validator client job name",
@@ -118,6 +123,10 @@ export function lintGrafanaDashboard(json) {
type: "constant",
value: "validator",
});
if (item.current) {
item.current.text = item.current.value = "${VAR_VALIDATOR_JOB}";
}
item.query = "${VAR_VALIDATOR_JOB}";
}
}
}

51 yarn.lock
@@ -431,10 +431,10 @@
resolved "https://registry.yarnpkg.com/@chainsafe/as-chacha20poly1305/-/as-chacha20poly1305-0.1.0.tgz#7da6f8796f9b42dac6e830a086d964f1f9189e09"
integrity sha512-BpNcL8/lji/GM3+vZ/bgRWqJ1q5kwvTFmGPk7pxm/QQZDbaMI98waOHjEymTjq2JmdD/INdNBFOVSyJofXg7ew==

"@chainsafe/as-sha256@1.1.0", "@chainsafe/as-sha256@^1.1.0":
version "1.1.0"
resolved "https://registry.yarnpkg.com/@chainsafe/as-sha256/-/as-sha256-1.1.0.tgz#949082ab96e0b266484f01f59a71930761afc6c4"
integrity sha512-pLlxYtfYy2YW5GN+7d946UAjBOS9VOFulkfFN6Z+84ZhMP0Ey8XsCG21CZTczwq1C8J7/4z8LGzmrAtmQ37VCQ==
"@chainsafe/as-sha256@1.2.0", "@chainsafe/as-sha256@^1.2.0":
version "1.2.0"
resolved "https://registry.yarnpkg.com/@chainsafe/as-sha256/-/as-sha256-1.2.0.tgz#5764ac9959e147fe0908dd0f66c0cce525a633b3"
integrity sha512-H2BNHQ5C3RS+H0ZvOdovK6GjFAyq5T6LClad8ivwj9Oaiy28uvdsGVS7gNJKuZmg0FGHAI+n7F0Qju6U0QkKDA==

"@chainsafe/as-sha256@^0.4.1":
version "0.4.1"
@@ -680,12 +680,12 @@
dependencies:
"@chainsafe/is-ip" "^2.0.1"

"@chainsafe/persistent-merkle-tree@1.1.0", "@chainsafe/persistent-merkle-tree@^1.1.0":
version "1.1.0"
resolved "https://registry.yarnpkg.com/@chainsafe/persistent-merkle-tree/-/persistent-merkle-tree-1.1.0.tgz#d10d9e926f2c5751d02a188b8fb6823821572aca"
integrity sha512-UIcKEGkEGghTXbFTvKqIiN2iljg2f6c2Y8GxdQEyle5UI6YIB8d3ACYTkAhrHSB4YsNlG9pc/A0NGJw/3Hf9wQ==
"@chainsafe/persistent-merkle-tree@1.2.0", "@chainsafe/persistent-merkle-tree@^1.2.0":
version "1.2.0"
resolved "https://registry.yarnpkg.com/@chainsafe/persistent-merkle-tree/-/persistent-merkle-tree-1.2.0.tgz#a402debcae6b386c51564c8cbacc9b0d56f64486"
integrity sha512-Ng2eqd6OPvFPPuroQ659ZrFMHtc44LxUfK7K2WkoBhlQ3hrvIn3UTQNKc77xUCU40xjeBGSxAfz+MSV256i+/g==
dependencies:
"@chainsafe/as-sha256" "1.1.0"
"@chainsafe/as-sha256" "1.2.0"
"@chainsafe/hashtree" "1.0.1"
"@noble/hashes" "^1.3.0"

@@ -769,13 +769,13 @@
"@chainsafe/as-sha256" "^0.4.1"
"@chainsafe/persistent-merkle-tree" "^0.6.1"

"@chainsafe/ssz@^1.2.0":
version "1.2.0"
resolved "https://registry.yarnpkg.com/@chainsafe/ssz/-/ssz-1.2.0.tgz#7201cc885460bfd4cf991791070100c3512aba1e"
integrity sha512-fsFFBfT5JPCypyzENDg6srd6woNMj0+x6OLR5X1di+IK5mYlxBiAVsH/bYVj/u5DE3nanAgPZOc5KSszoDBRvw==
"@chainsafe/ssz@^1.2.1":
version "1.2.1"
resolved "https://registry.yarnpkg.com/@chainsafe/ssz/-/ssz-1.2.1.tgz#a9a89c9706de33444c0ee64fbc461ae6001132af"
integrity sha512-rchrNF+tJ1yOMZS5CQK3bK5UZC1vQP8ANxW4McRedvBls5b9Mvn+LtbOE4wffrVMOxQzOXcBGZDoCPPJYX76Rg==
dependencies:
"@chainsafe/as-sha256" "1.1.0"
"@chainsafe/persistent-merkle-tree" "1.1.0"
"@chainsafe/as-sha256" "1.2.0"
"@chainsafe/persistent-merkle-tree" "1.2.0"

"@chainsafe/swap-or-not-shuffle-darwin-arm64@1.2.1":
version "1.2.1"
@@ -831,10 +831,10 @@
"@chainsafe/swap-or-not-shuffle-win32-arm64-msvc" "1.2.1"
"@chainsafe/swap-or-not-shuffle-win32-x64-msvc" "1.2.1"

"@chainsafe/threads@^1.11.1":
version "1.11.1"
resolved "https://registry.yarnpkg.com/@chainsafe/threads/-/threads-1.11.1.tgz#0b3b8c76f5875043ef6d47aeeb681dc80378f205"
integrity sha512-ejkB0eVcM0k2E8n5ZqOGt//ZEWU+c431QS3e/WfjLKxhw/fwZpvYhUOxBA8u3lJmAKLGziIcXjOoTnPwMPhkcQ==
"@chainsafe/threads@^1.11.2":
version "1.11.2"
resolved "https://registry.yarnpkg.com/@chainsafe/threads/-/threads-1.11.2.tgz#fc400efdfec2d246e5054e3521a9066dc2dc45af"
integrity sha512-qd5mSYWyIgK+G4LQTLhJhO4IUda4eydYhZsT6AadALFUKs5WrHaFtAf7sLol6JjgWG7wwgznUs7FFyBl0xPmoQ==
dependencies:
callsites "^3.1.0"
debug "^4.2.0"
@@ -3357,6 +3357,11 @@
resolved "https://registry.yarnpkg.com/@types/cookie/-/cookie-0.6.0.tgz#eac397f28bf1d6ae0ae081363eca2f425bedf0d5"
integrity sha512-4Kh9a6B2bQciAhf7FSuMRRkUWecJgJu9nPnx3yzpsfXX/c50REIqpHY4C82bXP90qrLtXtkDxTZosYO3UpOwlA==

"@types/cpu-features@^0.0.3":
version "0.0.3"
resolved "https://registry.yarnpkg.com/@types/cpu-features/-/cpu-features-0.0.3.tgz#a2bec076eb5dc95e0a6c23d1f8d389be4109b309"
integrity sha512-W/Ep+LDZoxMbCcH7LHRB3RN+TY4gbHl3u4uRq4XsxOh1gnpf5Lkwy5xWTBKSaJYQuMLW2XPAmRWA5Ucsy2EGVQ==

"@types/datastore-level@^3.0.0":
version "3.0.0"
resolved "https://registry.npmjs.org/@types/datastore-level/-/datastore-level-3.0.0.tgz"
@@ -5380,6 +5385,14 @@ cosmiconfig@^8.2.0:
parse-json "^5.2.0"
path-type "^4.0.0"

cpu-features@^0.0.10:
version "0.0.10"
resolved "https://registry.yarnpkg.com/cpu-features/-/cpu-features-0.0.10.tgz#9aae536db2710c7254d7ed67cb3cbc7d29ad79c5"
integrity sha512-9IkYqtX3YHPCzoVg1Py+o9057a3i0fp7S530UWokCSaFVTc7CwXPRiOjRjBQQ18ZCNafx78YfnG+HALxtVmOGA==
dependencies:
buildcheck "~0.0.6"
nan "^2.19.0"

cpu-features@~0.0.4:
version "0.0.8"
resolved "https://registry.yarnpkg.com/cpu-features/-/cpu-features-0.0.8.tgz#a2d464b023b8ad09004c8cdca23b33f192f63546"