Merge branch 'status-im:stable' into stable

Author: ujscale
Date: 2023-07-20 20:57:13 +05:00
Committed by: GitHub
181 changed files with 6057 additions and 2280 deletions


@@ -33,7 +33,7 @@ jobs:
cpu: amd64
- os: windows
cpu: amd64
branch: [version-1-6]
branch: [~, upstream/version-1-6]
include:
- target:
os: linux
@@ -52,35 +52,11 @@ jobs:
run:
shell: ${{ matrix.shell }}
name: '${{ matrix.target.os }}-${{ matrix.target.cpu }} (Nim ${{ matrix.branch }})'
name: ${{ matrix.target.os }}-${{ matrix.target.cpu }}${{ matrix.branch != '' && ' (Nim ' || '' }}${{ matrix.branch }}${{ matrix.branch != '' && ')' || '' }}
runs-on: ${{ matrix.builder }}
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 2 # In PR, has extra merge commit: ^1 = PR, ^2 = base
# submodules: true # Fails on nimyaml tests
- name: Check copyright year (Linux)
if: github.event_name == 'pull_request' && runner.os == 'Linux'
run: |
excluded_extensions="ans|json|md|png|txt"
current_year=$(date +"%Y")
outdated_files=()
while read -r file; do
if ! grep -qE 'Copyright \(c\) .*'$current_year' Status Research & Development GmbH' "$file"; then
outdated_files+=("$file")
fi
done < <(git diff --name-only --diff-filter=AM --ignore-submodules HEAD^ HEAD | grep -vE '\.('$excluded_extensions')$' || true)
if (( ${#outdated_files[@]} )); then
echo "The following files do not have an up-to-date copyright year:"
for file in "${outdated_files[@]}"; do
echo "- $file"
done
exit 2
fi
- name: MSYS2 (Windows amd64)
if: runner.os == 'Windows' && matrix.target.cpu == 'amd64'
@@ -163,25 +139,6 @@ jobs:
${make_cmd} -j ${ncpu} NIM_COMMIT=${{ matrix.branch }} ARCH_OVERRIDE=${PLATFORM} QUICK_AND_DIRTY_COMPILER=1 update
./env.sh nim --version
- name: Check submodules (Linux)
if: github.event_name == 'pull_request' && runner.os == 'Linux'
run: |
while read -r file; do
commit="$(git -C "$file" rev-parse HEAD)"
if ! branch="$(git config -f .gitmodules --get "submodule.$file.branch")"; then
echo "Submodule '$file': '.gitmodules' lacks 'branch' entry"
exit 2
fi
if ! error="$(git -C "$file" fetch -q origin "$branch")"; then
echo "Submodule '$file': Failed to fetch '$branch': $error"
exit 2
fi
if ! git -C "$file" merge-base --is-ancestor "$commit" "origin/$branch"; then
echo "Submodule '$file': '$commit' is not on '$branch'"
exit 2
fi
done < <(git diff --name-only --diff-filter=AM HEAD^ HEAD | grep -f <(git config --file .gitmodules --get-regexp path | awk '{ print $2 }') || true)
- name: Get latest fixtures commit hash
id: fixtures_version
run: |
@@ -222,13 +179,64 @@ jobs:
name: Unit Test Results ${{ matrix.target.os }}-${{ matrix.target.cpu }}
path: build/*.xml
lint:
name: "Lint"
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 2 # In PR, has extra merge commit: ^1 = PR, ^2 = base
submodules: 'recursive'
- name: Check copyright year
if: ${{ !cancelled() }} && github.event_name == 'pull_request'
run: |
excluded_files="config.yaml"
excluded_extensions="ans|json|md|png|ssz|txt"
current_year=$(date +"%Y")
outdated_files=()
while read -r file; do
if ! grep -qE 'Copyright \(c\) .*'$current_year' Status Research & Development GmbH' "$file"; then
outdated_files+=("$file")
fi
done < <(git diff --name-only --diff-filter=AM --ignore-submodules HEAD^ HEAD | grep -vE '(\.('$excluded_extensions')|'$excluded_files')$' || true)
if (( ${#outdated_files[@]} )); then
echo "The following files do not have an up-to-date copyright year:"
for file in "${outdated_files[@]}"; do
echo "- $file"
done
exit 2
fi
- name: Check submodules
if: ${{ !cancelled() }} && github.event_name == 'pull_request'
run: |
while read -r file; do
commit="$(git -C "$file" rev-parse HEAD)"
if ! branch="$(git config -f .gitmodules --get "submodule.$file.branch")"; then
echo "Submodule '$file': '.gitmodules' lacks 'branch' entry"
exit 2
fi
if ! error="$(git -C "$file" fetch -q origin "$branch")"; then
echo "Submodule '$file': Failed to fetch '$branch': $error"
exit 2
fi
if ! git -C "$file" merge-base --is-ancestor "$commit" "origin/$branch"; then
echo "Submodule '$file': '$commit' is not on '$branch'"
exit 2
fi
done < <(git diff --name-only --diff-filter=AM HEAD^ HEAD | grep -f <(git config --file .gitmodules --get-regexp path | awk '{ print $2 }') || true)
# https://github.com/EnricoMi/publish-unit-test-result-action
event_file:
name: "Event File"
runs-on: ubuntu-latest
steps:
- name: Upload
uses: actions/upload-artifact@v3
with:
name: Event File
path: ${{ github.event_path }}

.gitignore

@@ -51,7 +51,7 @@ build/
test_keymanager_api
test_sim
/libnfuzz_linkerArgs.txt
/*linkerArgs.txt
# scripts/geth_binaries.sh
geth-*.tar.gz

.gitmodules

@@ -215,3 +215,8 @@
url = https://github.com/status-im/nim-kzg4844.git
ignore = untracked
branch = master
[submodule "vendor/nim-results"]
path = vendor/nim-results
url = https://github.com/arnetheduck/nim-results.git
ignore = untracked
branch = master


@@ -1,5 +1,10 @@
AllTests-mainnet
===
## Ancestry
```diff
+ ancestorSlot OK
```
OK: 1/1 Fail: 0/1 Skip: 0/1
## Attestation pool processing [Preset: mainnet]
```diff
+ Attestation from different branch [Preset: mainnet] OK
@@ -100,11 +105,10 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
OK: 2/2 Fail: 0/2 Skip: 0/2
## BlockRef and helpers
```diff
+ commonAncestor sanity OK
+ get_ancestor sanity OK
+ isAncestorOf sanity OK
```
OK: 3/3 Fail: 0/3 Skip: 0/3
OK: 2/2 Fail: 0/2 Skip: 0/2
## BlockSlot and helpers
```diff
+ atSlot sanity OK
@@ -258,10 +262,11 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
+ General pubsub topics OK
+ Liveness failsafe conditions OK
+ Mainnet attestation topics OK
+ Stability subnets OK
+ isNearSyncCommitteePeriod OK
+ is_aggregator OK
```
OK: 5/5 Fail: 0/5 Skip: 0/5
OK: 6/6 Fail: 0/6 Skip: 0/6
## ImportKeystores requests [Beacon Node] [Preset: mainnet]
```diff
+ ImportKeystores/ListKeystores/DeleteKeystores [Beacon Node] [Preset: mainnet] OK
@@ -368,14 +373,12 @@ OK: 9/9 Fail: 0/9 Skip: 0/9
OK: 3/3 Fail: 0/3 Skip: 0/3
## Nimbus remote signer/signing test (verifying-web3signer)
```diff
+ Signing BeaconBlock (getBlockSignature(altair)) OK
+ Signing BeaconBlock (getBlockSignature(bellatrix)) OK
+ Signing BeaconBlock (getBlockSignature(capella)) OK
+ Signing BeaconBlock (getBlockSignature(deneb)) OK
+ Signing BeaconBlock (getBlockSignature(phase0)) OK
+ Waiting for signing node (/upcheck) test OK
```
OK: 6/6 Fail: 0/6 Skip: 0/6
OK: 4/4 Fail: 0/4 Skip: 0/4
## Nimbus remote signer/signing test (web3signer)
```diff
+ Connection timeout test OK
@@ -383,11 +386,9 @@ OK: 6/6 Fail: 0/6 Skip: 0/6
+ Idle connection test OK
+ Public keys enumeration (/api/v1/eth2/publicKeys) test OK
+ Public keys reload (/reload) test OK
+ Signing BeaconBlock (getBlockSignature(altair)) OK
+ Signing BeaconBlock (getBlockSignature(bellatrix)) OK
+ Signing BeaconBlock (getBlockSignature(capella)) OK
+ Signing BeaconBlock (getBlockSignature(deneb)) OK
+ Signing BeaconBlock (getBlockSignature(phase0)) OK
+ Signing SC contribution and proof (getContributionAndProofSignature()) OK
+ Signing SC message (getSyncCommitteeMessage()) OK
+ Signing SC selection proof (getSyncCommitteeSelectionProof()) OK
@@ -400,7 +401,7 @@ OK: 6/6 Fail: 0/6 Skip: 0/6
+ Signing voluntary exit (getValidatorExitSignature()) OK
+ Waiting for signing node (/upcheck) test OK
```
OK: 21/21 Fail: 0/21 Skip: 0/21
OK: 19/19 Fail: 0/19 Skip: 0/19
## Old database versions [Preset: mainnet]
```diff
+ pre-1.1.0 OK
@@ -450,8 +451,15 @@ OK: 2/2 Fail: 0/2 Skip: 0/2
## Shufflings
```diff
+ Accelerated shuffling computation OK
+ Accelerated shuffling computation (with epochRefState jump) OK
```
OK: 1/1 Fail: 0/1 Skip: 0/1
OK: 2/2 Fail: 0/2 Skip: 0/2
## Shufflings (merged)
```diff
+ Accelerated shuffling computation OK
+ Accelerated shuffling computation (with epochRefState jump) OK
```
OK: 2/2 Fail: 0/2 Skip: 0/2
## Slashing Interchange tests [Preset: mainnet]
```diff
+ Slashing test: duplicate_pubkey_not_slashable.json OK
@@ -578,9 +586,10 @@ OK: 24/24 Fail: 0/24 Skip: 0/24
OK: 1/1 Fail: 0/1 Skip: 0/1
## Validator Client test suite
```diff
+ getAttestationDataScore() test vectors OK
+ normalizeUri() test vectors OK
```
OK: 1/1 Fail: 0/1 Skip: 0/1
OK: 2/2 Fail: 0/2 Skip: 0/2
## Validator change pool testing suite
```diff
+ addValidatorChangeMessage/getAttesterSlashingMessage OK
@@ -691,4 +700,4 @@ OK: 2/2 Fail: 0/2 Skip: 0/2
OK: 9/9 Fail: 0/9 Skip: 0/9
---TOTAL---
OK: 392/397 Fail: 0/397 Skip: 5/397
OK: 393/398 Fail: 0/398 Skip: 5/398


@@ -1,9 +1,47 @@
2023-07-19 v23.7.0
==================
Nimbus `v23.7.0` is a `low-priority` upgrade, bringing advanced profit optimisation capabilities to the Nimbus validator client and addressing risk factors that can contribute to poorer validator performance.
### Improvements
* The Nimbus validator client now uses a scoring algorithm that selects the optimal attestation data when working with multiple beacon nodes:
https://github.com/status-im/nimbus-eth2/pull/5101
* The Nimbus validator client now synchronizes its clock with the Nimbus beacon node in order to eliminate the risk of poor validator performance stemming from de-synchronized clocks:
https://github.com/status-im/nimbus-eth2/pull/4846
* The `/eth/v1/beacon/states/{state_id}/*` family of REST endpoints now supports queries by state root, as long as the state is within the most recent 8192 slots (approximately 27 hours); see the example after this list:
https://github.com/status-im/nimbus-eth2/pull/5155
* Improved validation of blocks during syncing allows Nimbus to optimize the initial syncing target of the execution layer node:
https://github.com/status-im/nimbus-eth2/pull/5169
* The Nimbus light client is now available as a C library for easy reuse and embedding in other software (alpha release):
https://github.com/status-im/nimbus-eth2/pull/5122
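
As a quick illustration of the state-root queries mentioned above: the 0x-prefixed hex root can be passed directly as `{state_id}` in the standard Beacon API state endpoints. A minimal sketch, assuming a local beacon node serving the REST API on Nimbus' default port (5052); the root value is a placeholder:

```sh
# Look up finality checkpoints for a state addressed by its root; replace
# <state_root> with a 0x-prefixed hex root observed within the most recent
# 8192 slots (e.g. taken from a recent block header response).
curl -s "http://localhost:5052/eth/v1/beacon/states/<state_root>/finality_checkpoints"
```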
### Fixes
* Due to multiple reports of slow start-up times on certain hardware configurations, caused by the one-time initial pruning performed by Nimbus v23.6.0 and v23.6.1, this functionality has been temporarily disabled:
https://github.com/status-im/nimbus-eth2/pull/5191
* The block monitoring performed by the Nimbus validator client could be permanently interrupted in certain situations after a timed-out request to the beacon node:
https://github.com/status-im/nimbus-eth2/pull/5109
* Nimbus now uses the most up-to-date bootstrap nodes for the Gnosis chain:
https://github.com/status-im/nimbus-eth2/pull/5175
* Nimbus has addressed a minor risk of missed block proposals at epoch boundaries due to multiple compounding risk factors:
https://github.com/status-im/nimbus-eth2/pull/5195
https://github.com/status-im/nimbus-eth2/pull/5196
https://github.com/status-im/nimbus-eth2/pull/5194
2023-06-26 v23.6.1
==================
Nimbus `v23.6.1` is a `low-urgency` point release significantly improving the performance of database pruning on Nimbus instances that have accumulated history prior to April 2021 (Nimbus 1.1.0). Affected users are advised to upgrade as soon as possible in order to reduce the risk of missed attestations and blocks.
Fixes:
### Fixes
* The legacy Nimbus database is no longer subjected to pruning, due to the high I/O cost of the operation:
https://github.com/status-im/nimbus-eth2/pull/5116


@@ -739,6 +739,7 @@ ConsensusSpecPreset-mainnet
+ [Valid] EF - Deneb - Sanity - Blocks - historical_batch [Preset: mainnet] OK
+ [Valid] EF - Deneb - Sanity - Blocks - inactivity_scores_full_participation_leaking [Pre OK
+ [Valid] EF - Deneb - Sanity - Blocks - inactivity_scores_leaking [Preset: mainnet] OK
+ [Valid] EF - Deneb - Sanity - Blocks - include_attestation_from_previous_fork_with_new_r OK
+ [Valid] EF - Deneb - Sanity - Blocks - many_partial_withdrawals_in_epoch_transition [Pre OK
+ [Valid] EF - Deneb - Sanity - Blocks - max_blobs_per_block [Preset: mainnet] OK
+ [Valid] EF - Deneb - Sanity - Blocks - multiple_attester_slashings_no_overlap [Preset: m OK
@@ -809,10 +810,10 @@ ConsensusSpecPreset-mainnet
+ [Valid] EF - Phase 0 - Sanity - Blocks - slash_and_exit_diff_index [Preset: mainnet] OK
+ [Valid] EF - Phase 0 - Sanity - Blocks - voluntary_exit [Preset: mainnet] OK
```
OK: 798/806 Fail: 0/806 Skip: 8/806
OK: 799/807 Fail: 0/807 Skip: 8/807
## Attestation
```diff
+ [Invalid] EF - Altair - Operations - Attestation - invalid_after_epoch_slots OK
+ [Invalid] EF - Altair - Operations - Attestation - invalid_after_max_inclusion_slot OK
+ [Invalid] EF - Altair - Operations - Attestation - invalid_attestation_signature OK
+ [Invalid] EF - Altair - Operations - Attestation - invalid_bad_source_root OK
+ [Invalid] EF - Altair - Operations - Attestation - invalid_before_inclusion_delay OK
@@ -822,7 +823,7 @@ OK: 798/806 Fail: 0/806 Skip: 8/806
+ [Invalid] EF - Altair - Operations - Attestation - invalid_empty_participants_zeroes_sig OK
+ [Invalid] EF - Altair - Operations - Attestation - invalid_future_target_epoch OK
+ [Invalid] EF - Altair - Operations - Attestation - invalid_incorrect_head_and_target_inclu OK
+ [Invalid] EF - Altair - Operations - Attestation - invalid_incorrect_head_included_after_e OK
+ [Invalid] EF - Altair - Operations - Attestation - invalid_incorrect_head_included_after_m OK
+ [Invalid] EF - Altair - Operations - Attestation - invalid_incorrect_target_included_after OK
+ [Invalid] EF - Altair - Operations - Attestation - invalid_index OK
+ [Invalid] EF - Altair - Operations - Attestation - invalid_mismatched_target_and_slot OK
@@ -836,7 +837,7 @@ OK: 798/806 Fail: 0/806 Skip: 8/806
+ [Invalid] EF - Altair - Operations - Attestation - invalid_wrong_index_for_committee_signa OK
+ [Invalid] EF - Altair - Operations - Attestation - invalid_wrong_index_for_slot_0 OK
+ [Invalid] EF - Altair - Operations - Attestation - invalid_wrong_index_for_slot_1 OK
+ [Invalid] EF - Bellatrix - Operations - Attestation - invalid_after_epoch_slots OK
+ [Invalid] EF - Bellatrix - Operations - Attestation - invalid_after_max_inclusion_slot OK
+ [Invalid] EF - Bellatrix - Operations - Attestation - invalid_attestation_signature OK
+ [Invalid] EF - Bellatrix - Operations - Attestation - invalid_bad_source_root OK
+ [Invalid] EF - Bellatrix - Operations - Attestation - invalid_before_inclusion_delay OK
@@ -860,7 +861,7 @@ OK: 798/806 Fail: 0/806 Skip: 8/806
+ [Invalid] EF - Bellatrix - Operations - Attestation - invalid_wrong_index_for_committee_si OK
+ [Invalid] EF - Bellatrix - Operations - Attestation - invalid_wrong_index_for_slot_0 OK
+ [Invalid] EF - Bellatrix - Operations - Attestation - invalid_wrong_index_for_slot_1 OK
+ [Invalid] EF - Capella - Operations - Attestation - invalid_after_epoch_slots OK
+ [Invalid] EF - Capella - Operations - Attestation - invalid_after_max_inclusion_slot OK
+ [Invalid] EF - Capella - Operations - Attestation - invalid_attestation_signature OK
+ [Invalid] EF - Capella - Operations - Attestation - invalid_bad_source_root OK
+ [Invalid] EF - Capella - Operations - Attestation - invalid_before_inclusion_delay OK
@@ -884,7 +885,7 @@ OK: 798/806 Fail: 0/806 Skip: 8/806
+ [Invalid] EF - Capella - Operations - Attestation - invalid_wrong_index_for_committee_sign OK
+ [Invalid] EF - Capella - Operations - Attestation - invalid_wrong_index_for_slot_0 OK
+ [Invalid] EF - Capella - Operations - Attestation - invalid_wrong_index_for_slot_1 OK
+ [Invalid] EF - Deneb - Operations - Attestation - invalid_after_epoch_slots OK
+ [Invalid] EF - Deneb - Operations - Attestation - invalid_after_max_inclusion_slot OK
+ [Invalid] EF - Deneb - Operations - Attestation - invalid_attestation_signature OK
+ [Invalid] EF - Deneb - Operations - Attestation - invalid_bad_source_root OK
+ [Invalid] EF - Deneb - Operations - Attestation - invalid_before_inclusion_delay OK
@@ -894,7 +895,7 @@ OK: 798/806 Fail: 0/806 Skip: 8/806
+ [Invalid] EF - Deneb - Operations - Attestation - invalid_empty_participants_zeroes_sig OK
+ [Invalid] EF - Deneb - Operations - Attestation - invalid_future_target_epoch OK
+ [Invalid] EF - Deneb - Operations - Attestation - invalid_incorrect_head_and_target_includ OK
+ [Invalid] EF - Deneb - Operations - Attestation - invalid_incorrect_head_included_after_ep OK
+ [Invalid] EF - Deneb - Operations - Attestation - invalid_incorrect_head_included_after_ma OK
+ [Invalid] EF - Deneb - Operations - Attestation - invalid_incorrect_target_included_after_ OK
+ [Invalid] EF - Deneb - Operations - Attestation - invalid_index OK
+ [Invalid] EF - Deneb - Operations - Attestation - invalid_mismatched_target_and_slot OK
@@ -908,7 +909,7 @@ OK: 798/806 Fail: 0/806 Skip: 8/806
+ [Invalid] EF - Deneb - Operations - Attestation - invalid_wrong_index_for_committee_signat OK
+ [Invalid] EF - Deneb - Operations - Attestation - invalid_wrong_index_for_slot_0 OK
+ [Invalid] EF - Deneb - Operations - Attestation - invalid_wrong_index_for_slot_1 OK
+ [Invalid] EF - Phase 0 - Operations - Attestation - invalid_after_epoch_slots OK
+ [Invalid] EF - Phase 0 - Operations - Attestation - invalid_after_max_inclusion_slot OK
+ [Invalid] EF - Phase 0 - Operations - Attestation - invalid_attestation_signature OK
+ [Invalid] EF - Phase 0 - Operations - Attestation - invalid_bad_source_root OK
+ [Invalid] EF - Phase 0 - Operations - Attestation - invalid_before_inclusion_delay OK
@@ -932,13 +933,15 @@ OK: 798/806 Fail: 0/806 Skip: 8/806
+ [Invalid] EF - Phase 0 - Operations - Attestation - invalid_wrong_index_for_committee_sign OK
+ [Invalid] EF - Phase 0 - Operations - Attestation - invalid_wrong_index_for_slot_0 OK
+ [Invalid] EF - Phase 0 - Operations - Attestation - invalid_wrong_index_for_slot_1 OK
+ [Valid] EF - Altair - Operations - Attestation - at_max_inclusion_slot OK
+ [Valid] EF - Altair - Operations - Attestation - correct_attestation_included_at_max_inc OK
+ [Valid] EF - Altair - Operations - Attestation - correct_attestation_included_at_min_inc OK
+ [Valid] EF - Altair - Operations - Attestation - correct_attestation_included_at_one_epo OK
+ [Valid] EF - Altair - Operations - Attestation - correct_attestation_included_at_sqrt_ep OK
+ [Valid] EF - Altair - Operations - Attestation - incorrect_head_and_target_included_at_e OK
+ [Valid] EF - Altair - Operations - Attestation - incorrect_head_and_target_included_at_s OK
+ [Valid] EF - Altair - Operations - Attestation - incorrect_head_and_target_min_inclusion OK
+ [Valid] EF - Altair - Operations - Attestation - incorrect_head_included_at_epoch_delay OK
+ [Valid] EF - Altair - Operations - Attestation - incorrect_head_included_at_max_inclusio OK
+ [Valid] EF - Altair - Operations - Attestation - incorrect_head_included_at_min_inclusio OK
+ [Valid] EF - Altair - Operations - Attestation - incorrect_head_included_at_sqrt_epoch_d OK
+ [Valid] EF - Altair - Operations - Attestation - incorrect_target_included_at_epoch_dela OK
@@ -947,13 +950,15 @@ OK: 798/806 Fail: 0/806 Skip: 8/806
+ [Valid] EF - Altair - Operations - Attestation - multi_proposer_index_iterations OK
+ [Valid] EF - Altair - Operations - Attestation - one_basic_attestation OK
+ [Valid] EF - Altair - Operations - Attestation - previous_epoch OK
+ [Valid] EF - Bellatrix - Operations - Attestation - at_max_inclusion_slot OK
+ [Valid] EF - Bellatrix - Operations - Attestation - correct_attestation_included_at_max_ OK
+ [Valid] EF - Bellatrix - Operations - Attestation - correct_attestation_included_at_min_ OK
+ [Valid] EF - Bellatrix - Operations - Attestation - correct_attestation_included_at_one_ OK
+ [Valid] EF - Bellatrix - Operations - Attestation - correct_attestation_included_at_sqrt OK
+ [Valid] EF - Bellatrix - Operations - Attestation - incorrect_head_and_target_included_a OK
+ [Valid] EF - Bellatrix - Operations - Attestation - incorrect_head_and_target_included_a OK
+ [Valid] EF - Bellatrix - Operations - Attestation - incorrect_head_and_target_min_inclus OK
+ [Valid] EF - Bellatrix - Operations - Attestation - incorrect_head_included_at_epoch_del OK
+ [Valid] EF - Bellatrix - Operations - Attestation - incorrect_head_included_at_max_inclu OK
+ [Valid] EF - Bellatrix - Operations - Attestation - incorrect_head_included_at_min_inclu OK
+ [Valid] EF - Bellatrix - Operations - Attestation - incorrect_head_included_at_sqrt_epoc OK
+ [Valid] EF - Bellatrix - Operations - Attestation - incorrect_target_included_at_epoch_d OK
@@ -962,13 +967,15 @@ OK: 798/806 Fail: 0/806 Skip: 8/806
+ [Valid] EF - Bellatrix - Operations - Attestation - multi_proposer_index_iterations OK
+ [Valid] EF - Bellatrix - Operations - Attestation - one_basic_attestation OK
+ [Valid] EF - Bellatrix - Operations - Attestation - previous_epoch OK
+ [Valid] EF - Capella - Operations - Attestation - at_max_inclusion_slot OK
+ [Valid] EF - Capella - Operations - Attestation - correct_attestation_included_at_max_in OK
+ [Valid] EF - Capella - Operations - Attestation - correct_attestation_included_at_min_in OK
+ [Valid] EF - Capella - Operations - Attestation - correct_attestation_included_at_one_ep OK
+ [Valid] EF - Capella - Operations - Attestation - correct_attestation_included_at_sqrt_e OK
+ [Valid] EF - Capella - Operations - Attestation - incorrect_head_and_target_included_at_ OK
+ [Valid] EF - Capella - Operations - Attestation - incorrect_head_and_target_included_at_ OK
+ [Valid] EF - Capella - Operations - Attestation - incorrect_head_and_target_min_inclusio OK
+ [Valid] EF - Capella - Operations - Attestation - incorrect_head_included_at_epoch_delay OK
+ [Valid] EF - Capella - Operations - Attestation - incorrect_head_included_at_max_inclusi OK
+ [Valid] EF - Capella - Operations - Attestation - incorrect_head_included_at_min_inclusi OK
+ [Valid] EF - Capella - Operations - Attestation - incorrect_head_included_at_sqrt_epoch_ OK
+ [Valid] EF - Capella - Operations - Attestation - incorrect_target_included_at_epoch_del OK
@@ -977,13 +984,15 @@ OK: 798/806 Fail: 0/806 Skip: 8/806
+ [Valid] EF - Capella - Operations - Attestation - multi_proposer_index_iterations OK
+ [Valid] EF - Capella - Operations - Attestation - one_basic_attestation OK
+ [Valid] EF - Capella - Operations - Attestation - previous_epoch OK
+ [Valid] EF - Deneb - Operations - Attestation - at_max_inclusion_slot OK
+ [Valid] EF - Deneb - Operations - Attestation - correct_attestation_included_at_max_incl OK
+ [Valid] EF - Deneb - Operations - Attestation - correct_attestation_included_at_min_incl OK
+ [Valid] EF - Deneb - Operations - Attestation - correct_attestation_included_at_one_epoc OK
+ [Valid] EF - Deneb - Operations - Attestation - correct_attestation_included_at_sqrt_epo OK
+ [Valid] EF - Deneb - Operations - Attestation - incorrect_head_and_target_included_at_ep OK
+ [Valid] EF - Deneb - Operations - Attestation - incorrect_head_and_target_included_at_sq OK
+ [Valid] EF - Deneb - Operations - Attestation - incorrect_head_and_target_min_inclusion_ OK
+ [Valid] EF - Deneb - Operations - Attestation - incorrect_head_included_at_epoch_delay OK
+ [Valid] EF - Deneb - Operations - Attestation - incorrect_head_included_at_max_inclusion OK
+ [Valid] EF - Deneb - Operations - Attestation - incorrect_head_included_at_min_inclusion OK
+ [Valid] EF - Deneb - Operations - Attestation - incorrect_head_included_at_sqrt_epoch_de OK
+ [Valid] EF - Deneb - Operations - Attestation - incorrect_target_included_at_epoch_delay OK
@@ -992,13 +1001,15 @@ OK: 798/806 Fail: 0/806 Skip: 8/806
+ [Valid] EF - Deneb - Operations - Attestation - multi_proposer_index_iterations OK
+ [Valid] EF - Deneb - Operations - Attestation - one_basic_attestation OK
+ [Valid] EF - Deneb - Operations - Attestation - previous_epoch OK
+ [Valid] EF - Phase 0 - Operations - Attestation - at_max_inclusion_slot OK
+ [Valid] EF - Phase 0 - Operations - Attestation - correct_attestation_included_at_max_in OK
+ [Valid] EF - Phase 0 - Operations - Attestation - correct_attestation_included_at_min_in OK
+ [Valid] EF - Phase 0 - Operations - Attestation - correct_attestation_included_at_one_ep OK
+ [Valid] EF - Phase 0 - Operations - Attestation - correct_attestation_included_at_sqrt_e OK
+ [Valid] EF - Phase 0 - Operations - Attestation - incorrect_head_and_target_included_at_ OK
+ [Valid] EF - Phase 0 - Operations - Attestation - incorrect_head_and_target_included_at_ OK
+ [Valid] EF - Phase 0 - Operations - Attestation - incorrect_head_and_target_min_inclusio OK
+ [Valid] EF - Phase 0 - Operations - Attestation - incorrect_head_included_at_epoch_delay OK
+ [Valid] EF - Phase 0 - Operations - Attestation - incorrect_head_included_at_max_inclusi OK
+ [Valid] EF - Phase 0 - Operations - Attestation - incorrect_head_included_at_min_inclusi OK
+ [Valid] EF - Phase 0 - Operations - Attestation - incorrect_head_included_at_sqrt_epoch_ OK
+ [Valid] EF - Phase 0 - Operations - Attestation - incorrect_target_included_at_epoch_del OK
@@ -1008,7 +1019,7 @@ OK: 798/806 Fail: 0/806 Skip: 8/806
+ [Valid] EF - Phase 0 - Operations - Attestation - one_basic_attestation OK
+ [Valid] EF - Phase 0 - Operations - Attestation - previous_epoch OK
```
OK: 195/195 Fail: 0/195 Skip: 0/195
OK: 205/205 Fail: 0/205 Skip: 0/205
## Attester Slashing
```diff
+ [Invalid] EF - Altair - Operations - Attester Slashing - invalid_all_empty_indices OK
@@ -2472,9 +2483,9 @@ OK: 104/104 Fail: 0/104 Skip: 0/104
+ [Invalid] EF - Deneb - Operations - Voluntary Exit - invalid_validator_not_active OK
+ [Invalid] EF - Deneb - Operations - Voluntary Exit - invalid_validator_not_active_long_eno OK
+ [Invalid] EF - Deneb - Operations - Voluntary Exit - invalid_voluntary_exit_with_current_f OK
+ [Invalid] EF - Deneb - Operations - Voluntary Exit - invalid_voluntary_exit_with_current_f OK
+ [Invalid] EF - Deneb - Operations - Voluntary Exit - invalid_voluntary_exit_with_genesis_f OK
+ [Invalid] EF - Deneb - Operations - Voluntary Exit - invalid_voluntary_exit_with_genesis_f OK
+ [Invalid] EF - Deneb - Operations - Voluntary Exit - invalid_voluntary_exit_with_previous_ OK
+ [Invalid] EF - Phase 0 - Operations - Voluntary Exit - invalid_incorrect_signature OK
+ [Invalid] EF - Phase 0 - Operations - Voluntary Exit - invalid_validator_already_exited OK
+ [Invalid] EF - Phase 0 - Operations - Voluntary Exit - invalid_validator_exit_in_future OK
@@ -2497,7 +2508,7 @@ OK: 104/104 Fail: 0/104 Skip: 0/104
+ [Valid] EF - Deneb - Operations - Voluntary Exit - basic OK
+ [Valid] EF - Deneb - Operations - Voluntary Exit - default_exit_epoch_subsequent_exit OK
+ [Valid] EF - Deneb - Operations - Voluntary Exit - success_exit_queue__min_churn OK
+ [Valid] EF - Deneb - Operations - Voluntary Exit - voluntary_exit_with_current_fork_vers OK
+ [Valid] EF - Deneb - Operations - Voluntary Exit - voluntary_exit_with_previous_fork_ver OK
+ [Valid] EF - Deneb - Operations - Voluntary Exit - voluntary_exit_with_previous_fork_ver OK
+ [Valid] EF - Phase 0 - Operations - Voluntary Exit - basic OK
+ [Valid] EF - Phase 0 - Operations - Voluntary Exit - default_exit_epoch_subsequent_exit OK
@@ -2610,4 +2621,4 @@ OK: 63/63 Fail: 0/63 Skip: 0/63
OK: 100/100 Fail: 0/100 Skip: 0/100
---TOTAL---
OK: 2308/2316 Fail: 0/2316 Skip: 8/2316
OK: 2319/2327 Fail: 0/2327 Skip: 8/2327


@@ -927,6 +927,7 @@ ConsensusSpecPreset-minimal
+ [Valid] EF - Deneb - Sanity - Blocks - historical_batch [Preset: minimal] OK
+ [Valid] EF - Deneb - Sanity - Blocks - inactivity_scores_full_participation_leaking [Pre OK
+ [Valid] EF - Deneb - Sanity - Blocks - inactivity_scores_leaking [Preset: minimal] OK
+ [Valid] EF - Deneb - Sanity - Blocks - include_attestation_from_previous_fork_with_new_r OK
+ [Valid] EF - Deneb - Sanity - Blocks - many_partial_withdrawals_in_epoch_transition [Pre OK
+ [Valid] EF - Deneb - Sanity - Blocks - max_blobs_per_block [Preset: minimal] OK
+ [Valid] EF - Deneb - Sanity - Blocks - multiple_attester_slashings_no_overlap [Preset: m OK
@@ -1002,10 +1003,10 @@ ConsensusSpecPreset-minimal
+ [Valid] EF - Phase 0 - Sanity - Blocks - slash_and_exit_diff_index [Preset: minimal] OK
+ [Valid] EF - Phase 0 - Sanity - Blocks - voluntary_exit [Preset: minimal] OK
```
OK: 991/999 Fail: 0/999 Skip: 8/999
OK: 992/1000 Fail: 0/1000 Skip: 8/1000
## Attestation
```diff
+ [Invalid] EF - Altair - Operations - Attestation - invalid_after_epoch_slots OK
+ [Invalid] EF - Altair - Operations - Attestation - invalid_after_max_inclusion_slot OK
+ [Invalid] EF - Altair - Operations - Attestation - invalid_attestation_signature OK
+ [Invalid] EF - Altair - Operations - Attestation - invalid_bad_source_root OK
+ [Invalid] EF - Altair - Operations - Attestation - invalid_before_inclusion_delay OK
@@ -1015,7 +1016,7 @@ OK: 991/999 Fail: 0/999 Skip: 8/999
+ [Invalid] EF - Altair - Operations - Attestation - invalid_empty_participants_zeroes_sig OK
+ [Invalid] EF - Altair - Operations - Attestation - invalid_future_target_epoch OK
+ [Invalid] EF - Altair - Operations - Attestation - invalid_incorrect_head_and_target_inclu OK
+ [Invalid] EF - Altair - Operations - Attestation - invalid_incorrect_head_included_after_e OK
+ [Invalid] EF - Altair - Operations - Attestation - invalid_incorrect_head_included_after_m OK
+ [Invalid] EF - Altair - Operations - Attestation - invalid_incorrect_target_included_after OK
+ [Invalid] EF - Altair - Operations - Attestation - invalid_index OK
+ [Invalid] EF - Altair - Operations - Attestation - invalid_mismatched_target_and_slot OK
@@ -1029,7 +1030,7 @@ OK: 991/999 Fail: 0/999 Skip: 8/999
+ [Invalid] EF - Altair - Operations - Attestation - invalid_wrong_index_for_committee_signa OK
+ [Invalid] EF - Altair - Operations - Attestation - invalid_wrong_index_for_slot_0 OK
+ [Invalid] EF - Altair - Operations - Attestation - invalid_wrong_index_for_slot_1 OK
+ [Invalid] EF - Bellatrix - Operations - Attestation - invalid_after_epoch_slots OK
+ [Invalid] EF - Bellatrix - Operations - Attestation - invalid_after_max_inclusion_slot OK
+ [Invalid] EF - Bellatrix - Operations - Attestation - invalid_attestation_signature OK
+ [Invalid] EF - Bellatrix - Operations - Attestation - invalid_bad_source_root OK
+ [Invalid] EF - Bellatrix - Operations - Attestation - invalid_before_inclusion_delay OK
@@ -1053,7 +1054,7 @@ OK: 991/999 Fail: 0/999 Skip: 8/999
+ [Invalid] EF - Bellatrix - Operations - Attestation - invalid_wrong_index_for_committee_si OK
+ [Invalid] EF - Bellatrix - Operations - Attestation - invalid_wrong_index_for_slot_0 OK
+ [Invalid] EF - Bellatrix - Operations - Attestation - invalid_wrong_index_for_slot_1 OK
+ [Invalid] EF - Capella - Operations - Attestation - invalid_after_epoch_slots OK
+ [Invalid] EF - Capella - Operations - Attestation - invalid_after_max_inclusion_slot OK
+ [Invalid] EF - Capella - Operations - Attestation - invalid_attestation_signature OK
+ [Invalid] EF - Capella - Operations - Attestation - invalid_bad_source_root OK
+ [Invalid] EF - Capella - Operations - Attestation - invalid_before_inclusion_delay OK
@@ -1077,7 +1078,7 @@ OK: 991/999 Fail: 0/999 Skip: 8/999
+ [Invalid] EF - Capella - Operations - Attestation - invalid_wrong_index_for_committee_sign OK
+ [Invalid] EF - Capella - Operations - Attestation - invalid_wrong_index_for_slot_0 OK
+ [Invalid] EF - Capella - Operations - Attestation - invalid_wrong_index_for_slot_1 OK
+ [Invalid] EF - Deneb - Operations - Attestation - invalid_after_epoch_slots OK
+ [Invalid] EF - Deneb - Operations - Attestation - invalid_after_max_inclusion_slot OK
+ [Invalid] EF - Deneb - Operations - Attestation - invalid_attestation_signature OK
+ [Invalid] EF - Deneb - Operations - Attestation - invalid_bad_source_root OK
+ [Invalid] EF - Deneb - Operations - Attestation - invalid_before_inclusion_delay OK
@@ -1087,7 +1088,7 @@ OK: 991/999 Fail: 0/999 Skip: 8/999
+ [Invalid] EF - Deneb - Operations - Attestation - invalid_empty_participants_zeroes_sig OK
+ [Invalid] EF - Deneb - Operations - Attestation - invalid_future_target_epoch OK
+ [Invalid] EF - Deneb - Operations - Attestation - invalid_incorrect_head_and_target_includ OK
+ [Invalid] EF - Deneb - Operations - Attestation - invalid_incorrect_head_included_after_ep OK
+ [Invalid] EF - Deneb - Operations - Attestation - invalid_incorrect_head_included_after_ma OK
+ [Invalid] EF - Deneb - Operations - Attestation - invalid_incorrect_target_included_after_ OK
+ [Invalid] EF - Deneb - Operations - Attestation - invalid_index OK
+ [Invalid] EF - Deneb - Operations - Attestation - invalid_mismatched_target_and_slot OK
@@ -1101,7 +1102,7 @@ OK: 991/999 Fail: 0/999 Skip: 8/999
+ [Invalid] EF - Deneb - Operations - Attestation - invalid_wrong_index_for_committee_signat OK
+ [Invalid] EF - Deneb - Operations - Attestation - invalid_wrong_index_for_slot_0 OK
+ [Invalid] EF - Deneb - Operations - Attestation - invalid_wrong_index_for_slot_1 OK
+ [Invalid] EF - Phase 0 - Operations - Attestation - invalid_after_epoch_slots OK
+ [Invalid] EF - Phase 0 - Operations - Attestation - invalid_after_max_inclusion_slot OK
+ [Invalid] EF - Phase 0 - Operations - Attestation - invalid_attestation_signature OK
+ [Invalid] EF - Phase 0 - Operations - Attestation - invalid_bad_source_root OK
+ [Invalid] EF - Phase 0 - Operations - Attestation - invalid_before_inclusion_delay OK
@@ -1125,13 +1126,15 @@ OK: 991/999 Fail: 0/999 Skip: 8/999
+ [Invalid] EF - Phase 0 - Operations - Attestation - invalid_wrong_index_for_committee_sign OK
+ [Invalid] EF - Phase 0 - Operations - Attestation - invalid_wrong_index_for_slot_0 OK
+ [Invalid] EF - Phase 0 - Operations - Attestation - invalid_wrong_index_for_slot_1 OK
+ [Valid] EF - Altair - Operations - Attestation - at_max_inclusion_slot OK
+ [Valid] EF - Altair - Operations - Attestation - correct_attestation_included_at_max_inc OK
+ [Valid] EF - Altair - Operations - Attestation - correct_attestation_included_at_min_inc OK
+ [Valid] EF - Altair - Operations - Attestation - correct_attestation_included_at_one_epo OK
+ [Valid] EF - Altair - Operations - Attestation - correct_attestation_included_at_sqrt_ep OK
+ [Valid] EF - Altair - Operations - Attestation - incorrect_head_and_target_included_at_e OK
+ [Valid] EF - Altair - Operations - Attestation - incorrect_head_and_target_included_at_s OK
+ [Valid] EF - Altair - Operations - Attestation - incorrect_head_and_target_min_inclusion OK
+ [Valid] EF - Altair - Operations - Attestation - incorrect_head_included_at_epoch_delay OK
+ [Valid] EF - Altair - Operations - Attestation - incorrect_head_included_at_max_inclusio OK
+ [Valid] EF - Altair - Operations - Attestation - incorrect_head_included_at_min_inclusio OK
+ [Valid] EF - Altair - Operations - Attestation - incorrect_head_included_at_sqrt_epoch_d OK
+ [Valid] EF - Altair - Operations - Attestation - incorrect_target_included_at_epoch_dela OK
@@ -1140,13 +1143,15 @@ OK: 991/999 Fail: 0/999 Skip: 8/999
+ [Valid] EF - Altair - Operations - Attestation - multi_proposer_index_iterations OK
+ [Valid] EF - Altair - Operations - Attestation - one_basic_attestation OK
+ [Valid] EF - Altair - Operations - Attestation - previous_epoch OK
+ [Valid] EF - Bellatrix - Operations - Attestation - at_max_inclusion_slot OK
+ [Valid] EF - Bellatrix - Operations - Attestation - correct_attestation_included_at_max_ OK
+ [Valid] EF - Bellatrix - Operations - Attestation - correct_attestation_included_at_min_ OK
+ [Valid] EF - Bellatrix - Operations - Attestation - correct_attestation_included_at_one_ OK
+ [Valid] EF - Bellatrix - Operations - Attestation - correct_attestation_included_at_sqrt OK
+ [Valid] EF - Bellatrix - Operations - Attestation - incorrect_head_and_target_included_a OK
+ [Valid] EF - Bellatrix - Operations - Attestation - incorrect_head_and_target_included_a OK
+ [Valid] EF - Bellatrix - Operations - Attestation - incorrect_head_and_target_min_inclus OK
+ [Valid] EF - Bellatrix - Operations - Attestation - incorrect_head_included_at_epoch_del OK
+ [Valid] EF - Bellatrix - Operations - Attestation - incorrect_head_included_at_max_inclu OK
+ [Valid] EF - Bellatrix - Operations - Attestation - incorrect_head_included_at_min_inclu OK
+ [Valid] EF - Bellatrix - Operations - Attestation - incorrect_head_included_at_sqrt_epoc OK
+ [Valid] EF - Bellatrix - Operations - Attestation - incorrect_target_included_at_epoch_d OK
@@ -1155,13 +1160,15 @@ OK: 991/999 Fail: 0/999 Skip: 8/999
+ [Valid] EF - Bellatrix - Operations - Attestation - multi_proposer_index_iterations OK
+ [Valid] EF - Bellatrix - Operations - Attestation - one_basic_attestation OK
+ [Valid] EF - Bellatrix - Operations - Attestation - previous_epoch OK
+ [Valid] EF - Capella - Operations - Attestation - at_max_inclusion_slot OK
+ [Valid] EF - Capella - Operations - Attestation - correct_attestation_included_at_max_in OK
+ [Valid] EF - Capella - Operations - Attestation - correct_attestation_included_at_min_in OK
+ [Valid] EF - Capella - Operations - Attestation - correct_attestation_included_at_one_ep OK
+ [Valid] EF - Capella - Operations - Attestation - correct_attestation_included_at_sqrt_e OK
+ [Valid] EF - Capella - Operations - Attestation - incorrect_head_and_target_included_at_ OK
+ [Valid] EF - Capella - Operations - Attestation - incorrect_head_and_target_included_at_ OK
+ [Valid] EF - Capella - Operations - Attestation - incorrect_head_and_target_min_inclusio OK
+ [Valid] EF - Capella - Operations - Attestation - incorrect_head_included_at_epoch_delay OK
+ [Valid] EF - Capella - Operations - Attestation - incorrect_head_included_at_max_inclusi OK
+ [Valid] EF - Capella - Operations - Attestation - incorrect_head_included_at_min_inclusi OK
+ [Valid] EF - Capella - Operations - Attestation - incorrect_head_included_at_sqrt_epoch_ OK
+ [Valid] EF - Capella - Operations - Attestation - incorrect_target_included_at_epoch_del OK
@@ -1170,13 +1177,15 @@ OK: 991/999 Fail: 0/999 Skip: 8/999
+ [Valid] EF - Capella - Operations - Attestation - multi_proposer_index_iterations OK
+ [Valid] EF - Capella - Operations - Attestation - one_basic_attestation OK
+ [Valid] EF - Capella - Operations - Attestation - previous_epoch OK
+ [Valid] EF - Deneb - Operations - Attestation - at_max_inclusion_slot OK
+ [Valid] EF - Deneb - Operations - Attestation - correct_attestation_included_at_max_incl OK
+ [Valid] EF - Deneb - Operations - Attestation - correct_attestation_included_at_min_incl OK
+ [Valid] EF - Deneb - Operations - Attestation - correct_attestation_included_at_one_epoc OK
+ [Valid] EF - Deneb - Operations - Attestation - correct_attestation_included_at_sqrt_epo OK
+ [Valid] EF - Deneb - Operations - Attestation - incorrect_head_and_target_included_at_ep OK
+ [Valid] EF - Deneb - Operations - Attestation - incorrect_head_and_target_included_at_sq OK
+ [Valid] EF - Deneb - Operations - Attestation - incorrect_head_and_target_min_inclusion_ OK
+ [Valid] EF - Deneb - Operations - Attestation - incorrect_head_included_at_epoch_delay OK
+ [Valid] EF - Deneb - Operations - Attestation - incorrect_head_included_at_max_inclusion OK
+ [Valid] EF - Deneb - Operations - Attestation - incorrect_head_included_at_min_inclusion OK
+ [Valid] EF - Deneb - Operations - Attestation - incorrect_head_included_at_sqrt_epoch_de OK
+ [Valid] EF - Deneb - Operations - Attestation - incorrect_target_included_at_epoch_delay OK
@@ -1185,13 +1194,15 @@ OK: 991/999 Fail: 0/999 Skip: 8/999
+ [Valid] EF - Deneb - Operations - Attestation - multi_proposer_index_iterations OK
+ [Valid] EF - Deneb - Operations - Attestation - one_basic_attestation OK
+ [Valid] EF - Deneb - Operations - Attestation - previous_epoch OK
+ [Valid] EF - Phase 0 - Operations - Attestation - at_max_inclusion_slot OK
+ [Valid] EF - Phase 0 - Operations - Attestation - correct_attestation_included_at_max_in OK
+ [Valid] EF - Phase 0 - Operations - Attestation - correct_attestation_included_at_min_in OK
+ [Valid] EF - Phase 0 - Operations - Attestation - correct_attestation_included_at_one_ep OK
+ [Valid] EF - Phase 0 - Operations - Attestation - correct_attestation_included_at_sqrt_e OK
+ [Valid] EF - Phase 0 - Operations - Attestation - incorrect_head_and_target_included_at_ OK
+ [Valid] EF - Phase 0 - Operations - Attestation - incorrect_head_and_target_included_at_ OK
+ [Valid] EF - Phase 0 - Operations - Attestation - incorrect_head_and_target_min_inclusio OK
+ [Valid] EF - Phase 0 - Operations - Attestation - incorrect_head_included_at_epoch_delay OK
+ [Valid] EF - Phase 0 - Operations - Attestation - incorrect_head_included_at_max_inclusi OK
+ [Valid] EF - Phase 0 - Operations - Attestation - incorrect_head_included_at_min_inclusi OK
+ [Valid] EF - Phase 0 - Operations - Attestation - incorrect_head_included_at_sqrt_epoch_ OK
+ [Valid] EF - Phase 0 - Operations - Attestation - incorrect_target_included_at_epoch_del OK
@@ -1201,7 +1212,7 @@ OK: 991/999 Fail: 0/999 Skip: 8/999
+ [Valid] EF - Phase 0 - Operations - Attestation - one_basic_attestation OK
+ [Valid] EF - Phase 0 - Operations - Attestation - previous_epoch OK
```
OK: 195/195 Fail: 0/195 Skip: 0/195
OK: 205/205 Fail: 0/205 Skip: 0/205
## Attester Slashing
```diff
+ [Invalid] EF - Altair - Operations - Attester Slashing - invalid_all_empty_indices OK
@@ -2720,9 +2731,9 @@ OK: 96/96 Fail: 0/96 Skip: 0/96
+ [Invalid] EF - Deneb - Operations - Voluntary Exit - invalid_validator_not_active OK
+ [Invalid] EF - Deneb - Operations - Voluntary Exit - invalid_validator_not_active_long_eno OK
+ [Invalid] EF - Deneb - Operations - Voluntary Exit - invalid_voluntary_exit_with_current_f OK
+ [Invalid] EF - Deneb - Operations - Voluntary Exit - invalid_voluntary_exit_with_current_f OK
+ [Invalid] EF - Deneb - Operations - Voluntary Exit - invalid_voluntary_exit_with_genesis_f OK
+ [Invalid] EF - Deneb - Operations - Voluntary Exit - invalid_voluntary_exit_with_genesis_f OK
+ [Invalid] EF - Deneb - Operations - Voluntary Exit - invalid_voluntary_exit_with_previous_ OK
+ [Invalid] EF - Phase 0 - Operations - Voluntary Exit - invalid_incorrect_signature OK
+ [Invalid] EF - Phase 0 - Operations - Voluntary Exit - invalid_validator_already_exited OK
+ [Invalid] EF - Phase 0 - Operations - Voluntary Exit - invalid_validator_exit_in_future OK
@@ -2749,7 +2760,7 @@ OK: 96/96 Fail: 0/96 Skip: 0/96
+ [Valid] EF - Deneb - Operations - Voluntary Exit - default_exit_epoch_subsequent_exit OK
+ [Valid] EF - Deneb - Operations - Voluntary Exit - success_exit_queue__min_churn OK
+ [Valid] EF - Deneb - Operations - Voluntary Exit - success_exit_queue__scaled_churn OK
+ [Valid] EF - Deneb - Operations - Voluntary Exit - voluntary_exit_with_current_fork_vers OK
+ [Valid] EF - Deneb - Operations - Voluntary Exit - voluntary_exit_with_previous_fork_ver OK
+ [Valid] EF - Deneb - Operations - Voluntary Exit - voluntary_exit_with_previous_fork_ver OK
+ [Valid] EF - Phase 0 - Operations - Voluntary Exit - basic OK
+ [Valid] EF - Phase 0 - Operations - Voluntary Exit - default_exit_epoch_subsequent_exit OK
@@ -2865,4 +2876,4 @@ OK: 68/68 Fail: 0/68 Skip: 0/68
OK: 102/102 Fail: 0/102 Skip: 0/102
---TOTAL---
OK: 2547/2555 Fail: 0/2555 Skip: 8/2555
OK: 2558/2566 Fail: 0/2566 Skip: 8/2566


@@ -54,11 +54,14 @@ endif
# unconditionally built by the default Make target
# TODO re-enable ncli_query if/when it works again
TOOLS_CORE_CUSTOMCOMPILE := \
libnimbus_lc.a
TOOLS_CORE := \
deposit_contract \
resttest \
logtrace \
mev_mock \
ncli \
ncli_db \
ncli_split_keystore \
@@ -69,7 +72,8 @@ TOOLS_CORE := \
nimbus_validator_client \
nimbus_signing_node \
validator_db_aggregator \
ncli_testnet
ncli_testnet \
$(TOOLS_CORE_CUSTOMCOMPILE)
# This TOOLS/TOOLS_CORE decomposition is a workaround so nimbus_beacon_node can
# build on its own, and if/when that becomes a non-issue, it can be recombined
@@ -280,7 +284,8 @@ XML_TEST_BINARIES := \
# test suite
TEST_BINARIES := \
state_sim \
block_sim
block_sim \
test_libnimbus_lc
.PHONY: $(TEST_BINARIES) $(XML_TEST_BINARIES) force_build_alone_all_tests
# Preset-dependent tests
@@ -392,14 +397,23 @@ endif
rm -rf 0000-*.json t_slashprot_migration.* *.log block_sim_db
for TEST_BINARY in $(TEST_BINARIES); do \
PARAMS=""; \
REDIRECT=""; \
if [[ "$${TEST_BINARY}" == "state_sim" ]]; then PARAMS="--validators=8000 --slots=160"; \
elif [[ "$${TEST_BINARY}" == "block_sim" ]]; then PARAMS="--validators=8000 --slots=160"; \
elif [[ "$${TEST_BINARY}" == "test_libnimbus_lc" ]]; then REDIRECT="$${TEST_BINARY}.log"; \
fi; \
echo -e "\nRunning $${TEST_BINARY} $${PARAMS}\n"; \
build/$${TEST_BINARY} $${PARAMS} || { \
echo -e "\n$${TEST_BINARY} $${PARAMS} failed; Last 50 lines from the log:"; \
tail -n50 "$${TEST_BINARY}.log"; exit 1; \
}; \
if [[ "$${REDIRECT}" != "" ]]; then \
build/$${TEST_BINARY} $${PARAMS} > "$${REDIRECT}" && echo "OK" || { \
echo -e "\n$${TEST_BINARY} $${PARAMS} failed; Last 50 lines from the log:"; \
tail -n50 "$${TEST_BINARY}.log"; exit 1; \
}; \
else \
build/$${TEST_BINARY} $${PARAMS} || { \
echo -e "\n$${TEST_BINARY} $${PARAMS} failed; Last 50 lines from the log:"; \
tail -n50 "$${TEST_BINARY}.log"; exit 1; \
}; \
fi; \
done; \
rm -rf 0000-*.json t_slashprot_migration.* *.log block_sim_db
@@ -419,7 +433,7 @@ build/generate_makefile: tools/generate_makefile.nim | deps-common
# It also requires Make to pass open file descriptors to the GCC process,
# which is not possible if we let Nim handle this, so we generate and use a
# makefile instead.
$(TOOLS): | build deps
$(filter-out $(TOOLS_CORE_CUSTOMCOMPILE),$(TOOLS)): | build deps
+ for D in $(TOOLS_DIRS); do [ -e "$${D}/$@.nim" ] && TOOL_DIR="$${D}" && break; done && \
echo -e $(BUILD_MSG) "build/$@" && \
MAKE="$(MAKE)" V="$(V)" $(ENV_SCRIPT) scripts/compile_nim_program.sh $@ "$${TOOL_DIR}/$@.nim" $(NIM_PARAMS) && \
@@ -728,6 +742,34 @@ gnosis-chain-dev-deposit: | gnosis-build deposit_contract
clean-gnosis-chain:
$(call CLEAN_NETWORK,gnosis-chain)
###
### libnimbus_lc
###
libnimbus_lc.a: | build deps
+ echo -e $(BUILD_MSG) "build/$@" && \
set -x && \
rm -f build/$@ && \
$(ENV_SCRIPT) $(NIMC) c -d:disable_libbacktrace -d:release --app:staticlib --noMain --nimcache:nimcache/libnimbus_lc_static -o:build/$@ $(NIM_PARAMS) beacon_chain/libnimbus_lc/libnimbus_lc.nim $(SILENCE_WARNINGS) && \
echo -e $(BUILD_END_MSG) "build/$@"
# `-Wno-maybe-uninitialized` in Linux: https://github.com/nim-lang/Nim/issues/22246
test_libnimbus_lc: libnimbus_lc.a
+ echo -e $(BUILD_MSG) "build/$@" && \
set -x && \
case "$$(uname)" in \
Darwin) \
clang -D__DIR__="\"beacon_chain/libnimbus_lc\"" --std=c17 -Weverything -Werror -Wno-declaration-after-statement -Wno-nullability-extension -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX.sdk -o build/test_libnimbus_lc beacon_chain/libnimbus_lc/test_libnimbus_lc.c build/libnimbus_lc.a -framework Security; \
;; \
MINGW64_*) \
gcc -D__DIR__="\"beacon_chain/libnimbus_lc\"" --std=c17 -Wall -Wextra -pedantic -Werror -pedantic-errors -flto -o build/test_libnimbus_lc -D_CRT_SECURE_NO_WARNINGS beacon_chain/libnimbus_lc/test_libnimbus_lc.c build/libnimbus_lc.a; \
;; \
*) \
gcc -D__DIR__="\"beacon_chain/libnimbus_lc\"" --std=c17 -Wall -Wextra -pedantic -Werror -pedantic-errors -Wno-maybe-uninitialized -flto -o build/test_libnimbus_lc beacon_chain/libnimbus_lc/test_libnimbus_lc.c build/libnimbus_lc.a; \
;; \
esac && \
echo -e $(BUILD_END_MSG) "build/$@"
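The recipe above also serves as a reference for linking third-party code against the static library outside this Makefile. A minimal sketch for a Linux host, assuming a repository checkout and a hypothetical `my_app.c` that includes `libnimbus_lc.h`; it mirrors the Linux branch of the `test_libnimbus_lc` recipe:

```sh
# Build the static library first (produces build/libnimbus_lc.a).
make libnimbus_lc.a

# Link a standalone program against it; my_app.c is a placeholder for
# your own source file including libnimbus_lc.h.
gcc --std=c17 -Wall -Wextra \
  -I beacon_chain/libnimbus_lc \
  -o build/my_app my_app.c build/libnimbus_lc.a
```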
###
### Other
###
@@ -768,9 +810,9 @@ book:
"$(MAKE)" -C docs book
auditors-book:
[[ "$$(mdbook --version)" = "mdbook v0.4.18" ]] || { echo "'mdbook v0.4.18' not found in PATH. See 'docs/README.md'. Aborting."; exit 1; }
[[ "$$(mdbook --version)" = "mdbook v0.4.28" ]] || { echo "'mdbook v0.4.28' not found in PATH. See 'docs/README.md'. Aborting."; exit 1; }
[[ "$$(mdbook-toc --version)" == "mdbook-toc 0.8.0" ]] || { echo "'mdbook-toc 0.8.0' not found in PATH. See 'docs/README.md'. Aborting."; exit 1; }
[[ "$$(mdbook-open-on-gh --version)" == "mdbook-open-on-gh 2.1.0" ]] || { echo "'mdbook-open-on-gh 2.1.0' not found in PATH. See 'docs/README.md'. Aborting."; exit 1; }
[[ "$$(mdbook-open-on-gh --version)" == "mdbook-open-on-gh 2.3.3" ]] || { echo "'mdbook-open-on-gh 2.3.3' not found in PATH. See 'docs/README.md'. Aborting."; exit 1; }
[[ "$$(mdbook-admonish --version)" == "mdbook-admonish 1.7.0" ]] || { echo "'mdbook-open-on-gh 1.7.0' not found in PATH. See 'docs/README.md'. Aborting."; exit 1; }
cd docs/the_auditors_handbook && \
mdbook build


@@ -16,7 +16,7 @@ from ./spec/datatypes/capella import
from ./spec/datatypes/deneb import ExecutionPayloadHeader
type
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#beaconstate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#beaconstate
# Memory-representation-equivalent to a phase0 BeaconState for in-place SSZ
# reading and writing
Phase0BeaconStateNoImmutableValidators* = object
@@ -69,7 +69,7 @@ type
current_justified_checkpoint*: Checkpoint
finalized_checkpoint*: Checkpoint
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/beacon-chain.md#beaconstate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/beacon-chain.md#beaconstate
# Memory-representation-equivalent to an Altair BeaconState for in-place SSZ
# reading and writing
AltairBeaconStateNoImmutableValidators* = object
@@ -186,7 +186,7 @@ type
# Execution
latest_execution_payload_header*: bellatrix.ExecutionPayloadHeader # [New in Bellatrix]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/capella/beacon-chain.md#beaconstate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/capella/beacon-chain.md#beaconstate
# with indirect changes via ExecutionPayload
# Memory-representation-equivalent to a Capella BeaconState for in-place SSZ
# reading and writing
@@ -258,7 +258,7 @@ type
HashList[HistoricalSummary,
Limit HISTORICAL_ROOTS_LIMIT] # [New in Capella]
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/capella/beacon-chain.md#beaconstate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/capella/beacon-chain.md#beaconstate
# with indirect changes via ExecutionPayloadHeader
# Memory-representation-equivalent to a Deneb BeaconState for in-place SSZ
# reading and writing


@@ -140,7 +140,7 @@ type
template disposeSafe(s: untyped): untyped =
if distinctBase(s) != nil:
s.dispose()
s = nil
s = typeof(s)(nil)
proc initHeadersStore(
backend: SqStoreRef,


@@ -96,6 +96,7 @@ type
Table[ValidatorPubKey, SignedValidatorRegistrationV1]
dutyValidatorCount*: int
## Number of validators that we've checked for activation
processingDelay*: Opt[Duration]
const
MaxEmptySlotCount* = uint64(10*60) div SECONDS_PER_SLOT
@@ -115,14 +116,34 @@ template rng*(node: BeaconNode): ref HmacDrbgContext =
proc currentSlot*(node: BeaconNode): Slot =
node.beaconClock.now.slotOrZero
func getPayloadBuilderAddress*(config: BeaconNodeConf): Opt[string] =
if config.payloadBuilderEnable:
Opt.some config.payloadBuilderUrl
else:
Opt.none(string)
proc getPayloadBuilderClient*(
node: BeaconNode, validator_index: uint64): RestResult[RestClientRef] =
if node.config.payloadBuilderEnable:
# Logging done in caller
let res = RestClientRef.new(node.config.payloadBuilderUrl)
if res.isOk and res.get.isNil:
err "Got nil payload builder REST client reference"
else:
res
if not node.config.payloadBuilderEnable:
return err "Payload builder globally disabled"
let
defaultPayloadBuilderAddress = node.config.getPayloadBuilderAddress
pubkey = withState(node.dag.headState):
if validator_index >= forkyState.data.validators.lenu64:
return err "Validator index too high"
forkyState.data.validators.item(validator_index).pubkey
payloadBuilderAddress =
if node.keyManagerHost.isNil:
defaultPayloadBuilderAddress
else:
node.keyManagerHost[].getBuilderConfig(pubkey).valueOr:
defaultPayloadBuilderAddress
if payloadBuilderAddress.isNone:
return err "Payload builder disabled"
let res = RestClientRef.new(payloadBuilderAddress.get)
if res.isOk and res.get.isNil:
err "Got nil payload builder REST client reference"
else:
err "Payload builder globally disabled"
res


@@ -51,8 +51,7 @@ proc initLightClient*(
if not blckPayload.block_hash.isZero:
# engine_newPayloadV1
discard await node.elManager.newExecutionPayload(
blck.message.body)
discard await node.elManager.newExecutionPayload(blck.message)
# Retain optimistic head for other `forkchoiceUpdated` callers.
# May temporarily block `forkchoiceUpdatedV1` calls, e.g., Geth:


@@ -238,6 +238,11 @@ type
desc: "Number of worker threads (\"0\" = use as many threads as there are CPU cores available)"
name: "num-threads" .}: int
useOldStabilitySubnets* {.
hidden
defaultValue: true
name: "debug-use-old-attestation-stability-subnets" .}: bool
# https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.3/src/engine/authentication.md#key-distribution
jwtSecret* {.
desc: "A file containing the hex-encoded 256 bit secret key to be used for verifying/generating JWT tokens"
@@ -305,6 +310,12 @@ type
defaultValue: false
name: "enr-auto-update" .}: bool
enableYamux* {.
hidden
desc: "Enable the Yamux multiplexer"
defaultValue: false
name: "enable-yamux" .}: bool
weakSubjectivityCheckpoint* {.
desc: "Weak subjectivity checkpoint in the format block_root:epoch_number"
name: "weak-subjectivity-checkpoint" .}: Option[Checkpoint]
@@ -1245,7 +1256,7 @@ proc readValue*(r: var TomlReader, value: var GraffitiBytes)
{.raises: [Defect, SerializationError, IOError].} =
try:
value = GraffitiBytes.init(r.readValue(string))
except ValueError as err:
except ValueError:
r.raiseUnexpectedValue("A printable string or 0x-prefixed hex-encoded raw bytes expected")
proc readValue*(r: var TomlReader, val: var NatConfig)


@@ -102,6 +102,12 @@ type LightClientConf* = object
defaultValue: false
name: "enr-auto-update" .}: bool
enableYamux* {.
hidden
desc: "Enable the Yamux multiplexer"
defaultValue: false
name: "enable-yamux" .}: bool
agentString* {.
defaultValue: "nimbus",
desc: "Node agent string which is used as identifier in network"


@@ -5,7 +5,7 @@ This folder holds the various consensus object pools needed for a blockchain cli
Objects in those pools have passed the "gossip validation" filter according
to specs:
- blocks: https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/p2p-interface.md#beacon_block
- aggregate attestations: https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof
- aggregate attestations: https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof
- unaggregated attestation: https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/p2p-interface.md#beacon_attestation_subnet_id
- voluntary exits: https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.1/specs/phase0/p2p-interface.md#voluntary_exit
- Attester slashings: https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/p2p-interface.md#attester_slashing


@@ -772,7 +772,7 @@ proc getBeaconHead*(
finalizedExecutionPayloadHash =
pool.dag.loadExecutionBlockHash(pool.dag.finalizedHead.blck)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/fork_choice/safe-block.md#get_safe_execution_payload_hash
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/fork_choice/safe-block.md#get_safe_execution_payload_hash
safeBlockRoot = pool.forkChoice.get_safe_beacon_block_root()
safeBlock = pool.dag.getBlockRef(safeBlockRoot)
safeExecutionPayloadHash =


@@ -34,7 +34,7 @@ proc addResolvedHeadBlock(
dag: ChainDAGRef,
state: var ForkedHashedBeaconState,
trustedBlock: ForkyTrustedSignedBeaconBlock,
blockVerified: bool,
executionValid: bool,
parent: BlockRef, cache: var StateCache,
onBlockAdded: OnForkyBlockAdded,
stateDataDur, sigVerifyDur, stateVerifyDur: Duration
@@ -46,7 +46,7 @@ proc addResolvedHeadBlock(
let
blockRoot = trustedBlock.root
blockRef = BlockRef.init(
blockRoot, executionValid = blockVerified, trustedBlock.message)
blockRoot, executionValid = executionValid, trustedBlock.message)
startTick = Moment.now()
link(parent, blockRef)
@@ -80,8 +80,7 @@ proc addResolvedHeadBlock(
debug "Block resolved",
blockRoot = shortLog(blockRoot),
blck = shortLog(trustedBlock.message),
blockVerified,
heads = dag.heads.len(),
executionValid, heads = dag.heads.len(),
stateDataDur, sigVerifyDur, stateVerifyDur,
putBlockDur = putBlockTick - startTick,
epochRefDur = epochRefTick - putBlockTick
@@ -153,17 +152,13 @@ proc advanceClearanceState*(dag: ChainDAGRef) =
debug "Prepared clearance state for next block",
next, updateStateDur = Moment.now() - startTick
proc addHeadBlock*(
dag: ChainDAGRef, verifier: var BatchVerifier,
signedBlock: ForkySignedBeaconBlock,
blockVerified: bool,
onBlockAdded: OnForkyBlockAdded
): Result[BlockRef, VerifierError] =
## Try adding a block to the chain, verifying first that it passes the state
## transition function and contains correct cryptographic signature.
proc checkHeadBlock*(
dag: ChainDAGRef, signedBlock: ForkySignedBeaconBlock):
Result[BlockRef, VerifierError] =
## Perform pre-addHeadBlock sanity checks returning the parent to use when
## calling `addHeadBlock`.
##
## Cryptographic checks can be skipped by adding skipBlsValidation to
## dag.updateFlags
## This function must be called before `addHeadBlockWithParent`.
logScope:
blockRoot = shortLog(signedBlock.root)
blck = shortLog(signedBlock.message)
@@ -186,14 +181,14 @@ proc addHeadBlock*(
debug "Duplicate block"
return err(VerifierError.Duplicate)
# Block is older than finalized, but different from the block in our
# canonical history: it must be from an unviable branch
debug "Block from unviable fork",
existing = shortLog(existing.get()),
finalizedHead = shortLog(dag.finalizedHead),
tail = shortLog(dag.tail)
# Block is older than finalized, but different from the block in our
# canonical history: it must be from an unviable branch
debug "Block from unviable fork",
existing = shortLog(existing.get()),
finalizedHead = shortLog(dag.finalizedHead),
tail = shortLog(dag.tail)
return err(VerifierError.UnviableFork)
return err(VerifierError.UnviableFork)
# Check non-finalized blocks as well
if dag.containsForkBlock(blockRoot):
@@ -222,6 +217,29 @@ proc addHeadBlock*(
return err(VerifierError.Invalid)
ok(parent)
proc addHeadBlockWithParent*(
dag: ChainDAGRef, verifier: var BatchVerifier,
signedBlock: ForkySignedBeaconBlock, parent: BlockRef,
executionValid: bool, onBlockAdded: OnForkyBlockAdded
): Result[BlockRef, VerifierError] =
## Try adding a block to the chain, verifying first that it passes the state
## transition function and contains correct cryptographic signature.
##
## Cryptographic checks can be skipped by adding skipBlsValidation to
## dag.updateFlags.
##
## The parent must be obtained using `checkHeadBlock` to ensure complete
## verification.
logScope:
blockRoot = shortLog(signedBlock.root)
blck = shortLog(signedBlock.message)
signature = shortLog(signedBlock.signature)
template blck(): untyped = signedBlock.message # shortcuts without copy
template blockRoot(): untyped = signedBlock.root
# The block is resolved, now it's time to validate it to ensure that the
# blocks we add to the database are clean for the given state
let startTick = Moment.now()
@@ -276,7 +294,7 @@ proc addHeadBlock*(
ok addResolvedHeadBlock(
dag, dag.clearanceState,
signedBlock.asTrusted(),
blockVerified = blockVerified,
executionValid,
parent, cache,
onBlockAdded,
stateDataDur = stateDataTick - startTick,
@@ -286,10 +304,21 @@ proc addHeadBlock*(
proc addHeadBlock*(
dag: ChainDAGRef, verifier: var BatchVerifier,
signedBlock: ForkySignedBeaconBlock,
executionValid: bool,
onBlockAdded: OnForkyBlockAdded
): Result[BlockRef, VerifierError] =
addHeadBlock(
dag, verifier, signedBlock, blockVerified = true, onBlockAdded)
addHeadBlockWithParent(
dag, verifier, signedBlock, ? dag.checkHeadBlock(signedBlock),
executionValid, onBlockAdded)
proc addHeadBlock*(
dag: ChainDAGRef, verifier: var BatchVerifier,
signedBlock: ForkySignedBeaconBlock,
onBlockAdded: OnForkyBlockAdded
): Result[BlockRef, VerifierError] =
addHeadBlockWithParent(
dag, verifier, signedBlock, ? dag.checkHeadBlock(signedBlock),
executionValid = true, onBlockAdded)
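Not part of the diff: a minimal sketch of how a caller is expected to combine the two new procs, assuming the module's usual imports are in scope and that the caller itself returns a `Result` so `?` can propagate `VerifierError`:
proc addBlockSketch(
    dag: ChainDAGRef, verifier: var BatchVerifier,
    signedBlock: ForkySignedBeaconBlock,
    onBlockAdded: OnForkyBlockAdded): Result[BlockRef, VerifierError] =
  # Sanity checks first - returns the parent on success
  let parent = ? dag.checkHeadBlock(signedBlock)
  # ... the caller may, for example, consult the EL here to decide
  # `executionValid` before committing the block ...
  dag.addHeadBlockWithParent(
    verifier, signedBlock, parent, executionValid = true, onBlockAdded)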
proc addBackfillBlock*(
dag: ChainDAGRef,

View File

@@ -134,7 +134,7 @@ func link*(parent, child: BlockRef) =
func get_ancestor*(blck: BlockRef, slot: Slot,
maxDepth = 100'i64 * 365 * 24 * 60 * 60 div SECONDS_PER_SLOT.int):
BlockRef =
## https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/fork-choice.md#get_ancestor
## https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/fork-choice.md#get_ancestor
## Return the most recent block as of the time at `slot` that is not more
## recent than `blck` itself
if isNil(blck): return nil
@@ -155,30 +155,6 @@ func get_ancestor*(blck: BlockRef, slot: Slot,
blck = blck.parent
func commonAncestor*(a, b: BlockRef, lowSlot: Slot): Opt[BlockRef] =
## Return the common ancestor with highest slot of two non-nil `BlockRef`,
## limited by `lowSlot` (`err` if exceeded).
doAssert a != nil
doAssert b != nil
if a.slot < lowSlot or b.slot < lowSlot:
return err()
var
aa = a
bb = b
while aa != bb:
if aa.slot >= bb.slot:
aa = aa.parent
doAssert aa != nil, "All `BlockRef` lead to `finalizedHead`"
if aa.slot < lowSlot:
return err()
else:
bb = bb.parent
doAssert bb != nil, "All `BlockRef` lead to `finalizedHead`"
if bb.slot < lowSlot:
return err()
ok aa
func atSlot*(blck: BlockRef, slot: Slot): BlockSlot =
## Return a BlockSlot at a given slot, with the block set to the closest block
## available. If slot comes from before the block, a suitable block ancestor

View File

@@ -279,43 +279,14 @@ type
# balances, as used in fork choice
effective_balances_bytes*: seq[byte]
# TODO when Nim 1.2 support is dropped, make these generic. 1.2 generates
# invalid C code, which gcc refuses to compile. Example test case:
# type
# OnBlockAdded[T] = proc(x: T)
# OnPhase0BlockAdded = OnBlockAdded[int]
# proc f(x: OnPhase0BlockAdded) = discard
# const nilCallback = OnPhase0BlockAdded(nil)
# f(nilCallback)
OnPhase0BlockAdded* = proc(
blckRef: BlockRef,
blck: phase0.TrustedSignedBeaconBlock,
epochRef: EpochRef,
unrealized: FinalityCheckpoints) {.gcsafe, raises: [Defect].}
OnAltairBlockAdded* = proc(
blckRef: BlockRef,
blck: altair.TrustedSignedBeaconBlock,
epochRef: EpochRef,
unrealized: FinalityCheckpoints) {.gcsafe, raises: [Defect].}
OnBellatrixBlockAdded* = proc(
blckRef: BlockRef,
blck: bellatrix.TrustedSignedBeaconBlock,
epochRef: EpochRef,
unrealized: FinalityCheckpoints) {.gcsafe, raises: [Defect].}
OnCapellaBlockAdded* = proc(
blckRef: BlockRef,
blck: capella.TrustedSignedBeaconBlock,
epochRef: EpochRef,
unrealized: FinalityCheckpoints) {.gcsafe, raises: [Defect].}
OnDenebBlockAdded* = proc(
blckRef: BlockRef,
blck: deneb.TrustedSignedBeaconBlock,
epochRef: EpochRef,
OnBlockAdded[T] = proc(
blckRef: BlockRef, blck: T, epochRef: EpochRef,
unrealized: FinalityCheckpoints) {.gcsafe, raises: [Defect].}
OnPhase0BlockAdded* = OnBlockAdded[phase0.TrustedSignedBeaconBlock]
OnAltairBlockAdded* = OnBlockAdded[altair.TrustedSignedBeaconBlock]
OnBellatrixBlockAdded* = OnBlockAdded[bellatrix.TrustedSignedBeaconBlock]
OnCapellaBlockAdded* = OnBlockAdded[capella.TrustedSignedBeaconBlock]
OnDenebBlockAdded* = OnBlockAdded[deneb.TrustedSignedBeaconBlock]
OnForkyBlockAdded* =
OnPhase0BlockAdded | OnAltairBlockAdded | OnBellatrixBlockAdded |

View File

@@ -77,7 +77,7 @@ type
func init*(T: type Quarantine): T =
T()
func checkMissing*(quarantine: var Quarantine): seq[FetchRecord] =
func checkMissing*(quarantine: var Quarantine, max: int): seq[FetchRecord] =
## Return a list of blocks that we should try to resolve from other clients -
## to be called periodically but not too often (once per slot?)
var done: seq[Eth2Digest]
@@ -85,16 +85,17 @@ func checkMissing*(quarantine: var Quarantine): seq[FetchRecord] =
for k, v in quarantine.missing.mpairs():
if v.tries > 8:
done.add(k)
else:
inc v.tries
for k in done:
quarantine.missing.del(k)
# simple (simplistic?) exponential backoff for retries..
for k, v in quarantine.missing:
for k, v in quarantine.missing.mpairs:
v.tries += 1
if countOnes(v.tries.uint64) == 1:
result.add(FetchRecord(root: k))
if result.len >= max:
break
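A hedged illustration (not part of the diff) of the power-of-two backoff above: `countOnes` is 1 exactly when `tries` is a power of two, so a still-missing root is re-requested on attempts 1, 2, 4, 8, ... until it is resolved or dropped.
import std/bitops

for tries in 1'u64 .. 16'u64:
  if countOnes(tries) == 1:
    echo "re-request on attempt ", tries  # prints 1, 2, 4, 8, 16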
# TODO stew/sequtils2
template anyIt(s, pred: untyped): bool =
@@ -263,7 +264,7 @@ func addOrphan*(
if parent_root in quarantine.unviable:
quarantine.unviable[signedBlock.root] = ()
return ok()
return err("block parent unviable")
# Even if the quarantine is full, we need to schedule its parent for
# downloading or we'll never get to the bottom of things

View File

@@ -1330,7 +1330,45 @@ proc getFinalizedEpochRef*(dag: ChainDAGRef): EpochRef =
dag.finalizedHead.blck, dag.finalizedHead.slot.epoch, false).expect(
"getEpochRef for finalized head should always succeed")
func ancestorSlotForShuffling*(
proc getBlockIdAtSlot(
dag: ChainDAGRef, state: ForkyHashedBeaconState, slot: Slot): Opt[BlockId] =
if slot >= state.data.slot:
Opt.some state.latest_block_id
elif state.data.slot <= slot + SLOTS_PER_HISTORICAL_ROOT:
dag.getBlockId(state.data.get_block_root_at_slot(slot))
else:
Opt.none(BlockId)
proc ancestorSlot*(
dag: ChainDAGRef, state: ForkyHashedBeaconState, bid: BlockId,
lowSlot: Slot): Opt[Slot] =
## Return common ancestor slot of `bid` and `state`, if at least `lowSlot`.
## Return `none` if no common ancestor is found with slot >= `lowSlot`.
if state.data.slot < lowSlot or bid.slot < lowSlot:
return Opt.none(Slot)
var stateBid = ? dag.getBlockIdAtSlot(state, bid.slot)
if stateBid.slot < lowSlot:
return Opt.none(Slot)
var blockBid = (? dag.atSlot(bid, stateBid.slot)).bid
if blockBid.slot < lowSlot:
return Opt.none(Slot)
while stateBid != blockBid:
if stateBid.slot >= blockBid.slot:
stateBid = ? dag.getBlockIdAtSlot(
state, min(blockBid.slot, stateBid.slot - 1))
if stateBid.slot < lowSlot:
return Opt.none(Slot)
else:
blockBid = ? dag.parent(blockBid)
if blockBid.slot < lowSlot:
return Opt.none(Slot)
Opt.some stateBid.slot
proc ancestorSlotForAttesterShuffling*(
dag: ChainDAGRef, state: ForkyHashedBeaconState,
blck: BlockRef, epoch: Epoch): Opt[Slot] =
## Return slot of `blck` ancestor to which `state` can be rewound
@@ -1342,163 +1380,74 @@ func ancestorSlotForShuffling*(
const numDelayEpochs = compute_activation_exit_epoch(GENESIS_EPOCH).uint64
let
lowEpoch = max(epoch, (numDelayEpochs - 1).Epoch) - (numDelayEpochs - 1)
lowSlot = lowEpoch.start_slot
if state.data.slot < lowSlot or blck.slot < lowSlot:
return err()
ancestorSlot = ? dag.ancestorSlot(state, blck.bid, lowEpoch.start_slot)
Opt.some min(ancestorSlot, epoch.attester_dependent_slot)
# Check that state is related to the information stored in the DAG,
# and determine the corresponding `BlockRef`, or `finalizedHead` if finalized
let
stateBid = state.latest_block_id
stateBlck =
if dag.finalizedHead.blck == nil:
return err()
elif stateBid.slot > dag.finalizedHead.blck.slot:
? dag.getBlockRef(stateBid.root)
elif stateBid.slot == dag.finalizedHead.blck.slot:
if stateBid.root != dag.finalizedHead.blck.root:
return err()
dag.finalizedHead.blck
else:
let bsi = ? dag.getBlockIdAtSlot(stateBid.slot)
if bsi.bid != stateBid:
return err()
dag.finalizedHead.blck
type AttesterRandaoMix = tuple[dependentBid: BlockId, mix: Eth2Digest]
# Check that history up to `lowSlot` is included in `state`,
# otherwise `get_active_validator_indices` may still change
if lowSlot <= dag.finalizedHead.blck.slot:
let
bsi = ? dag.getBlockIdAtSlot(lowSlot)
stateLowBlockRoot =
if state.data.slot == lowSlot:
stateBid.root
else:
state.data.get_block_root_at_slot(lowSlot)
if stateLowBlockRoot != bsi.bid.root:
return err()
# Compute ancestor slot for starting RANDAO recovery
let
ancestorBlck =
if stateBlck == dag.finalizedHead.blck:
dag.finalizedHead.blck
else:
? commonAncestor(blck, stateBlck, lowSlot)
dependentSlot = epoch.attester_dependent_slot
doAssert dependentSlot >= lowSlot
ok min(min(stateBid.slot, ancestorBlck.slot), dependentSlot)
proc mixRandao(
dag: ChainDAGRef, mix: var Eth2Digest,
bid: BlockId): Opt[void] =
## Mix in/out the RANDAO reveal from the given block.
let bdata = ? dag.getForkedBlock(bid)
withBlck(bdata): # See `process_randao` / `process_randao_mixes_reset`
mix.data.mxor eth2digest(blck.message.body.randao_reveal.toRaw()).data
ok()
proc computeRandaoMix*(
proc computeAttesterRandaoMix(
dag: ChainDAGRef, state: ForkyHashedBeaconState,
blck: BlockRef, epoch: Epoch
): Opt[tuple[dependentBid: BlockId, mix: Eth2Digest]] =
blck: BlockRef, epoch: Epoch): Opt[AttesterRandaoMix] =
## Compute the requested RANDAO mix for `blck@epoch` based on `state`.
## `state` must have the correct `get_active_validator_indices` for `epoch`.
## RANDAO reveals of blocks from `state.data.slot` back to `ancestorSlot` are
## mixed out from `state.data.randao_mixes`, and RANDAO reveals from blocks
## up through `epoch.attester_dependent_slot` are mixed in.
## If `state` has unviable `get_active_validator_indices`, return `none`.
# Check `state` has locked-in `get_active_validator_indices` for `epoch`
let
stateSlot = state.data.slot
dependentSlot = epoch.attester_dependent_slot
# Check `state` has locked-in `get_active_validator_indices` for `epoch`
ancestorSlot = ? dag.ancestorSlotForShuffling(state, blck, epoch)
ancestorSlot = ? dag.ancestorSlotForAttesterShuffling(state, blck, epoch)
doAssert ancestorSlot <= stateSlot
doAssert ancestorSlot <= dependentSlot
# Load initial mix
var mix {.noinit.}: Eth2Digest
# Determine block for obtaining RANDAO mix
let
stateEpoch = stateSlot.epoch
ancestorEpoch = ancestorSlot.epoch
highRandaoSlot =
# `randao_mixes[ancestorEpoch]`
if stateEpoch == ancestorEpoch:
stateSlot
else:
(ancestorEpoch + 1).start_slot - 1
startSlot =
if ancestorEpoch == GENESIS_EPOCH:
# Can only move backward
mix = state.data.get_randao_mix(ancestorEpoch)
highRandaoSlot
else:
# `randao_mixes[ancestorEpoch - 1]`
let lowRandaoSlot = ancestorEpoch.start_slot - 1
if highRandaoSlot - ancestorSlot < ancestorSlot - lowRandaoSlot:
mix = state.data.get_randao_mix(ancestorEpoch)
highRandaoSlot
else:
mix = state.data.get_randao_mix(ancestorEpoch - 1)
lowRandaoSlot
slotsToMix =
if startSlot > ancestorSlot:
(ancestorSlot + 1) .. startSlot
else:
(startSlot + 1) .. ancestorSlot
highRoot =
if slotsToMix.b == stateSlot:
state.latest_block_root
else:
doAssert slotsToMix.b < stateSlot
state.data.get_block_root_at_slot(slotsToMix.b)
# Move `mix` from `startSlot` to `ancestorSlot`
var bid =
if slotsToMix.b >= dag.finalizedHead.slot:
var b = ? dag.getBlockRef(highRoot)
let lowSlot = max(slotsToMix.a, dag.finalizedHead.slot)
while b.bid.slot > lowSlot:
? dag.mixRandao(mix, b.bid)
b = b.parent
dependentBid =
if dependentSlot >= dag.finalizedHead.slot:
var b = blck.get_ancestor(dependentSlot)
doAssert b != nil
b.bid
else:
var highSlot = slotsToMix.b
const availableSlots = SLOTS_PER_HISTORICAL_ROOT
let lowSlot = max(state.data.slot, availableSlots.Slot) - availableSlots
while highSlot > lowSlot and
state.data.get_block_root_at_slot(highSlot - 1) == highRoot:
dec highSlot
if highSlot + SLOTS_PER_HISTORICAL_ROOT > state.data.slot:
BlockId(slot: highSlot, root: highRoot)
b.bid
else:
let bsi = ? dag.getBlockIdAtSlot(highSlot)
doAssert bsi.bid.root == highRoot
let bsi = ? dag.getBlockIdAtSlot(dependentSlot)
bsi.bid
while bid.slot >= slotsToMix.a:
? dag.mixRandao(mix, bid)
bid = ? dag.parent(bid)
dependentBdata = ? dag.getForkedBlock(dependentBid)
var mix {.noinit.}: Eth2Digest
# Move `mix` from `ancestorSlot` to `dependentSlot`
var dependentBid {.noinit.}: BlockId
bid =
if dependentSlot >= dag.finalizedHead.slot:
var b = blck.get_ancestor(dependentSlot)
doAssert b != nil
dependentBid = b.bid
let lowSlot = max(ancestorSlot, dag.finalizedHead.slot)
while b.bid.slot > lowSlot:
? dag.mixRandao(mix, b.bid)
b = b.parent
doAssert b != nil
b.bid
else:
let bsi = ? dag.getBlockIdAtSlot(dependentSlot)
dependentBid = bsi.bid
bsi.bid
while bid.slot > ancestorSlot:
? dag.mixRandao(mix, bid)
bid = ? dag.parent(bid)
# If `dependentBid` is post merge, RANDAO information is available
withBlck(dependentBdata):
when consensusFork >= ConsensusFork.Bellatrix:
if blck.message.is_execution_block:
mix = eth2digest(blck.message.body.randao_reveal.toRaw())
mix.data.mxor blck.message.body.execution_payload.prev_randao.data
return ok (dependentBid: dependentBid, mix: mix)
# RANDAO mix has to be recomputed from `blck` and `state`
proc mixToAncestor(highBid: BlockId): Opt[void] =
## Mix in/out RANDAO reveals back to `ancestorSlot`
var bid = highBid
while bid.slot > ancestorSlot:
let bdata = ? dag.getForkedBlock(bid)
withBlck(bdata): # See `process_randao` / `process_randao_mixes_reset`
mix.data.mxor eth2digest(blck.message.body.randao_reveal.toRaw()).data
bid = ? dag.parent(bid)
ok()
# Mix in RANDAO from `blck`
if ancestorSlot < dependentBid.slot:
withBlck(dependentBdata):
mix = eth2digest(blck.message.body.randao_reveal.toRaw())
? mixToAncestor(? dag.parent(dependentBid))
else:
mix.reset()
# Mix in RANDAO from `state`
let ancestorEpoch = ancestorSlot.epoch
if ancestorEpoch + EPOCHS_PER_HISTORICAL_VECTOR <= stateSlot.epoch:
return Opt.none(AttesterRandaoMix)
let mixRoot = state.dependent_root(ancestorEpoch + 1)
if mixRoot.isZero:
return Opt.none(AttesterRandaoMix)
? mixToAncestor(? dag.getBlockId(mixRoot))
mix.data.mxor state.data.get_randao_mix(ancestorEpoch).data
ok (dependentBid: dependentBid, mix: mix)
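The mixing "in" and "out" above works because the mix is an XOR accumulator, so applying the same reveal digest twice cancels it. A minimal sketch, not from the diff, assuming the usual `Eth2Digest`, `eth2digest` and `mxor` helpers are imported:
var mix: Eth2Digest                    # zero-initialised accumulator
let reveal = eth2digest([1'u8, 2, 3])  # stand-in for hashing a randao_reveal
mix.data.mxor reveal.data              # mix the reveal in
mix.data.mxor reveal.data              # mixing it again takes it back out
doAssert mix == default(Eth2Digest)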
@@ -1506,7 +1455,7 @@ proc computeShufflingRefFromState*(
dag: ChainDAGRef, state: ForkyHashedBeaconState,
blck: BlockRef, epoch: Epoch): Opt[ShufflingRef] =
let (dependentBid, mix) =
? dag.computeRandaoMix(state, blck, epoch)
? dag.computeAttesterRandaoMix(state, blck, epoch)
return ok ShufflingRef(
epoch: epoch,
@@ -1559,15 +1508,9 @@ proc computeShufflingRefFromDatabase*(
proc computeShufflingRef*(
dag: ChainDAGRef, blck: BlockRef, epoch: Epoch): Opt[ShufflingRef] =
# Try to compute `ShufflingRef` from states available in memory
template tryWithState(state: ForkedHashedBeaconState) =
withState(state):
let shufflingRef =
dag.computeShufflingRefFromState(forkyState, blck, epoch)
if shufflingRef.isOk:
return shufflingRef
tryWithState dag.headState
tryWithState dag.epochRefState
tryWithState dag.clearanceState
let shufflingRef = dag.computeShufflingRefFromMemory(blck, epoch)
if shufflingRef.isOk:
return shufflingRef
# Fall back to database
dag.computeShufflingRefFromDatabase(blck, epoch)
@@ -1960,7 +1903,7 @@ proc pruneBlocksDAG(dag: ChainDAGRef) =
prunedHeads = hlen - dag.heads.len,
dagPruneDur = Moment.now() - startTick
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/sync/optimistic.md#helpers
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/sync/optimistic.md#helpers
template is_optimistic*(dag: ChainDAGRef, bid: BlockId): bool =
let blck =
if bid.slot <= dag.finalizedHead.slot:
@@ -2119,7 +2062,7 @@ proc pruneStateCachesDAG*(dag: ChainDAGRef) =
statePruneDur = statePruneTick - startTick,
epochRefPruneDur = epochRefPruneTick - statePruneTick
proc pruneStep(horizon, lastHorizon, lastBlockHorizon: Slot):
func pruneStep(horizon, lastHorizon, lastBlockHorizon: Slot):
tuple[stateHorizon, blockHorizon: Slot] =
## Compute a reasonable incremental pruning step considering the current
## horizon, how far the database has been pruned already and where we want the
@@ -2250,7 +2193,10 @@ proc pruneHistory*(dag: ChainDAGRef, startup = false) =
cur = dag.parent(bid)
if startup and
# TODO There have been varied reports of startup pruning causing long
# startup times - an incremental approach would be needed here also
if false and
startup and
dag.cfg.consensusForkAtEpoch(blockHorizon.epoch) > ConsensusFork.Phase0:
# Once during start, we'll clear all "old fork" data - this ensures we get
# rid of any leftover junk in the tables - we do so after linear pruning
@@ -2644,7 +2590,7 @@ proc getProposalState*(
ok state
proc aggregateAll*(
func aggregateAll*(
dag: ChainDAGRef,
validator_indices: openArray[ValidatorIndex]): Result[CookedPubKey, cstring] =
if validator_indices.len == 0:
@@ -2669,7 +2615,7 @@ proc aggregateAll*(
ok(finish(aggregateKey))
proc aggregateAll*(
func aggregateAll*(
dag: ChainDAGRef,
validator_indices: openArray[ValidatorIndex|uint64],
bits: BitSeq | BitArray): Result[CookedPubKey, cstring] =

View File

@@ -340,11 +340,11 @@ proc getGasLimit*(
from ../spec/datatypes/bellatrix import PayloadID
proc runProposalForkchoiceUpdated*(
self: ref ConsensusManager, wallSlot: Slot) {.async.} =
self: ref ConsensusManager, wallSlot: Slot): Future[Opt[void]] {.async.} =
let
nextWallSlot = wallSlot + 1
(validatorIndex, nextProposer) = self.checkNextProposer(wallSlot).valueOr:
return
return err()
debug "runProposalForkchoiceUpdated: expected to be proposing next slot",
nextWallSlot, validatorIndex, nextProposer
@@ -353,7 +353,7 @@ proc runProposalForkchoiceUpdated*(
if nextWallSlot.is_epoch:
debug "runProposalForkchoiceUpdated: not running early fcU for epoch-aligned proposal slot",
nextWallSlot, validatorIndex, nextProposer
return
return err()
# Approximately lines up with validator_duties version. Used optimistically/
# opportunistically, so mismatches are fine if not too frequent.
@@ -382,7 +382,7 @@ proc runProposalForkchoiceUpdated*(
headBlockHash = self.dag.loadExecutionBlockHash(beaconHead.blck)
if headBlockHash.isZero:
return
return err()
try:
let safeBlockHash = beaconHead.safeExecutionPayloadHash
@@ -410,6 +410,8 @@ proc runProposalForkchoiceUpdated*(
except CatchableError as err:
error "Engine API fork-choice update failed", err = err.msg
ok()
proc updateHeadWithExecution*(
self: ref ConsensusManager, initialNewHead: BeaconHead,
getBeaconTimeFn: GetBeaconTimeFn) {.async.} =
@@ -455,7 +457,7 @@ proc updateHeadWithExecution*(
# needs while runProposalForkchoiceUpdated requires RANDAO information
# from the head state corresponding to the `newHead` block, which only
# self.dag.updateHead(...) sets up.
await self.runProposalForkchoiceUpdated(getBeaconTimeFn().slotOrZero)
discard await self.runProposalForkchoiceUpdated(getBeaconTimeFn().slotOrZero)
self[].checkExpectedBlock()
except CatchableError as exc:

View File

@@ -25,7 +25,7 @@ logScope: topics = "spec_cache"
func count_active_validators*(shufflingRef: ShufflingRef): uint64 =
shufflingRef.shuffled_active_validator_indices.lenu64
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#get_committee_count_per_slot
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#get_committee_count_per_slot
func get_committee_count_per_slot*(shufflingRef: ShufflingRef): uint64 =
get_committee_count_per_slot(count_active_validators(shufflingRef))
@@ -51,7 +51,7 @@ iterator get_beacon_committee*(
committees_per_slot * SLOTS_PER_EPOCH
): yield (index_in_committee, idx)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#get_beacon_committee
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#get_beacon_committee
func get_beacon_committee*(
shufflingRef: ShufflingRef, slot: Slot, committee_index: CommitteeIndex):
seq[ValidatorIndex] =

View File

@@ -346,7 +346,7 @@ proc produceSyncAggregate*(
proc isEpochLeadTime*(
pool: SyncCommitteeMsgPool, epochsToSyncPeriod: uint64): bool =
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/validator.md#sync-committee-subnet-stability
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/validator.md#sync-committee-subnet-stability
# This ensures a uniform distribution without requiring additional state:
# (1/4) = 1/4, 4 slots out
# (3/4) * (1/3) = 1/4, 3 slots out

View File

@@ -138,7 +138,7 @@ proc getValidator*(decryptor: var MultipleKeystoresDecryptor,
ok(ValidatorStorage(kind: ValidatorStorageKind.Identifier,
ident: ident.get()))
proc getIdent*(storage: ValidatorStorage): ValidatorIdent =
func getIdent*(storage: ValidatorStorage): ValidatorIdent =
case storage.kind
of ValidatorStorageKind.Keystore:
ValidatorIdent(kind: ValidatorQueryKind.Key,
@@ -190,16 +190,18 @@ proc restValidatorExit(config: BeaconNodeConf) {.async.} =
reason = exc.msg
quit 1
let exitAtEpoch = if config.exitAtEpoch.isSome:
Epoch config.exitAtEpoch.get
else:
let currentEpoch = block:
let
genesisTime = genesis.genesis_time
genesisTime = genesis.genesis_time
beaconClock = BeaconClock.init(genesisTime)
time = getTime()
slot = beaconClock.toSlot(time).slot
epoch = slot.uint64 div 32
Epoch epoch
Epoch(slot.uint64 div 32)
let exitAtEpoch = if config.exitAtEpoch.isSome:
Epoch config.exitAtEpoch.get
else:
currentEpoch
let fork = try:
let response = await client.getStateForkPlain(stateIdHead)
@@ -217,6 +219,26 @@ proc restValidatorExit(config: BeaconNodeConf) {.async.} =
reason = exc.msg
quit 1
let signingFork = try:
let response = await client.getSpec()
if response.status == 200:
let spec = response.data
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#voluntary-exits
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/deneb/beacon-chain.md#modified-process_voluntary_exit
if currentEpoch >= Epoch(spec.data.DENEB_FORK_EPOCH):
Fork(
current_version: spec.data.CAPELLA_FORK_VERSION,
previous_version: spec.data.CAPELLA_FORK_VERSION,
epoch: GENESIS_EPOCH) # irrelevant when current/previous identical
else:
fork
else:
raise newException(RestError, "Error response (" & $response.status & ")")
except CatchableError as exc:
fatal "Failed to obtain the config spec of the beacon node",
reason = exc.msg
quit 1
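For context (not part of the diff): the Deneb spec change linked above pins voluntary-exit signing to the Capella fork version, which is why both `current_version` and `previous_version` are set to `CAPELLA_FORK_VERSION` once `DENEB_FORK_EPOCH` has been reached. In spec terms, roughly:
# After Deneb, regardless of the wall-clock epoch:
#   domain       = compute_domain(DOMAIN_VOLUNTARY_EXIT,
#                                 CAPELLA_FORK_VERSION, genesis_validators_root)
#   signing_root = compute_signing_root(voluntary_exit, domain)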
if not config.printData:
case askForExitConfirmation()
of ClientExitAction.abort:

View File

@@ -425,11 +425,11 @@ template toGaugeValue(x: Quantity): int64 =
# doAssert SECONDS_PER_ETH1_BLOCK * cfg.ETH1_FOLLOW_DISTANCE < GENESIS_DELAY,
# "Invalid configuration: GENESIS_DELAY is set too low"
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/validator.md#get_eth1_data
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/validator.md#get_eth1_data
func compute_time_at_slot(genesis_time: uint64, slot: Slot): uint64 =
genesis_time + slot * SECONDS_PER_SLOT
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/validator.md#get_eth1_data
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/validator.md#get_eth1_data
func voting_period_start_time(state: ForkedHashedBeaconState): uint64 =
let eth1_voting_period_start_slot =
getStateField(state, slot) - getStateField(state, slot) mod
@@ -437,7 +437,7 @@ func voting_period_start_time(state: ForkedHashedBeaconState): uint64 =
compute_time_at_slot(
getStateField(state, genesis_time), eth1_voting_period_start_slot)
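A hedged worked example of the two helpers above (mainnet preset assumed: SECONDS_PER_SLOT = 12, SLOTS_PER_EPOCH = 32, EPOCHS_PER_ETH1_VOTING_PERIOD = 64; the genesis time is illustrative only):
let
  genesis_time = 1_606_824_023'u64              # mainnet genesis, for illustration
  slot = 5000'u64
  periodSlots = 64'u64 * 32                     # 2048 slots per eth1 voting period
  periodStartSlot = slot - slot mod periodSlots # 4096
  periodStartTime = genesis_time + periodStartSlot * 12
doAssert periodStartTime == genesis_time + 49_152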
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/validator.md#get_eth1_data
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/validator.md#get_eth1_data
func is_candidate_block(cfg: RuntimeConfig,
blk: Eth1Block,
period_start: uint64): bool =
@@ -564,6 +564,7 @@ func asConsensusType*(payload: engine_api.GetPayloadV3Response):
# types for KZG commitments and Blobs in the `web3` and the `deneb` spec types.
# Both are defined as `array[N, byte]` under the hood.
kzgs: KzgCommitments payload.blobsBundle.commitments.mapIt(it.bytes),
proofs: payload.blobsBundle.proofs.mapIt(it.bytes),
blobs: Blobs payload.blobsBundle.blobs.mapIt(it.bytes)
)
@@ -905,8 +906,10 @@ proc getPayload*(m: ELManager,
headBlock, safeBlock, finalizedBlock, timestamp,
randomData, suggestedFeeRecipient, engineApiWithdrawals)
# `getPayloadFromSingleEL` may introduce additional latency
const extraProcessingOverhead = 500.milliseconds
let
timeout = GETPAYLOAD_TIMEOUT
timeout = GETPAYLOAD_TIMEOUT + extraProcessingOverhead
deadline = sleepAsync(timeout)
requests = m.elConnections.mapIt(it.getPayloadFromSingleEL(
EngineApiResponseType(PayloadType),
@@ -1058,10 +1061,12 @@ proc sendNewPayloadToSingleEL(connection: ELConnection,
proc sendNewPayloadToSingleEL(connection: ELConnection,
payload: engine_api.ExecutionPayloadV3,
versioned_hashes: seq[engine_api.VersionedHash]):
versioned_hashes: seq[engine_api.VersionedHash],
parent_beacon_block_root: FixedBytes[32]):
Future[PayloadStatusV1] {.async.} =
let rpcClient = await connection.connectedRpcClient()
return await rpcClient.engine_newPayloadV3(payload, versioned_hashes)
return await rpcClient.engine_newPayloadV3(
payload, versioned_hashes, parent_beacon_block_root)
type
StatusRelation = enum
@@ -1153,13 +1158,13 @@ proc processResponse[ELResponseType](
url2 = connections[idx].engineUrl.url,
status2 = status
proc sendNewPayload*(m: ELManager, blockBody: SomeForkyBeaconBlockBody):
proc sendNewPayload*(m: ELManager, blck: SomeForkyBeaconBlock):
Future[PayloadExecutionStatus] {.async.} =
let
earlyDeadline = sleepAsync(chronos.seconds 1)
startTime = Moment.now
deadline = sleepAsync(NEWPAYLOAD_TIMEOUT)
payload = blockBody.execution_payload.asEngineExecutionPayload
payload = blck.body.execution_payload.asEngineExecutionPayload
requests = m.elConnections.mapIt:
let req =
when payload is engine_api.ExecutionPayloadV3:
@@ -1167,9 +1172,11 @@ proc sendNewPayload*(m: ELManager, blockBody: SomeForkyBeaconBlockBody):
# Verify the execution payload is valid
# [Modified in Deneb] Pass `versioned_hashes` to Execution Engine
let versioned_hashes = mapIt(
blockBody.blob_kzg_commitments,
blck.body.blob_kzg_commitments,
engine_api.VersionedHash(kzg_commitment_to_versioned_hash(it)))
sendNewPayloadToSingleEL(it, payload, versioned_hashes)
sendNewPayloadToSingleEL(
it, payload, versioned_hashes,
FixedBytes[32] blck.parent_root.data)
elif payload is engine_api.ExecutionPayloadV1 or
payload is engine_api.ExecutionPayloadV2:
sendNewPayloadToSingleEL(it, payload)
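Not part of the diff, for reference: the `kzg_commitment_to_versioned_hash` call above follows the Deneb spec definition, i.e.
# versioned_hash = VERSIONED_HASH_VERSION_KZG (0x01 byte)
#                  ++ sha256(kzg_commitment)[1 .. 31]
# so the EL can check each blob commitment against the transaction's
# blob_versioned_hashes without needing the KZG library itself.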
@@ -1673,7 +1680,7 @@ template trackFinalizedState*(m: ELManager,
finalizedStateDepositIndex: uint64): bool =
trackFinalizedState(m.eth1Chain, finalizedEth1Data, finalizedStateDepositIndex)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/validator.md#get_eth1_data
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/validator.md#get_eth1_data
proc getBlockProposalData*(chain: var Eth1Chain,
state: ForkedHashedBeaconState,
finalizedEth1Data: Eth1Data,
@@ -2125,7 +2132,7 @@ proc syncEth1Chain(m: ELManager, connection: ELConnection) {.async.} =
eth1_synced_head.set eth1SyncedTo.toGaugeValue
proc startChainSyncingLoop(m: ELManager) {.async.} =
info "Starting execution layer deposits syncing",
info "Starting execution layer deposit syncing",
contract = $m.depositContractAddress
var syncedConnectionFut = m.selectConnectionForChainSyncing()
@@ -2135,7 +2142,7 @@ proc startChainSyncingLoop(m: ELManager) {.async.} =
try:
await syncedConnectionFut or sleepAsync(60.seconds)
if not syncedConnectionFut.finished:
warn "No suitable EL connection for deposit syncing"
notice "No synced execution layer available for deposit syncing"
await sleepAsync(chronos.seconds(30))
continue

View File

@@ -7,7 +7,7 @@
{.push raises: [].}
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/tests/core/pyspec/eth2spec/utils/merkle_minimal.py
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/tests/core/pyspec/eth2spec/utils/merkle_minimal.py
# Merkle tree helpers
# ---------------------------------------------------------------

View File

@@ -151,7 +151,7 @@ proc getStateSSZ*(
else: len
bytes = newSeqUninitialized[byte](wanted)
let (_, written) = uncompressFramed(tmp, bytes).valueOr:
discard uncompressFramed(tmp, bytes).valueOr:
return err("State failed to decompress, era file corrupt?")
ok()
@@ -384,7 +384,7 @@ proc getPartialState(
try:
readSszBytes(tmp.toOpenArray(0, partialBytes - 1), output)
true
except CatchableError as exc:
except CatchableError:
# TODO log?
false

View File

@@ -107,7 +107,7 @@ proc update_justified(
self.update_justified(dag, blck, justified.epoch)
ok()
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/fork-choice.md#update_checkpoints
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/fork-choice.md#update_checkpoints
proc update_checkpoints(
self: var Checkpoints, dag: ChainDAGRef,
checkpoints: FinalityCheckpoints): FcResult[void] =
@@ -124,7 +124,7 @@ proc update_checkpoints(
ok()
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/fork-choice.md#on_tick_per_slot
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/fork-choice.md#on_tick_per_slot
proc on_tick(
self: var ForkChoice, dag: ChainDAGRef, time: BeaconTime): FcResult[void] =
## Must be called at least once per slot.
@@ -206,7 +206,7 @@ func contains*(self: ForkChoiceBackend, block_root: Eth2Digest): bool =
## In particular, before adding a block, its parent must be known to the fork choice
self.proto_array.indices.contains(block_root)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/fork-choice.md#on_attestation
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/fork-choice.md#on_attestation
proc on_attestation*(
self: var ForkChoice,
dag: ChainDAGRef,
@@ -235,7 +235,7 @@ proc on_attestation*(
block_root: beacon_block_root))
ok()
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/fork-choice.md#on_attester_slashing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/fork-choice.md#on_attester_slashing
func process_equivocation*(
self: var ForkChoice,
validator_index: ValidatorIndex
@@ -350,7 +350,7 @@ func find_head*(
return ok(new_head)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/fork-choice.md#get_head
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/fork-choice.md#get_head
proc get_head*(self: var ForkChoice,
dag: ChainDAGRef,
wallTime: BeaconTime): FcResult[Eth2Digest] =
@@ -364,7 +364,7 @@ proc get_head*(self: var ForkChoice,
self.checkpoints.justified.balances,
self.checkpoints.proposer_boost_root)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/fork_choice/safe-block.md#get_safe_beacon_block_root
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/fork_choice/safe-block.md#get_safe_beacon_block_root
func get_safe_beacon_block_root*(self: ForkChoice): Eth2Digest =
# Use most recent justified block as a stopgap
self.checkpoints.justified.checkpoint.root

View File

@@ -123,7 +123,7 @@ iterator realizePendingCheckpoints*(
# Reset tip tracking for new epoch
self.currentEpochTips.clear()
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/fork-choice.md#get_weight
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/fork-choice.md#get_weight
func calculateProposerBoost(validatorBalances: openArray[Gwei]): uint64 =
var total_balance: uint64
for balance in validatorBalances:
@@ -190,7 +190,7 @@ func applyScoreChanges*(self: var ProtoArray,
# If we find the node matching the current proposer boost root, increase
# the delta by the new score amount.
#
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/fork-choice.md#get_weight
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/fork-choice.md#get_weight
if (not proposerBoostRoot.isZero) and proposerBoostRoot == node.bid.root:
proposerBoostScore = calculateProposerBoost(newBalances)
if nodeDelta >= 0 and

View File

@@ -10,7 +10,7 @@ This folder holds a collection of modules to:
Gossip validation is different from consensus verification in particular for blocks.
- Blocks: https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/p2p-interface.md#beacon_block
- Attestations (aggregated): https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof
- Attestations (aggregated): https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof
- Attestations (unaggregated): https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/p2p-interface.md#attestation-subnets
- Voluntary exits: https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/p2p-interface.md#voluntary_exit
- Proposer slashings: https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/p2p-interface.md#proposer_slashing

View File

@@ -51,10 +51,6 @@ const
## syncing the finalized part of the chain
PAYLOAD_PRE_WALL_SLOTS = SLOTS_PER_EPOCH * 2
## Number of slots from wall time that we start processing every payload
MAX_DEDUP_QUEUE_LEN = 16
## Number of blocks, with FIFO discipline, against which to check queued
## blocks before being processed to avoid spamming ELs. This should stay
## small enough that even O(n) algorithms are reasonable.
type
BlobSidecars* = seq[ref BlobSidecar]
@@ -111,9 +107,6 @@ type
## The slot at which we sent a payload to the execution client the last
## time
dupBlckBuf: Deque[(Eth2Digest, ValidatorSig)]
# Small buffer to allow for filtering of duplicate blocks in block queue
NewPayloadStatus {.pure.} = enum
valid
notValid
@@ -152,8 +145,6 @@ proc new*(T: type BlockProcessor,
blobQuarantine: blobQuarantine,
getBeaconTime: getBeaconTime,
verifier: BatchVerifier(rng: rng, taskpool: taskpool),
dupBlckBuf: initDeque[(Eth2Digest, ValidatorSig)](
initialSize = MAX_DEDUP_QUEUE_LEN)
)
# Sync callbacks
@@ -184,7 +175,7 @@ proc dumpBlock[T](
discard
from ../consensus_object_pools/block_clearance import
addBackfillBlock, addHeadBlock
addBackfillBlock, addHeadBlockWithParent, checkHeadBlock
proc storeBackfillBlock(
self: var BlockProcessor,
@@ -303,11 +294,10 @@ from ../spec/datatypes/capella import
from ../spec/datatypes/deneb import SignedBeaconBlock, asTrusted, shortLog
proc newExecutionPayload*(
elManager: ELManager,
blockBody: SomeForkyBeaconBlockBody):
elManager: ELManager, blck: SomeForkyBeaconBlock):
Future[Opt[PayloadExecutionStatus]] {.async.} =
template executionPayload: untyped = blockBody.execution_payload
template executionPayload: untyped = blck.body.execution_payload
if not elManager.hasProperlyConfiguredConnection:
if elManager.hasConnection:
@@ -322,7 +312,7 @@ proc newExecutionPayload*(
executionPayload = shortLog(executionPayload)
try:
let payloadStatus = await elManager.sendNewPayload(blockBody)
let payloadStatus = await elManager.sendNewPayload(blck)
debug "newPayload: succeeded",
parentHash = executionPayload.parent_hash,
@@ -349,7 +339,7 @@ proc getExecutionValidity(
try:
let executionPayloadStatus = await elManager.newExecutionPayload(
blck.message.body)
blck.message)
if executionPayloadStatus.isNone:
return NewPayloadStatus.noResponse
@@ -407,12 +397,59 @@ proc storeBlock*(
## blocks, regardless of origin, pass through here. When storing a block,
## we will add it to the dag and pass it to all block consumers that need
## to know about it, such as the fork choice and the monitoring
let
attestationPool = self.consensusManager.attestationPool
startTick = Moment.now()
vm = self.validatorMonitor
dag = self.consensusManager.dag
wallSlot = wallTime.slotOrZero
# If the block is missing its parent, it will be re-orphaned below
self.consensusManager.quarantine[].removeOrphan(signedBlock)
# The block is certainly not missing any more
self.consensusManager.quarantine[].missing.del(signedBlock.root)
if signedBlock.message.parent_root in
self.consensusManager.quarantine[].unviable:
# DAG doesn't know about unviable ancestor blocks - we do however!
self.consensusManager.quarantine[].addUnviable(signedBlock.root)
return err((VerifierError.UnviableFork, ProcessingStatus.completed))
template handleVerifierError(errorParam: VerifierError): auto =
let error = errorParam
case error
of VerifierError.MissingParent:
if (let r = self.consensusManager.quarantine[].addOrphan(
dag.finalizedHead.slot, ForkedSignedBeaconBlock.init(signedBlock));
r.isErr()):
debug "could not add orphan",
blockRoot = shortLog(signedBlock.root),
blck = shortLog(signedBlock.message),
signature = shortLog(signedBlock.signature),
err = r.error()
else:
debug "Block quarantined",
blockRoot = shortLog(signedBlock.root),
blck = shortLog(signedBlock.message),
signature = shortLog(signedBlock.signature)
of VerifierError.UnviableFork:
# Track unviables so that descendants can be discarded promptly
self.consensusManager.quarantine[].addUnviable(signedBlock.root)
else:
discard
err((error, ProcessingStatus.completed))
let
parent = dag.checkHeadBlock(signedBlock)
if parent.isErr():
return handleVerifierError(parent.error())
let
payloadStatus =
if maybeFinalized and
(self.lastPayload + SLOTS_PER_PAYLOAD) > signedBlock.message.slot and
@@ -432,9 +469,6 @@ proc storeBlock*(
NewPayloadStatus.valid # vacuously
payloadValid = payloadStatus == NewPayloadStatus.valid
# The block is certainly not missing any more
self.consensusManager.quarantine[].missing.del(signedBlock.root)
if NewPayloadStatus.invalid == payloadStatus:
self.consensusManager.quarantine[].addUnviable(signedBlock.root)
return err((VerifierError.UnviableFork, ProcessingStatus.completed))
@@ -462,11 +496,6 @@ proc storeBlock*(
else:
discard
# We'll also remove the block as an orphan: it's unlikely the parent is
# missing if we get this far - should that be the case, the block will
# be re-added later
self.consensusManager.quarantine[].removeOrphan(signedBlock)
# TODO with v1.4.0, not sure this is still relevant
# Establish blob viability before calling addHeadBlock to avoid
# writing the block in case of blob error.
@@ -487,28 +516,31 @@ proc storeBlock*(
return err((VerifierError.Invalid, ProcessingStatus.completed))
type Trusted = typeof signedBlock.asTrusted()
let blck = dag.addHeadBlock(self.verifier, signedBlock, payloadValid) do (
let
blck = dag.addHeadBlockWithParent(
self.verifier, signedBlock, parent.value(), payloadValid) do (
blckRef: BlockRef, trustedBlock: Trusted,
epochRef: EpochRef, unrealized: FinalityCheckpoints):
# Callback add to fork choice if valid
attestationPool[].addForkChoice(
epochRef, blckRef, unrealized, trustedBlock.message, wallTime)
# Callback add to fork choice if valid
attestationPool[].addForkChoice(
epochRef, blckRef, unrealized, trustedBlock.message, wallTime)
vm[].registerBeaconBlock(
src, wallTime, trustedBlock.message)
vm[].registerBeaconBlock(
src, wallTime, trustedBlock.message)
for attestation in trustedBlock.message.body.attestations:
for validator_index in dag.get_attesting_indices(attestation):
vm[].registerAttestationInBlock(attestation.data, validator_index,
trustedBlock.message.slot)
for attestation in trustedBlock.message.body.attestations:
for validator_index in dag.get_attesting_indices(attestation):
vm[].registerAttestationInBlock(attestation.data, validator_index,
trustedBlock.message.slot)
withState(dag[].clearanceState):
when consensusFork >= ConsensusFork.Altair and
Trusted isnot phase0.TrustedSignedBeaconBlock: # altair+
for i in trustedBlock.message.body.sync_aggregate.sync_committee_bits.oneIndices():
vm[].registerSyncAggregateInBlock(
trustedBlock.message.slot, trustedBlock.root,
forkyState.data.current_sync_committee.pubkeys.data[i])
withState(dag[].clearanceState):
when consensusFork >= ConsensusFork.Altair and
Trusted isnot phase0.TrustedSignedBeaconBlock: # altair+
for i in trustedBlock.message.body.sync_aggregate.sync_committee_bits.oneIndices():
vm[].registerSyncAggregateInBlock(
trustedBlock.message.slot, trustedBlock.root,
forkyState.data.current_sync_committee.pubkeys.data[i])
self[].dumpBlock(signedBlock, blck)
@@ -516,34 +548,13 @@ proc storeBlock*(
# However this block was before the last finalized epoch and so its parent
# was pruned from the ForkChoice.
if blck.isErr():
case blck.error()
of VerifierError.MissingParent:
if signedBlock.message.parent_root in
self.consensusManager.quarantine[].unviable:
# DAG doesn't know about unviable ancestor blocks - we do! Translate
# this to the appropriate error so that sync etc doesn't retry the block
self.consensusManager.quarantine[].addUnviable(signedBlock.root)
return handleVerifierError(blck.error())
return err((VerifierError.UnviableFork, ProcessingStatus.completed))
if (let r = self.consensusManager.quarantine[].addOrphan(
dag.finalizedHead.slot, ForkedSignedBeaconBlock.init(signedBlock));
r.isErr()):
debug "storeBlock: could not add orphan",
blockRoot = shortLog(signedBlock.root),
blck = shortLog(signedBlock.message),
signature = shortLog(signedBlock.signature),
err = r.error()
of VerifierError.UnviableFork:
# Track unviables so that descendants can be discarded properly
self.consensusManager.quarantine[].addUnviable(signedBlock.root)
else: discard
return err((blck.error, ProcessingStatus.completed))
if payloadStatus in {NewPayloadStatus.valid, NewPayloadStatus.notValid}:
# If the EL responded at all, we don't need to try again for a while
self[].lastPayload = signedBlock.message.slot
# Even if the EL is not responding, we'll only try once every now and then
# to give it a block - this avoids a pathological slowdown where a busy EL
# times out on every block we give it because it's busy with the previous
# one
self[].lastPayload = signedBlock.message.slot
# write blobs now that block has been written.
let blobs = blobsOpt.valueOr: BlobSidecars @[]
@@ -627,30 +638,33 @@ proc storeBlock*(
# `forkchoiceUpdated` necessary for EL client only.
self.consensusManager[].updateHead(newHead.get.blck)
if self.consensusManager.checkNextProposer(wallSlot).isNone:
# No attached validator is next proposer, so use non-proposal fcU
template callForkchoiceUpdated(payloadAttributeType: untyped): auto =
await elManager.expectValidForkchoiceUpdated(
headBlockPayloadAttributesType = payloadAttributeType,
headBlockHash = headExecutionPayloadHash,
safeBlockHash = newHead.get.safeExecutionPayloadHash,
finalizedBlockHash = newHead.get.finalizedExecutionPayloadHash,
receivedBlock = signedBlock)
template callExpectValidFCU(payloadAttributeType: untyped): auto =
await elManager.expectValidForkchoiceUpdated(
headBlockPayloadAttributesType = payloadAttributeType,
headBlockHash = headExecutionPayloadHash,
safeBlockHash = newHead.get.safeExecutionPayloadHash,
finalizedBlockHash = newHead.get.finalizedExecutionPayloadHash,
receivedBlock = signedBlock)
template callForkChoiceUpdated: auto =
case self.consensusManager.dag.cfg.consensusForkAtEpoch(
newHead.get.blck.bid.slot.epoch)
of ConsensusFork.Capella, ConsensusFork.Deneb:
callForkchoiceUpdated(payloadAttributeType = PayloadAttributesV2)
callExpectValidFCU(payloadAttributeType = PayloadAttributesV2)
of ConsensusFork.Phase0, ConsensusFork.Altair,
ConsensusFork.Bellatrix:
callForkchoiceUpdated(payloadAttributeType = PayloadAttributesV1)
callExpectValidFCU(payloadAttributeType = PayloadAttributesV1)
if self.consensusManager.checkNextProposer(wallSlot).isNone:
# No attached validator is next proposer, so use non-proposal fcU
callForkChoiceUpdated()
else:
# Some attached validator is next proposer, so prepare payload. As
# updateHead() updated the DAG head, runProposalForkchoiceUpdated,
# which needs the state corresponding to that head block, can run.
await self.consensusManager.runProposalForkchoiceUpdated(
wallSlot)
if (await self.consensusManager.runProposalForkchoiceUpdated(
wallSlot)).isNone:
callForkChoiceUpdated()
else:
await self.consensusManager.updateHeadWithExecution(
newHead.get, self.getBeaconTime)
@@ -667,12 +681,16 @@ proc storeBlock*(
beacon_store_block_duration_seconds.observe(storeBlockDur.toFloatSeconds())
debug "Block processed",
localHeadSlot = dag.head.slot,
blockSlot = blck.get().slot,
head = shortLog(dag.head),
blck = shortLog(blck.get()),
validationDur, queueDur, storeBlockDur, updateHeadDur
for quarantined in self.consensusManager.quarantine[].pop(blck.get().root):
# Process the blocks that had the newly accepted block as parent
debug "Block from quarantine",
blockRoot = shortLog(signedBlock.root),
quarantined = shortLog(quarantined.root)
withBlck(quarantined):
when typeof(blck).toFork() < ConsensusFork.Deneb:
self[].addBlock(MsgSource.gossip, quarantined, Opt.none(BlobSidecars))
@@ -695,7 +713,7 @@ proc storeBlock*(
blockRoot = shortLog(quarantined.root),
signature = shortLog(quarantined.signature)
return Result[BlockRef, (VerifierError, ProcessingStatus)].ok blck.get
ok blck.value()
# Enqueue
# ------------------------------------------------------------------------------
@@ -735,19 +753,6 @@ proc addBlock*(
except AsyncQueueFullError:
raiseAssert "unbounded queue"
# Dedup
# ------------------------------------------------------------------------------
func checkDuplicateBlocks(self: ref BlockProcessor, entry: BlockEntry): bool =
let key = (entry.blck.root, entry.blck.signature)
if self.dupBlckBuf.contains key:
return true
doAssert self.dupBlckBuf.len <= MAX_DEDUP_QUEUE_LEN
if self.dupBlckBuf.len >= MAX_DEDUP_QUEUE_LEN:
self.dupBlckBuf.shrink(fromFirst = 1)
self.dupBlckBuf.addLast key
false
# Event Loop
# ------------------------------------------------------------------------------
@@ -764,12 +769,6 @@ proc processBlock(
error "Processing block before genesis, clock turned back?"
quit 1
if self.checkDuplicateBlocks(entry):
if entry.resfut != nil:
entry.resfut.complete(Result[void, VerifierError].err(
VerifierError.Duplicate))
return
let res = withBlck(entry.blck):
await self.storeBlock(
entry.src, wallTime, blck, entry.blobs, entry.maybeFinalized,
@@ -781,7 +780,7 @@ proc processBlock(
# - MUST NOT optimistically import the block.
# - MUST NOT apply the block to the fork choice store.
# - MAY queue the block for later processing.
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/sync/optimistic.md#execution-engine-errors
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/sync/optimistic.md#execution-engine-errors
await sleepAsync(chronos.seconds(1))
self[].addBlock(
entry.src, entry.blck, entry.blobs, entry.resfut, entry.maybeFinalized,

View File

@@ -656,7 +656,7 @@ proc processSignedContributionAndProof*(
err(v.error())
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/sync-protocol.md#process_light_client_finality_update
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/sync-protocol.md#process_light_client_finality_update
proc processLightClientFinalityUpdate*(
self: var Eth2Processor, src: MsgSource,
finality_update: ForkedLightClientFinalityUpdate
@@ -672,7 +672,7 @@ proc processLightClientFinalityUpdate*(
beacon_light_client_finality_update_dropped.inc(1, [$v.error[0]])
v
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/sync-protocol.md#process_light_client_optimistic_update
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/sync-protocol.md#process_light_client_optimistic_update
proc processLightClientOptimisticUpdate*(
self: var Eth2Processor, src: MsgSource,
optimistic_update: ForkedLightClientOptimisticUpdate

View File

@@ -82,25 +82,55 @@ func check_attestation_block(
ok()
func check_propagation_slot_range(
msgSlot: Slot, wallTime: BeaconTime): Result[Slot, ValidationError] =
let
futureSlot = (wallTime + MAXIMUM_GOSSIP_CLOCK_DISPARITY).toSlot()
consensusFork: ConsensusFork, msgSlot: Slot, wallTime: BeaconTime):
Result[Slot, ValidationError] =
let futureSlot = (wallTime + MAXIMUM_GOSSIP_CLOCK_DISPARITY).toSlot()
if not futureSlot.afterGenesis or msgSlot > futureSlot.slot:
return errIgnore("Attestation slot in the future")
let
pastSlot = (wallTime - MAXIMUM_GOSSIP_CLOCK_DISPARITY).toSlot()
let pastSlot = (wallTime - MAXIMUM_GOSSIP_CLOCK_DISPARITY).toSlot()
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.1/specs/phase0/p2p-interface.md#configuration
# The spec value of ATTESTATION_PROPAGATION_SLOT_RANGE is 32, but a node
# may retransmit attestations that are on the cusp of being out of spec and
# that, by the time they reach their destination, are out of spec.
const ATTESTATION_PROPAGATION_SLOT_RANGE = 28
if not pastSlot.afterGenesis:
return ok(msgSlot)
if pastSlot.afterGenesis and
msgSlot + ATTESTATION_PROPAGATION_SLOT_RANGE < pastSlot.slot:
return errIgnore("Attestation slot in the past")
if consensusFork < ConsensusFork.Deneb:
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/p2p-interface.md#configuration
# The spec value of ATTESTATION_PROPAGATION_SLOT_RANGE is 32, but a node
# may retransmit attestations that are on the cusp of being out of spec and
# that, by the time they reach their destination, are out of spec.
const ATTESTATION_PROPAGATION_SLOT_RANGE = 28
if msgSlot + ATTESTATION_PROPAGATION_SLOT_RANGE < pastSlot.slot:
return errIgnore("Attestation slot in the past")
else:
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/deneb/p2p-interface.md#beacon_attestation_subnet_id
# "[IGNORE] the epoch of attestation.data.slot is either the current or
# previous epoch (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance) -- i.e.
# compute_epoch_at_slot(attestation.data.slot) in
# (get_previous_epoch(state), get_current_epoch(state))"
#
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/deneb/p2p-interface.md#beacon_aggregate_and_proof
# "[IGNORE] the epoch of aggregate.data.slot is either the current or
# previous epoch (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance) -- i.e.
# compute_epoch_at_slot(aggregate.data.slot) in
# (get_previous_epoch(state), get_current_epoch(state))"
if msgSlot.epoch < pastSlot.slot.epoch.get_previous_epoch:
return errIgnore("Attestation slot in the past")
ok(msgSlot)
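A hedged worked example of the two regimes in `check_propagation_slot_range` above (mainnet preset assumed, SLOTS_PER_EPOCH = 32, clock disparity ignored), for an attestation at slot 100, i.e. epoch 3:
const slotsPerEpoch = 32'u64    # assumed mainnet preset
let attSlot = 100'u64           # epoch 3
# pre-Deneb: ignored once the wall slot passes attSlot + 28
doAssert attSlot + 28 == 128
# post-Deneb: accepted while epoch 3 is the current or previous wall epoch,
# i.e. through the last slot of epoch 4
doAssert (attSlot div slotsPerEpoch + 2) * slotsPerEpoch - 1 == 159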
func check_slot_exact(msgSlot: Slot, wallTime: BeaconTime):
Result[Slot, ValidationError] =
let futureSlot = (wallTime + MAXIMUM_GOSSIP_CLOCK_DISPARITY).toSlot()
if not futureSlot.afterGenesis or msgSlot > futureSlot.slot:
return errIgnore("Sync committee slot in the future")
let pastSlot = (wallTime - MAXIMUM_GOSSIP_CLOCK_DISPARITY).toSlot()
if pastSlot.afterGenesis and msgSlot < pastSlot.slot:
return errIgnore("Sync committee slot in the past")
ok(msgSlot)
@@ -120,10 +150,9 @@ func check_beacon_and_target_block(
? check_attestation_block(pool, data.slot, blck)
# [REJECT] The attestation's target block is an ancestor of the block named
# in the LMD vote -- i.e. get_ancestor(store,
# attestation.data.beacon_block_root,
# compute_start_slot_at_epoch(attestation.data.target.epoch)) ==
# attestation.data.target.root
# in the LMD vote -- i.e.
# get_checkpoint_block(store, attestation.data.beacon_block_root,
# attestation.data.target.epoch) == attestation.data.target.root
# the sanity of target.epoch has been checked by check_attestation_slot_target
let target = blck.atCheckpoint(data.target).valueOr:
return errReject("Attestation target is not ancestor of LMD vote block")
@@ -384,7 +413,7 @@ proc validateBeaconBlock*(
# proposer for the slot, signed_beacon_block.message.slot.
#
# While this condition is similar to the proposer slashing condition at
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/validator.md#proposer-slashing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/validator.md#proposer-slashing
# it's not identical, and this check does not address slashing:
#
# (1) The beacon blocks must be conflicting, i.e. different, for the same
@@ -464,7 +493,11 @@ proc validateBeaconBlock*(
blockRoot = shortLog(signed_beacon_block.root),
blck = shortLog(signed_beacon_block.message),
err = r.error()
else:
debug "Block quarantined",
blockRoot = shortLog(signed_beacon_block.root),
blck = shortLog(signed_beacon_block.message),
signature = shortLog(signed_beacon_block.signature)
return errIgnore("BeaconBlock: Parent not found")
# Continues block parent validity checking in optimistic case, where it does
@@ -527,7 +560,8 @@ proc validateBeaconBlock*(
ok()
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/p2p-interface.md#beacon_attestation_subnet_id
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/p2p-interface.md#beacon_attestation_subnet_id
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/deneb/p2p-interface.md#beacon_aggregate_and_proof
proc validateAttestation*(
pool: ref AttestationPool,
batchCrypto: ref BatchCrypto,
@@ -556,8 +590,13 @@ proc validateAttestation*(
# attestation.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot
# >= attestation.data.slot (a client MAY queue future attestations for
# processing at the appropriate slot).
#
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/deneb/p2p-interface.md#beacon_attestation_subnet_id
# modifies this for Deneb and newer forks.
block:
let v = check_propagation_slot_range(slot, wallTime)
let v = check_propagation_slot_range(
pool.dag.cfg.consensusForkAtEpoch(wallTime.slotOrZero.epoch), slot,
wallTime)
if v.isErr(): # [IGNORE]
return err(v.error())
@@ -584,11 +623,10 @@ proc validateAttestation*(
# The following rule follows implicitly from that we clear out any
# unviable blocks from the chain dag:
#
# The current finalized_checkpoint is an ancestor of the block defined by
# attestation.data.beacon_block_root -- i.e. get_ancestor(store,
# attestation.data.beacon_block_root,
# compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)) ==
# store.finalized_checkpoint.root
# [IGNORE] The current finalized_checkpoint is an ancestor of the block
# defined by attestation.data.beacon_block_root -- i.e.
# get_checkpoint_block(store, attestation.data.beacon_block_root,
# store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root
let
shufflingRef =
pool.dag.getShufflingRef(target.blck, target.slot.epoch, false).valueOr:
@@ -693,7 +731,8 @@ proc validateAttestation*(
return ok((validator_index, sig))
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/deneb/p2p-interface.md#beacon_aggregate_and_proof
proc validateAggregate*(
pool: ref AttestationPool,
batchCrypto: ref BatchCrypto,
@@ -723,8 +762,13 @@ proc validateAggregate*(
# ATTESTATION_PROPAGATION_SLOT_RANGE slots (with a
# MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance) -- i.e. aggregate.data.slot +
# ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= aggregate.data.slot
#
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/deneb/p2p-interface.md#beacon_aggregate_and_proof
# modifies this for Deneb and newer forks.
block:
let v = check_propagation_slot_range(slot, wallTime)
let v = check_propagation_slot_range(
pool.dag.cfg.consensusForkAtEpoch(wallTime.slotOrZero.epoch), slot,
wallTime)
if v.isErr(): # [IGNORE]
return err(v.error())
@@ -883,11 +927,10 @@ proc validateAggregate*(
# The following rule follows implicitly from that we clear out any
# unviable blocks from the chain dag:
#
# The current finalized_checkpoint is an ancestor of the block defined by
# aggregate.data.beacon_block_root -- i.e. get_ancestor(store,
# aggregate.data.beacon_block_root,
# compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)) ==
# store.finalized_checkpoint.root
# [IGNORE] The current finalized_checkpoint is an ancestor of the block
# defined by aggregate.data.beacon_block_root -- i.e.
# get_checkpoint_block(store, aggregate.data.beacon_block_root,
# finalized_checkpoint.epoch) == store.finalized_checkpoint.root
# Only valid aggregates go in the list
if pool.nextAttestationEpoch.lenu64 <= aggregate_and_proof.aggregator_index:
@@ -929,22 +972,22 @@ proc validateBlsToExecutionChange*(
if res.isErr:
return pool.checkedReject(res.error)
# BLS to execution change signatures are batch-verified
let deferredCrypto = batchCrypto.scheduleBlsToExecutionChangeCheck(
pool.dag.cfg.genesisFork, signed_address_change)
if deferredCrypto.isErr():
return pool.checkedReject(deferredCrypto.error)
# BLS to execution change signatures are batch-verified
let deferredCrypto = batchCrypto.scheduleBlsToExecutionChangeCheck(
pool.dag.cfg.genesisFork, signed_address_change)
if deferredCrypto.isErr():
return pool.checkedReject(deferredCrypto.error)
let (cryptoFut, sig) = deferredCrypto.get()
case await cryptoFut
of BatchResult.Invalid:
return pool.checkedReject(
"SignedBLSToExecutionChange: invalid signature")
of BatchResult.Timeout:
return errIgnore(
"SignedBLSToExecutionChange: timeout checking signature")
of BatchResult.Valid:
discard # keep going only in this case
let (cryptoFut, sig) = deferredCrypto.get()
case await cryptoFut
of BatchResult.Invalid:
return pool.checkedReject(
"SignedBLSToExecutionChange: invalid signature")
of BatchResult.Timeout:
return errIgnore(
"SignedBLSToExecutionChange: timeout checking signature")
of BatchResult.Valid:
discard # keep going only in this case
return ok()
@@ -1024,7 +1067,7 @@ proc validateVoluntaryExit*(
ok()
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/p2p-interface.md#sync_committee_subnet_id
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/p2p-interface.md#sync_committee_subnet_id
proc validateSyncCommitteeMessage*(
dag: ChainDAGRef,
quarantine: ref Quarantine,
@@ -1040,7 +1083,7 @@ proc validateSyncCommitteeMessage*(
# [IGNORE] The message's slot is for the current slot (with a
# `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance), i.e.
# `sync_committee_message.slot == current_slot`.
let v = check_propagation_slot_range(msg.slot, wallTime)
let v = check_slot_exact(msg.slot, wallTime)
if v.isErr():
return err(v.error())
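check_slot_exact replaces the range check here because sync committee messages are only valid for the current slot. A minimal sketch of that rule, leaving out the sub-second MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance the real helper applies at slot boundaries:

const SECONDS_PER_SLOT = 12'u64   # mainnet config

func slotAtTime(secondsSinceGenesis: uint64): uint64 =
  secondsSinceGenesis div SECONDS_PER_SLOT

func checkSlotExact(msgSlot, secondsSinceGenesis: uint64): bool =
  # [IGNORE] sync_committee_message.slot == current_slot
  msgSlot == slotAtTime(secondsSinceGenesis)

when isMainModule:
  doAssert checkSlotExact(10, 10 * SECONDS_PER_SLOT + 3)
  doAssert not checkSlotExact(9, 10 * SECONDS_PER_SLOT + 3)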
@@ -1135,7 +1178,7 @@ proc validateContribution*(
# (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance)
# i.e. contribution.slot == current_slot.
block:
let v = check_propagation_slot_range(syncCommitteeSlot, wallTime)
let v = check_slot_exact(syncCommitteeSlot, wallTime)
if v.isErr(): # [IGNORE]
return err(v.error())
@@ -1265,7 +1308,7 @@ proc validateContribution*(
return ok((blck.bid, sig, participants))
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/p2p-interface.md#light_client_finality_update
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/p2p-interface.md#light_client_finality_update
proc validateLightClientFinalityUpdate*(
pool: var LightClientPool, dag: ChainDAGRef,
finality_update: ForkedLightClientFinalityUpdate,
@@ -1301,7 +1344,7 @@ proc validateLightClientFinalityUpdate*(
pool.latestForwardedFinalitySlot = finalized_slot
ok()
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/p2p-interface.md#light_client_optimistic_update
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/p2p-interface.md#light_client_optimistic_update
proc validateLightClientOptimisticUpdate*(
pool: var LightClientPool, dag: ChainDAGRef,
optimistic_update: ForkedLightClientOptimisticUpdate,

View File

@@ -528,7 +528,7 @@ func toValidationError(
# previously forwarded `optimistic_update`s
errIgnore($r.error)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/sync-protocol.md#process_light_client_finality_update
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/sync-protocol.md#process_light_client_finality_update
proc processLightClientFinalityUpdate*(
self: var LightClientProcessor, src: MsgSource,
finality_update: ForkedLightClientFinalityUpdate
@@ -543,7 +543,7 @@ proc processLightClientFinalityUpdate*(
self.latestFinalityUpdate = finality_update.toOptimistic
v
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/sync-protocol.md#process_light_client_finality_update
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/sync-protocol.md#process_light_client_finality_update
proc processLightClientOptimisticUpdate*(
self: var LightClientProcessor, src: MsgSource,
optimistic_update: ForkedLightClientOptimisticUpdate

File diff suppressed because it is too large

File diff suppressed because it is too large

Binary file not shown.

View File

@@ -0,0 +1,95 @@
# Mainnet config
# Extends the mainnet preset
PRESET_BASE: 'mainnet'
# Free-form short name of the network that this configuration applies to - known
# canonical network names include:
# * 'mainnet' - there can be only one
# * 'prater' - testnet
# Must match the regex: [a-z0-9\-]
CONFIG_NAME: 'mainnet'
# Transition
# ---------------------------------------------------------------
# Estimated on Sept 15, 2022
TERMINAL_TOTAL_DIFFICULTY: 58750000000000000000000
# By default, don't use these params
TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000
TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615
# Genesis
# ---------------------------------------------------------------
# `2**14` (= 16,384)
MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 16384
# Dec 1, 2020, 12pm UTC
MIN_GENESIS_TIME: 1606824000
# Mainnet initial fork version, recommend altering for testnets
GENESIS_FORK_VERSION: 0x00000000
# 604800 seconds (7 days)
GENESIS_DELAY: 604800
# Forking
# ---------------------------------------------------------------
# Some forks are disabled for now:
# - These may be re-assigned to another fork-version later
# - Temporarily set to max uint64 value: 2**64 - 1
# Altair
ALTAIR_FORK_VERSION: 0x01000000
ALTAIR_FORK_EPOCH: 74240 # Oct 27, 2021, 10:56:23am UTC
# Bellatrix
BELLATRIX_FORK_VERSION: 0x02000000
BELLATRIX_FORK_EPOCH: 144896 # Sept 6, 2022, 11:34:47am UTC
# Capella
CAPELLA_FORK_VERSION: 0x03000000
CAPELLA_FORK_EPOCH: 194048 # April 12, 2023, 10:27:35pm UTC
# Deneb
DENEB_FORK_VERSION: 0x04000000
DENEB_FORK_EPOCH: 18446744073709551615
# Time parameters
# ---------------------------------------------------------------
# 12 seconds
SECONDS_PER_SLOT: 12
# 14 (estimate from Eth1 mainnet)
SECONDS_PER_ETH1_BLOCK: 14
# 2**8 (= 256) epochs ~27 hours
MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
# 2**8 (= 256) epochs ~27 hours
SHARD_COMMITTEE_PERIOD: 256
# 2**11 (= 2,048) Eth1 blocks ~8 hours
ETH1_FOLLOW_DISTANCE: 2048
# Validator cycle
# ---------------------------------------------------------------
# 2**2 (= 4)
INACTIVITY_SCORE_BIAS: 4
# 2**4 (= 16)
INACTIVITY_SCORE_RECOVERY_RATE: 16
# 2**4 * 10**9 (= 16,000,000,000) Gwei
EJECTION_BALANCE: 16000000000
# 2**2 (= 4)
MIN_PER_EPOCH_CHURN_LIMIT: 4
# 2**16 (= 65,536)
CHURN_LIMIT_QUOTIENT: 65536
# Fork choice
# ---------------------------------------------------------------
# 40%
PROPOSER_SCORE_BOOST: 40
# Deposit contract
# ---------------------------------------------------------------
# Ethereum PoW Mainnet
DEPOSIT_CHAIN_ID: 1
DEPOSIT_NETWORK_ID: 1
DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa
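As a sanity check on the *_FORK_EPOCH values above, the first slot of each fork follows from the mainnet preset's SLOTS_PER_EPOCH = 32 (a preset value, not part of this config file):

const SLOTS_PER_EPOCH = 32'u64   # mainnet preset, not defined in config.yaml

func firstSlotOfEpoch(epoch: uint64): uint64 = epoch * SLOTS_PER_EPOCH

when isMainModule:
  echo "Altair:    epoch 74240  -> slot ", firstSlotOfEpoch(74240)    # 2375680
  echo "Bellatrix: epoch 144896 -> slot ", firstSlotOfEpoch(144896)   # 4636672
  echo "Capella:   epoch 194048 -> slot ", firstSlotOfEpoch(194048)   # 6209536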

View File

@@ -0,0 +1 @@
{"version":"capella","data":{"attested_header":{"beacon":{"slot":"6730912","proposer_index":"338774","parent_root":"0x08de36a4696c4f4136913ffc8cbc1e4a2f233e66b3102a6e196459ff6f51ef7b","state_root":"0x5528265425c00c6b0be8be960d566ea7a69880dc250692eda32893a9a3bc1e7e","body_root":"0xca24486c1f61926843ef5b175952452a4eab871ebe3e288096cedc7a1e4fcfd9"},"execution":{"parent_hash":"0x853b4c96c5810f1937f8b937681509be006fa0f3c3bacadf1f03e8f090745fa9","fee_recipient":"0x1f9090aae28b8a3dceadf281b0f12828e676c326","state_root":"0x7f1a69ff29e693832968082fe535267c45d51a927c2dad48d2a900865f178016","receipts_root":"0xb10b9d7bf8474462f1062ff0f1f281847b07bb9fd1acc0e660a18d3c7a3824d6","logs_bloom":"0x87e5e40b4100907cb88a6980db5711a39e134c2a55045c002193503217c8ac5ad010a78056e0642047a31b8b181c8bd282ed0a4798017d200c0640b609ffea39c868cc6cc11e1f2c7813421b52c861af88116d0d0556184250227c578861a6808334c0910a03c6bd88d8121424088d69f0104c620088e460e222007d14d8916ede20bb55b29cd003285d7c7083989013148126bda9d90438da6f42615db20cb88e0a1be3e823f6131eaa4dfd9763744fec46a0d9bf131b1790ece075848ac035c08c142390a94852227842309aecd0c0fc6941c8514c9035303501de4e93f004ffb1b00d12014104088d999021aa4886944a92985b7a63c789c40f6691023406","prev_randao":"0xdd9b5fab097278f656b224e5327ac2ea2e92bca66582714573e45bb4c34254de","block_number":"17548171","gas_limit":"30000000","gas_used":"15879704","timestamp":"1687594967","extra_data":"0x7273796e632d6275696c6465722e78797a","base_fee_per_gas":"12434872107","block_hash":"0xdb1d6cb76484b413c9357f6bcc2f2c38aa854e6df41a6fdf856a15b236515598","transactions_root":"0x8212a7dffa03f8838ecebd81c5a59d59200720dd64b7de0401a8d60be22706fc","withdrawals_root":"0x68c4fb463b1bf7e63b8347cbbf003ce10f96f3ef74b77f2da4a896e4dd289e3b"},"execution_branch":["0x7907fbf950b450274b3dbc16ffba288e2d8ed9abd1b06202e7a390b9b9ac7ae2","0x336488033fe5f3ef4ccc12af07b9370b92e553e35ecb4a337a1b1c0e4afe1e0e","0xdb56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71","0x30bcbea3a71dc7f3ef58b69113b2dffa2def1b54aeee7ca726957f29c6e8afcb"]},"finalized_header":{"beacon":{"slot":"6730848","proposer_index":"169529","parent_root":"0x6f2bd6883b751edc408277a7710fd839b8fcdef92ac8388c855a726a10663d79","state_root":"0xca6912065fbaf9f6ade91f4a3c6363b5e513e5d7a08b854bdbe4c547d54689f5","body_root":"0xdc72c765c2151ea6093724111dd71fdcd5085a5136fdf49aff3eb140ecc4b5ea"},"execution":{"parent_hash":"0x9f2a673aa997f84b8dcaf48204bf8e32ecfff4c950471354090c59aa02e47ad8","fee_recipient":"0x690b9a9e9aa1c9db991c7721a92d351db4fac990","state_root":"0x9b66f5b6f68374a86e3373948146726ab6711d1dc71750d2f48411ed3a74a986","receipts_root":"0x01bdaec4eb4787b7da1550cdf76c388c38f5dc11f5386f302a908aa0b9bfe39e","logs_bloom":"0x1db10687f54003d8b0340020cb11422e84a00a032c0c2572fd29140e14830da36c402bf9580081992190f8220010936002010644e8a13c3c0ad040f0b42e2c0ffcc06b28c628a82a2e4372cac25050a9d4c0201280e2084203935c46a820b6022f5044017266ac815145d10129184d4d221184e0103c5e2048084cd905d817442a4062f0c21013c3885d00784adc00089427e8831954365fc630166a14522c218f5805738331e0910b8f45e59890ccca2d8236c04021080a2ee384610128a023ec1b884680a900a194d84129263f28014a415130c11a30900a0565ca0a09ee116531e908350f08b0c015b0b473225b0684329014f998e06000689911d008f501","prev_randao":"0xcad7f85f1f647efa44a90aada92d1ba4e24e0bd18099450d7bdbf758c40b7087","block_number":"17548108","gas_limit":"30000000","gas_used":"15870907","timestamp":"1687594199","extra_data":"0x627920406275696c64657230783639","base_fee_per_gas":"13417129560","block_hash":"0xe4b939dcf42c7659a62c7d89841d4bea7dd86326581409b4850fdd6c188a2ded","transactions_r
oot":"0x8f92f1610acf9dd2b5510c373d6a7b03b6bf5430998a341b7403eb8072743a51","withdrawals_root":"0xa6b130b3d990158fe0cdb1338e5a5f62f7b15cdd95fd7e7f75f1bd36c833af78"},"execution_branch":["0x6a487c3e35f65f2b2a736e0638e379f682ac9fe3ae077f44cdbed5cab0ee83d6","0x336488033fe5f3ef4ccc12af07b9370b92e553e35ecb4a337a1b1c0e4afe1e0e","0xdb56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71","0x7140650d46f4ce47555dd8d610d2cad816488aaf24e3c162f66ccb1d4e097eeb"]},"finality_branch":["0xa335030000000000000000000000000000000000000000000000000000000000","0x7a4d2e874c919ee6d9237014dfb6e32738fd56de9753be34687eee11f7d60d4d","0x3510fb835fab1a9149b9199601fb24041e407d54c4d088475bac4baa9b95831c","0xa66768b3c5be2b692507a3debf50f2df7873561a63cc37a95f28bb75f260e3cd","0x32e16b3a14abfce2b458c0065098a05fde3cfac009cd6bf9bdf903087dd9b11a","0xf0b5ef21d7e436ca95b538ed96dc62315aaff5b1ac2ea9e9e032b25fc4cbb7aa"],"sync_aggregate":{"sync_committee_bits":"0xf7ffeffbb7ffefbf7fffffffffdfeffdfffdfffef3f7fffffffffffef7bbfffffffbdfdfdcbdffedfff77dfeffbfffefffffffffffffffffffff7fff3fffffe7","sync_committee_signature":"0x919bce123518b9d071dd76d12c0bbd3ac9204ec4767c71ef4cb4eb7f288ab2c7653ce2bd3d2a301d4b98d345dc4164b40a24408d854f98823449d78e92b6f346ab50ae364b03d86064d0cb15850328411aceca3cf65cd0e06b2254aef95c4cbc"},"signature_slot":"6730913"}}

Binary file not shown.

Binary file not shown.

View File

@@ -0,0 +1 @@
{"version":"capella","data":{"attested_header":{"beacon":{"slot":"6730936","proposer_index":"471998","parent_root":"0x707219eb23e75295ae6b34f253d411f549cf9f053c7d6a5fa1cc9cdac4ffaa23","state_root":"0x04559eedba9c1b83b107feaf399b77edf8ed4859978a7fbee4eb38386dc516e0","body_root":"0xab8e22ac1ac86b6e487ac4cf0ab5e0d132ad068b40df439fbbdfae3fa782c9e2"},"execution":{"parent_hash":"0x8aee0bef392c8c09882adfc4da9cca2eeb00d62f075a7581d6006d2f4e07c049","fee_recipient":"0x5124fcc2b3f99f571ad67d075643c743f38f1c34","state_root":"0x8a160ce6f32ddc2a5cba58e18c2a081843cb1b53bf8caad639206b49a17ca003","receipts_root":"0xa9cbf1e627ac0e53163a417aa4665b9889655fe93f134e430ce5f5ad96152d53","logs_bloom":"0x19206ca1468407683a00200c800203e160844a200f2c63067529500a505428091240030444020aa50043bc1339100503023145088a0639286213c576106425080842c1084c8c8dc80a22c0ce4079a0fa82887c409067a98911046508832b6a00983b433166c71d0110c1d41488083c0d001b9421e0194c084a050154901c2821840a02351ec4a1925260005e4ac00a0030ca00c3c94c810950ec004700b348388bc822e2412d683630000acd31838400240815a410080006a37ac92118360060d90310028383a10824c81472183c026105a801a2b9080a102605440a00b6e0a0217ea400030c4327080545a020024740821012707a5c48404051086100415805","prev_randao":"0xf9df27cb70f7641c3eb5c37eaa6c280f947e942a49ba9b8402c822b738e92e9c","block_number":"17548195","gas_limit":"30000000","gas_used":"9038961","timestamp":"1687595255","extra_data":"0x6631622e696f","base_fee_per_gas":"13892976382","block_hash":"0xcadbb9c8f14cf792d387675e6aa2481e448dbf815f6fc45114bca332f7b0b782","transactions_root":"0x0cda78b75e408e0193c55325882f8502fa80a8782084b7d0e76561e7f4bc0788","withdrawals_root":"0xce0b0820e2f718bdc15bf180ece1245efd84631fcd54ecac4f7aae665898a04c"},"execution_branch":["0x2ca90f7f3dd21d05aac1532a30408a87032b79a9c44543af59fb122031e1e350","0x336488033fe5f3ef4ccc12af07b9370b92e553e35ecb4a337a1b1c0e4afe1e0e","0xdb56114e00fdd4c1f85c892bf35ac9a89289aaecb1ebd0a96cde606a748b5d71","0x1031d488d3fc33675fba01982eac69540b1cab7846066f4396f17e82585c31e2"]},"sync_aggregate":{"sync_committee_bits":"0xfffffffff7ffefff7ffffffffffffffffffdffffffffffffffffffffffffffffffffffffdffffffffff7fffffffffffffffffffffffffffffffffffffffffffd","sync_committee_signature":"0xb5c6a7f6ea62e5ef45c3d2064de67de4dc63cd99ea1647580c6259550e0998fdf439fba837556b3cf3dbe9cf99df022507d29c1b1d2534ff7cecdebdfe9e6e516dd9f95ce1bf7e354648fd69cdb2f2452444114b398026f7ff4d6e1065143d1b"},"signature_slot":"6730937"}}

Binary file not shown.

Binary file not shown.

View File

@@ -0,0 +1,354 @@
/**
* beacon_chain
* Copyright (c) 2023 Status Research & Development GmbH
* Licensed and distributed under either of
* * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
* * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
* at your option. This file may not be copied, modified, or distributed except according to those terms.
*/
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "libnimbus_lc.h"
#define check(condition) \
do { \
if (!(condition)) { \
printf("assertion failed: %s - %s @ %s:%d", \
#condition, __func__, __FILE__, __LINE__); \
exit(1); \
} \
} while (0)
#ifndef __DIR__
#define __DIR__ "."
#endif
ETH_RESULT_USE_CHECK
static void *readEntireFile(const char *path, int *numBytes)
{
int err;
FILE *file = fopen(path, "rb");
check(file);
err = fseek(file, 0, SEEK_END);
check(!err);
long size = ftell(file);
check(size >= 0);
err = fseek(file, 0, SEEK_SET);
check(!err);
char *buffer = malloc((size_t) size + 1);
check(buffer);
size_t actualSize = fread(buffer, 1, (size_t) size, file);
check(actualSize == (size_t) size);
buffer[size] = '\0';
fclose(file);
if (numBytes) {
check(size <= INT_MAX);
*numBytes = (int) actualSize;
}
return buffer;
}
ETH_RESULT_USE_CHECK
static ETHConsensusConfig *loadCfg(const char *path)
{
void *fileContent = readEntireFile(path, /* numBytes: */ NULL);
ETHConsensusConfig *cfg = ETHConsensusConfigCreateFromYaml(fileContent);
check(cfg);
free(fileContent);
return cfg;
}
ETH_RESULT_USE_CHECK
static ETHBeaconState *loadGenesis(const ETHConsensusConfig *cfg, const char *path)
{
const char *consensusFork = ETHConsensusConfigGetConsensusVersionAtEpoch(cfg, /* epoch: */ 0);
check(consensusFork);
int numSszBytes;
void *sszBytes = readEntireFile(path, &numSszBytes);
ETHBeaconState *state = ETHBeaconStateCreateFromSsz(
cfg, consensusFork, sszBytes, numSszBytes);
check(state);
free(sszBytes);
return state;
}
static void printHexString(const void *bytes, int numBytes)
{
const uint8_t *bytes_ = bytes;
printf("0x");
for (int i = 0; i < numBytes; i++) {
printf("%02x", bytes_[i]);
}
}
static void printGweiString(const ETHUInt256 *wei)
{
ETHUInt256 value;
memcpy(&value, wei, sizeof value);
char weiString[80];
int o = 0;
for (;;) {
bool isZero = true;
for (size_t i = 0; i < sizeof value; i++) {
if (value.bytes[i]) {
isZero = false;
break;
}
}
if (isZero) {
break;
}
uint8_t remainder = 0;
for (int i = sizeof value - 1; i >= 0; i--) {
uint16_t temp = (uint16_t) ((uint16_t) remainder << 8) | value.bytes[i];
value.bytes[i] = (uint8_t) (temp / 10);
remainder = temp % 10;
}
weiString[o++] = '0' + (char) remainder;
}
if (!o) {
weiString[o++] = '0';
}
if (o < 9) {
printf("0");
} else {
while (o > 9) {
printf("%c", weiString[--o]);
}
}
int z = 0;
while (z < o && weiString[z] == '0') {
z++;
}
if (o > z) {
printf(".");
while (o > z) {
printf("%c", weiString[--o]);
}
}
}
static void visualizeHeader(const ETHLightClientHeader *header, const ETHConsensusConfig *cfg)
{
ETHRoot *beaconRoot = ETHLightClientHeaderCopyBeaconRoot(header, cfg);
printf(" - beacon: ");
printHexString(beaconRoot, sizeof *beaconRoot);
printf("\n");
ETHRootDestroy(beaconRoot);
const ETHBeaconBlockHeader *beacon = ETHLightClientHeaderGetBeacon(header);
int beaconSlot = ETHBeaconBlockHeaderGetSlot(beacon);
printf(" - slot: %d\n", beaconSlot);
int beaconProposerIndex = ETHBeaconBlockHeaderGetProposerIndex(beacon);
printf(" - proposer_index: %d\n", beaconProposerIndex);
const ETHRoot *beaconParentRoot = ETHBeaconBlockHeaderGetParentRoot(beacon);
printf(" - parent_root: ");
printHexString(beaconParentRoot, sizeof *beaconParentRoot);
printf("\n");
const ETHRoot *beaconStateRoot = ETHBeaconBlockHeaderGetStateRoot(beacon);
printf(" - state_root: ");
printHexString(beaconStateRoot, sizeof *beaconStateRoot);
printf("\n");
const ETHRoot *beaconBodyRoot = ETHBeaconBlockHeaderGetBodyRoot(beacon);
printf(" - body_root: ");
printHexString(beaconBodyRoot, sizeof *beaconBodyRoot);
printf("\n");
ETHRoot *executionHash = ETHLightClientHeaderCopyExecutionHash(header, cfg);
printf(" - execution: ");
printHexString(executionHash, sizeof *executionHash);
printf("\n");
ETHRootDestroy(executionHash);
const ETHExecutionPayloadHeader *execution = ETHLightClientHeaderGetExecution(header);
const ETHRoot *executionParentHash = ETHExecutionPayloadHeaderGetParentHash(execution);
printf(" - parent_hash: ");
printHexString(executionParentHash, sizeof *executionParentHash);
printf("\n");
const ETHExecutionAddress *executionFeeRecipient =
ETHExecutionPayloadHeaderGetFeeRecipient(execution);
printf(" - fee_recipient: ");
printHexString(executionFeeRecipient, sizeof *executionFeeRecipient);
printf("\n");
const ETHRoot *executionStateRoot = ETHExecutionPayloadHeaderGetStateRoot(execution);
printf(" - state_root: ");
printHexString(executionStateRoot, sizeof *executionStateRoot);
printf("\n");
const ETHRoot *executionReceiptsRoot = ETHExecutionPayloadHeaderGetReceiptsRoot(execution);
printf(" - receipts_root: ");
printHexString(executionReceiptsRoot, sizeof *executionReceiptsRoot);
printf("\n");
const ETHLogsBloom *executionLogsBloom = ETHExecutionPayloadHeaderGetLogsBloom(execution);
printf(" - logs_bloom: ");
printHexString(executionLogsBloom, sizeof *executionLogsBloom);
printf("\n");
const ETHRoot *executionPrevRandao = ETHExecutionPayloadHeaderGetPrevRandao(execution);
printf(" - prev_randao: ");
printHexString(executionPrevRandao, sizeof *executionPrevRandao);
printf("\n");
int executionBlockNumber = ETHExecutionPayloadHeaderGetBlockNumber(execution);
printf(" - block_number: %d\n", executionBlockNumber);
int executionGasLimit = ETHExecutionPayloadHeaderGetGasLimit(execution);
printf(" - gas_limit: %d\n", executionGasLimit);
int executionGasUsed = ETHExecutionPayloadHeaderGetGasUsed(execution);
printf(" - gas_used: %d\n", executionGasUsed);
int executionTimestamp = ETHExecutionPayloadHeaderGetTimestamp(execution);
printf(" - timestamp: %d\n", executionTimestamp);
const void *executionExtraDataBytes = ETHExecutionPayloadHeaderGetExtraDataBytes(execution);
int numExecutionExtraDataBytes = ETHExecutionPayloadHeaderGetNumExtraDataBytes(execution);
printf(" - extra_data: ");
printHexString(executionExtraDataBytes, numExecutionExtraDataBytes);
printf("\n");
const ETHUInt256 *executionBaseFeePerGas = ETHExecutionPayloadHeaderGetBaseFeePerGas(execution);
printf(" - base_fee_per_gas: ");
printGweiString(executionBaseFeePerGas);
printf(" Gwei\n");
int executionDataGasUsed = ETHExecutionPayloadHeaderGetDataGasUsed(execution);
printf(" - data_gas_used: %d\n", executionDataGasUsed);
int executionExcessDataGas = ETHExecutionPayloadHeaderGetExcessDataGas(execution);
printf(" - excess_data_gas: %d\n", executionExcessDataGas);
}
ETH_RESULT_USE_CHECK
int main(void)
{
NimMain();
ETHRandomNumber *rng = ETHRandomNumberCreate();
check(rng);
ETHConsensusConfig *cfg = loadCfg(__DIR__ "/test_files/config.yaml");
ETHBeaconState *genesisState = loadGenesis(cfg, __DIR__ "/test_files/genesis.ssz");
ETHRoot *genesisValRoot = ETHBeaconStateCopyGenesisValidatorsRoot(genesisState);
ETHForkDigests *forkDigests = ETHForkDigestsCreateFromState(cfg, genesisState);
ETHBeaconClock *beaconClock = ETHBeaconClockCreateFromState(genesisState);
ETHBeaconStateDestroy(genesisState);
printf("Current slot: %d\n", ETHBeaconClockGetSlot(beaconClock));
printf("\n");
const ETHRoot trustedBlockRoot = {{
0x15, 0xcf, 0x56, 0xeb, 0xf8, 0x87, 0xed, 0xe9,
0xcf, 0x3f, 0xc1, 0x0a, 0x26, 0xec, 0x83, 0x82,
0x86, 0x28, 0x93, 0x2c, 0x10, 0x0e, 0x42, 0xc9,
0x8c, 0x84, 0xf8, 0x3d, 0xa7, 0x10, 0xc8, 0x63
}};
int numBootstrapBytes;
void *bootstrapBytes = readEntireFile(__DIR__ "/test_files/bootstrap.ssz", &numBootstrapBytes);
ETHLightClientStore *store = ETHLightClientStoreCreateFromBootstrap(
cfg, &trustedBlockRoot,
"application/octet-stream", "capella", bootstrapBytes, numBootstrapBytes);
check(store);
free(bootstrapBytes);
int startPeriod;
int count;
int syncKind = ETHLightClientStoreGetNextSyncTask(store, beaconClock, &startPeriod, &count);
check(syncKind == kETHLcSyncKind_UpdatesByRange);
check(startPeriod == 800);
check(count > 0 && count <= 128);
printf("Sync task: UpdatesByRange(%d, %d)\n", startPeriod, count);
int latestProcessResult;
int numUpdatesBytes;
void *updatesBytes = readEntireFile(__DIR__ "/test_files/updates.ssz", &numUpdatesBytes);
latestProcessResult = ETHLightClientStoreProcessUpdatesByRange(
store, cfg, forkDigests, genesisValRoot, beaconClock,
startPeriod, count, "application/octet-stream", updatesBytes, numUpdatesBytes);
check(!latestProcessResult);
free(updatesBytes);
int millisecondsToNextSyncTask = ETHLightClientStoreGetMillisecondsToNextSyncTask(
store, rng, beaconClock, latestProcessResult);
printf("Next sync task: %d.%03ds\n",
millisecondsToNextSyncTask / 1000,
millisecondsToNextSyncTask % 1000);
int numFinUpdateBytes;
void *finUpdateBytes = readEntireFile(__DIR__ "/test_files/finUpdate.ssz", &numFinUpdateBytes);
latestProcessResult = ETHLightClientStoreProcessFinalityUpdate(
store, cfg, forkDigests, genesisValRoot, beaconClock,
"application/octet-stream", "capella", finUpdateBytes, numFinUpdateBytes);
check(!latestProcessResult);
free(finUpdateBytes);
int numOptUpdateBytes;
void *optUpdateBytes = readEntireFile(__DIR__ "/test_files/optUpdate.ssz", &numOptUpdateBytes);
latestProcessResult = ETHLightClientStoreProcessOptimisticUpdate(
store, cfg, forkDigests, genesisValRoot, beaconClock,
"application/octet-stream", "capella", optUpdateBytes, numOptUpdateBytes);
check(!latestProcessResult);
free(optUpdateBytes);
finUpdateBytes = readEntireFile(__DIR__ "/test_files/finUpdate.json", &numFinUpdateBytes);
latestProcessResult = ETHLightClientStoreProcessFinalityUpdate(
store, cfg, forkDigests, genesisValRoot, beaconClock,
"application/json", /* consensusVersion: */ NULL, finUpdateBytes, numFinUpdateBytes);
check(!latestProcessResult);
free(finUpdateBytes);
optUpdateBytes = readEntireFile(__DIR__ "/test_files/optUpdate.json", &numOptUpdateBytes);
latestProcessResult = ETHLightClientStoreProcessOptimisticUpdate(
store, cfg, forkDigests, genesisValRoot, beaconClock,
"application/json", /* consensusVersion: */ NULL, optUpdateBytes, numOptUpdateBytes);
check(!latestProcessResult);
free(optUpdateBytes);
printf("\n");
printf("- finalized_header\n");
visualizeHeader(ETHLightClientStoreGetFinalizedHeader(store), cfg);
bool isNextSyncCommitteeKnown = ETHLightClientStoreIsNextSyncCommitteeKnown(store);
printf("- next_sync_committee: %s\n", isNextSyncCommitteeKnown ? "known" : "unknown");
printf("- optimistic_header\n");
visualizeHeader(ETHLightClientStoreGetOptimisticHeader(store), cfg);
int safetyThreshold = ETHLightClientStoreGetSafetyThreshold(store);
printf("- safety_threshold: %d\n", safetyThreshold);
ETHLightClientStoreDestroy(store);
ETHBeaconClockDestroy(beaconClock);
ETHForkDigestsDestroy(forkDigests);
ETHRootDestroy(genesisValRoot);
ETHConsensusConfigDestroy(cfg);
ETHRandomNumberDestroy(rng);
return 0;
}
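printGweiString above turns the little-endian 256-bit base_fee_per_gas from wei into a Gwei string by repeated division by 10. The same base-10 extraction, restated as a stdlib-only Nim sketch that produces the plain wei decimal string (the Gwei value is then just the last nine digits moved behind a decimal point):

func toDecimalString(littleEndian: array[32, uint8]): string =
  # Repeatedly divide the 256-bit value by 10, collecting remainders
  var value = littleEndian
  var digits: seq[char] = @[]
  while true:
    var isZero = true
    for b in value:
      if b != 0'u8:
        isZero = false
        break
    if isZero:
      break
    var remainder = 0'u16
    for i in countdown(value.high, 0):
      let temp = (remainder shl 8) or value[i].uint16
      value[i] = uint8(temp div 10)
      remainder = temp mod 10
    digits.add(char(ord('0') + int(remainder)))
  if digits.len == 0:
    digits.add('0')
  for i in countdown(digits.high, 0):   # remainders come out least-significant first
    result.add(digits[i])

when isMainModule:
  var wei: array[32, uint8]
  wei[0] = 0xEB'u8                      # 1003 wei = 0x03EB, little-endian
  wei[1] = 0x03'u8
  doAssert toDecimalString(wei) == "1003"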

View File

@@ -353,7 +353,7 @@ proc installMessageValidators*(
digest = forkDigests[].atConsensusFork(contextFork)
# light_client_optimistic_update
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/p2p-interface.md#light_client_finality_update
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/p2p-interface.md#light_client_finality_update
lightClient.network.addValidator(
getLightClientFinalityUpdateTopic(digest), proc (
msg: lcDataFork.LightClientFinalityUpdate
@@ -361,7 +361,7 @@ proc installMessageValidators*(
validate(msg, contextFork, processLightClientFinalityUpdate))
# light_client_optimistic_update
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/p2p-interface.md#light_client_optimistic_update
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/p2p-interface.md#light_client_optimistic_update
lightClient.network.addValidator(
getLightClientOptimisticUpdateTopic(digest), proc (
msg: lcDataFork.LightClientOptimisticUpdate

View File

@@ -341,7 +341,7 @@ func shortProtocolId(protocolId: string): string =
proc openStream(node: Eth2Node,
peer: Peer,
protocolId: string): Future[Connection] {.async.} =
# When dialling here, we do not provide addresses - all new connection
# When dialing here, we do not provide addresses - all new connection
# attempts are handled via `connect` which also takes into account
# reconnection timeouts
let
@@ -354,6 +354,10 @@ proc init(T: type Peer, network: Eth2Node, peerId: PeerId): Peer {.gcsafe.}
func peerId*(node: Eth2Node): PeerId =
node.switch.peerInfo.peerId
func nodeId*(node: Eth2Node): NodeId =
# `secp256k1` keys are always stored inside PeerId.
toNodeId(keys.PublicKey(node.switch.peerInfo.publicKey.skkey))
func enrRecord*(node: Eth2Node): Record =
node.discovery.localNode.record
@@ -716,7 +720,7 @@ proc uncompressFramedStream(conn: Connection,
doAssert maxCompressedFrameDataLen >= maxUncompressedFrameDataLen.uint64
var
frameData = newSeq[byte](maxCompressedFrameDataLen + 4)
frameData = newSeqUninitialized[byte](maxCompressedFrameDataLen + 4)
output = newSeqUninitialized[byte](expectedSize)
written = 0
@@ -2251,8 +2255,8 @@ proc getPersistentNetKeys*(
func gossipId(
data: openArray[byte], phase0Prefix, topic: string): seq[byte] =
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/p2p-interface.md#topics-and-messages
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/p2p-interface.md#topics-and-messages
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/p2p-interface.md#topics-and-messages
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/p2p-interface.md#topics-and-messages
const
MESSAGE_DOMAIN_INVALID_SNAPPY = [0x00'u8, 0x00, 0x00, 0x00]
MESSAGE_DOMAIN_VALID_SNAPPY = [0x01'u8, 0x00, 0x00, 0x00]
@@ -2271,8 +2275,14 @@ func gossipId(
proc newBeaconSwitch(config: BeaconNodeConf | LightClientConf,
seckey: PrivateKey, address: MultiAddress,
rng: ref HmacDrbgContext): Switch {.raises: [Defect, CatchableError].} =
SwitchBuilder
.new()
var sb =
if config.enableYamux:
SwitchBuilder.new().withYamux()
else:
SwitchBuilder.new()
# Order of multiplexers matters, the first will be default
sb
.withPrivateKey(seckey)
.withAddress(address)
.withRng(rng)
@@ -2544,7 +2554,7 @@ proc updateStabilitySubnetMetadata*(node: Eth2Node, attnets: AttnetBits) =
node.metadata.seq_number += 1
node.metadata.attnets = attnets
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/validator.md#phase-0-attestation-subnet-stability
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/p2p-interface.md#attestation-subnet-subscription
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/p2p-interface.md#attestation-subnet-bitfield
let res = node.discovery.updateRecord({
enrAttestationSubnetsField: SSZ.encode(node.metadata.attnets)
@@ -2557,7 +2567,7 @@ proc updateStabilitySubnetMetadata*(node: Eth2Node, attnets: AttnetBits) =
debug "Stability subnets changed; updated ENR attnets", attnets
proc updateSyncnetsMetadata*(node: Eth2Node, syncnets: SyncnetBits) =
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/validator.md#sync-committee-subnet-stability
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/validator.md#sync-committee-subnet-stability
if node.metadata.syncnets == syncnets:
return
@@ -2663,6 +2673,14 @@ proc broadcastBeaconBlock*(
let topic = getBeaconBlocksTopic(node.forkDigests.deneb)
node.broadcast(topic, blck)
proc broadcastBlobSidecar*(
node: Eth2Node, subnet_id: SubnetId, blob: deneb.SignedBlobSidecar):
Future[SendResult] =
let
forkPrefix = node.forkDigestAtEpoch(node.getWallEpoch)
topic = getBlobSidecarTopic(forkPrefix, subnet_id)
node.broadcast(topic, blob)
proc broadcastSyncCommitteeMessage*(
node: Eth2Node, msg: SyncCommitteeMessage,
subcommitteeIdx: SyncSubcommitteeIndex): Future[SendResult] =
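The new broadcastBlobSidecar follows the spec rename from blob_sidecar_{index} to blob_sidecar_{subnet_id}: the topic combines the wall-epoch fork digest with the subnet id. A rough sketch of the expected topic shape, assuming the usual /eth2/<fork_digest>/<name>/ssz_snappy gossip layout (helper name and digest value here are illustrative, not taken from the codebase):

import std/strformat

func blobSidecarTopic(forkDigestHex: string, subnetId: uint64): string =
  # e.g. "/eth2/4a26c58b/blob_sidecar_3/ssz_snappy"
  &"/eth2/{forkDigestHex}/blob_sidecar_{subnetId}/ssz_snappy"

when isMainModule:
  echo blobSidecarTopic("4a26c58b", 3)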

View File

@@ -255,9 +255,6 @@ elif const_preset == "mainnet":
{.compile: "network_metadata_mainnet.S".}
const
eth2NetworksDir = vendorDir & "/eth2-networks"
sepoliaDir = vendorDir & "/sepolia"
mainnetMetadata = loadCompileTimeNetworkMetadata(
vendorDir & "/eth2-networks/shared/mainnet", some mainnet, not incbinEnabled)
praterMetadata = loadCompileTimeNetworkMetadata(

View File

@@ -324,7 +324,8 @@ proc initFullNode(
blobQuarantine = newClone(BlobQuarantine())
consensusManager = ConsensusManager.new(
dag, attestationPool, quarantine, node.elManager,
ActionTracker.init(rng, config.subscribeAllSubnets),
ActionTracker.init(rng, node.network.nodeId, config.subscribeAllSubnets,
config.useOldStabilitySubnets),
node.dynamicFeeRecipientsStore, config.validatorsDir,
config.defaultFeeRecipient, config.suggestedGasLimit)
blockProcessor = BlockProcessor.new(
@@ -368,6 +369,8 @@ proc initFullNode(
resfut,
maybeFinalized = maybeFinalized)
resfut
processor = Eth2Processor.new(
config.doppelgangerDetection,
blockProcessor, node.validatorMonitor, dag, attestationPool,
@@ -385,6 +388,10 @@ proc initFullNode(
router = (ref MessageRouter)(
processor: processor,
network: node.network)
requestManager = RequestManager.init(
node.network, dag.cfg.DENEB_FORK_EPOCH, getBeaconTime,
(proc(): bool = syncManager.inProgress),
quarantine, blobQuarantine, rmanBlockVerifier)
if node.config.lightClientDataServe:
proc scheduleSendingLightClientUpdates(slot: Slot) =
@@ -416,12 +423,7 @@ proc initFullNode(
node.processor = processor
node.blockProcessor = blockProcessor
node.consensusManager = consensusManager
node.requestManager = RequestManager.init(node.network,
dag.cfg.DENEB_FORK_EPOCH,
getBeaconTime,
quarantine,
blobQuarantine,
rmanBlockVerifier)
node.requestManager = requestManager
node.syncManager = syncManager
node.backfiller = backfiller
node.router = router
@@ -689,6 +691,7 @@ proc init*(T: type BeaconNode,
config.secretsDir,
config.defaultFeeRecipient,
config.suggestedGasLimit,
config.getPayloadBuilderAddress,
getValidatorAndIdx,
getBeaconTime,
getForkForEpoch,
@@ -769,7 +772,7 @@ func forkDigests(node: BeaconNode): auto =
node.dag.forkDigests.deneb]
forkDigestsArray
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/validator.md#phase-0-attestation-subnet-stability
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/p2p-interface.md#attestation-subnet-subscription
proc updateAttestationSubnetHandlers(node: BeaconNode, slot: Slot) =
if node.gossipState.card == 0:
# When disconnected, updateGossipState is responsible for all things
@@ -1141,6 +1144,15 @@ proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} =
# Things we do when slot processing has ended and we're about to wait for the
# next slot
# By waiting until close before slot end, ensure that preparation for next
# slot does not interfere with propagation of messages and with VC duties.
const endOffset = aggregateSlotOffset + nanos(
(NANOSECONDS_PER_SLOT - aggregateSlotOffset.nanoseconds.uint64).int64 div 2)
let endCutoff = node.beaconClock.fromNow(slot.start_beacon_time + endOffset)
if endCutoff.inFuture:
debug "Waiting for slot end", slot, endCutoff = shortLog(endCutoff.offset)
await sleepAsync(endCutoff.offset)
if node.dag.needStateCachesAndForkChoicePruning():
if node.attachedValidators[].validators.len > 0:
node.attachedValidators[]
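For mainnet timing, the slot-end wait added above lands at the 10-second mark: aggregateSlotOffset is two thirds of the 12-second slot (8s) and endOffset adds half of the remaining third. A quick check of that arithmetic:

const
  NANOSECONDS_PER_SLOT = 12_000_000_000'i64   # 12s mainnet slots
  INTERVALS_PER_SLOT = 3'i64

let
  aggregateSlotOffset = NANOSECONDS_PER_SLOT * 2 div INTERVALS_PER_SLOT
  endOffset = aggregateSlotOffset +
    (NANOSECONDS_PER_SLOT - aggregateSlotOffset) div 2

doAssert aggregateSlotOffset == 8_000_000_000   # aggregation work at 8s
doAssert endOffset == 10_000_000_000            # slot-end work starts at 10s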
@@ -1309,6 +1321,8 @@ proc onSlotStart(node: BeaconNode, wallTime: BeaconTime,
finalizedEpoch = node.dag.finalizedHead.blck.slot.epoch()
delay = wallTime - expectedSlot.start_beacon_time()
node.processingDelay = Opt.some(nanoseconds(delay.nanoseconds))
info "Slot start",
slot = shortLog(wallSlot),
epoch = shortLog(wallSlot.epoch),
@@ -1389,16 +1403,9 @@ proc handleMissingBlobs(node: BeaconNode) =
debug "Requesting detected missing blobs", blobs = shortLog(fetches)
node.requestManager.fetchMissingBlobs(fetches)
proc handleMissingBlocks(node: BeaconNode) =
let missingBlocks = node.quarantine[].checkMissing()
if missingBlocks.len > 0:
debug "Requesting detected missing blocks", blocks = shortLog(missingBlocks)
node.requestManager.fetchAncestorBlocks(missingBlocks)
proc onSecond(node: BeaconNode, time: Moment) =
## This procedure will be called once per second.
if not(node.syncManager.inProgress):
node.handleMissingBlocks()
node.handleMissingBlobs()
# Nim GC metrics (for the main thread)
@@ -1520,7 +1527,7 @@ proc installMessageValidators(node: BeaconNode) =
when consensusFork >= ConsensusFork.Altair:
# sync_committee_{subnet_id}
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/p2p-interface.md#sync_committee_subnet_id
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/p2p-interface.md#sync_committee_subnet_id
for subcommitteeIdx in SyncSubcommitteeIndex:
closureScope: # Needed for inner `proc`; don't lift it out of loop.
let idx = subcommitteeIdx
@@ -1543,8 +1550,7 @@ proc installMessageValidators(node: BeaconNode) =
MsgSource.gossip, msg)))
when consensusFork >= ConsensusFork.Capella:
# sync_committee_contribution_and_proof
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/capella/p2p-interface.md#bls_to_execution_change
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/capella/p2p-interface.md#bls_to_execution_change
node.network.addAsyncValidator(
getBlsToExecutionChangeTopic(digest), proc (
msg: SignedBLSToExecutionChange
@@ -1555,12 +1561,12 @@ proc installMessageValidators(node: BeaconNode) =
when consensusFork >= ConsensusFork.Deneb:
# blob_sidecar_{index}
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/p2p-interface.md#blob_sidecar_index
for i in 0 ..< MAX_BLOBS_PER_BLOCK:
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/deneb/p2p-interface.md#blob_sidecar_subnet_id
for i in 0 ..< BLOB_SIDECAR_SUBNET_COUNT:
closureScope: # Needed for inner `proc`; don't lift it out of loop.
let idx = i
node.network.addValidator(
getBlobSidecarTopic(digest, idx), proc (
getBlobSidecarTopic(digest, SubnetId(idx)), proc (
signedBlobSidecar: SignedBlobSidecar
): ValidationResult =
toValidationResult(

View File

@@ -113,7 +113,7 @@ programMain:
template payload(): auto = blck.message.body.execution_payload
if elManager != nil and not payload.block_hash.isZero:
discard await elManager.newExecutionPayload(blck.message.body)
discard await elManager.newExecutionPayload(blck.message)
discard await elManager.forkchoiceUpdated(
headBlockHash = payload.block_hash,
safeBlockHash = payload.block_hash, # stub value
@@ -124,7 +124,7 @@ programMain:
template payload(): auto = blck.message.body.execution_payload
if elManager != nil and not payload.block_hash.isZero:
discard await elManager.newExecutionPayload(blck.message.body)
discard await elManager.newExecutionPayload(blck.message)
discard await elManager.forkchoiceUpdated(
headBlockHash = payload.block_hash,
safeBlockHash = payload.block_hash, # stub value

View File

@@ -45,7 +45,7 @@ type
SigningNodeError* = object of CatchableError
proc validate(key: string, value: string): int =
func validate(key: string, value: string): int =
case key
of "{validator_key}":
0
@@ -219,10 +219,9 @@ proc installApiHandlers*(node: SigningNodeRef) =
let
forkInfo = request.forkInfo.get()
blockRoot = hash_tree_root(request.beaconBlock)
signature = withBlck(request.beaconBlock):
get_block_signature(forkInfo.fork,
forkInfo.genesis_validators_root, blck.slot, blockRoot,
validator.data.privateKey).toValidatorSig().toHex()
signature = get_block_signature(forkInfo.fork,
forkInfo.genesis_validators_root, request.beaconBlock.data.slot,
blockRoot, validator.data.privateKey).toValidatorSig().toHex()
return signatureResponse(Http200, signature)
let (feeRecipientIndex, blockHeader) =
@@ -231,12 +230,10 @@ proc installApiHandlers*(node: SigningNodeRef) =
# `phase0` and `altair` blocks do not have `fee_recipient`, so
# we return an error.
return errorResponse(Http400, BlockIncorrectFork)
of ConsensusFork.Bellatrix:
(GeneralizedIndex(401), request.beaconBlock.bellatrixData)
of ConsensusFork.Capella:
(GeneralizedIndex(401), request.beaconBlock.capellaData)
of ConsensusFork.Bellatrix, ConsensusFork.Capella:
(GeneralizedIndex(401), request.beaconBlock.data)
of ConsensusFork.Deneb:
(GeneralizedIndex(801), request.beaconBlock.denebData)
(GeneralizedIndex(801), request.beaconBlock.data)
if request.proofs.isNone() or len(request.proofs.get()) == 0:
return errorResponse(Http400, MissingMerkleProofError)
@@ -258,10 +255,9 @@ proc installApiHandlers*(node: SigningNodeRef) =
let
forkInfo = request.forkInfo.get()
blockRoot = hash_tree_root(request.beaconBlock)
signature = withBlck(request.beaconBlock):
get_block_signature(forkInfo.fork,
forkInfo.genesis_validators_root, blck.slot, blockRoot,
validator.data.privateKey).toValidatorSig().toHex()
signature = get_block_signature(forkInfo.fork,
forkInfo.genesis_validators_root, request.beaconBlock.data.slot,
blockRoot, validator.data.privateKey).toValidatorSig().toHex()
signatureResponse(Http200, signature)
of Web3SignerRequestKind.Deposit:
let

View File

@@ -177,6 +177,8 @@ proc runVCSlotLoop(vc: ValidatorClientRef) {.async.} =
if checkIfShouldStopAtEpoch(wallSlot, vc.config.stopAtEpoch):
return
vc.processingDelay = Opt.some(nanoseconds(delay.nanoseconds))
if len(vc.beaconNodes) > 1:
let
counts = vc.getNodeCounts()
@@ -197,7 +199,8 @@ proc runVCSlotLoop(vc: ValidatorClientRef) {.async.} =
blockIn = vc.getDurationToNextBlock(wallSlot),
validators = vc.attachedValidators[].count(),
good_nodes = goodNodes, viable_nodes = viableNodes,
bad_nodes = badNodes, delay = shortLog(delay)
bad_nodes = badNodes,
delay = shortLog(delay)
else:
info "Slot start",
slot = shortLog(wallSlot),
@@ -336,6 +339,7 @@ proc asyncInit(vc: ValidatorClientRef): Future[ValidatorClientRef] {.async.} =
vc.config.secretsDir,
vc.config.defaultFeeRecipient,
vc.config.suggestedGasLimit,
Opt.none(string),
nil,
vc.beaconClock.getBeaconTimeFn,
getForkForEpoch,

View File

@@ -848,10 +848,32 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
doAssert strictVerification notin node.dag.updateFlags
return RestApiResponse.jsonError(Http400, InvalidBlockObjectError)
withBlck(forked):
case restBlock.kind
of ConsensusFork.Phase0:
var blck = restBlock.phase0Data
blck.root = hash_tree_root(blck.message)
# TODO: Fetch blobs from EE when blck is deneb.SignedBeaconBlock
await node.router.routeSignedBeaconBlock(blck)
await node.router.routeSignedBeaconBlock(blck,
Opt.none(SignedBlobSidecars))
of ConsensusFork.Altair:
var blck = restBlock.altairData
blck.root = hash_tree_root(blck.message)
await node.router.routeSignedBeaconBlock(blck,
Opt.none(SignedBlobSidecars))
of ConsensusFork.Bellatrix:
var blck = restBlock.bellatrixData
blck.root = hash_tree_root(blck.message)
await node.router.routeSignedBeaconBlock(blck,
Opt.none(SignedBlobSidecars))
of ConsensusFork.Capella:
var blck = restBlock.capellaData
blck.root = hash_tree_root(blck.message)
await node.router.routeSignedBeaconBlock(blck,
Opt.none(SignedBlobSidecars))
of ConsensusFork.Deneb:
var blck = restBlock.denebData.signed_block
blck.root = hash_tree_root(blck.message)
await node.router.routeSignedBeaconBlock(
blck, Opt.some(asSeq restBlock.denebData.signed_blob_sidecars))
if res.isErr():
return RestApiResponse.jsonError(
@@ -950,7 +972,8 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
let res = withBlck(forked):
blck.root = hash_tree_root(blck.message)
await node.router.routeSignedBeaconBlock(blck)
await node.router.routeSignedBeaconBlock(blck,
Opt.none(SignedBlobSidecars))
if res.isErr():
return RestApiResponse.jsonError(

View File

@@ -88,7 +88,7 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =
MAX_VOLUNTARY_EXITS:
Base10.toString(MAX_VOLUNTARY_EXITS),
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/presets/mainnet/altair.yaml
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/presets/mainnet/altair.yaml
INACTIVITY_PENALTY_QUOTIENT_ALTAIR:
Base10.toString(INACTIVITY_PENALTY_QUOTIENT_ALTAIR),
MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR:
@@ -104,7 +104,7 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =
UPDATE_TIMEOUT:
Base10.toString(UPDATE_TIMEOUT),
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/presets/mainnet/bellatrix.yaml
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/presets/mainnet/bellatrix.yaml
INACTIVITY_PENALTY_QUOTIENT_BELLATRIX:
Base10.toString(INACTIVITY_PENALTY_QUOTIENT_BELLATRIX),
MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX:
@@ -120,7 +120,7 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =
MAX_EXTRA_DATA_BYTES:
Base10.toString(uint64(MAX_EXTRA_DATA_BYTES)),
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/presets/mainnet/capella.yaml
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/presets/mainnet/capella.yaml
MAX_BLS_TO_EXECUTION_CHANGES:
Base10.toString(uint64(MAX_BLS_TO_EXECUTION_CHANGES)),
MAX_WITHDRAWALS_PER_PAYLOAD:
@@ -253,14 +253,13 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/validator.md#constants
TARGET_AGGREGATORS_PER_COMMITTEE:
Base10.toString(TARGET_AGGREGATORS_PER_COMMITTEE),
RANDOM_SUBNETS_PER_VALIDATOR:
Base10.toString(RANDOM_SUBNETS_PER_VALIDATOR),
RANDOM_SUBNETS_PER_VALIDATOR: "1",
EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION:
Base10.toString(EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION),
ATTESTATION_SUBNET_COUNT:
Base10.toString(ATTESTATION_SUBNET_COUNT),
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/validator.md#constants
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/validator.md#constants
TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE:
Base10.toString(uint64(TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE)),
SYNC_COMMITTEE_SUBNET_COUNT:

View File

@@ -183,8 +183,6 @@ const
"No Finalized Snapshot Available"
NoImplementationError* =
"Not implemented yet"
KeystoreAdditionFailure =
"Could not add some keystores"
InvalidKeystoreObjects* =
"Invalid keystore objects found"
InvalidValidatorPublicKey* =
@@ -241,3 +239,6 @@ const
"The given merkle proof index is invalid"
FailedToObtainForkError* =
"Failed to obtain fork information"
InvalidTimestampValue* =
"Invalid or missing timestamp value"

View File

@@ -428,3 +428,27 @@ proc installNimbusApiHandlers*(router: var RestRouter, node: BeaconNode) =
all_peers: allPeers
)
)
router.api(MethodPost, "/nimbus/v1/timesync") do (
contentBody: Option[ContentBody]) -> RestApiResponse:
let
timestamp2 = getTimestamp()
timestamp1 =
block:
if contentBody.isNone():
return RestApiResponse.jsonError(Http400, EmptyRequestBodyError)
let dres = decodeBody(RestNimbusTimestamp1, contentBody.get())
if dres.isErr():
return RestApiResponse.jsonError(Http400,
InvalidTimestampValue,
$dres.error())
dres.get().timestamp1
let
delay = node.processingDelay.valueOr: ZeroDuration
response = RestNimbusTimestamp2(
timestamp1: timestamp1,
timestamp2: timestamp2,
timestamp3: getTimestamp(),
delay: uint64(delay.nanoseconds)
)
return RestApiResponse.jsonResponsePlain(response)
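The timesync endpoint hands back the three timestamps an NTP-style exchange needs; together with a fourth timestamp taken by the client on receipt, round-trip time and clock offset can be estimated. A sketch of the client-side arithmetic using the standard NTP formulas (how the validator client actually consumes the response, including the delay field, is not shown in this diff and is not assumed here):

type TimesyncSample = object
  t1: int64   # client send time (timestamp1, ns)
  t2: int64   # server receive time (timestamp2, ns)
  t3: int64   # server send time (timestamp3, ns)
  t4: int64   # client receive time (ns)

func roundTripTime(s: TimesyncSample): int64 =
  (s.t4 - s.t1) - (s.t3 - s.t2)

func clockOffset(s: TimesyncSample): int64 =
  ((s.t2 - s.t1) + (s.t3 - s.t4)) div 2

when isMainModule:
  let s = TimesyncSample(t1: 0, t2: 40, t3: 50, t4: 70)
  doAssert roundTripTime(s) == 60   # 70ns total minus 10ns spent on the server
  doAssert clockOffset(s) == 10     # server clock ~10ns ahead in this toy sample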

View File

@@ -10,7 +10,7 @@
import std/[options, macros],
stew/byteutils, presto,
../spec/[forks],
../spec/eth2_apis/[rest_types, eth2_rest_serialization],
../spec/eth2_apis/[rest_types, eth2_rest_serialization, rest_common],
../validators/validator_duties,
../consensus_object_pools/blockchain_dag,
../beacon_node,
@@ -18,7 +18,7 @@ import std/[options, macros],
export
options, eth2_rest_serialization, blockchain_dag, presto, rest_types,
rest_constants
rest_constants, rest_common
type
ValidatorIndexError* {.pure.} = enum
@@ -71,7 +71,7 @@ func getBlockSlotId*(node: BeaconNode,
return err("Requesting state too far ahead of current head")
let bsi = node.dag.getBlockIdAtSlot(stateIdent.slot).valueOr:
return err("State for given slot not found, history not available?")
return err("History for given slot not available")
ok(bsi)
@@ -79,8 +79,21 @@ func getBlockSlotId*(node: BeaconNode,
if stateIdent.root == getStateRoot(node.dag.headState):
ok(node.dag.head.bid.atSlot())
else:
# The `state_roots` field holds 8k historical state roots but not the
# one of the current state - this trick allows us to lookup states without
# keeping an on-disk index.
let headSlot = getStateField(node.dag.headState, slot)
for i in 0'u64..<SLOTS_PER_HISTORICAL_ROOT:
if i >= headSlot:
break
if getStateField(node.dag.headState, state_roots).item(
(headSlot - i - 1) mod SLOTS_PER_HISTORICAL_ROOT) ==
stateIdent.root:
return node.dag.getBlockIdAtSlot(headSlot - i - 1).orErr(
cstring("History for for given root not available"))
# We don't have a state root -> BlockSlot mapping
err("State for given root not found")
err("State root not found - use by-slot lookup to query deep state history")
of StateQueryKind.Named:
case stateIdent.value
of StateIdentType.Head:

View File

@@ -395,14 +395,6 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
let res =
case node.dag.cfg.consensusForkAtEpoch(qslot.epoch)
of ConsensusFork.Deneb:
# TODO
# We should return a block with sidecars here
# https://github.com/ethereum/beacon-APIs/pull/302/files
# The code paths leading to makeBeaconBlockForHeadAndSlot are already
# partially refactored to make it possible to return the blobs from
# the call, but the signature of the call needs to be changed further
# to access the blobs here.
discard $denebImplementationMissing
await makeBeaconBlockForHeadAndSlot(
deneb.ExecutionPayloadForSigning,
node, qrandao, proposer, qgraffiti, qhead, qslot)
@@ -418,17 +410,43 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
return RestApiResponse.jsonError(Http400, InvalidSlotValueError)
if res.isErr():
return RestApiResponse.jsonError(Http400, res.error())
res.get.blck
res.get
return
if contentType == sszMediaType:
let headers = [("eth-consensus-version", message.kind.toString())]
withBlck(message):
withBlck(message.blck):
let data =
when blck is deneb.BeaconBlock:
let bundle = message.blobsBundleOpt.get()
let blockRoot = hash_tree_root(blck)
var sidecars = newSeqOfCap[BlobSidecar](bundle.blobs.len)
for i in 0..<bundle.blobs.len:
let sidecar = deneb.BlobSidecar(
block_root: blockRoot,
index: BlobIndex(i),
slot: blck.slot,
block_parent_root: blck.parent_root,
proposer_index: blck.proposer_index,
blob: bundle.blobs[i],
kzg_commitment: bundle.kzgs[i],
kzg_proof: bundle.proofs[i]
)
sidecars.add(sidecar)
DenebBlockContents(
`block`: blck,
blob_sidecars: List[BlobSidecar,
Limit MAX_BLOBS_PER_BLOCK].init(sidecars))
elif blck is phase0.BeaconBlock or blck is altair.BeaconBlock or
blck is bellatrix.BeaconBlock or blck is capella.BeaconBlock:
blck
else:
static: raiseAssert "produceBlockV2 received unexpected version"
if contentType == sszMediaType:
let headers = [("eth-consensus-version", message.blck.kind.toString())]
RestApiResponse.sszResponse(blck, headers)
elif contentType == jsonMediaType:
withBlck(message):
RestApiResponse.jsonResponseWVersion(blck, message.kind)
else:
raiseAssert "preferredContentType() returns invalid content type"
elif contentType == jsonMediaType:
RestApiResponse.jsonResponseWVersion(blck, message.blck.kind)
else:
raiseAssert "preferredContentType() returns invalid content type"
# https://ethereum.github.io/beacon-APIs/#/Validator/produceBlindedBlock
# https://github.com/ethereum/beacon-APIs/blob/v2.4.0/apis/validator/blinded_block.yaml

View File

@@ -43,12 +43,12 @@ const
GENESIS_SLOT* = Slot(0)
GENESIS_EPOCH* = Epoch(0) # compute_epoch_at_slot(GENESIS_SLOT)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/fork-choice.md#constant
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/fork-choice.md#constant
INTERVALS_PER_SLOT* = 3
FAR_FUTURE_BEACON_TIME* = BeaconTime(ns_since_genesis: int64.high())
NANOSECONDS_PER_SLOT = SECONDS_PER_SLOT * 1_000_000_000'u64
NANOSECONDS_PER_SLOT* = SECONDS_PER_SLOT * 1_000_000_000'u64
template ethTimeUnit*(typ: type) {.dirty.} =
func `+`*(x: typ, y: uint64): typ {.borrow.}
@@ -134,22 +134,22 @@ template `+`*(a: TimeDiff, b: Duration): TimeDiff =
const
# Offsets from the start of the slot to when the corresponding message should
# be sent
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/validator.md#attesting
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/validator.md#attesting
attestationSlotOffset* = TimeDiff(nanoseconds:
NANOSECONDS_PER_SLOT.int64 div INTERVALS_PER_SLOT)
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/validator.md#broadcast-aggregate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/validator.md#broadcast-aggregate
aggregateSlotOffset* = TimeDiff(nanoseconds:
NANOSECONDS_PER_SLOT.int64 * 2 div INTERVALS_PER_SLOT)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/validator.md#prepare-sync-committee-message
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/validator.md#prepare-sync-committee-message
syncCommitteeMessageSlotOffset* = TimeDiff(nanoseconds:
NANOSECONDS_PER_SLOT.int64 div INTERVALS_PER_SLOT)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/validator.md#broadcast-sync-committee-contribution
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/validator.md#broadcast-sync-committee-contribution
syncContributionSlotOffset* = TimeDiff(nanoseconds:
NANOSECONDS_PER_SLOT.int64 * 2 div INTERVALS_PER_SLOT)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/p2p-interface.md#sync-committee
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/p2p-interface.md#sync-committee
lightClientFinalityUpdateSlotOffset* = TimeDiff(nanoseconds:
NANOSECONDS_PER_SLOT.int64 div INTERVALS_PER_SLOT)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/p2p-interface.md#sync-committee
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/p2p-interface.md#sync-committee
lightClientOptimisticUpdateSlotOffset* = TimeDiff(nanoseconds:
NANOSECONDS_PER_SLOT.int64 div INTERVALS_PER_SLOT)
@@ -189,7 +189,7 @@ func epoch*(slot: Slot): Epoch = # aka compute_epoch_at_slot
if slot == FAR_FUTURE_SLOT: FAR_FUTURE_EPOCH
else: Epoch(slot div SLOTS_PER_EPOCH)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/fork-choice.md#compute_slots_since_epoch_start
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/fork-choice.md#compute_slots_since_epoch_start
func since_epoch_start*(slot: Slot): uint64 = # aka compute_slots_since_epoch_start
## How many slots since the beginning of the epoch (`[0..SLOTS_PER_EPOCH-1]`)
(slot mod SLOTS_PER_EPOCH)
@@ -217,7 +217,7 @@ iterator slots*(epoch: Epoch): Slot =
for slot in start_slot ..< start_slot + SLOTS_PER_EPOCH:
yield slot
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/validator.md#sync-committee
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/validator.md#sync-committee
template sync_committee_period*(epoch: Epoch): SyncCommitteePeriod =
if epoch == FAR_FUTURE_EPOCH: FAR_FUTURE_PERIOD
else: SyncCommitteePeriod(epoch div EPOCHS_PER_SYNC_COMMITTEE_PERIOD)

View File

@@ -22,7 +22,7 @@ from ./datatypes/capella import BeaconState, ExecutionPayloadHeader, Withdrawal
export extras, forks, validator, chronicles
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#increase_balance
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#increase_balance
func increase_balance*(balance: var Gwei, delta: Gwei) =
balance += delta
@@ -32,7 +32,7 @@ func increase_balance*(
if delta != 0: # avoid dirtying the balance cache if not needed
increase_balance(state.balances.mitem(index), delta)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#decrease_balance
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#decrease_balance
func decrease_balance*(balance: var Gwei, delta: Gwei) =
balance =
if delta > balance:
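The hunk cuts decrease_balance off mid-body; the pattern it implements is a saturating subtraction that clamps at zero instead of underflowing. A self-contained sketch, with plain uint64 standing in for Gwei:
# Saturating balance arithmetic, sketched with uint64 instead of the Gwei type.
func increaseBalance(balance: var uint64, delta: uint64) =
  balance += delta
func decreaseBalance(balance: var uint64, delta: uint64) =
  # Clamp at zero rather than wrapping the unsigned counter.
  balance = if delta > balance: 0'u64 else: balance - delta
var b = 5'u64
decreaseBalance(b, 7)
doAssert b == 0
increaseBalance(b, 3)
doAssert b == 3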
@@ -72,7 +72,7 @@ func compute_activation_exit_epoch*(epoch: Epoch): Epoch =
## ``epoch`` take effect.
epoch + 1 + MAX_SEED_LOOKAHEAD
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#get_validator_churn_limit
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#get_validator_churn_limit
func get_validator_churn_limit*(
cfg: RuntimeConfig, state: ForkyBeaconState, cache: var StateCache):
uint64 =
@@ -134,8 +134,8 @@ func initiate_validator_exit*(
from ./datatypes/deneb import BeaconState
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/beacon-chain.md#modified-slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/bellatrix/beacon-chain.md#modified-slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/beacon-chain.md#modified-slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/bellatrix/beacon-chain.md#modified-slash_validator
func get_slashing_penalty*(state: ForkyBeaconState,
validator_effective_balance: Gwei): Gwei =
# TODO Consider whether this is better than splitting the functions apart; in
@@ -150,9 +150,9 @@ func get_slashing_penalty*(state: ForkyBeaconState,
else:
{.fatal: "invalid BeaconState type".}
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/beacon-chain.md#modified-slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/bellatrix/beacon-chain.md#modified-slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/beacon-chain.md#modified-slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/bellatrix/beacon-chain.md#modified-slash_validator
func get_whistleblower_reward*(validator_effective_balance: Gwei): Gwei =
validator_effective_balance div WHISTLEBLOWER_REWARD_QUOTIENT
@@ -168,9 +168,9 @@ func get_proposer_reward(state: ForkyBeaconState, whistleblower_reward: Gwei): G
else:
{.fatal: "invalid BeaconState type".}
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/altair/beacon-chain.md#modified-slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/bellatrix/beacon-chain.md#modified-slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/beacon-chain.md#modified-slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/bellatrix/beacon-chain.md#modified-slash_validator
proc slash_validator*(
cfg: RuntimeConfig, state: var ForkyBeaconState,
slashed_index: ValidatorIndex, cache: var StateCache):
@@ -380,7 +380,7 @@ proc is_valid_indexed_attestation*(
ok()
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#get_attesting_indices
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#get_attesting_indices
func get_attesting_indices*(state: ForkyBeaconState,
data: AttestationData,
bits: CommitteeValidatorsBits,
@@ -462,8 +462,12 @@ func check_attestation_target_epoch(
ok(data.target.epoch)
func check_attestation_inclusion(attestation_slot: Slot,
current_slot: Slot): Result[void, cstring] =
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#attestations
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/beacon-chain.md#modified-process_attestation
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/deneb/beacon-chain.md#modified-process_attestation
func check_attestation_inclusion(
consensusFork: static ConsensusFork, attestation_slot: Slot,
current_slot: Slot): Result[void, cstring] =
# Check for overflow
static:
doAssert SLOTS_PER_EPOCH >= MIN_ATTESTATION_INCLUSION_DELAY
@@ -473,8 +477,9 @@ func check_attestation_inclusion(attestation_slot: Slot,
if not (attestation_slot + MIN_ATTESTATION_INCLUSION_DELAY <= current_slot):
return err("Attestation too new")
if not (current_slot <= attestation_slot + SLOTS_PER_EPOCH):
return err("Attestation too old")
when consensusFork < ConsensusFork.Deneb:
if not (current_slot <= attestation_slot + SLOTS_PER_EPOCH):
return err("Attestation too old")
ok()
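The substantive change here is that from Deneb onward (EIP-7045) only the MIN_ATTESTATION_INCLUSION_DELAY lower bound remains, while the one-epoch upper bound applies to earlier forks only. A reduced sketch of that compile-time gating; the Fork enum, the Option-based error reporting, and the constant values are simplifications belonging to this example, not the code above:
# Sketch of the pre/post-Deneb attestation inclusion window (EIP-7045).
import std/options
type Fork = enum Phase0, Altair, Bellatrix, Capella, Deneb
const
  SLOTS_PER_EPOCH = 32'u64
  MIN_ATTESTATION_INCLUSION_DELAY = 1'u64
func checkInclusion(fork: static Fork,
                    attestationSlot, currentSlot: uint64): Option[string] =
  if attestationSlot + MIN_ATTESTATION_INCLUSION_DELAY > currentSlot:
    return some("Attestation too new")
  when fork < Deneb:
    # Pre-Deneb: attestations expire after one epoch.
    if currentSlot > attestationSlot + SLOTS_PER_EPOCH:
      return some("Attestation too old")
  none(string)
doAssert checkInclusion(Capella, 100, 200).isSome  # too old before Deneb
doAssert checkInclusion(Deneb, 100, 200).isNone    # accepted from Deneb on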
@@ -488,10 +493,9 @@ func check_attestation_index*(
Result[CommitteeIndex, cstring] =
check_attestation_index(data.index, committees_per_slot)
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/altair/beacon-chain.md#get_attestation_participation_flag_indices
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/beacon-chain.md#get_attestation_participation_flag_indices
func get_attestation_participation_flag_indices(
state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState |
deneb.BeaconState,
state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState,
data: AttestationData, inclusion_delay: uint64): seq[int] =
## Return the flag indices that are satisfied by an attestation.
let justified_checkpoint =
@@ -503,10 +507,14 @@ func get_attestation_participation_flag_indices(
# Matching roots
let
is_matching_source = data.source == justified_checkpoint
is_matching_target = is_matching_source and data.target.root == get_block_root(state, data.target.epoch)
is_matching_head = is_matching_target and data.beacon_block_root == get_block_root_at_slot(state, data.slot)
is_matching_target =
is_matching_source and
data.target.root == get_block_root(state, data.target.epoch)
is_matching_head =
is_matching_target and
data.beacon_block_root == get_block_root_at_slot(state, data.slot)
# TODO probably this needs to be robustly failable
# Checked by check_attestation()
doAssert is_matching_source
var participation_flag_indices: seq[int]
@@ -519,11 +527,45 @@ func get_attestation_participation_flag_indices(
participation_flag_indices
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/deneb/beacon-chain.md#modified-get_attestation_participation_flag_indices
func get_attestation_participation_flag_indices(
state: deneb.BeaconState,
data: AttestationData, inclusion_delay: uint64): seq[int] =
## Return the flag indices that are satisfied by an attestation.
let justified_checkpoint =
if data.target.epoch == get_current_epoch(state):
state.current_justified_checkpoint
else:
state.previous_justified_checkpoint
# Matching roots
let
is_matching_source = data.source == justified_checkpoint
is_matching_target =
is_matching_source and
data.target.root == get_block_root(state, data.target.epoch)
is_matching_head =
is_matching_target and
data.beacon_block_root == get_block_root_at_slot(state, data.slot)
# Checked by check_attestation
doAssert is_matching_source
var participation_flag_indices: seq[int]
if is_matching_source and inclusion_delay <= integer_squareroot(SLOTS_PER_EPOCH):
participation_flag_indices.add(TIMELY_SOURCE_FLAG_INDEX)
if is_matching_target: # [Modified in Deneb:EIP7045]
participation_flag_indices.add(TIMELY_TARGET_FLAG_INDEX)
if is_matching_head and inclusion_delay == MIN_ATTESTATION_INCLUSION_DELAY:
participation_flag_indices.add(TIMELY_HEAD_FLAG_INDEX)
participation_flag_indices
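Compared with the Altair..Capella variant, the Deneb overload above differs only in the TIMELY_TARGET condition, which no longer checks inclusion_delay (the [Modified in Deneb:EIP7045] line). A condensed sketch of the flag selection; the flag index values, constants, and boolean inputs are assumptions of this example:
# Sketch: which participation flags an attestation earns, pre- vs post-Deneb.
const
  SLOTS_PER_EPOCH = 32'u64
  MIN_ATTESTATION_INCLUSION_DELAY = 1'u64
  TIMELY_SOURCE_FLAG_INDEX = 0
  TIMELY_TARGET_FLAG_INDEX = 1
  TIMELY_HEAD_FLAG_INDEX = 2
func flagIndices(isMatchingSource, isMatchingTarget, isMatchingHead: bool,
                 inclusionDelay: uint64, isDeneb: bool): seq[int] =
  doAssert isMatchingSource
  # delay <= integer_squareroot(SLOTS_PER_EPOCH), written in squared form to
  # avoid pulling in an integer-sqrt helper
  if inclusionDelay * inclusionDelay <= SLOTS_PER_EPOCH:
    result.add TIMELY_SOURCE_FLAG_INDEX
  # Deneb (EIP-7045) drops the inclusion-delay bound on the target flag
  if isMatchingTarget and (isDeneb or inclusionDelay <= SLOTS_PER_EPOCH):
    result.add TIMELY_TARGET_FLAG_INDEX
  if isMatchingHead and inclusionDelay == MIN_ATTESTATION_INCLUSION_DELAY:
    result.add TIMELY_HEAD_FLAG_INDEX
# Included 33 slots late: still earns the target flag under Deneb rules.
doAssert flagIndices(true, true, false, 33, isDeneb = true) ==
  @[TIMELY_TARGET_FLAG_INDEX]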
# TODO these aren't great here
# TODO these duplicate some stuff in state_transition_epoch which uses TotalBalances
# better to centralize around that if feasible
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#get_total_active_balance
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#get_total_active_balance
func get_total_active_balance*(state: ForkyBeaconState, cache: var StateCache): Gwei =
## Return the combined effective balance of the active validators.
## Note: ``get_total_balance`` returns ``EFFECTIVE_BALANCE_INCREMENT`` Gwei
@@ -539,7 +581,7 @@ func get_total_active_balance*(state: ForkyBeaconState, cache: var StateCache):
cache.total_active_balance[epoch] = tab
return tab
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/beacon-chain.md#get_base_reward_per_increment
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/beacon-chain.md#get_base_reward_per_increment
func get_base_reward_per_increment_sqrt*(
total_active_balance_sqrt: uint64): Gwei =
EFFECTIVE_BALANCE_INCREMENT * BASE_REWARD_FACTOR div total_active_balance_sqrt
@@ -548,7 +590,7 @@ func get_base_reward_per_increment*(
total_active_balance: Gwei): Gwei =
get_base_reward_per_increment_sqrt(integer_squareroot(total_active_balance))
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/altair/beacon-chain.md#get_base_reward
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/beacon-chain.md#get_base_reward
func get_base_reward(
state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState |
deneb.BeaconState,
@@ -559,7 +601,7 @@ func get_base_reward(
state.validators[index].effective_balance div EFFECTIVE_BALANCE_INCREMENT
increments * base_reward_per_increment
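Composed, the two helpers give base_reward = (effective_balance div EFFECTIVE_BALANCE_INCREMENT) * EFFECTIVE_BALANCE_INCREMENT * BASE_REWARD_FACTOR div integer_squareroot(total_active_balance). A standalone sketch with assumed mainnet constants and a local Newton-style integer square root:
# Sketch of the Altair base-reward arithmetic.
const
  EFFECTIVE_BALANCE_INCREMENT = 1_000_000_000'u64  # 1 ETH, in Gwei (assumed)
  BASE_REWARD_FACTOR = 64'u64                      # mainnet value (assumed)
func integerSquareroot(n: uint64): uint64 =
  # Newton's method, as in the phase0 spec's integer_squareroot.
  var x = n
  var y = (x + 1) div 2
  while y < x:
    x = y
    y = (x + n div x) div 2
  x
func baseRewardPerIncrement(totalActiveBalance: uint64): uint64 =
  EFFECTIVE_BALANCE_INCREMENT * BASE_REWARD_FACTOR div
    integerSquareroot(totalActiveBalance)
func baseReward(effectiveBalance, totalActiveBalance: uint64): uint64 =
  let increments = effectiveBalance div EFFECTIVE_BALANCE_INCREMENT
  increments * baseRewardPerIncrement(totalActiveBalance)
# A 32 ETH validator with ~10M ETH of total active balance:
doAssert baseReward(32_000_000_000'u64, 10_000_000_000_000_000'u64) == 20_480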
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#attestations
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#attestations
proc check_attestation*(
state: ForkyBeaconState, attestation: SomeAttestation, flags: UpdateFlags,
cache: var StateCache): Result[void, cstring] =
@@ -575,7 +617,7 @@ proc check_attestation*(
data,
get_committee_count_per_slot(state, epoch, cache))
? check_attestation_inclusion(slot, state.slot)
? check_attestation_inclusion((typeof state).toFork, slot, state.slot)
let committee_len = get_beacon_committee_len(
state, slot, committee_index, cache)
@@ -665,26 +707,19 @@ proc process_attestation*(
? check_attestation(state, attestation, flags, cache)
# For phase0
template addPendingAttestation(attestations: typed) =
# The genericSeqAssign generated by the compiler to copy the attestation
# data sadly is a processing hotspot - the business with the addDefault
# pointer is here simply to work around the poor codegen
let pa = attestations.addDefault()
if pa.isNil:
return err("process_attestation: too many pending attestations")
assign(pa[].aggregation_bits, attestation.aggregation_bits)
pa[].data = attestation.data
pa[].inclusion_delay = state.slot - attestation.data.slot
pa[].proposer_index = proposer_index.uint64
# Altair, Bellatrix, and Capella
template updateParticipationFlags(epoch_participation: untyped) =
let proposer_reward = get_proposer_reward(
state, attestation, base_reward_per_increment, cache, epoch_participation)
increase_balance(state, proposer_index, proposer_reward)
when state is phase0.BeaconState:
template addPendingAttestation(attestations: typed) =
# The genericSeqAssign generated by the compiler to copy the attestation
# data sadly is a processing hotspot - the business with the addDefault
# pointer is here simply to work around the poor codegen
let pa = attestations.addDefault()
if pa.isNil:
return err("process_attestation: too many pending attestations")
assign(pa[].aggregation_bits, attestation.aggregation_bits)
pa[].data = attestation.data
pa[].inclusion_delay = state.slot - attestation.data.slot
pa[].proposer_index = proposer_index.uint64
doAssert base_reward_per_increment == 0.Gwei
if attestation.data.target.epoch == get_current_epoch(state):
addPendingAttestation(state.current_epoch_attestations)
@@ -692,6 +727,11 @@ proc process_attestation*(
addPendingAttestation(state.previous_epoch_attestations)
elif state is altair.BeaconState or state is bellatrix.BeaconState or
state is capella.BeaconState or state is deneb.BeaconState:
template updateParticipationFlags(epoch_participation: untyped) =
let proposer_reward = get_proposer_reward(
state, attestation, base_reward_per_increment, cache, epoch_participation)
increase_balance(state, proposer_index, proposer_reward)
doAssert base_reward_per_increment > 0.Gwei
if attestation.data.target.epoch == get_current_epoch(state):
updateParticipationFlags(state.current_epoch_participation)
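The refactor above moves each helper template into the `when`/`elif` branch for the state type that actually uses it, so only the matching branch is instantiated. A toy sketch of that compile-time dispatch pattern; the state types and fields here are stand-ins, not the real BeaconState variants:
# Sketch of compile-time dispatch on the state type, as used above.
type
  Phase0State = object
    pendingCount: int
  AltairState = object
    participation: int
proc processAttestation[S: Phase0State | AltairState](state: var S) =
  when S is Phase0State:
    template addPending() =
      state.pendingCount.inc   # only compiled for Phase0State
    addPending()
  else:
    template updateFlags() =
      state.participation.inc  # only compiled for AltairState
    updateFlags()
var a = Phase0State()
var b = AltairState()
processAttestation(a)
processAttestation(b)
doAssert a.pendingCount == 1 and b.participation == 1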
@@ -740,7 +780,7 @@ func get_next_sync_committee_keys(
i += 1'u64
res
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/capella/beacon-chain.md#has_eth1_withdrawal_credential
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/capella/beacon-chain.md#has_eth1_withdrawal_credential
func has_eth1_withdrawal_credential*(validator: Validator): bool =
## Check if ``validator`` has an 0x01 prefixed "eth1" withdrawal credential.
validator.withdrawal_credentials.data[0] == ETH1_ADDRESS_WITHDRAWAL_PREFIX
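The predicate inspects only the first byte of the 32-byte credential. A self-contained sketch; ETH1_ADDRESS_WITHDRAWAL_PREFIX = 0x01 is the spec value, and a bare array[32, byte] stands in for Eth2Digest here:
# Sketch: 0x01 ("eth1") withdrawal-credential prefix check.
const ETH1_ADDRESS_WITHDRAWAL_PREFIX = 0x01'u8
func hasEth1WithdrawalCredential(credentials: array[32, byte]): bool =
  credentials[0] == ETH1_ADDRESS_WITHDRAWAL_PREFIX
var creds: array[32, byte]
doAssert not hasEth1WithdrawalCredential(creds)  # 0x00 prefix = BLS credential
creds[0] = 0x01
doAssert hasEth1WithdrawalCredential(creds)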
@@ -1059,7 +1099,7 @@ proc initialize_hashed_beacon_state_from_eth1*(
execution_payload_header, flags))
result.root = hash_tree_root(result.data)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/fork.md#upgrading-the-state
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/fork.md#upgrading-the-state
func translate_participation(
state: var altair.BeaconState,
pending_attestations: openArray[phase0.PendingAttestation]) =
@@ -1145,7 +1185,7 @@ func upgrade_to_altair*(cfg: RuntimeConfig, pre: phase0.BeaconState):
post
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/bellatrix/fork.md#upgrading-the-state
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/bellatrix/fork.md#upgrading-the-state
func upgrade_to_bellatrix*(cfg: RuntimeConfig, pre: altair.BeaconState):
ref bellatrix.BeaconState =
let epoch = get_current_epoch(pre)
@@ -1202,7 +1242,7 @@ func upgrade_to_bellatrix*(cfg: RuntimeConfig, pre: altair.BeaconState):
latest_execution_payload_header: default(bellatrix.ExecutionPayloadHeader)
)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/capella/fork.md#upgrading-the-state
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/capella/fork.md#upgrading-the-state
func upgrade_to_capella*(cfg: RuntimeConfig, pre: bellatrix.BeaconState):
ref capella.BeaconState =
let

View File

@@ -203,7 +203,7 @@ func finish*(agg: AggregateSignature): CookedSig {.inline.} =
sig.finish(agg)
CookedSig(sig)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#bls-signatures
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#bls-signatures
func blsVerify*(
pubkey: CookedPubKey, message: openArray[byte],
signature: CookedSig): bool =
@@ -216,7 +216,7 @@ func blsVerify*(
## to enforce correct usage.
PublicKey(pubkey).verify(message, blscurve.Signature(signature))
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#bls-signatures
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#bls-signatures
proc blsVerify*(
pubkey: ValidatorPubKey, message: openArray[byte],
signature: CookedSig): bool =

View File

@@ -38,16 +38,16 @@ const
PARTICIPATION_FLAG_WEIGHTS* =
[TIMELY_SOURCE_WEIGHT, TIMELY_TARGET_WEIGHT, TIMELY_HEAD_WEIGHT]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/validator.md#misc
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/validator.md#misc
TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE* = 16
SYNC_COMMITTEE_SUBNET_COUNT* = 4
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/sync-protocol.md#constants
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/sync-protocol.md#constants
# All of these indices are rooted in `BeaconState`.
# The first member (`genesis_time`) is 32, subsequent members +1 each.
# If there are ever more than 32 members in `BeaconState`, indices change!
# `FINALIZED_ROOT_INDEX` is one layer deeper, i.e., `52 * 2 + 1`.
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/ssz/merkle-proofs.md
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/ssz/merkle-proofs.md
FINALIZED_ROOT_INDEX* = 105.GeneralizedIndex # `finalized_checkpoint` > `root`
CURRENT_SYNC_COMMITTEE_INDEX* = 54.GeneralizedIndex # `current_sync_committee`
NEXT_SYNC_COMMITTEE_INDEX* = 55.GeneralizedIndex # `next_sync_committee`
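The generalized indices follow from the comment above: with the state padded to 32 field leaves, field i has index 32 + i, and descending one layer maps a parent index p to 2p plus the child's position. A sketch that reproduces the three constants from the Altair field positions (20, 22 and 23, assumed here):
# Sketch of the generalized-index arithmetic behind the constants above.
func leafGindex(fieldIndex, leafCount: uint64): uint64 =
  # For a container padded to `leafCount` (a power of two) leaves,
  # field i sits at generalized index leafCount + i.
  leafCount + fieldIndex
func childGindex(parent, childIndex: uint64): uint64 =
  # Descending one layer doubles the index and adds the child's position.
  parent * 2 + childIndex
doAssert leafGindex(20, 32) == 52   # finalized_checkpoint
doAssert childGindex(52, 1) == 105  # its `root` member -> FINALIZED_ROOT_INDEX
doAssert leafGindex(22, 32) == 54   # CURRENT_SYNC_COMMITTEE_INDEX
doAssert leafGindex(23, 32) == 55   # NEXT_SYNC_COMMITTEE_INDEX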
@@ -80,7 +80,7 @@ type
## effectively making the cost of clearing the cache higher than the typical
## gains
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/beacon-chain.md#syncaggregate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/beacon-chain.md#syncaggregate
SyncAggregate* = object
sync_committee_bits*: BitArray[SYNC_COMMITTEE_SIZE]
sync_committee_signature*: ValidatorSig
@@ -94,7 +94,7 @@ type
pubkeys*: HashArray[Limit SYNC_COMMITTEE_SIZE, ValidatorPubKey]
aggregate_pubkey*: ValidatorPubKey
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/validator.md#synccommitteemessage
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/validator.md#synccommitteemessage
SyncCommitteeMessage* = object
slot*: Slot
## Slot to which this contribution pertains
@@ -108,7 +108,7 @@ type
signature*: ValidatorSig
## Signature by the validator over the block root of `slot`
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/validator.md#synccommitteecontribution
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/validator.md#synccommitteecontribution
SyncCommitteeAggregationBits* =
BitArray[SYNC_SUBCOMMITTEE_SIZE]
@@ -130,18 +130,18 @@ type
signature*: ValidatorSig
## Signature by the validator(s) over the block root of `slot`
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/validator.md#contributionandproof
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/validator.md#contributionandproof
ContributionAndProof* = object
aggregator_index*: uint64 # `ValidatorIndex` after validation
contribution*: SyncCommitteeContribution
selection_proof*: ValidatorSig
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/validator.md#signedcontributionandproof
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/validator.md#signedcontributionandproof
SignedContributionAndProof* = object
message*: ContributionAndProof
signature*: ValidatorSig
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/validator.md#syncaggregatorselectiondata
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/validator.md#syncaggregatorselectiondata
SyncAggregatorSelectionData* = object
slot*: Slot
subcommittee_index*: uint64 # `SyncSubcommitteeIndex` after validation
@@ -157,12 +157,12 @@ type
NextSyncCommitteeBranch* =
array[log2trunc(NEXT_SYNC_COMMITTEE_INDEX), Eth2Digest]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/sync-protocol.md#lightclientheader
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/sync-protocol.md#lightclientheader
LightClientHeader* = object
beacon*: BeaconBlockHeader
## Beacon block header
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/sync-protocol.md#lightclientbootstrap
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/sync-protocol.md#lightclientbootstrap
LightClientBootstrap* = object
header*: LightClientHeader
## Header matching the requested beacon block root
@@ -171,7 +171,7 @@ type
## Current sync committee corresponding to `header.beacon.state_root`
current_sync_committee_branch*: CurrentSyncCommitteeBranch
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/sync-protocol.md#lightclientupdate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/sync-protocol.md#lightclientupdate
LightClientUpdate* = object
attested_header*: LightClientHeader
## Header attested to by the sync committee
@@ -190,7 +190,7 @@ type
signature_slot*: Slot
## Slot at which the aggregate signature was created (untrusted)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/sync-protocol.md#lightclientfinalityupdate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/sync-protocol.md#lightclientfinalityupdate
LightClientFinalityUpdate* = object
# Header attested to by the sync committee
attested_header*: LightClientHeader
@@ -204,7 +204,7 @@ type
# Slot at which the aggregate signature was created (untrusted)
signature_slot*: Slot
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/sync-protocol.md#lightclientoptimisticupdate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/sync-protocol.md#lightclientoptimisticupdate
LightClientOptimisticUpdate* = object
# Header attested to by the sync committee
attested_header*: LightClientHeader
@@ -230,7 +230,7 @@ type
LightClientBootstrap |
SomeLightClientUpdate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/sync-protocol.md#lightclientstore
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/sync-protocol.md#lightclientstore
LightClientStore* = object
finalized_header*: LightClientHeader
## Header that is finalized
@@ -339,7 +339,7 @@ type
data*: BeaconState
root*: Eth2Digest # hash_tree_root(data)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#beaconblock
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#beaconblock
BeaconBlock* = object
## For each slot, a proposer is chosen from the validator pool to propose
## a new block. Once the block has been proposed, it is transmitted to
@@ -396,7 +396,7 @@ type
state_root*: Eth2Digest
body*: TrustedBeaconBlockBody
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/beacon-chain.md#beaconblockbody
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/beacon-chain.md#beaconblockbody
BeaconBlockBody* = object
randao_reveal*: ValidatorSig
eth1_data*: Eth1Data
@@ -448,7 +448,7 @@ type
SyncnetBits* = BitArray[SYNC_COMMITTEE_SUBNET_COUNT]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/p2p-interface.md#metadata
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/p2p-interface.md#metadata
MetaData* = object
seq_number*: uint64
attnets*: AttnetBits
@@ -651,7 +651,7 @@ chronicles.formatIt SyncCommitteeContribution: shortLog(it)
chronicles.formatIt ContributionAndProof: shortLog(it)
chronicles.formatIt SignedContributionAndProof: shortLog(it)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/sync-protocol.md#is_valid_light_client_header
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/sync-protocol.md#is_valid_light_client_header
func is_valid_light_client_header*(
header: LightClientHeader, cfg: RuntimeConfig): bool =
true

View File

@@ -74,7 +74,7 @@ export
tables, results, json_serialization, timer, sszTypes, beacon_time, crypto,
digest, presets
const SPEC_VERSION* = "1.4.0-alpha.3"
const SPEC_VERSION* = "1.4.0-beta.0"
## Spec version we're aiming to be compatible with, right now
const
@@ -82,7 +82,7 @@ const
ZERO_HASH* = Eth2Digest()
MAX_GRAFFITI_SIZE* = 32
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/p2p-interface.md#configuration
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/p2p-interface.md#configuration
MAXIMUM_GOSSIP_CLOCK_DISPARITY* = 500.millis
SLOTS_PER_ETH1_VOTING_PERIOD* =
@@ -191,7 +191,7 @@ type
# SSZ / hashing purposes
JustificationBits* = distinct uint8
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#proposerslashing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#proposerslashing
ProposerSlashing* = object
signed_header_1*: SignedBeaconBlockHeader
signed_header_2*: SignedBeaconBlockHeader
@@ -203,7 +203,7 @@ type
signed_header_1*: TrustedSignedBeaconBlockHeader
signed_header_2*: TrustedSignedBeaconBlockHeader
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#attesterslashing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#attesterslashing
AttesterSlashing* = object
attestation_1*: IndexedAttestation
attestation_2*: IndexedAttestation
@@ -215,7 +215,7 @@ type
attestation_1*: TrustedIndexedAttestation
attestation_2*: TrustedIndexedAttestation
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#indexedattestation
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#indexedattestation
IndexedAttestation* = object
attesting_indices*: List[uint64, Limit MAX_VALIDATORS_PER_COMMITTEE]
data*: AttestationData
@@ -231,7 +231,7 @@ type
CommitteeValidatorsBits* = BitList[Limit MAX_VALIDATORS_PER_COMMITTEE]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#attestation
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#attestation
Attestation* = object
aggregation_bits*: CommitteeValidatorsBits
data*: AttestationData
@@ -270,7 +270,7 @@ type
source*: Checkpoint
target*: Checkpoint
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#deposit
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#deposit
Deposit* = object
proof*: array[DEPOSIT_CONTRACT_TREE_DEPTH + 1, Eth2Digest]
## Merkle path to deposit root
@@ -321,7 +321,7 @@ type
pubkey*: CookedPubKey
withdrawal_credentials*: Eth2Digest
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#validator
Validator* = object
pubkey*: ValidatorPubKey
@@ -352,7 +352,7 @@ type
proposer_index*: uint64 # `ValidatorIndex` after validation
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#historicalbatch
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#historicalbatch
HistoricalBatch* = object
block_roots* : array[SLOTS_PER_HISTORICAL_ROOT, Eth2Digest]
state_roots* : array[SLOTS_PER_HISTORICAL_ROOT, Eth2Digest]
@@ -371,7 +371,7 @@ type
deposit_count*: uint64
block_hash*: Eth2Digest
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#signedvoluntaryexit
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#signedvoluntaryexit
SignedVoluntaryExit* = object
message*: VoluntaryExit
signature*: ValidatorSig
@@ -388,7 +388,7 @@ type
state_root*: Eth2Digest
body_root*: Eth2Digest
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#signingdata
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#signingdata
SigningData* = object
object_root*: Eth2Digest
domain*: Eth2Domain
@@ -404,13 +404,13 @@ type
message*: BeaconBlockHeader
signature*: TrustedSig
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/validator.md#aggregateandproof
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/validator.md#aggregateandproof
AggregateAndProof* = object
aggregator_index*: uint64 # `ValidatorIndex` after validation
aggregate*: Attestation
selection_proof*: ValidatorSig
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/validator.md#signedaggregateandproof
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/validator.md#signedaggregateandproof
SignedAggregateAndProof* = object
message*: AggregateAndProof
signature*: ValidatorSig
@@ -428,7 +428,7 @@ type
sync_committees*: Table[SyncCommitteePeriod, SyncCommitteeCache]
# This matches the mutable state of the Solidity deposit contract
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/solidity_deposit_contract/deposit_contract.sol
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/solidity_deposit_contract/deposit_contract.sol
DepositContractState* = object
branch*: array[DEPOSIT_CONTRACT_TREE_DEPTH, Eth2Digest]
deposit_count*: array[32, byte] # Uint256
@@ -459,7 +459,7 @@ type
withdrawable_epoch*: Epoch
## When validator can withdraw funds
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#validator
ValidatorStatusCapella* = object
# This is a validator without the expensive, immutable, append-only parts
# serialized. They're represented in memory to allow in-place SSZ reading
@@ -595,7 +595,7 @@ template makeLimitedUInt*(T: untyped, limit: SomeUnsignedInt) =
template asUInt64*(x: T): uint64 = uint64(distinctBase(x))
template toSszType(x: T): uint64 =
{.error: "Limited types should not be used with SSZ (abi differences)".}
{.error: "Limited types should not be used with SSZ (ABI differences)".}
template makeLimitedU8*(T: untyped, limit: uint8) =
makeLimitedUInt(T, limit)
@@ -650,7 +650,8 @@ proc readValue*(
raiseUnexpectedValue(reader, "Hex string of 4 bytes expected")
func `$`*(x: JustificationBits): string =
"0x" & toHex(uint8(x))
# TODO, works around https://github.com/nim-lang/Nim/issues/22191
"0x" & toHex(uint64(uint8(x)))
proc readValue*(reader: var JsonReader, value: var JustificationBits)
{.raises: [IOError, SerializationError, Defect].} =
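The `$` change widens the uint8 to uint64 before calling toHex, per the referenced Nim issue. A standalone sketch of the same shape of workaround on a local distinct type; the explicit width argument is this example's addition to keep the two-digit output:
# Sketch of the toHex workaround for a distinct uint8 bitfield.
import std/strutils
type JustificationBits = distinct uint8
func `$`(x: JustificationBits): string =
  # Widen to uint64 before toHex, mirroring the workaround above; the width
  # argument (this sketch's addition) trims the result back to 2 hex digits.
  "0x" & toHex(uint64(uint8(x)), 2)
doAssert $JustificationBits(0x0f'u8) == "0x0F"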

View File

@@ -34,7 +34,7 @@ const
NEWPAYLOAD_TIMEOUT* = 8.seconds
type
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/bellatrix/beacon-chain.md#custom-types
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/bellatrix/beacon-chain.md#custom-types
Transaction* = List[byte, Limit MAX_BYTES_PER_TRANSACTION]
ExecutionAddress* = object
@@ -45,7 +45,7 @@ type
PayloadID* = array[8, byte]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/bellatrix/beacon-chain.md#executionpayload
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/bellatrix/beacon-chain.md#executionpayload
ExecutionPayload* = object
# Execution block header fields
parent_hash*: Eth2Digest
@@ -73,7 +73,7 @@ type
executionPayload*: ExecutionPayload
blockValue*: Wei
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/bellatrix/beacon-chain.md#executionpayloadheader
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/bellatrix/beacon-chain.md#executionpayloadheader
ExecutionPayloadHeader* = object
# Execution block header fields
parent_hash*: Eth2Digest
@@ -97,13 +97,13 @@ type
ExecutePayload* = proc(
execution_payload: ExecutionPayload): bool {.gcsafe, raises: [Defect].}
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/bellatrix/fork-choice.md#powblock
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/bellatrix/fork-choice.md#powblock
PowBlock* = object
block_hash*: Eth2Digest
parent_hash*: Eth2Digest
total_difficulty*: Eth2Digest # uint256
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/bellatrix/beacon-chain.md#beaconstate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/bellatrix/beacon-chain.md#beaconstate
BeaconState* = object
# Versioning
genesis_time*: uint64
@@ -228,7 +228,7 @@ type
state_root*: Eth2Digest
body*: TrustedBeaconBlockBody
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/bellatrix/beacon-chain.md#beaconblockbody
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/bellatrix/beacon-chain.md#beaconblockbody
BeaconBlockBody* = object
randao_reveal*: ValidatorSig
eth1_data*: Eth1Data
@@ -306,7 +306,7 @@ type
# Execution
execution_payload*: ExecutionPayload # [New in Bellatrix]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#signedbeaconblock
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#signedbeaconblock
SignedBeaconBlock* = object
message*: BeaconBlock
signature*: ValidatorSig

View File

@@ -27,43 +27,43 @@ import
export json_serialization, base
const
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/capella/light-client/sync-protocol.md#constants
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/capella/light-client/sync-protocol.md#constants
# This index is rooted in `BeaconBlockBody`.
# The first member (`randao_reveal`) is 16, subsequent members +1 each.
# If there are ever more than 16 members in `BeaconBlockBody`, indices change!
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/ssz/merkle-proofs.md
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/ssz/merkle-proofs.md
EXECUTION_PAYLOAD_INDEX* = 25.GeneralizedIndex # `execution_payload`
type
SignedBLSToExecutionChangeList* =
List[SignedBLSToExecutionChange, Limit MAX_BLS_TO_EXECUTION_CHANGES]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/capella/beacon-chain.md#withdrawal
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/capella/beacon-chain.md#withdrawal
Withdrawal* = object
index*: WithdrawalIndex
validator_index*: uint64
address*: ExecutionAddress
amount*: Gwei
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/capella/beacon-chain.md#blstoexecutionchange
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/capella/beacon-chain.md#blstoexecutionchange
BLSToExecutionChange* = object
validator_index*: uint64
from_bls_pubkey*: ValidatorPubKey
to_execution_address*: ExecutionAddress
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/capella/beacon-chain.md#signedblstoexecutionchange
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/capella/beacon-chain.md#signedblstoexecutionchange
SignedBLSToExecutionChange* = object
message*: BLSToExecutionChange
signature*: ValidatorSig
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/capella/beacon-chain.md#historicalsummary
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/capella/beacon-chain.md#historicalsummary
HistoricalSummary* = object
# `HistoricalSummary` matches the components of the phase0
# `HistoricalBatch` making the two hash_tree_root-compatible.
block_summary_root*: Eth2Digest
state_summary_root*: Eth2Digest
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/capella/beacon-chain.md#executionpayload
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/capella/beacon-chain.md#executionpayload
ExecutionPayload* = object
# Execution block header fields
parent_hash*: Eth2Digest
@@ -93,7 +93,7 @@ type
executionPayload*: ExecutionPayload
blockValue*: Wei
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/capella/beacon-chain.md#executionpayloadheader
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/capella/beacon-chain.md#executionpayloadheader
ExecutionPayloadHeader* = object
# Execution block header fields
parent_hash*: Eth2Digest
@@ -122,7 +122,7 @@ type
ExecutionBranch* =
array[log2trunc(EXECUTION_PAYLOAD_INDEX), Eth2Digest]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/capella/light-client/sync-protocol.md#modified-lightclientheader
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/capella/light-client/sync-protocol.md#modified-lightclientheader
LightClientHeader* = object
beacon*: BeaconBlockHeader
## Beacon block header
@@ -131,7 +131,7 @@ type
## Execution payload header corresponding to `beacon.body_root` (from Capella onward)
execution_branch*: ExecutionBranch
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/sync-protocol.md#lightclientbootstrap
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/sync-protocol.md#lightclientbootstrap
LightClientBootstrap* = object
header*: LightClientHeader
## Header matching the requested beacon block root
@@ -140,7 +140,7 @@ type
## Current sync committee corresponding to `header.beacon.state_root`
current_sync_committee_branch*: altair.CurrentSyncCommitteeBranch
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/sync-protocol.md#lightclientupdate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/sync-protocol.md#lightclientupdate
LightClientUpdate* = object
attested_header*: LightClientHeader
## Header attested to by the sync committee
@@ -159,7 +159,7 @@ type
signature_slot*: Slot
## Slot at which the aggregate signature was created (untrusted)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/sync-protocol.md#lightclientfinalityupdate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/sync-protocol.md#lightclientfinalityupdate
LightClientFinalityUpdate* = object
# Header attested to by the sync committee
attested_header*: LightClientHeader
@@ -173,7 +173,7 @@ type
# Slot at which the aggregate signature was created (untrusted)
signature_slot*: Slot
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/sync-protocol.md#lightclientoptimisticupdate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/sync-protocol.md#lightclientoptimisticupdate
LightClientOptimisticUpdate* = object
# Header attested to by the sync committee
attested_header*: LightClientHeader
@@ -199,7 +199,7 @@ type
LightClientBootstrap |
SomeLightClientUpdate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/sync-protocol.md#lightclientstore
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/sync-protocol.md#lightclientstore
LightClientStore* = object
finalized_header*: LightClientHeader
## Header that is finalized
@@ -220,7 +220,7 @@ type
## (used to compute safety threshold)
current_max_active_participants*: uint64
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/capella/beacon-chain.md#beaconstate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/capella/beacon-chain.md#beaconstate
BeaconState* = object
# Versioning
genesis_time*: uint64
@@ -518,8 +518,8 @@ type
latest_block_header*: BeaconBlockHeader
# Mod-increment/circular
block_roots*: array[SLOTS_PER_EPOCH, Eth2Digest]
state_roots*: array[SLOTS_PER_EPOCH, Eth2Digest]
block_roots*: array[SLOTS_PER_EPOCH.int, Eth2Digest]
state_roots*: array[SLOTS_PER_EPOCH.int, Eth2Digest]
# Replace
eth1_data*: Eth1Data
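The only change in this hunk is converting SLOTS_PER_EPOCH (a uint64 constant) to int for the array lengths, presumably so the arrays are indexed by a plain int range; the rationale is inferred here, not stated in the diff. A tiny illustration with int elements standing in for Eth2Digest:
# Array lengths from a uint64 constant, converted to int for the index range.
const SLOTS_PER_EPOCH = 32'u64
type RootsBuffer = object
  block_roots: array[SLOTS_PER_EPOCH.int, int]
  state_roots: array[SLOTS_PER_EPOCH.int, int]
doAssert RootsBuffer().block_roots.len == 32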
@@ -650,7 +650,7 @@ func shortLog*(v: SignedBLSToExecutionChange): auto =
signature: shortLog(v.signature)
)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/capella/light-client/sync-protocol.md#get_lc_execution_root
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/capella/light-client/sync-protocol.md#get_lc_execution_root
func get_lc_execution_root*(
header: LightClientHeader, cfg: RuntimeConfig): Eth2Digest =
let epoch = header.beacon.slot.epoch
@@ -660,7 +660,7 @@ func get_lc_execution_root*(
ZERO_HASH
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/capella/light-client/sync-protocol.md#modified-is_valid_light_client_header
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/capella/light-client/sync-protocol.md#modified-is_valid_light_client_header
func is_valid_light_client_header*(
header: LightClientHeader, cfg: RuntimeConfig): bool =
let epoch = header.beacon.slot.epoch
@@ -677,13 +677,13 @@ func is_valid_light_client_header*(
get_subtree_index(EXECUTION_PAYLOAD_INDEX),
header.beacon.body_root)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/capella/light-client/fork.md#upgrading-light-client-data
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/capella/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_header_to_capella*(
pre: altair.LightClientHeader): LightClientHeader =
LightClientHeader(
beacon: pre.beacon)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/capella/light-client/fork.md#upgrading-light-client-data
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/capella/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_bootstrap_to_capella*(
pre: altair.LightClientBootstrap): LightClientBootstrap =
LightClientBootstrap(
@@ -691,7 +691,7 @@ func upgrade_lc_bootstrap_to_capella*(
current_sync_committee: pre.current_sync_committee,
current_sync_committee_branch: pre.current_sync_committee_branch)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/capella/light-client/fork.md#upgrading-light-client-data
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/capella/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_update_to_capella*(
pre: altair.LightClientUpdate): LightClientUpdate =
LightClientUpdate(
@@ -703,7 +703,7 @@ func upgrade_lc_update_to_capella*(
sync_aggregate: pre.sync_aggregate,
signature_slot: pre.signature_slot)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/capella/light-client/fork.md#upgrading-light-client-data
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/capella/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_finality_update_to_capella*(
pre: altair.LightClientFinalityUpdate): LightClientFinalityUpdate =
LightClientFinalityUpdate(
@@ -713,7 +713,7 @@ func upgrade_lc_finality_update_to_capella*(
sync_aggregate: pre.sync_aggregate,
signature_slot: pre.signature_slot)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/capella/light-client/fork.md#upgrading-light-client-data
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/capella/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_optimistic_update_to_capella*(
pre: altair.LightClientOptimisticUpdate): LightClientOptimisticUpdate =
LightClientOptimisticUpdate(
@@ -764,7 +764,7 @@ chronicles.formatIt LightClientUpdate: shortLog(it)
chronicles.formatIt LightClientFinalityUpdate: shortLog(it)
chronicles.formatIt LightClientOptimisticUpdate: shortLog(it)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/capella/light-client/fork.md#upgrading-the-store
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/capella/light-client/fork.md#upgrading-the-store
func upgrade_lc_store_to_capella*(
pre: altair.LightClientStore): LightClientStore =
let best_valid_update =

View File

@@ -15,6 +15,21 @@ type
DomainType* = distinct array[4, byte]
const
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/p2p-interface.md#constants
NODE_ID_BITS* = 256
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/p2p-interface.md#configuration
EPOCHS_PER_SUBNET_SUBSCRIPTION* = 256
SUBNETS_PER_NODE* = 2'u64
ATTESTATION_SUBNET_COUNT*: uint64 = 64
ATTESTATION_SUBNET_EXTRA_BITS* = 0
ATTESTATION_SUBNET_PREFIX_BITS* = 6 ## \
## int(ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS)
static: doAssert 1 shl (ATTESTATION_SUBNET_PREFIX_BITS - ATTESTATION_SUBNET_EXTRA_BITS) ==
ATTESTATION_SUBNET_COUNT
const
# 2^64 - 1 in spec
FAR_FUTURE_SLOT* = Slot(not 0'u64)
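The new static doAssert ties ATTESTATION_SUBNET_PREFIX_BITS back to ATTESTATION_SUBNET_COUNT at compile time, matching the ceillog2 relation noted in the comment. The same relation checked in isolation, with local copies of the constants:
# Sketch: prefix bits = ceil(log2(subnet count)) + extra bits, and the
# round-trip check mirroring the static doAssert above.
import std/math
const
  ATTESTATION_SUBNET_COUNT = 64'u64
  ATTESTATION_SUBNET_EXTRA_BITS = 0
let prefixBits = ceil(log2(float(ATTESTATION_SUBNET_COUNT))).int +
  ATTESTATION_SUBNET_EXTRA_BITS
doAssert prefixBits == 6
doAssert 1'u64 shl (prefixBits - ATTESTATION_SUBNET_EXTRA_BITS) ==
  ATTESTATION_SUBNET_COUNT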
@@ -36,17 +51,17 @@ const
DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF* = DomainType([byte 0x08, 0x00, 0x00, 0x00])
DOMAIN_CONTRIBUTION_AND_PROOF* = DomainType([byte 0x09, 0x00, 0x00, 0x00])
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/capella/beacon-chain.md#domain-types
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/capella/beacon-chain.md#domain-types
DOMAIN_BLS_TO_EXECUTION_CHANGE* = DomainType([byte 0x0a, 0x00, 0x00, 0x00])
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/beacon-chain.md#domain-types
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/deneb/beacon-chain.md#domain-types
DOMAIN_BLOB_SIDECAR* = DomainType([byte 0x0b, 0x00, 0x00, 0x00])
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/bellatrix/beacon-chain.md#transition-settings
TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH* = FAR_FUTURE_EPOCH
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/fork-choice.md#configuration
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/fork-choice.md#configuration
PROPOSER_SCORE_BOOST*: uint64 = 40
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/p2p-interface.md#configuration
ATTESTATION_SUBNET_COUNT*: uint64 = 64
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/deneb/p2p-interface.md#configuration
BLOB_SIDECAR_SUBNET_COUNT*: uint64 = 6

View File

@@ -25,12 +25,12 @@ import
../digest,
"."/[base, phase0, altair, bellatrix, capella]
from ../../vendor/nim-kzg4844/kzg4844 import KzgCommitment, KzgProof
from kzg4844 import KzgCommitment, KzgProof
export json_serialization, base, kzg4844
const
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/deneb/polynomial-commitments.md#constants
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/deneb/polynomial-commitments.md#constants
BYTES_PER_FIELD_ELEMENT = 32
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/beacon-chain.md#blob
@@ -109,6 +109,7 @@ type
executionPayload*: ExecutionPayload
blockValue*: Wei
kzgs*: KzgCommitments
proofs*: seq[KZGProof]
blobs*: Blobs
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/beacon-chain.md#executionpayloadheader
@@ -138,7 +139,7 @@ type
ExecutePayload* = proc(
execution_payload: ExecutionPayload): bool {.gcsafe, raises: [Defect].}
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/capella/light-client/sync-protocol.md#modified-lightclientheader
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/capella/light-client/sync-protocol.md#modified-lightclientheader
LightClientHeader* = object
beacon*: BeaconBlockHeader
## Beacon block header
@@ -147,7 +148,7 @@ type
## Execution payload header corresponding to `beacon.body_root` (from Capella onward)
execution_branch*: capella.ExecutionBranch
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/sync-protocol.md#lightclientbootstrap
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/sync-protocol.md#lightclientbootstrap
LightClientBootstrap* = object
header*: LightClientHeader
## Header matching the requested beacon block root
@@ -156,7 +157,7 @@ type
## Current sync committee corresponding to `header.beacon.state_root`
current_sync_committee_branch*: altair.CurrentSyncCommitteeBranch
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/sync-protocol.md#lightclientupdate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/sync-protocol.md#lightclientupdate
LightClientUpdate* = object
attested_header*: LightClientHeader
## Header attested to by the sync committee
@@ -175,7 +176,7 @@ type
signature_slot*: Slot
## Slot at which the aggregate signature was created (untrusted)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/sync-protocol.md#lightclientfinalityupdate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/sync-protocol.md#lightclientfinalityupdate
LightClientFinalityUpdate* = object
# Header attested to by the sync committee
attested_header*: LightClientHeader
@@ -189,7 +190,7 @@ type
# Slot at which the aggregate signature was created (untrusted)
signature_slot*: Slot
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/sync-protocol.md#lightclientoptimisticupdate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/sync-protocol.md#lightclientoptimisticupdate
LightClientOptimisticUpdate* = object
# Header attested to by the sync committee
attested_header*: LightClientHeader
@@ -215,7 +216,7 @@ type
LightClientBootstrap |
SomeLightClientUpdate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/sync-protocol.md#lightclientstore
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/sync-protocol.md#lightclientstore
LightClientStore* = object
finalized_header*: LightClientHeader
## Header that is finalized
@@ -314,7 +315,7 @@ type
data*: BeaconState
root*: Eth2Digest # hash_tree_root(data)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#beaconblock
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#beaconblock
BeaconBlock* = object
## For each slot, a proposer is chosen from the validator pool to propose
## a new block. Once the block has been proposed, it is transmitted to
@@ -626,7 +627,7 @@ func is_valid_light_client_header*(
get_subtree_index(EXECUTION_PAYLOAD_INDEX),
header.beacon.body_root)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/deneb/light-client/fork.md#upgrading-light-client-data
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/deneb/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_header_to_deneb*(
pre: capella.LightClientHeader): LightClientHeader =
LightClientHeader(
@@ -681,7 +682,7 @@ func upgrade_lc_finality_update_to_deneb*(
sync_aggregate: pre.sync_aggregate,
signature_slot: pre.signature_slot)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/deneb/light-client/fork.md#upgrading-light-client-data
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/deneb/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_optimistic_update_to_deneb*(
pre: capella.LightClientOptimisticUpdate): LightClientOptimisticUpdate =
LightClientOptimisticUpdate(

View File

@@ -111,7 +111,7 @@ type
data*: BeaconState
root*: Eth2Digest # hash_tree_root(data)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#beaconblock
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#beaconblock
BeaconBlock* = object
## For each slot, a proposer is chosen from the validator pool to propose
## a new block. Once the block has been proposed, it is transmitted to
@@ -167,7 +167,7 @@ type
state_root*: Eth2Digest
body*: TrustedBeaconBlockBody
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#beaconblockbody
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#beaconblockbody
BeaconBlockBody* = object
randao_reveal*: ValidatorSig
eth1_data*: Eth1Data
@@ -219,7 +219,7 @@ type
deposits*: List[Deposit, Limit MAX_DEPOSITS]
voluntary_exits*: List[TrustedSignedVoluntaryExit, Limit MAX_VOLUNTARY_EXITS]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#signedbeaconblock
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#signedbeaconblock
SignedBeaconBlock* = object
message*: BeaconBlock
signature*: ValidatorSig

View File

@@ -104,13 +104,15 @@ type
capella_mev.SignedBlindedBeaconBlock |
SignedValidatorRegistrationV1 |
SignedVoluntaryExit |
Web3SignerRequest
Web3SignerRequest |
RestNimbusTimestamp1
EncodeOctetTypes* =
altair.SignedBeaconBlock |
bellatrix.SignedBeaconBlock |
capella.SignedBeaconBlock |
phase0.SignedBeaconBlock
phase0.SignedBeaconBlock |
DenebSignedBlockContents
EncodeArrays* =
seq[Attestation] |
@@ -150,7 +152,9 @@ type
GetStateRootResponse |
GetBlockRootResponse |
SomeForkedLightClientObject |
seq[SomeForkedLightClientObject]
seq[SomeForkedLightClientObject] |
RestNimbusTimestamp1 |
RestNimbusTimestamp2
DecodeConsensysTypes* =
ProduceBlockResponseV2 | ProduceBlindedBlockResponse
@@ -666,7 +670,7 @@ proc readValue*(reader: var JsonReader[RestJson],
for e in reader.readArray(string):
let parsed = try:
parseBiggestUInt(e)
except ValueError as err:
except ValueError:
reader.raiseUnexpectedValue(
"A string-encoded 8-bit unsigned integer value expected")
@@ -1196,103 +1200,30 @@ proc readValue*[BlockType: Web3SignerForkedBeaconBlock](
prepareForkedBlockReading(reader, version, data,
"Web3SignerForkedBeaconBlock")
case version.get():
of ConsensusFork.Phase0:
let res =
try:
some(RestJson.decode(string(data.get()),
phase0.BeaconBlock,
requireAllFields = true,
allowUnknownFields = true))
except SerializationError:
none[phase0.BeaconBlock]()
if res.isNone():
reader.raiseUnexpectedValue("Incorrect phase0 block format")
value = Web3SignerForkedBeaconBlock(
kind: ConsensusFork.Phase0,
phase0Data: res.get())
of ConsensusFork.Altair:
let res =
try:
some(RestJson.decode(string(data.get()),
altair.BeaconBlock,
requireAllFields = true,
allowUnknownFields = true))
except SerializationError:
none[altair.BeaconBlock]()
if res.isNone():
reader.raiseUnexpectedValue("Incorrect altair block format")
value = Web3SignerForkedBeaconBlock(
kind: ConsensusFork.Altair,
altairData: res.get())
of ConsensusFork.Bellatrix:
let res =
try:
some(RestJson.decode(string(data.get()),
BeaconBlockHeader,
requireAllFields = true,
allowUnknownFields = true))
except SerializationError:
none[BeaconBlockHeader]()
if res.isNone():
reader.raiseUnexpectedValue("Incorrect bellatrix block format")
value = Web3SignerForkedBeaconBlock(
kind: ConsensusFork.Bellatrix,
bellatrixData: res.get())
of ConsensusFork.Capella:
let res =
try:
some(RestJson.decode(string(data.get()),
BeaconBlockHeader,
requireAllFields = true,
allowUnknownFields = true))
except SerializationError:
none[BeaconBlockHeader]()
if res.isNone():
reader.raiseUnexpectedValue("Incorrect capella block format")
value = Web3SignerForkedBeaconBlock(
kind: ConsensusFork.Capella,
capellaData: res.get())
of ConsensusFork.Deneb:
let res =
try:
some(RestJson.decode(string(data.get()),
BeaconBlockHeader,
requireAllFields = true,
allowUnknownFields = true))
except SerializationError:
none[BeaconBlockHeader]()
if res.isNone():
reader.raiseUnexpectedValue("Incorrect deneb block format")
value = Web3SignerForkedBeaconBlock(
kind: ConsensusFork.Deneb,
denebData: res.get())
let res =
try:
some(RestJson.decode(string(data.get()),
BeaconBlockHeader,
requireAllFields = true,
allowUnknownFields = true))
except SerializationError:
none[BeaconBlockHeader]()
if res.isNone():
reader.raiseUnexpectedValue("Incorrect block header format")
if version.get() <= ConsensusFork.Altair:
reader.raiseUnexpectedValue(
"Web3Signer implementation supports Bellatrix and newer")
value = Web3SignerForkedBeaconBlock(kind: version.get(), data: res.get())
proc writeValue*[
BlockType: Web3SignerForkedBeaconBlock](
writer: var JsonWriter[RestJson],
value: BlockType) {.raises: [IOError, Defect].} =
template forkIdentifier(id: string): auto = (static toUpperAscii id)
# https://consensys.github.io/web3signer/web3signer-eth2.html#tag/Signing/operation/ETH2_SIGN
# https://github.com/ConsenSys/web3signer/blob/d51337e96ba5ce410222943556bed7c4856b8e57/core/src/main/java/tech/pegasys/web3signer/core/service/http/handlers/signing/eth2/json/BlockRequestDeserializer.java#L42-L58
writer.beginRecord()
case value.kind
of ConsensusFork.Phase0:
writer.writeField("version", forkIdentifier "phase0")
writer.writeField("block", value.phase0Data)
of ConsensusFork.Altair:
writer.writeField("version", forkIdentifier "altair")
writer.writeField("block", value.altairData)
of ConsensusFork.Bellatrix:
writer.writeField("version", forkIdentifier "bellatrix")
writer.writeField("block_header", value.bellatrixData)
of ConsensusFork.Capella:
writer.writeField("version", forkIdentifier "capella")
writer.writeField("block_header", value.capellaData)
of ConsensusFork.Deneb:
writer.writeField("version", forkIdentifier "deneb")
writer.writeField("block_header", value.denebData)
writer.writeField("version", value.kind.toString.toUpperAscii)
writer.writeField("block", value.data)
writer.endRecord()
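For reference, the consolidated writer above serialises the header under a fixed "block" key, so the emitted body is roughly of the following shape (placeholder values; the exact field encodings are an assumption for illustration, not part of this diff):
{
  "version": "CAPELLA",
  "block": {
    "slot": "123",
    "proposer_index": "7",
    "parent_root": "0x...",
    "state_root": "0x...",
    "body_root": "0x..."
  }
}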
proc writeValue*[
@@ -1535,7 +1466,32 @@ proc readValue*(reader: var JsonReader[RestJson],
value.capellaBody.execution_payload.withdrawals,
ep_src.withdrawals.get())
of ConsensusFork.Deneb:
reader.raiseUnexpectedValue($denebImplementationMissing)
value = RestPublishedBeaconBlockBody(
kind: ConsensusFork.Deneb,
denebBody: deneb.BeaconBlockBody(
randao_reveal: randao_reveal.get(),
eth1_data: eth1_data.get(),
graffiti: graffiti.get(),
proposer_slashings: proposer_slashings.get(),
attester_slashings: attester_slashings.get(),
attestations: attestations.get(),
deposits: deposits.get(),
voluntary_exits: voluntary_exits.get(),
sync_aggregate: sync_aggregate.get(),
bls_to_execution_changes: bls_to_execution_changes.get(),
blob_kzg_commitments: blob_kzg_commitments.get()
)
)
copy_ep_bellatrix(value.denebBody.execution_payload)
assign(
value.denebBody.execution_payload.withdrawals,
ep_src.withdrawals.get())
assign(
value.denebBody.execution_payload.data_gas_used,
ep_src.data_gas_used.get())
assign(
value.denebBody.execution_payload.excess_data_gas,
ep_src.excess_data_gas.get())
## RestPublishedBeaconBlock
proc readValue*(reader: var JsonReader[RestJson],
@@ -2751,7 +2707,7 @@ proc readValue*(reader: var JsonReader[RestJson],
let key =
try:
parseKeystore(item)
except SerializationError as exc:
except SerializationError:
# TODO re-raise the exception by adjusting the column index, so the user
# will get an accurate syntax error within the larger message
reader.raiseUnexpectedValue("Invalid keystore format")
@@ -2766,7 +2722,7 @@ proc readValue*(reader: var JsonReader[RestJson],
SPDIR,
requireAllFields = true,
allowUnknownFields = true)
except SerializationError as exc:
except SerializationError:
reader.raiseUnexpectedValue("Invalid slashing protection format")
some(db)
else:
@@ -2992,6 +2948,16 @@ proc readValue*(reader: var JsonReader[RestJson],
stacktraces: stacktraces
)
## VCRuntimeConfig
proc readValue*(reader: var JsonReader[RestJson],
value: var VCRuntimeConfig) {.
raises: [SerializationError, IOError, Defect].} =
for fieldName in readObjectFields(reader):
let fieldValue = reader.readValue(string)
if value.hasKeyOrPut(toUpperAscii(fieldName), fieldValue):
let msg = "Multiple `" & fieldName & "` fields found"
reader.raiseUnexpectedField(msg, "VCRuntimeConfig")
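With `VCRuntimeConfig` reduced to a plain `Table[string, string]` keyed by upper-cased names (see the type change further below), callers can look up and parse individual constants on demand. A minimal sketch under that assumption; the helper below is hypothetical and not part of this change:
import std/[tables, strutils]
type VCRuntimeConfig = Table[string, string]  # mirrors the alias introduced in rest_types
proc getUintOrDefault(cfg: VCRuntimeConfig, name: string, defaultValue: uint64): uint64 =
  # The JSON reader above stores keys upper-cased, so normalise before the lookup.
  let raw = cfg.getOrDefault(name.toUpperAscii())
  if raw.len == 0: defaultValue
  else: parseBiggestUInt(raw)
var cfg: VCRuntimeConfig = initTable[string, string]()
cfg["SLOTS_PER_EPOCH"] = "32"
doAssert cfg.getUintOrDefault("slots_per_epoch", 0) == 32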
proc parseRoot(value: string): Result[Eth2Digest, cstring] =
try:
ok(Eth2Digest(data: hexToByteArray[32](value)))

View File

@@ -167,6 +167,11 @@ proc publishBlock*(body: capella.SignedBeaconBlock): RestPlainResponse {.
meth: MethodPost.}
## https://ethereum.github.io/beacon-APIs/#/Beacon/publishBlock
proc publishBlock*(body: DenebSignedBlockContents): RestPlainResponse {.
rest, endpoint: "/eth/v1/beacon/blocks",
meth: MethodPost.}
## https://ethereum.github.io/beacon-APIs/#/Beacon/publishBlock
proc publishSszBlock*(
client: RestClientRef,
blck: ForkySignedBeaconBlock

View File

@@ -10,6 +10,9 @@ import
chronos, presto/client,
"."/[rest_types, eth2_rest_serialization]
from std/times import Time, DateTime, toTime, fromUnix, now, utc, `-`,
inNanoseconds
export chronos, client, rest_types, eth2_rest_serialization
proc raiseGenericError*(resp: RestPlainResponse) {.
@@ -52,3 +55,6 @@ proc getBodyBytesWithCap*(
if not(isNil(reader)):
await reader.closeWait()
raise newHttpReadError("Could not read response")
proc getTimestamp*(): uint64 =
uint64((toTime(now().utc) - fromUnix(0)).inNanoseconds())

View File

@@ -56,7 +56,7 @@ func decodeSszLightClientObject[T: SomeForkedLightClientObject](
except SerializationError as exc:
raiseRestDecodingBytesError(cstring("Malformed data: " & $exc.msg))
proc decodeJsonLightClientObject[T: SomeForkedLightClientObject](
proc decodeJsonLightClientObject*[T: SomeForkedLightClientObject](
x: typedesc[T],
data: openArray[byte],
consensusFork: Opt[ConsensusFork],

View File

@@ -1,4 +1,4 @@
# Copyright (c) 2018-2022 Status Research & Development GmbH
# Copyright (c) 2018-2023 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@@ -7,11 +7,71 @@
{.push raises: [Defect].}
import
chronos, presto/client,
"."/[rest_types, eth2_rest_serialization]
chronos, chronicles, presto/client,
"."/[rest_types, eth2_rest_serialization, rest_common]
proc getValidatorsActivity*(epoch: Epoch,
body: seq[ValidatorIndex]
): RestPlainResponse {.
rest, endpoint: "/nimbus/v1/validator/activity/{epoch}",
meth: MethodPost.}
proc getTimesyncInifo*(body: RestNimbusTimestamp1): RestPlainResponse {.
rest, endpoint: "/nimbus/v1/timesync", meth: MethodPost.}
proc getTimeOffset*(client: RestClientRef,
delay: Duration): Future[int64] {.async.} =
let
timestamp1 = getTimestamp()
data = RestNimbusTimestamp1(timestamp1: timestamp1)
resp = await client.getTimesyncInifo(data)
timestamp4 = getTimestamp()
return
case resp.status
of 200:
if resp.contentType.isNone() or
isWildCard(resp.contentType.get().mediaType) or
resp.contentType.get().mediaType != ApplicationJsonMediaType:
raise newException(RestError, "Missing or incorrect Content-Type")
let stamps = decodeBytes(RestNimbusTimestamp2, resp.data,
resp.contentType).valueOr:
raise newException(RestError, $error)
trace "Time offset data",
timestamp1 = timestamp1,
timestamp2 = stamps.timestamp2,
timestamp3 = stamps.timestamp3,
timestamp4 = timestamp4,
delay14 = delay.nanoseconds,
delay23 = stamps.delay
# t1 - time when we sent request.
# t2 - time when remote server received request.
# t3 - time when remote server sent response.
# t4 - time when we received response.
# delay14 = validator client processing delay.
# delay23 = beacon node processing delay.
#
# Round-trip network delay `delta` = (t4 - t1) - (t3 - t2)
# but with delays this will be:
# `delta` = (t4 - t1 + delay14) - (t3 - t2 + delay23)
# Estimated server time is t3 + (delta div 2)
# Estimated clock skew `theta` = t3 + (delta div 2) - t4
let
delay14 = delay.nanoseconds
delay23 = int64(stamps.delay)
offset = (int64(stamps.timestamp2) - int64(timestamp1) +
int64(stamps.timestamp3) - int64(timestamp4) +
delay14 - delay23) div 2
offset
else:
let error = decodeBytes(RestErrorMessage, resp.data,
resp.contentType).valueOr:
let msg = "Incorrect response error format (" & $resp.status &
") [" & $error & "]"
raise (ref RestResponseError)(msg: msg, status: resp.status)
let msg = "Error response (" & $resp.status & ") [" & error.message & "]"
raise (ref RestResponseError)(
msg: msg, status: error.code, message: error.message)
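To make the offset formula above concrete, a worked example with made-up nanosecond timestamps (none of these numbers come from this change):
let
  t1 = 1_000'i64      # request sent by the validator client
  t2 = 1_600'i64      # request received by the beacon node
  t3 = 1_700'i64      # response sent by the beacon node
  t4 = 2_200'i64      # response received by the validator client
  delay14 = 100'i64   # local (validator client) processing delay
  delay23 = 50'i64    # remote (beacon node) processing delay
  offset = (t2 - t1 + t3 - t4 + delay14 - delay23) div 2
doAssert offset == 75  # the beacon node's clock appears roughly 75 ns ahead of ours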

View File

@@ -14,15 +14,16 @@
{.push raises: [].}
import
std/json,
std/[json, tables],
stew/base10, web3/ethtypes,
".."/forks,
".."/datatypes/[phase0, altair, bellatrix, deneb],
".."/mev/[bellatrix_mev, capella_mev]
".."/mev/[bellatrix_mev, capella_mev, deneb_mev]
from ".."/datatypes/capella import BeaconBlockBody
export forks, phase0, altair, bellatrix, capella, bellatrix_mev, capella_mev
export forks, phase0, altair, bellatrix, capella, bellatrix_mev, capella_mev,
deneb_mev, tables
const
# https://github.com/ethereum/eth2.0-APIs/blob/master/apis/beacon/states/validator_balances.yaml#L17
@@ -331,7 +332,7 @@ type
DenebBlockContents* = object
`block`*: deneb.BeaconBlock
blob_sidecars*: List[SignedBlobSidecar, Limit MAX_BLOBS_PER_BLOCK]
blob_sidecars*: List[BlobSidecar, Limit MAX_BLOBS_PER_BLOCK]
ProduceBlockResponseV2* = object
case kind*: ConsensusFork
@@ -376,7 +377,7 @@ type
MAX_DEPOSITS*: uint64
MAX_VOLUNTARY_EXITS*: uint64
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/presets/mainnet/altair.yaml
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/presets/mainnet/altair.yaml
INACTIVITY_PENALTY_QUOTIENT_ALTAIR*: uint64
MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR*: uint64
PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR*: uint64
@@ -385,7 +386,7 @@ type
MIN_SYNC_COMMITTEE_PARTICIPANTS*: uint64
UPDATE_TIMEOUT*: uint64
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/presets/mainnet/bellatrix.yaml
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/presets/mainnet/bellatrix.yaml
INACTIVITY_PENALTY_QUOTIENT_BELLATRIX*: uint64
MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX*: uint64
PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX*: uint64
@@ -394,7 +395,7 @@ type
BYTES_PER_LOGS_BLOOM*: uint64
MAX_EXTRA_DATA_BYTES*: uint64
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/presets/mainnet/capella.yaml
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/presets/mainnet/capella.yaml
MAX_BLS_TO_EXECUTION_CHANGES*: uint64
MAX_WITHDRAWALS_PER_PAYLOAD*: uint64
MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP*: uint64
@@ -474,42 +475,11 @@ type
EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION*: uint64
ATTESTATION_SUBNET_COUNT*: uint64
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/validator.md#constants
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/validator.md#constants
TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE*: uint64
SYNC_COMMITTEE_SUBNET_COUNT*: uint64
# The `RestSpec` is a dynamic dictionary that includes version-specific spec
# constants. New versions may introduce new constants, and remove old ones.
# The Nimbus validator client fetches the remote spec to determine whether it
# is connected to a compatible beacon node. For this purpose, it only needs to
# verify a small set of relevant spec constants. To avoid rejecting a remote
# spec that includes all of those relevant spec constants, but that does not
# include all of the locally known spec constants, a separate type is defined
# that includes just the spec constants relevant for the validator client.
# Extra spec constants are silently ignored.
RestSpecVC* = object
# /!\ Keep in sync with `validator_client/api.nim` > `checkCompatible`.
MAX_VALIDATORS_PER_COMMITTEE*: uint64
SLOTS_PER_EPOCH*: uint64
SECONDS_PER_SLOT*: uint64
EPOCHS_PER_ETH1_VOTING_PERIOD*: uint64
SLOTS_PER_HISTORICAL_ROOT*: uint64
EPOCHS_PER_HISTORICAL_VECTOR*: uint64
EPOCHS_PER_SLASHINGS_VECTOR*: uint64
HISTORICAL_ROOTS_LIMIT*: uint64
VALIDATOR_REGISTRY_LIMIT*: uint64
MAX_PROPOSER_SLASHINGS*: uint64
MAX_ATTESTER_SLASHINGS*: uint64
MAX_ATTESTATIONS*: uint64
MAX_DEPOSITS*: uint64
MAX_VOLUNTARY_EXITS*: uint64
DOMAIN_BEACON_PROPOSER*: DomainType
DOMAIN_BEACON_ATTESTER*: DomainType
DOMAIN_RANDAO*: DomainType
DOMAIN_DEPOSIT*: DomainType
DOMAIN_VOLUNTARY_EXIT*: DomainType
DOMAIN_SELECTION_PROOF*: DomainType
DOMAIN_AGGREGATE_AND_PROOF*: DomainType
VCRuntimeConfig* = Table[string, string]
RestDepositContract* = object
chain_id*: string
@@ -657,6 +627,15 @@ type
RestRoot* = object
root*: Eth2Digest
RestNimbusTimestamp1* = object
timestamp1*: uint64
RestNimbusTimestamp2* = object
timestamp1*: uint64
timestamp2*: uint64
timestamp3*: uint64
delay*: uint64
# Types based on the OAPI yaml file - used in responses to requests
GetBeaconHeadResponse* = DataEnclosedObject[Slot]
GetAggregatedAttestationResponse* = DataEnclosedObject[Attestation]
@@ -673,6 +652,7 @@ type
GetGenesisResponse* = DataEnclosedObject[RestGenesis]
GetHeaderResponseBellatrix* = DataVersionEnclosedObject[bellatrix_mev.SignedBuilderBid]
GetHeaderResponseCapella* = DataVersionEnclosedObject[capella_mev.SignedBuilderBid]
GetHeaderResponseDeneb* = DataVersionEnclosedObject[deneb_mev.SignedBuilderBid]
GetNetworkIdentityResponse* = DataEnclosedObject[RestNetworkIdentity]
GetPeerCountResponse* = DataMetaEnclosedObject[RestPeerCount]
GetPeerResponse* = DataMetaEnclosedObject[RestNodePeer]
@@ -683,7 +663,7 @@ type
GetPoolVoluntaryExitsResponse* = DataEnclosedObject[seq[SignedVoluntaryExit]]
GetProposerDutiesResponse* = DataRootEnclosedObject[seq[RestProposerDuty]]
GetSpecResponse* = DataEnclosedObject[RestSpec]
GetSpecVCResponse* = DataEnclosedObject[RestSpecVC]
GetSpecVCResponse* = DataEnclosedObject[VCRuntimeConfig]
GetStateFinalityCheckpointsResponse* = DataEnclosedObject[RestBeaconStatesFinalityCheckpoints]
GetStateForkResponse* = DataEnclosedObject[Fork]
GetStateRootResponse* = DataOptimisticObject[RestRoot]
@@ -701,6 +681,7 @@ type
ProduceSyncCommitteeContributionResponse* = DataEnclosedObject[SyncCommitteeContribution]
SubmitBlindedBlockResponseBellatrix* = DataEnclosedObject[bellatrix.ExecutionPayload]
SubmitBlindedBlockResponseCapella* = DataEnclosedObject[capella.ExecutionPayload]
SubmitBlindedBlockResponseDeneb* = DataEnclosedObject[deneb.ExecutionPayload]
GetValidatorsActivityResponse* = DataEnclosedObject[seq[RestActivityItem]]
GetValidatorsLivenessResponse* = DataEnclosedObject[seq[RestLivenessItem]]

View File

@@ -17,12 +17,12 @@ import
block_id, eth2_merkleization, eth2_ssz_serialization,
forks_light_client, presets],
./datatypes/[phase0, altair, bellatrix, capella, deneb],
./mev/bellatrix_mev, ./mev/capella_mev
./mev/bellatrix_mev, ./mev/capella_mev, ./mev/deneb_mev
export
extras, block_id, phase0, altair, bellatrix, capella, deneb,
eth2_merkleization, eth2_ssz_serialization, forks_light_client,
presets, bellatrix_mev, capella_mev
presets, bellatrix_mev, capella_mev, deneb_mev
# This file contains helpers for dealing with forks - we have two ways we can
# deal with forks:
@@ -146,12 +146,8 @@ type
of ConsensusFork.Deneb: denebData*: deneb.BeaconBlock
Web3SignerForkedBeaconBlock* = object
case kind*: ConsensusFork
of ConsensusFork.Phase0: phase0Data*: phase0.BeaconBlock
of ConsensusFork.Altair: altairData*: altair.BeaconBlock
of ConsensusFork.Bellatrix: bellatrixData*: BeaconBlockHeader
of ConsensusFork.Capella: capellaData*: BeaconBlockHeader
of ConsensusFork.Deneb: denebData*: BeaconBlockHeader
kind*: ConsensusFork
data*: BeaconBlockHeader
ForkedBlindedBeaconBlock* = object
case kind*: ConsensusFork
@@ -739,7 +735,7 @@ template asTrusted*(
isomorphicCast[ref ForkedTrustedSignedBeaconBlock](x)
template withBlck*(
x: ForkedBeaconBlock | Web3SignerForkedBeaconBlock |
x: ForkedBeaconBlock |
ForkedSignedBeaconBlock | ForkedMsgTrustedSignedBeaconBlock |
ForkedTrustedSignedBeaconBlock | ForkedBlindedBeaconBlock |
ForkedSignedBlindedBeaconBlock,
@@ -769,10 +765,12 @@ template withBlck*(
func proposer_index*(x: ForkedBeaconBlock): uint64 =
withBlck(x): blck.proposer_index
func hash_tree_root*(x: ForkedBeaconBlock | Web3SignerForkedBeaconBlock):
Eth2Digest =
func hash_tree_root*(x: ForkedBeaconBlock): Eth2Digest =
withBlck(x): hash_tree_root(blck)
func hash_tree_root*(x: Web3SignerForkedBeaconBlock): Eth2Digest =
hash_tree_root(x.data)
template getForkedBlockField*(
x: ForkedSignedBeaconBlock |
ForkedMsgTrustedSignedBeaconBlock |
@@ -853,7 +851,8 @@ template withStateAndBlck*(
func toBeaconBlockHeader*(
blck: SomeForkyBeaconBlock | bellatrix_mev.BlindedBeaconBlock |
capella_mev.BlindedBeaconBlock): BeaconBlockHeader =
capella_mev.BlindedBeaconBlock | deneb_mev.BlindedBeaconBlock):
BeaconBlockHeader =
## Reduce a given `BeaconBlock` to its `BeaconBlockHeader`.
BeaconBlockHeader(
slot: blck.slot,

View File

@@ -816,7 +816,7 @@ func migratingToDataFork*[
upgradedObject.migrateToDataFork(newKind)
upgradedObject
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/full-node.md#block_to_light_client_header
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/full-node.md#block_to_light_client_header
func toAltairLightClientHeader(
blck: # `SomeSignedBeaconBlock` doesn't work here (Nim 1.6)
phase0.SignedBeaconBlock | phase0.TrustedSignedBeaconBlock |
@@ -826,7 +826,7 @@ func toAltairLightClientHeader(
altair.LightClientHeader(
beacon: blck.message.toBeaconBlockHeader())
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/capella/light-client/full-node.md#modified-block_to_light_client_header
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/capella/light-client/full-node.md#modified-block_to_light_client_header
func toCapellaLightClientHeader(
blck: # `SomeSignedBeaconBlock` doesn't work here (Nim 1.6)
phase0.SignedBeaconBlock | phase0.TrustedSignedBeaconBlock |

View File

@@ -27,7 +27,7 @@ export
eth2_merkleization, forks, rlp, ssz_codec
func toEther*(gwei: Gwei): Ether =
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/weak-subjectivity.md#constants
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/weak-subjectivity.md#constants
const ETH_TO_GWEI = 1_000_000_000
(gwei div ETH_TO_GWEI).Ether
@@ -47,7 +47,7 @@ func shortLog*(v: FinalityCheckpoints): auto =
chronicles.formatIt FinalityCheckpoints: it.shortLog
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#integer_squareroot
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#integer_squareroot
func integer_squareroot*(n: SomeInteger): SomeInteger =
## Return the largest integer ``x`` such that ``x**2 <= n``.
doAssert n >= 0'u64
@@ -114,7 +114,7 @@ func get_previous_epoch*(
## Return the previous epoch (unless the current epoch is ``GENESIS_EPOCH``).
get_previous_epoch(get_current_epoch(state))
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#get_randao_mix
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#get_randao_mix
func get_randao_mix*(state: ForkyBeaconState, epoch: Epoch): Eth2Digest =
## Return the randao mix at a recent ``epoch``.
state.randao_mixes[epoch mod EPOCHS_PER_HISTORICAL_VECTOR]
@@ -136,7 +136,7 @@ func uint_to_bytes*(x: uint32): array[4, byte] = toBytesLE(x)
func uint_to_bytes*(x: uint16): array[2, byte] = toBytesLE(x)
func uint_to_bytes*(x: uint8): array[1, byte] = toBytesLE(x)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#compute_domain
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#compute_domain
func compute_domain*(
domain_type: DomainType,
fork_version: Version,
@@ -150,7 +150,7 @@ func compute_domain*(
result[0..3] = domain_type.data
result[4..31] = fork_data_root.data.toOpenArray(0, 27)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#get_domain
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#get_domain
func get_domain*(
fork: Fork,
domain_type: DomainType,
@@ -209,7 +209,7 @@ func has_flag*(flags: ParticipationFlags, flag_index: int): bool =
let flag = ParticipationFlags(1'u8 shl flag_index)
(flags and flag) == flag
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/sync-protocol.md#is_sync_committee_update
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/sync-protocol.md#is_sync_committee_update
template is_sync_committee_update*(update: SomeForkyLightClientUpdate): bool =
when update is SomeForkyLightClientUpdateWithSyncCommittee:
update.next_sync_committee_branch !=
@@ -217,25 +217,25 @@ template is_sync_committee_update*(update: SomeForkyLightClientUpdate): bool =
else:
false
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/sync-protocol.md#is_finality_update
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/sync-protocol.md#is_finality_update
template is_finality_update*(update: SomeForkyLightClientUpdate): bool =
when update is SomeForkyLightClientUpdateWithFinality:
update.finality_branch != default(typeof(update.finality_branch))
else:
false
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/sync-protocol.md#is_next_sync_committee_known
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/sync-protocol.md#is_next_sync_committee_known
template is_next_sync_committee_known*(store: ForkyLightClientStore): bool =
store.next_sync_committee != default(typeof(store.next_sync_committee))
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/sync-protocol.md#get_safety_threshold
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/sync-protocol.md#get_safety_threshold
func get_safety_threshold*(store: ForkyLightClientStore): uint64 =
max(
store.previous_max_active_participants,
store.current_max_active_participants
) div 2
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/sync-protocol.md#is_better_update
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/sync-protocol.md#is_better_update
type LightClientUpdateMetadata* = object
attested_slot*, finalized_slot*, signature_slot*: Slot
has_sync_committee*, has_finality*: bool
@@ -326,24 +326,24 @@ template is_better_update*[
new_update: A, old_update: B): bool =
is_better_data(toMeta(new_update), toMeta(old_update))
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/p2p-interface.md#getlightclientbootstrap
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/p2p-interface.md#getlightclientbootstrap
func contextEpoch*(bootstrap: ForkyLightClientBootstrap): Epoch =
bootstrap.header.beacon.slot.epoch
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/p2p-interface.md#lightclientupdatesbyrange
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/p2p-interface.md#getlightclientfinalityupdate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/p2p-interface.md#getlightclientoptimisticupdate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/p2p-interface.md#lightclientupdatesbyrange
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/p2p-interface.md#getlightclientfinalityupdate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/p2p-interface.md#getlightclientoptimisticupdate
func contextEpoch*(update: SomeForkyLightClientUpdate): Epoch =
update.attested_header.beacon.slot.epoch
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/bellatrix/beacon-chain.md#is_merge_transition_complete
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/bellatrix/beacon-chain.md#is_merge_transition_complete
func is_merge_transition_complete*(
state: bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState): bool =
const defaultExecutionPayloadHeader =
default(typeof(state.latest_execution_payload_header))
state.latest_execution_payload_header != defaultExecutionPayloadHeader
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/sync/optimistic.md#helpers
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/sync/optimistic.md#helpers
func is_execution_block*(blck: SomeForkyBeaconBlock): bool =
when typeof(blck).toFork >= ConsensusFork.Bellatrix:
const defaultExecutionPayload =
@@ -352,7 +352,7 @@ func is_execution_block*(blck: SomeForkyBeaconBlock): bool =
else:
false
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/bellatrix/beacon-chain.md#is_merge_transition_block
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/bellatrix/beacon-chain.md#is_merge_transition_block
func is_merge_transition_block(
state: bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState,
body: bellatrix.BeaconBlockBody | bellatrix.TrustedBeaconBlockBody |

View File

@@ -887,13 +887,6 @@ proc readValue*(reader: var JsonReader, value: var RemoteKeystore)
if provenBlockProperties.isNone:
reader.raiseUnexpectedValue("The required field `proven_block_properties` is missing")
let keystoreFlags =
block:
var res: set[RemoteKeystoreFlag]
if ignoreSslVerification.isSome():
res.incl(RemoteKeystoreFlag.IgnoreSSLVerification)
res
value = case remoteType.get(RemoteSignerType.Web3Signer)
of RemoteSignerType.Web3Signer:
RemoteKeystore(
@@ -1373,13 +1366,13 @@ proc createWallet*(kdfKind: KdfKind,
crypto: crypto,
nextAccount: nextAccount.get(0))
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/validator.md#bls_withdrawal_prefix
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/validator.md#bls_withdrawal_prefix
func makeWithdrawalCredentials*(k: ValidatorPubKey): Eth2Digest =
var bytes = eth2digest(k.toRaw())
bytes.data[0] = BLS_WITHDRAWAL_PREFIX.uint8
bytes
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/deposit-contract.md#withdrawal-credentials
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/deposit-contract.md#withdrawal-credentials
proc makeWithdrawalCredentials*(k: CookedPubKey): Eth2Digest =
makeWithdrawalCredentials(k.toPubKey())

View File

@@ -15,7 +15,7 @@ import
from ../consensus_object_pools/block_pools_types import VerifierError
export block_pools_types.VerifierError
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/sync-protocol.md#initialize_light_client_store
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/sync-protocol.md#initialize_light_client_store
func initialize_light_client_store*(
trusted_block_root: Eth2Digest,
bootstrap: ForkyLightClientBootstrap,
@@ -42,7 +42,7 @@ func initialize_light_client_store*(
current_sync_committee: bootstrap.current_sync_committee,
optimistic_header: bootstrap.header))
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/sync-protocol.md#validate_light_client_update
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/sync-protocol.md#validate_light_client_update
proc validate_light_client_update*(
store: ForkyLightClientStore,
update: SomeForkyLightClientUpdate,
@@ -159,7 +159,7 @@ proc validate_light_client_update*(
ok()
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/sync-protocol.md#apply_light_client_update
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/sync-protocol.md#apply_light_client_update
func apply_light_client_update(
store: var ForkyLightClientStore,
update: SomeForkyLightClientUpdate): bool =
@@ -190,7 +190,7 @@ func apply_light_client_update(
didProgress = true
didProgress
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/sync-protocol.md#process_light_client_store_force_update
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/sync-protocol.md#process_light_client_store_force_update
type
ForceUpdateResult* = enum
NoUpdate,
@@ -223,7 +223,7 @@ func process_light_client_store_force_update*(
store.best_valid_update.reset()
res
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/sync-protocol.md#process_light_client_update
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/sync-protocol.md#process_light_client_update
proc process_light_client_update*(
store: var ForkyLightClientStore,
update: SomeForkyLightClientUpdate,

View File

@@ -0,0 +1,87 @@
# beacon_chain
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
import ".."/datatypes/[altair, capella, deneb]
from stew/byteutils import to0xHex
{.push raises: [].}
type
# https://github.com/ethereum/builder-specs/blob/v0.3.0/specs/bellatrix/builder.md#builderbid
# https://github.com/ethereum/builder-specs/blob/v0.3.0/specs/capella/builder.md#executionpayloadheader
BuilderBid* = object
header*: deneb.ExecutionPayloadHeader
value*: UInt256
pubkey*: ValidatorPubKey
# https://github.com/ethereum/builder-specs/blob/v0.3.0/specs/bellatrix/builder.md#signedbuilderbid
# https://github.com/ethereum/builder-specs/blob/v0.3.0/specs/capella/builder.md#executionpayloadheader
SignedBuilderBid* = object
message*: BuilderBid
signature*: ValidatorSig
# https://github.com/ethereum/builder-specs/blob/v0.3.0/specs/capella/builder.md#blindedbeaconblockbody
# https://github.com/ethereum/builder-specs/blob/0b913daaa491cd889083827375977a6285e684bd/specs/deneb/builder.md#blindedbeaconblockbody
BlindedBeaconBlockBody* = object
randao_reveal*: ValidatorSig
eth1_data*: Eth1Data
graffiti*: GraffitiBytes
proposer_slashings*: List[ProposerSlashing, Limit MAX_PROPOSER_SLASHINGS]
attester_slashings*: List[AttesterSlashing, Limit MAX_ATTESTER_SLASHINGS]
attestations*: List[Attestation, Limit MAX_ATTESTATIONS]
deposits*: List[Deposit, Limit MAX_DEPOSITS]
voluntary_exits*: List[SignedVoluntaryExit, Limit MAX_VOLUNTARY_EXITS]
sync_aggregate*: SyncAggregate
execution_payload_header*:
capella.ExecutionPayloadHeader
bls_to_execution_changes*:
List[SignedBLSToExecutionChange,
Limit MAX_BLS_TO_EXECUTION_CHANGES]
blob_kzg_commitments*: KzgCommitments # [New in Deneb]
# https://github.com/ethereum/builder-specs/blob/v0.3.0/specs/bellatrix/builder.md#blindedbeaconblock
# https://github.com/ethereum/builder-specs/blob/v0.3.0/specs/capella/builder.md#blindedbeaconblockbody
# https://github.com/ethereum/builder-specs/blob/0b913daaa491cd889083827375977a6285e684bd/specs/deneb/builder.md#blindedbeaconblockbody
BlindedBeaconBlock* = object
slot*: Slot
proposer_index*: uint64
parent_root*: Eth2Digest
state_root*: Eth2Digest
body*: BlindedBeaconBlockBody # [Modified in Deneb]
# https://github.com/ethereum/builder-specs/blob/v0.3.0/specs/bellatrix/builder.md#signedblindedbeaconblock
# https://github.com/ethereum/builder-specs/blob/v0.3.0/specs/capella/builder.md#blindedbeaconblockbody
SignedBlindedBeaconBlock* = object
message*: BlindedBeaconBlock
signature*: ValidatorSig
func shortLog*(v: BlindedBeaconBlock): auto =
(
slot: shortLog(v.slot),
proposer_index: v.proposer_index,
parent_root: shortLog(v.parent_root),
state_root: shortLog(v.state_root),
eth1data: v.body.eth1_data,
graffiti: $v.body.graffiti,
proposer_slashings_len: v.body.proposer_slashings.len(),
attester_slashings_len: v.body.attester_slashings.len(),
attestations_len: v.body.attestations.len(),
deposits_len: v.body.deposits.len(),
voluntary_exits_len: v.body.voluntary_exits.len(),
sync_committee_participants: v.body.sync_aggregate.num_active_participants,
block_number: v.body.execution_payload_header.block_number,
# TODO checksum hex? shortlog?
fee_recipient: to0xHex(v.body.execution_payload_header.fee_recipient.data),
bls_to_execution_changes_len: v.body.bls_to_execution_changes.len(),
blob_kzg_commitments_len: 0, # Deneb compat
)
func shortLog*(v: SignedBlindedBeaconBlock): auto =
(
blck: shortLog(v.message),
signature: shortLog(v.signature)
)

View File

@@ -0,0 +1,27 @@
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [].}
import
chronos, presto/client,
".."/eth2_apis/[rest_types, eth2_rest_serialization]
export chronos, client, rest_types, eth2_rest_serialization
proc getHeaderDeneb*(slot: Slot,
parent_hash: Eth2Digest,
pubkey: ValidatorPubKey
): RestResponse[GetHeaderResponseDeneb] {.
rest, endpoint: "/eth/v1/builder/header/{slot}/{parent_hash}/{pubkey}",
meth: MethodGet, connection: {Dedicated, Close}.}
## https://github.com/ethereum/builder-specs/blob/v0.3.0/apis/builder/header.yaml
proc submitBlindedBlock*(body: deneb_mev.SignedBlindedBeaconBlock
): RestResponse[SubmitBlindedBlockResponseDeneb] {.
rest, endpoint: "/eth/v1/builder/blinded_blocks",
meth: MethodPost, connection: {Dedicated, Close}.}
## https://github.com/ethereum/builder-specs/blob/v0.3.0/apis/builder/blinded_blocks.yaml

View File

@@ -14,8 +14,8 @@ import
export base
const
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/p2p-interface.md#topics-and-messages
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/capella/p2p-interface.md#topics-and-messages
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/p2p-interface.md#topics-and-messages
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/capella/p2p-interface.md#topics-and-messages
topicBeaconBlocksSuffix* = "beacon_block/ssz_snappy"
topicVoluntaryExitsSuffix* = "voluntary_exit/ssz_snappy"
topicProposerSlashingsSuffix* = "proposer_slashing/ssz_snappy"
@@ -27,7 +27,7 @@ const
MAX_REQUEST_BLOCKS* = 1024
RESP_TIMEOUT* = 10.seconds
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/p2p-interface.md#configuration
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/p2p-interface.md#configuration
MAX_REQUEST_LIGHT_CLIENT_UPDATES* = 128
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/bellatrix/p2p-interface.md#configuration
@@ -68,7 +68,7 @@ func getAttesterSlashingsTopic*(forkDigest: ForkDigest): string =
func getAggregateAndProofsTopic*(forkDigest: ForkDigest): string =
eth2Prefix(forkDigest) & topicAggregateAndProofsSuffix
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/capella/p2p-interface.md#topics-and-messages
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/capella/p2p-interface.md#topics-and-messages
func getBlsToExecutionChangeTopic*(forkDigest: ForkDigest): string =
eth2Prefix(forkDigest) & topicBlsToExecutionChangeSuffix
@@ -94,28 +94,32 @@ func getAttestationTopic*(forkDigest: ForkDigest,
## For subscribing and unsubscribing to/from a subnet.
eth2Prefix(forkDigest) & "beacon_attestation_" & $(subnetId) & "/ssz_snappy"
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/p2p-interface.md#topics-and-messages
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/p2p-interface.md#topics-and-messages
func getSyncCommitteeTopic*(forkDigest: ForkDigest,
subcommitteeIdx: SyncSubcommitteeIndex): string =
## For subscribing and unsubscribing to/from a subnet.
eth2Prefix(forkDigest) & "sync_committee_" & $subcommitteeIdx & "/ssz_snappy"
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/p2p-interface.md#topics-and-messages
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/p2p-interface.md#topics-and-messages
func getSyncCommitteeContributionAndProofTopic*(forkDigest: ForkDigest): string =
## For subscribing and unsubscribing to/from a subnet.
eth2Prefix(forkDigest) & "sync_committee_contribution_and_proof/ssz_snappy"
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/p2p-interface.md#blob_sidecar_index
func getBlobSidecarTopic*(forkDigest: ForkDigest,
index: BlobIndex): string =
eth2Prefix(forkDigest) & "blob_sidecar_" & $index & "/ssz_snappy"
subnet_id: SubnetId): string =
eth2Prefix(forkDigest) & "blob_sidecar_" & $subnet_id & "/ssz_snappy"
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/p2p-interface.md#light_client_finality_update
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/deneb/validator.md#sidecar
func compute_subnet_for_blob_sidecar*(blob_index: BlobIndex): SubnetId =
SubnetId(blob_index mod BLOB_SIDECAR_SUBNET_COUNT)
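As a quick illustration, assuming the mainnet value BLOB_SIDECAR_SUBNET_COUNT = 6 (an assumption, it is not part of this diff): blob index 7 maps to subnet 7 mod 6 = 1, so getBlobSidecarTopic would produce a topic of the form /eth2/<fork_digest>/blob_sidecar_1/ssz_snappy.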
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/p2p-interface.md#light_client_finality_update
func getLightClientFinalityUpdateTopic*(forkDigest: ForkDigest): string =
## For broadcasting or obtaining the latest `LightClientFinalityUpdate`.
eth2Prefix(forkDigest) & "light_client_finality_update/ssz_snappy"
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/light-client/p2p-interface.md#light_client_optimistic_update
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/light-client/p2p-interface.md#light_client_optimistic_update
func getLightClientOptimisticUpdateTopic*(forkDigest: ForkDigest): string =
## For broadcasting or obtaining the latest `LightClientOptimisticUpdate`.
eth2Prefix(forkDigest) & "light_client_optimistic_update/ssz_snappy"
@@ -152,7 +156,7 @@ func getDiscoveryForkID*(cfg: RuntimeConfig,
next_fork_version: current_fork_version,
next_fork_epoch: FAR_FUTURE_EPOCH)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/p2p-interface.md#transitioning-the-gossip
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/p2p-interface.md#transitioning-the-gossip
type GossipState* = set[ConsensusFork]
func getTargetGossipState*(
epoch, ALTAIR_FORK_EPOCH, BELLATRIX_FORK_EPOCH, CAPELLA_FORK_EPOCH,
@@ -194,7 +198,7 @@ func getTargetGossipState*(
targetForks
func nearSyncCommitteePeriod*(epoch: Epoch): Opt[uint64] =
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/validator.md#sync-committee-subnet-stability
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/validator.md#sync-committee-subnet-stability
if epoch.is_sync_committee_period():
return Opt.some 0'u64
let epochsBefore =
@@ -213,7 +217,7 @@ func getSyncSubnets*(
if not nodeHasPubkey(pubkey):
continue
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/validator.md#broadcast-sync-committee-message
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/validator.md#broadcast-sync-committee-message
# The first quarter of the pubkeys map to subnet 0, the second quarter to
# subnet 1, the third quarter to subnet 2 and the final quarter to subnet
# 3.

View File

@@ -23,7 +23,6 @@ const
# Constants from `validator.md` not covered by config/presets in the spec
TARGET_AGGREGATORS_PER_COMMITTEE*: uint64 = 16
RANDOM_SUBNETS_PER_VALIDATOR*: uint64 = 1
EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION*: uint64 = 256
type
@@ -91,6 +90,7 @@ const
ignoredValues = [
"TRANSITION_TOTAL_DIFFICULTY", # Name that appears in some altair alphas, obsolete, remove when no more testnets
"MIN_ANCHOR_POW_BLOCK_DIFFICULTY", # Name that appears in some altair alphas, obsolete, remove when no more testnets
"RANDOM_SUBNETS_PER_VALIDATOR", # Removed in consensus-specs v1.4.0
]
when const_preset == "mainnet":
@@ -468,7 +468,7 @@ func parse(T: type DomainType, input: string): T
{.raises: [ValueError, Defect].} =
DomainType hexToByteArray(input, 4)
proc readRuntimeConfig(
proc readRuntimeConfig*(
fileContent: string, path: string): (RuntimeConfig, seq[string]) {.
raises: [IOError, PresetFileError, PresetIncompatibleError, Defect].} =
var
@@ -562,7 +562,6 @@ proc readRuntimeConfig(
checkCompatibility MAX_VOLUNTARY_EXITS
checkCompatibility TARGET_AGGREGATORS_PER_COMMITTEE
checkCompatibility RANDOM_SUBNETS_PER_VALIDATOR
checkCompatibility EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION
checkCompatibility ATTESTATION_SUBNET_COUNT

View File

@@ -6,7 +6,7 @@
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# Mainnet preset - Altair
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/presets/mainnet/altair.yaml
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/presets/mainnet/altair.yaml
const
# Updated penalty values
# ---------------------------------------------------------------

View File

@@ -6,7 +6,7 @@
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# Gnosis preset - Bellatrix
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/presets/mainnet/bellatrix.yaml
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/presets/mainnet/bellatrix.yaml
const
# Updated penalty values
# ---------------------------------------------------------------

View File

@@ -6,7 +6,7 @@
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# Mainnet preset - Altair
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/presets/mainnet/altair.yaml
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/presets/mainnet/altair.yaml
const
# Updated penalty values
# ---------------------------------------------------------------

View File

@@ -6,7 +6,7 @@
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# Mainnet preset - Bellatrix
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/presets/mainnet/bellatrix.yaml
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/presets/mainnet/bellatrix.yaml
const
# Updated penalty values
# ---------------------------------------------------------------

View File

@@ -6,7 +6,7 @@
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# Mainnet preset - Capella
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/presets/mainnet/capella.yaml
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/presets/mainnet/capella.yaml
const
# Max operations per block
# ---------------------------------------------------------------

View File

@@ -6,7 +6,7 @@
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# Mainnet preset - Deneb
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/presets/mainnet/deneb.yaml
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/presets/mainnet/deneb.yaml
const
# `uint64(4096)`
FIELD_ELEMENTS_PER_BLOB*: uint64 = 4096

View File

@@ -6,7 +6,7 @@
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# Minimal preset - Altair
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/presets/minimal/altair.yaml
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/presets/minimal/altair.yaml
const
# Updated penalty values
# ---------------------------------------------------------------

View File

@@ -6,7 +6,7 @@
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# Minimal preset - Bellatrix
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/presets/minimal/bellatrix.yaml
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/presets/minimal/bellatrix.yaml
const
# Updated penalty values
# ---------------------------------------------------------------

View File

@@ -6,7 +6,7 @@
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# Minimal preset - Capella
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/presets/minimal/capella.yaml
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/presets/minimal/capella.yaml
const
# Max operations per block
# ---------------------------------------------------------------

View File

@@ -6,7 +6,7 @@
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# Minimal preset - Deneb
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/presets/minimal/deneb.yaml
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/presets/minimal/deneb.yaml
const
# [customized]
FIELD_ELEMENTS_PER_BLOB*: uint64 = 4

View File

@@ -44,7 +44,7 @@ func compute_slot_signing_root*(
fork, DOMAIN_SELECTION_PROOF, epoch, genesis_validators_root)
compute_signing_root(slot, domain)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/validator.md#aggregation-selection
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/validator.md#aggregation-selection
func get_slot_signature*(
fork: Fork, genesis_validators_root: Eth2Digest, slot: Slot,
privkey: ValidatorPrivKey): CookedSig =
@@ -59,7 +59,7 @@ func compute_epoch_signing_root*(
let domain = get_domain(fork, DOMAIN_RANDAO, epoch, genesis_validators_root)
compute_signing_root(epoch, domain)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/validator.md#randao-reveal
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/validator.md#randao-reveal
func get_epoch_signature*(
fork: Fork, genesis_validators_root: Eth2Digest, epoch: Epoch,
privkey: ValidatorPrivKey): CookedSig =
@@ -147,7 +147,7 @@ func compute_aggregate_and_proof_signing_root*(
fork, DOMAIN_AGGREGATE_AND_PROOF, epoch, genesis_validators_root)
compute_signing_root(aggregate_and_proof, domain)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/validator.md#broadcast-aggregate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/validator.md#broadcast-aggregate
func get_aggregate_and_proof_signature*(fork: Fork, genesis_validators_root: Eth2Digest,
aggregate_and_proof: AggregateAndProof,
privkey: ValidatorPrivKey): CookedSig =
@@ -265,7 +265,7 @@ proc verify_voluntary_exit_signature*(
blsVerify(pubkey, signing_root.data, signature)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/validator.md#prepare-sync-committee-message
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/validator.md#prepare-sync-committee-message
func compute_sync_committee_message_signing_root*(
fork: Fork, genesis_validators_root: Eth2Digest,
slot: Slot, beacon_block_root: Eth2Digest): Eth2Digest =
@@ -300,7 +300,7 @@ proc verify_sync_committee_signature*(
blsFastAggregateVerify(pubkeys, signing_root.data, signature)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/validator.md#aggregation-selection
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/validator.md#aggregation-selection
func compute_sync_committee_selection_proof_signing_root*(
fork: Fork, genesis_validators_root: Eth2Digest,
slot: Slot, subcommittee_index: SyncSubcommitteeIndex): Eth2Digest =
@@ -331,7 +331,7 @@ proc verify_sync_committee_selection_proof*(
blsVerify(pubkey, signing_root.data, signature)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/validator.md#signature
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/validator.md#signature
func compute_contribution_and_proof_signing_root*(
fork: Fork, genesis_validators_root: Eth2Digest,
msg: ContributionAndProof): Eth2Digest =
@@ -349,7 +349,7 @@ proc get_contribution_and_proof_signature*(
blsSign(privkey, signing_root.data)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/validator.md#aggregation-selection
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/validator.md#aggregation-selection
func is_sync_committee_aggregator*(signature: ValidatorSig): bool =
let
signatureDigest = eth2digest(signature.blob)
@@ -369,7 +369,7 @@ proc verify_contribution_and_proof_signature*(
func compute_builder_signing_root(
fork: Fork,
msg: bellatrix_mev.BuilderBid | capella_mev.BuilderBid |
ValidatorRegistrationV1): Eth2Digest =
deneb_mev.BuilderBid | ValidatorRegistrationV1): Eth2Digest =
# Uses genesis fork version regardless
doAssert fork.current_version == fork.previous_version
@@ -384,12 +384,13 @@ proc get_builder_signature*(
blsSign(privkey, signing_root.data)
proc verify_builder_signature*(
fork: Fork, msg: bellatrix_mev.BuilderBid | capella_mev.BuilderBid,
fork: Fork, msg: bellatrix_mev.BuilderBid | capella_mev.BuilderBid |
deneb_mev.BuilderBid,
pubkey: ValidatorPubKey | CookedPubKey, signature: SomeSig): bool =
let signing_root = compute_builder_signing_root(fork, msg)
blsVerify(pubkey, signing_root.data, signature)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/capella/beacon-chain.md#new-process_bls_to_execution_change
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/capella/beacon-chain.md#new-process_bls_to_execution_change
func compute_bls_to_execution_change_signing_root*(
genesisFork: Fork, genesis_validators_root: Eth2Digest,
msg: BLSToExecutionChange): Eth2Digest =

View File

@@ -361,7 +361,7 @@ template partialBeaconBlock*(
deposits: List[Deposit, Limit MAX_DEPOSITS](deposits),
voluntary_exits: validator_changes.voluntary_exits))
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/validator.md#preparing-a-beaconblock
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/validator.md#preparing-a-beaconblock
template partialBeaconBlock*(
cfg: RuntimeConfig,
state: var altair.HashedBeaconState,
@@ -420,7 +420,7 @@ template partialBeaconBlock*(
sync_aggregate: sync_aggregate,
execution_payload: execution_payload.executionPayload))
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/capella/validator.md#block-proposal
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/capella/validator.md#block-proposal
template partialBeaconBlock*(
cfg: RuntimeConfig,
state: var capella.HashedBeaconState,
@@ -537,7 +537,7 @@ proc makeBeaconBlock*(
forkyState.data.latest_execution_payload_header.transactions_root =
transactions_root.get
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/bellatrix/beacon-chain.md#beaconblockbody
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/bellatrix/beacon-chain.md#beaconblockbody
# Effectively hash_tree_root(ExecutionPayload) with the beacon block
# body, with the execution payload replaced by the execution payload
# header. htr(payload) == htr(payload header), so substitute.
@@ -572,8 +572,28 @@ proc makeBeaconBlock*(
hash_tree_root(sync_aggregate),
execution_payload_root.get,
hash_tree_root(validator_changes.bls_to_execution_changes)])
elif consensusFork > ConsensusFork.Capella:
discard denebImplementationMissing
elif consensusFork == ConsensusFork.Deneb:
when executionPayload is deneb.ExecutionPayloadForSigning:
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/deneb/beacon-chain.md#beaconblockbody
forkyState.data.latest_block_header.body_root = hash_tree_root(
[hash_tree_root(randao_reveal),
hash_tree_root(eth1_data),
hash_tree_root(graffiti),
hash_tree_root(validator_changes.proposer_slashings),
hash_tree_root(validator_changes.attester_slashings),
hash_tree_root(List[Attestation, Limit MAX_ATTESTATIONS](attestations)),
hash_tree_root(List[Deposit, Limit MAX_DEPOSITS](deposits)),
hash_tree_root(validator_changes.voluntary_exits),
hash_tree_root(sync_aggregate),
execution_payload_root.get,
hash_tree_root(validator_changes.bls_to_execution_changes),
hash_tree_root(executionPayload.kzgs)
])
else:
raiseAssert "Attempt to use non-Deneb payload with post-Deneb state"
else:
static: raiseAssert "Unreachable"
state.`kind Data`.root = hash_tree_root(state.`kind Data`.data)
blck.`kind Data`.state_root = state.`kind Data`.root

View File

@@ -9,7 +9,7 @@
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#block-processing
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/altair/beacon-chain.md#block-processing
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/bellatrix/beacon-chain.md#block-processing
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/capella/beacon-chain.md#block-processing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/capella/beacon-chain.md#block-processing
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/beacon-chain.md#block-processing
#
# The entry point is `process_block` which is at the bottom of this file.
@@ -126,7 +126,7 @@ func process_eth1_data(
state.eth1_data = body.eth1_data
ok()
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#is_slashable_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#is_slashable_validator
func is_slashable_validator(validator: Validator, epoch: Epoch): bool =
# Check if ``validator`` is slashable.
(not validator.slashed) and
@@ -182,7 +182,7 @@ proc check_proposer_slashing*(
withState(state):
check_proposer_slashing(forkyState.data, proposer_slashing, flags)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#proposer-slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#proposer-slashings
proc process_proposer_slashing*(
cfg: RuntimeConfig, state: var ForkyBeaconState,
proposer_slashing: SomeProposerSlashing, flags: UpdateFlags,
@@ -246,7 +246,7 @@ proc check_attester_slashing*(
withState(state):
check_attester_slashing(forkyState.data, attester_slashing, flags)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#attester-slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#attester-slashings
proc process_attester_slashing*(
cfg: RuntimeConfig,
state: var ForkyBeaconState,
@@ -330,7 +330,8 @@ proc process_deposit*(cfg: RuntimeConfig,
ok()
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#voluntary-exits
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#voluntary-exits
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/deneb/beacon-chain.md#modified-process_voluntary_exit
proc check_voluntary_exit*(
cfg: RuntimeConfig,
state: ForkyBeaconState,
@@ -364,8 +365,16 @@ proc check_voluntary_exit*(
# Verify signature
if skipBlsValidation notin flags:
let exitSignatureFork =
when typeof(state).toFork >= ConsensusFork.Deneb:
Fork(
previous_version: cfg.CAPELLA_FORK_VERSION,
current_version: cfg.CAPELLA_FORK_VERSION,
epoch: cfg.CAPELLA_FORK_EPOCH)
else:
state.fork
if not verify_voluntary_exit_signature(
state.fork, state.genesis_validators_root, voluntary_exit,
exitSignatureFork, state.genesis_validators_root, voluntary_exit,
validator[].pubkey, signed_voluntary_exit.signature):
return err("Exit: invalid signature")
@@ -379,7 +388,7 @@ proc check_voluntary_exit*(
withState(state):
check_voluntary_exit(cfg, forkyState.data, signed_voluntary_exit, flags)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#voluntary-exits
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#voluntary-exits
proc process_voluntary_exit*(
cfg: RuntimeConfig,
state: var ForkyBeaconState,
@@ -410,7 +419,7 @@ proc process_bls_to_execution_change*(
ok()
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#operations
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/capella/beacon-chain.md#modified-process_operations
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/capella/beacon-chain.md#modified-process_operations
proc process_operations(cfg: RuntimeConfig,
state: var ForkyBeaconState,
body: SomeForkyBeaconBlockBody,
@@ -456,11 +465,11 @@ func get_participant_reward*(total_active_balance: Gwei): Gwei =
WEIGHT_DENOMINATOR div SLOTS_PER_EPOCH
max_participant_rewards div SYNC_COMMITTEE_SIZE
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/beacon-chain.md#sync-aggregate-processing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/beacon-chain.md#sync-aggregate-processing
func get_proposer_reward*(participant_reward: Gwei): Gwei =
participant_reward * PROPOSER_WEIGHT div (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT)
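For intuition, a self-contained sketch of the proposer share implied by the formula above, using the mainnet Altair weights PROPOSER_WEIGHT = 8 and WEIGHT_DENOMINATOR = 64 (assumed here; they are not shown in this diff):
const
  PROPOSER_WEIGHT = 8'u64
  WEIGHT_DENOMINATOR = 64'u64
func proposerShare(participantReward: uint64): uint64 =
  participantReward * PROPOSER_WEIGHT div (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT)
doAssert proposerShare(56) == 8  # the proposer earns one seventh of each participant reward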
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/altair/beacon-chain.md#sync-aggregate-processing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/beacon-chain.md#sync-aggregate-processing
proc process_sync_aggregate*(
state: var (altair.BeaconState | bellatrix.BeaconState |
capella.BeaconState | deneb.BeaconState),
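For reference, with the Altair weight constants (`PROPOSER_WEIGHT = 8`, `WEIGHT_DENOMINATOR = 64`) the proposer earns 8/56 = 1/7 of each sync-committee participant reward. A standalone Nim check of that ratio, with the constants restated locally rather than imported:

```nim
const
  PROPOSER_WEIGHT = 8'u64        # Altair weight constants, restated locally
  WEIGHT_DENOMINATOR = 64'u64

func proposerReward(participantReward: uint64): uint64 =
  # Mirrors get_proposer_reward above: the proposer's share per participant.
  participantReward * PROPOSER_WEIGHT div (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT)

when isMainModule:
  doAssert proposerReward(7_000) == 1_000   # exactly 1/7 when divisible
  doAssert proposerReward(100) == 14        # integer division truncates otherwise
```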
@@ -836,7 +845,7 @@ proc process_block*(
ok()
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/bellatrix/beacon-chain.md#block-processing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/bellatrix/beacon-chain.md#block-processing
# TODO workaround for https://github.com/nim-lang/Nim/issues/18095
type SomeBellatrixBlock =
bellatrix.BeaconBlock | bellatrix.SigVerifiedBeaconBlock | bellatrix.TrustedBeaconBlock

View File

@@ -243,7 +243,7 @@ func is_unslashed_participating_index(
has_flag(epoch_participation[].item(validator_index), flag_index) and
not state.validators[validator_index].slashed
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#justification-and-finalization
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#justification-and-finalization
type FinalityState = object
slot: Slot
current_epoch_ancestor_root: Eth2Digest
@@ -311,7 +311,7 @@ proc weigh_justification_and_finalization(
## state.justification_bits[1:] = state.justification_bits[:-1]
## state.justification_bits[0] = 0b0
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#misc
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#misc
const JUSTIFICATION_BITS_LENGTH = 4
state.justification_bits = JustificationBits(
@@ -388,7 +388,7 @@ proc weigh_justification_and_finalization(
current_epoch = current_epoch,
checkpoint = shortLog(state.finalized_checkpoint)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#justification-and-finalization
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#justification-and-finalization
proc process_justification_and_finalization*(
state: var phase0.BeaconState,
balances: TotalBalances, flags: UpdateFlags = {}) =
@@ -424,7 +424,7 @@ proc compute_unrealized_finality*(
justified: finalityState.current_justified_checkpoint,
finalized: finalityState.finalized_checkpoint)
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/altair/beacon-chain.md#justification-and-finalization
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/beacon-chain.md#justification-and-finalization
proc process_justification_and_finalization*(
state: var (altair.BeaconState | bellatrix.BeaconState |
capella.BeaconState | deneb.BeaconState),
@@ -460,7 +460,7 @@ proc compute_unrealized_finality*(
justified: finalityState.current_justified_checkpoint,
finalized: finalityState.finalized_checkpoint)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#helpers
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#helpers
func get_base_reward_sqrt*(state: phase0.BeaconState, index: ValidatorIndex,
total_balance_sqrt: auto): Gwei =
# Spec function recalculates total_balance every time, which creates an
@@ -508,7 +508,7 @@ func get_attestation_component_delta(is_unslashed_attester: bool,
else:
RewardDelta(penalties: base_reward)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#components-of-attestation-deltas
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#components-of-attestation-deltas
func get_source_delta*(validator: RewardStatus,
base_reward: uint64,
balances: TotalBalances,
@@ -631,7 +631,7 @@ func get_attestation_deltas(
info.validators[proposer_index].delta.add(
proposer_delta.get()[1])
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/altair/beacon-chain.md#get_base_reward
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/beacon-chain.md#get_base_reward
func get_base_reward_increment*(
state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState |
deneb.BeaconState,
@@ -656,7 +656,7 @@ func get_flag_index_reward*(
else:
0.Gwei
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/beacon-chain.md#get_flag_index_deltas
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/beacon-chain.md#get_flag_index_deltas
func get_unslashed_participating_increment*(
info: altair.EpochInfo | bellatrix.BeaconState, flag_index: int): Gwei =
info.balances.previous_epoch[flag_index] div EFFECTIVE_BALANCE_INCREMENT
@@ -666,7 +666,7 @@ func get_active_increments*(
info: altair.EpochInfo | bellatrix.BeaconState): Gwei =
info.balances.current_epoch div EFFECTIVE_BALANCE_INCREMENT
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/beacon-chain.md#get_flag_index_deltas
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/beacon-chain.md#get_flag_index_deltas
iterator get_flag_index_deltas*(
state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState |
deneb.BeaconState,
@@ -890,12 +890,12 @@ func get_adjusted_total_slashing_balance*(
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/bellatrix/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/bellatrix/beacon-chain.md#slashings
func slashing_penalty_applies*(validator: Validator, epoch: Epoch): bool =
validator.slashed and
epoch + EPOCHS_PER_SLASHINGS_VECTOR div 2 == validator.withdrawable_epoch
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/bellatrix/beacon-chain.md#slashings
func get_slashing_penalty*(validator: Validator,
@@ -908,8 +908,8 @@ func get_slashing_penalty*(validator: Validator,
penalty_numerator div total_balance * increment
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/bellatrix/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/bellatrix/beacon-chain.md#slashings
func process_slashings*(state: var ForkyBeaconState, total_balance: Gwei) =
let
epoch = get_current_epoch(state)
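The `penalty_numerator div total_balance * increment` line above is the tail of the slashing-penalty formula from the linked `#slashings` sections: `penalty_numerator = effective_balance div increment * adjusted_total_slashing_balance`, so a slashed validator loses a share of its stake proportional to the (multiplier-adjusted) balance slashed recently, quantized to whole `EFFECTIVE_BALANCE_INCREMENT`s. A rough standalone Nim sketch of that arithmetic, with the adjusted slashing balance passed in directly:

```nim
const EFFECTIVE_BALANCE_INCREMENT = 1_000_000_000'u64  # 1 ETH in Gwei

func slashingPenalty(effectiveBalance, adjustedTotalSlashingBalance,
                     totalBalance: uint64): uint64 =
  # penalty_numerator div total_balance * increment, as in get_slashing_penalty
  let
    increment = EFFECTIVE_BALANCE_INCREMENT
    penaltyNumerator = effectiveBalance div increment * adjustedTotalSlashingBalance
  penaltyNumerator div totalBalance * increment

when isMainModule:
  # 32 ETH validator, adjusted slashing balance equal to 10% of a 10M ETH
  # total: the proportional share is 3.2 ETH, quantized down to 3 ETH.
  doAssert slashingPenalty(32_000_000_000'u64,
                           1_000_000_000_000_000'u64,
                           10_000_000_000_000_000'u64) == 3_000_000_000'u64
```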
@@ -953,7 +953,7 @@ func process_effective_balance_updates*(state: var ForkyBeaconState) =
if new_effective_balance != effective_balance:
state.validators.mitem(vidx).effective_balance = new_effective_balance
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#slashings-balances-updates
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#slashings-balances-updates
func process_slashings_reset*(state: var ForkyBeaconState) =
let next_epoch = get_current_epoch(state) + 1
@@ -986,7 +986,7 @@ func process_historical_roots_update*(state: var ForkyBeaconState) =
if next_epoch mod (SLOTS_PER_HISTORICAL_ROOT div SLOTS_PER_EPOCH) == 0:
# Equivalent to hash_tree_root(foo: HistoricalBatch), but without using
# significant additional stack or heap.
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#historicalbatch
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#historicalbatch
# In response to https://github.com/status-im/nimbus-eth2/issues/921
if not state.historical_roots.add state.compute_historical_root():
raiseAssert "no more room for historical roots, so long and thanks for the fish!"
@@ -1058,7 +1058,7 @@ func process_inactivity_updates*(
if pre_inactivity_score != inactivity_score:
state.inactivity_scores[index] = inactivity_score
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/capella/beacon-chain.md#historical-summaries-updates
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/capella/beacon-chain.md#historical-summaries-updates
func process_historical_summaries_update*(
state: var (capella.BeaconState | deneb.BeaconState)):
Result[void, cstring] =
@@ -1074,7 +1074,7 @@ func process_historical_summaries_update*(
ok()
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#epoch-processing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#epoch-processing
proc process_epoch*(
cfg: RuntimeConfig, state: var phase0.BeaconState, flags: UpdateFlags,
cache: var StateCache, info: var phase0.EpochInfo): Result[void, cstring] =
@@ -1178,7 +1178,7 @@ proc process_epoch*(
ok()
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/capella/beacon-chain.md#epoch-processing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/capella/beacon-chain.md#epoch-processing
proc process_epoch*(
cfg: RuntimeConfig,
state: var (capella.BeaconState | deneb.BeaconState),
@@ -1189,7 +1189,7 @@ proc process_epoch*(
info.init(state)
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/altair/beacon-chain.md#justification-and-finalization
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/beacon-chain.md#justification-and-finalization
process_justification_and_finalization(state, info.balances, flags)
# state.slot hasn't been incremented yet.
@@ -1208,10 +1208,10 @@ proc process_epoch*(
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/altair/beacon-chain.md#rewards-and-penalties
process_rewards_and_penalties(cfg, state, info)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#registry-updates
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#registry-updates
? process_registry_updates(cfg, state, cache)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/altair/beacon-chain.md#slashings
process_slashings(state, info.balances.current_epoch)
process_eth1_data_reset(state)

View File

@@ -21,7 +21,7 @@ const
PIVOT_VIEW_SIZE = SEED_SIZE + ROUND_SIZE
TOTAL_SIZE = PIVOT_VIEW_SIZE + POSITION_WINDOW_SIZE
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#compute_shuffled_index
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#compute_shuffled_index
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#compute_committee
# Port of https://github.com/protolambda/zrnt/blob/master/eth2/beacon/shuffle.go
# Shuffles or unshuffles, depending on the `dir` (true for shuffling, false for unshuffling
@@ -301,7 +301,7 @@ func get_beacon_committee_len*(
withState(state):
get_beacon_committee_len(forkyState.data, slot, index, cache)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#compute_shuffled_index
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#compute_shuffled_index
func compute_shuffled_index*(
index: uint64, index_count: uint64, seed: Eth2Digest): uint64 =
## Return the shuffled index corresponding to ``seed`` (and ``index_count``).
@@ -336,7 +336,7 @@ func compute_shuffled_index*(
cur_idx_permuted
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#compute_proposer_index
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#compute_proposer_index
func compute_proposer_index(state: ForkyBeaconState,
indices: seq[ValidatorIndex], seed: Eth2Digest): Opt[ValidatorIndex] =
## Return from ``indices`` a random index sampled by effective balance.
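Per the linked `compute_proposer_index` section, that sampling accepts a shuffled candidate with random byte `b` when `effective_balance * 255 >= MAX_EFFECTIVE_BALANCE * b`, so a full 32 ETH validator is always accepted and lighter ones proportionally less often. A standalone Nim sketch of just that acceptance test (the seeded candidate and byte derivation are omitted):

```nim
const
  MAX_RANDOM_BYTE = 255'u64
  MAX_EFFECTIVE_BALANCE = 32_000_000_000'u64  # 32 ETH in Gwei

func acceptsCandidate(effectiveBalance: uint64, randomByte: byte): bool =
  # A 32 ETH candidate always passes; a 16 ETH candidate passes for roughly
  # half of the possible byte values.
  effectiveBalance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * randomByte.uint64

when isMainModule:
  doAssert acceptsCandidate(32_000_000_000'u64, 255)
  doAssert acceptsCandidate(16_000_000_000'u64, 127)
  doAssert not acceptsCandidate(16_000_000_000'u64, 128)
```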
@@ -363,7 +363,7 @@ func compute_proposer_index(state: ForkyBeaconState,
return Opt.some(candidate_index)
i += 1
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#get_beacon_proposer_index
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#get_beacon_proposer_index
func get_beacon_proposer_index*(
state: ForkyBeaconState, cache: var StateCache, slot: Slot):
Opt[ValidatorIndex] =
@@ -401,7 +401,7 @@ func get_beacon_proposer_index*(
return res
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#get_beacon_proposer_index
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/beacon-chain.md#get_beacon_proposer_index
func get_beacon_proposer_index*(state: ForkyBeaconState, cache: var StateCache):
Opt[ValidatorIndex] =
## Return the beacon proposer index at the current slot.
@@ -413,7 +413,7 @@ func get_beacon_proposer_index*(state: ForkedHashedBeaconState,
withState(state):
get_beacon_proposer_index(forkyState.data, cache, slot)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/validator.md#aggregation-selection
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/phase0/validator.md#aggregation-selection
func is_aggregator*(committee_len: uint64, slot_signature: ValidatorSig): bool =
let modulo = max(1'u64, committee_len div TARGET_AGGREGATORS_PER_COMMITTEE)
bytes_to_uint64(eth2digest(
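`is_aggregator` self-selects roughly `TARGET_AGGREGATORS_PER_COMMITTEE` members per committee: the first 8 bytes of the digest of the slot signature are reduced modulo `max(1, committee_len div TARGET_AGGREGATORS_PER_COMMITTEE)` and the member aggregates when the result is zero. A small standalone Nim sketch of the modulus arithmetic, assuming the mainnet value `TARGET_AGGREGATORS_PER_COMMITTEE = 16` and omitting the hash:

```nim
const TARGET_AGGREGATORS_PER_COMMITTEE = 16'u64

func aggregatorModulo(committeeLen: uint64): uint64 =
  # Same modulus as is_aggregator: a uniformly random 8-byte digest value
  # then selects on the order of 16 aggregators regardless of committee size.
  max(1'u64, committeeLen div TARGET_AGGREGATORS_PER_COMMITTEE)

when isMainModule:
  doAssert aggregatorModulo(8) == 1      # tiny committee: everyone aggregates
  doAssert aggregatorModulo(128) == 8    # ~1 in 8 members self-select
  doAssert aggregatorModulo(512) == 32   # ~1 in 32 members self-select
```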
@@ -470,3 +470,30 @@ func livenessFailsafeInEffect*(
streakLen = 0
false
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/p2p-interface.md#attestation-subnet-subcription
func compute_subscribed_subnet(node_id: UInt256, epoch: Epoch, index: uint64):
SubnetId =
# Ensure neither `truncate` loses information
static:
doAssert EPOCHS_PER_SUBNET_SUBSCRIPTION <= high(uint64)
doAssert sizeof(UInt256) * 8 == NODE_ID_BITS
doAssert ATTESTATION_SUBNET_PREFIX_BITS < sizeof(SubnetId) * 8
let
node_id_prefix = truncate(
node_id shr (NODE_ID_BITS - ATTESTATION_SUBNET_PREFIX_BITS), uint64)
node_offset = truncate(node_id mod EPOCHS_PER_SUBNET_SUBSCRIPTION, uint64)
permutation_seed = eth2digest(uint_to_bytes(
uint64((epoch + node_offset) div EPOCHS_PER_SUBNET_SUBSCRIPTION)))
permutated_prefix = compute_shuffled_index(
node_id_prefix,
1 shl ATTESTATION_SUBNET_PREFIX_BITS,
permutation_seed,
)
SubnetId((permutated_prefix + index) mod ATTESTATION_SUBNET_COUNT)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/p2p-interface.md#attestation-subnet-subcription
iterator compute_subscribed_subnets*(node_id: UInt256, epoch: Epoch): SubnetId =
for index in 0'u64 ..< SUBNETS_PER_NODE:
yield compute_subscribed_subnet(node_id, epoch, index)
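Each node therefore subscribes to `SUBNETS_PER_NODE` consecutive subnet IDs (mod `ATTESTATION_SUBNET_COUNT`), starting from a prefix that is re-shuffled every `EPOCHS_PER_SUBNET_SUBSCRIPTION` epochs. A toy Nim sketch of the final mapping step, assuming the mainnet values `ATTESTATION_SUBNET_COUNT = 64` and `SUBNETS_PER_NODE = 2`, and taking the already-shuffled prefix as an input rather than recomputing it:

```nim
const
  ATTESTATION_SUBNET_COUNT = 64'u64
  SUBNETS_PER_NODE = 2'u64

iterator subscribedSubnets(permutatedPrefix: uint64): uint64 =
  # Mirrors the final step above: the node's subnets are consecutive IDs
  # starting at its shuffled prefix, wrapping around modulo 64.
  for index in 0'u64 ..< SUBNETS_PER_NODE:
    yield (permutatedPrefix + index) mod ATTESTATION_SUBNET_COUNT

when isMainModule:
  var subnets: seq[uint64]
  for s in subscribedSubnets(63):
    subnets.add s
  doAssert subnets == @[63'u64, 0'u64]   # wrap-around at the subnet count
```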
