Compare commits


70 Commits

Author SHA1 Message Date
Manu NALEPA
491a173678 Request block by root: Return early and improve logging. 2025-02-03 16:21:32 +01:00
terence
910609a75f Handle errors as no-op for execution requests (#14826)
* Update electra core processing error handling

* Add test for IsExecutionRequestError

* Add TestProcessOperationsWithNilRequests

* gazelle

---------

Co-authored-by: Preston Van Loon <preston@pvl.dev>
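The `IsExecutionRequestError` check tested above implies an error-classification helper; a minimal sketch of that idea, with the sentinel error and wrapping strategy as assumptions rather than Prysm's actual types:

```go
package electra

import "errors"

// errExecutionRequest and the wrapping strategy are assumptions sketching the
// idea; Prysm's actual error type may differ.
var errExecutionRequest = errors.New("execution request processing failed")

// IsExecutionRequestError reports whether err originated from execution-request
// processing, so callers can treat such failures as a no-op instead of
// rejecting the whole block.
func IsExecutionRequestError(err error) bool {
	return errors.Is(err, errExecutionRequest)
}
```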
2025-01-31 22:17:27 +00:00
kasey
f9c202190a warnings for flags due for deprecation (#14856)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2025-01-31 21:30:27 +00:00
Radosław Kapka
4a63a194b1 Address @jtraglia's comments regarding Electra code (#14833)
* change field IDs in `AggregateAttestationAndProofElectra`

* fix typo in `validator.proto`

* correct slashing indices length and slashings length

* check length in indexed attestation's `ToConsensus` method

* use `fieldparams.BLSSignatureLength`

* Add length checks for execution request

* fix typo in `beacon_state.proto`

* fix typo in `ssz_proto_library.bzl`

* fix error messages about incorrect types in block factory

* add Electra case to `BeaconBlockContainerToSignedBeaconBlock`

* move PeerDAS config items to PeerDAS section

* remove redundant params

* rename `PendingDepositLimit` to `PendingDepositsLimit`

* improve requests unmarshaling errors

* rename `get_validator_max_effective_balance` to `get_max_effective_balance`

* fix typo in `consolidations.go`

* rename `index` to `validator_index` in `PendingPartialWithdrawal`

* rename `randomByte` to `randomBytes` in `validators.go`

* fix for version in a comment in `validator.go`

* changelog <3

* Revert "rename `index` to `validator_index` in `PendingPartialWithdrawal`"

This reverts commit 87e4da0ea2.
2025-01-31 15:41:52 +00:00
james-prysm
d887536eb7 skip eth1data voting after electra (#14835)
* wip skip eth1data voting after electra

* updating technique

* adding fix for electra eth1 voting

* fixing linting on test

* seeing if reversing genesis state fixes problem

* increasing safety of legacy check

* review feedback

* forgot to fix tests

* nishant's feedback

* nishant's feedback

* rename function a little

* Update beacon-chain/core/helpers/legacy.go

Co-authored-by: Jun Song <87601811+syjn99@users.noreply.github.com>

* fixing naming

---------

Co-authored-by: Jun Song <87601811+syjn99@users.noreply.github.com>
2025-01-31 15:19:28 +00:00
Radosław Kapka
1069da1cd2 Convert Phase0 slashing to Electra slashings at the fork (#14844)
* EIP-7549: slasher

* update chunks and detection

* update tests

* encode+decode

* timer

* test fixes

* testing the timer

* Decouple pool from service

* update mock

* cleanup

* make review easier

* comments and changelog
2025-01-31 03:17:52 +00:00
Potuz
4a487ba3bc Don't mark blocks as invalid on context deadlines (#14838)
* Don't mark blocks as invalid on context deadlines

When processing state transition, if the error is because of a context
deadline, do not mark it as invalid.
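A minimal sketch of that idea (helper name and shape are assumptions, not Prysm's actual code):

```go
package blockchain

import (
	"context"
	"errors"
)

// handleTransitionError only treats a block as invalid when the state
// transition failed for a real validation reason, not because the caller's
// context deadline expired mid-processing.
func handleTransitionError(err error, markInvalid func()) error {
	if err == nil {
		return nil
	}
	if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
		return err // transient: the block may still be valid, do not mark it bad
	}
	markInvalid() // genuine validation failure
	return err
}
```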

* review

* fix changelog
2025-01-31 03:16:16 +00:00
james-prysm
bf81cd4449 Electra blob sidecar API update (#14852)
* adding in versioned header and unit tests

* changelog

* handling case

* changelog
2025-01-31 02:39:27 +00:00
terence
00337fe005 Add nil consolidation check for core processing (#14851) 2025-01-30 22:24:23 +00:00
terence
bb3fba4d8e Add a test for nil withdrawal request (#14850) 2025-01-30 21:24:40 +00:00
terence
89967fe209 Move deposit request nil check for all (#14849) 2025-01-30 21:24:31 +00:00
terence
56712b5e49 Update electra spec tests to beta.1 (#14841) 2025-01-29 18:19:07 +00:00
Preston Van Loon
0be9391e62 electra: Improve test coverage for beacon-chain/core/electra/churn.go (#14837)
* Fixed mutants in beacon-chain/core/electra/churn.go

┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╍┅
┃ 🧬 Mutant survived: beacon-chain/core/electra/churn.go → Arithmetic
┠┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄
┃ --- beacon-chain/core/electra/churn.go (original)
┃ +++ beacon-chain/core/electra/churn.go (mutated with 'Arithmetic')
┃ @@ -64,7 +64,7 @@
┃       if consolidationBalance > consolidationBalanceToConsume {
┃               balanceToProcess := consolidationBalance - consolidationBalanceToConsume
┃               // additional_epochs = (balance_to_process - 1) // per_epoch_consolidation_churn + 1
┃ -             additionalEpochs, err := math.Div64(uint64(balanceToProcess-1), uint64(perEpochConsolidationChurn))
┃ +             additionalEpochs, err := math.Div64(uint64(balanceToProcess+1), uint64(perEpochConsolidationChurn))
┃               if err != nil {
┃                       return 0, err
┃               }
┃
┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╍┅

┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╍┅
┃ 🧬 Mutant survived: beacon-chain/core/electra/churn.go → Comparison
┠┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄
┃ --- beacon-chain/core/electra/churn.go (original)
┃ +++ beacon-chain/core/electra/churn.go (mutated with 'Comparison')
┃ @@ -61,7 +61,7 @@
┃       }
┃
┃       // Consolidation doesn't fit in the current earliest epoch.
┃ -     if consolidationBalance > consolidationBalanceToConsume {
┃ +     if consolidationBalance >= consolidationBalanceToConsume {
┃               balanceToProcess := consolidationBalance - consolidationBalanceToConsume
┃               // additional_epochs = (balance_to_process - 1) // per_epoch_consolidation_churn + 1
┃               additionalEpochs, err := math.Div64(uint64(balanceToProcess-1), uint64(perEpochConsolidationChurn))
┃
┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╍┅

┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╍┅
┃ 🧬 Mutant survived: beacon-chain/core/electra/churn.go → Integer Decrement
┠┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄
┃ --- beacon-chain/core/electra/churn.go (original)
┃ +++ beacon-chain/core/electra/churn.go (mutated with 'Integer Decrement')
┃ @@ -64,7 +64,7 @@
┃       if consolidationBalance > consolidationBalanceToConsume {
┃               balanceToProcess := consolidationBalance - consolidationBalanceToConsume
┃               // additional_epochs = (balance_to_process - 1) // per_epoch_consolidation_churn + 1
┃ -             additionalEpochs, err := math.Div64(uint64(balanceToProcess-1), uint64(perEpochConsolidationChurn))
┃ +             additionalEpochs, err := math.Div64(uint64(balanceToProcess-0), uint64(perEpochConsolidationChurn))
┃               if err != nil {
┃                       return 0, err
┃               }
┃
┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╍┅

┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╍┅                                                                                                                                                      
┃ 🧬 Mutant survived: beacon-chain/core/electra/churn.go → Integer Increment                                                                                                                  
┠┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄                                                                                                                                                      
┃ --- beacon-chain/core/electra/churn.go (original)                                                                                                                                           
┃ +++ beacon-chain/core/electra/churn.go (mutated with 'Integer Increment')                                                                                                                   
┃ @@ -64,7 +64,7 @@                                                                                                                                                                           
┃       if consolidationBalance > consolidationBalanceToConsume {                                                                                                                             
┃               balanceToProcess := consolidationBalance - consolidationBalanceToConsume                                                                                                      
┃               // additional_epochs = (balance_to_process - 1) // per_epoch_consolidation_churn + 1                                                                                          
┃ -             additionalEpochs, err := math.Div64(uint64(balanceToProcess-1), uint64(perEpochConsolidationChurn))                                                                           
┃ +             additionalEpochs, err := math.Div64(uint64(balanceToProcess-2), uint64(perEpochConsolidationChurn))                                                                           
┃               if err != nil {                                                                                                                                                               
┃                       return 0, err                                                                                                                                                         
┃               }                                                                                                                                                                             
┃                                                                                                                                                                                             
┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╍┅
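All three arithmetic mutants only differ from the original at an exact churn boundary, so a boundary-value test kills them. A hedged, self-contained sketch (the helper re-implements the spec arithmetic rather than calling Prysm's real churn function; the comparison mutant additionally needs an equal-balance case against the full function):

```go
package electra_test

import "testing"

// additionalEpochs mirrors the spec arithmetic exercised above:
//   additional_epochs = (balance_to_process - 1) // per_epoch_consolidation_churn + 1
func additionalEpochs(balanceToProcess, perEpochChurn uint64) uint64 {
	return (balanceToProcess-1)/perEpochChurn + 1
}

func TestAdditionalEpochsBoundaries(t *testing.T) {
	const churn = uint64(128_000_000_000) // illustrative churn value, not the real config

	// Exactly one epoch of churn: (churn-1)/churn + 1 == 1.
	// The '+1' and '-0' mutants both return 2 here.
	if got := additionalEpochs(churn, churn); got != 1 {
		t.Errorf("exact multiple: got %d, want 1", got)
	}
	// One gwei over a full epoch of churn: churn/churn + 1 == 2.
	// The '-2' mutant returns 1 here.
	if got := additionalEpochs(churn+1, churn); got != 2 {
		t.Errorf("one over: got %d, want 2", got)
	}
}
```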

* Changelog fragment
2025-01-29 02:15:43 +00:00
Dhruv Bodani
4a9c60f75f Implement beacon db pruner (#14687)
* implement weak subjectivity pruner

* fix goimports

* add delete before slot method to database

* add method to interface

* update changelog

* add flags

* wire pruner

* align pruner with backfill service

* rename db method

* fix imports

* delete block slot indices

* check for backfill and initial sync

* add tests

* fix imports

* Update beacon-chain/db/kv/blocks.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update cmd/beacon-chain/flags/base.go

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* Update beacon-chain/db/pruner/pruner.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* cleanup

* fix buildkite

* initialise atomic bool

* delete data from remaining buckets

* fix build

* fix build

* address review comments

* add test for blockParentRootIndicesBucket

* fix changelog

* fix build

* address kasey's comments

* fix build

* add trace span to blockRootsBySlotRange

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2025-01-28 16:55:50 +00:00
Radosław Kapka
9cf6b93356 Handle AttesterSlashingElectra everywhere in the codebase (#14823)
* Handle `AttesterSlashingElectra` everywhere in the codebase

* simplify `TestProcessAttesterSlashings_AppliesCorrectStatus`
2025-01-28 15:06:37 +00:00
Preston Van Loon
b4220e35c4 CI: Add clang-formatter lint workflow for proto files (#14831)
* Enforce clang-format for proto files in proto/

* Update pb files after #14818

* Changelog fragment
2025-01-27 20:01:21 +00:00
terence
536cded4cc Fix batch deposit processing by retrieving validators from state (#14827)
* Fix batch process new pending deposits by getting validators from state

* Update tt_fix_pending_deposits.md
2025-01-27 18:11:06 +00:00
Bastin
86fc64c917 Lightclient Bootstrap API SSZ support tests (#14824)
* add bootstrap ssz tests

* add changelog entry

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2025-01-27 12:20:48 +00:00
Nishant Das
5d6a406829 Update to Go 1.23 (#14818)
* Update to Go 1.23

* Update bazel version

* Update rules_go

* Use toolchains_protoc

* Update go_honnef_go_tools

* Update golang.org/x/tools

* Fix violations of SA3000

* Update errcheck by re-exporting the upstream repo

* Remove problematic ginkgo and gomega test helpers. Rewrote tests without these test libraries.

* Update go to 1.23.5

* gofmt with go1.23.5

* Revert Patch

* Unclog

* Update for go 1.23 support

* Fix Lint Issues

* Gazelle

* Fix Build

* Fix Lint

* no lint

* Fix lint

* Fix lint

* Disable intrange

* Preston's review

---------

Co-authored-by: Preston Van Loon <preston@pvl.dev>
2025-01-24 04:53:23 +00:00
james-prysm
2c78e501b3 Builder: Electra (#14344)
* removing skip from test

* builder wip

* removing todo, it's probably ok

* adding more TODOs

* adding fromProtoElectra

* using lightclient's branch and updating values

* minor fixes

* rolling back dependency changes

* go mod tidy

* adding space back in

* updating builder changes based on execution request changes

* update ssz

* changelog

* updating based on execution request changes

* fixing validation

* adding builder test for electra

* gaz

* attempting to fix test

* fixing ssz

* fixing build and handling develop changes

* gaz

* fixing unfinished function

* fixing test

* fixing important missed regression

* removing unneeded validations

* missed linting

* gofmt

* fixing fulu test

* fixing changelog

* Update bid.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update bid.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update types.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update types.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update james-prysm_builder-electra.md

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update testing/middleware/builder/builder.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* addressing review feedback and updating e2e

* fixing parsing bid version

* reversing incorrect check

* improving tests and updating more code based on review feedback

* gofmt

* fixing unit tests

* more feedback from terence

* gofmt

* Update api/client/builder/types.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update beacon-chain/rpc/prysm/v1alpha1/validator/proposer_bellatrix.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update beacon-chain/rpc/prysm/v1alpha1/validator/proposer_bellatrix.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update beacon-chain/rpc/prysm/v1alpha1/validator/proposer_bellatrix.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update beacon-chain/rpc/prysm/v1alpha1/validator/proposer_bellatrix.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update api/client/builder/types.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* addressing nitpicks

* gofmt

* radek feedback

* improves error

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2025-01-23 17:48:19 +00:00
Jun Song
c8cb0f37b2 fix: early return for packing local deposits when EIP-6110 is applied (#14697)
* fix: early return for gathering local deposits when EIP-6110 is applied

* Add an entry on CHANGELOG.md

* Fix weird indent at CHANGELOG.md

* Add changelog

* Fix CHANGELOG.md

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2025-01-23 16:21:57 +00:00
Manu NALEPA
78722239da nodeFilter: Add GossipBlobSidecarMessage case. (#14822)
Before this commit, logs like these were possible:

```
[2025-01-22 17:18:48]  DEBUG sync: Could not search for peers error=node filter: no subnet exists for provided topic: /eth2/d1f05cae/blob_sidecar_0/ssz_snappy
[2025-01-22 17:18:48]  DEBUG sync: Could not search for peers error=node filter: no subnet exists for provided topic: /eth2/d1f05cae/blob_sidecar_1/ssz_snappy
[2025-01-22 17:18:48]  DEBUG sync: Could not search for peers error=node filter: no subnet exists for provided topic: /eth2/d1f05cae/blob_sidecar_2/ssz_snappy
[2025-01-22 17:18:48]  DEBUG sync: Could not search for peers error=node filter: no subnet exists for provided topic: /eth2/d1f05cae/blob_sidecar_3/ssz_snappy
[2025-01-22 17:18:48]  DEBUG sync: Could not search for peers error=node filter: no subnet exists for provided topic: /eth2/d1f05cae/blob_sidecar_4/ssz_snappy
[2025-01-22 17:18:48]  DEBUG sync: Could not search for peers error=node filter: no subnet exists for provided topic: /eth2/d1f05cae/blob_sidecar_5/ssz_snappy
[2025-01-22 17:18:48]  DEBUG sync: Could not search for peers error=node filter: no subnet exists for provided topic: /eth2/d1f05cae/blob_sidecar_6/ssz_snappy
[2025-01-22 17:18:48]  DEBUG sync: Could not search for peers error=node filter: no subnet exists for provided topic: /eth2/d1f05cae/blob_sidecar_7/ssz_snappy
[2025-01-22 17:18:48]  DEBUG sync: Could not search for peers error=node filter: no subnet exists for provided topic: /eth2/d1f05cae/blob_sidecar_8/ssz_snappy
```

Note this bug has no real impact other than logging these errors: since all nodes are subscribed to these subnets, as soon as some peers are found, the issue disappears.

Why not use `s.subscribe` instead of `s.subscribeWithParameters`?
Blob subnets used to be static subnets, but since Electra the number of subnets is a function of the epoch.
So it is better to use `s.subscribeWithParameters` than to maintain two specific but almost identical functions in `s.subscribe`.

Why is `filterPeerForBlobSubnet` the only filter that always returns `true`?
Because blob subnets are the only subnets that are both dynamic AND must be subscribed to by every node.
So `filterPeerForBlobSubnet` does not filter out any node.
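As a rough illustration of that last point, a blob-subnet peer filter of this shape (signature assumed, not necessarily Prysm's exact one) never rejects a node:

```go
package sync

import "github.com/ethereum/go-ethereum/p2p/enode"

// Sketch only: blob subnets are dynamic since Electra (their count depends on
// the epoch), but every node must join all of them, so the per-subnet peer
// filter never rejects a node.
func filterPeerForBlobSubnet() func(*enode.Node) bool {
	return func(_ *enode.Node) bool {
		return true // never filter out a peer for blob subnets
	}
}
```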
2025-01-22 19:56:55 +00:00
Manu NALEPA
3ffef024c7 UpgradeToFulu: Respect the specification. (#14821)
188a2ff818/specs/fulu/fork.md (upgrading-the-state)

Before this commit, `UpgradeToFulu` did not really respect the specification. This commit fixes that.

How can we be sure the specification is now really respected?

As long as the equivalent of https://github.com/ethereum/consensus-spec-tests/tree/master/tests/mainnet/electra/fork/fork/pyspec_tests is not released for Fulu, we cannot be sure.

However, with this commit, Prysm and Lighthouse agree on the post state after the Fulu fork (which is not the case without this commit).

So either Prysm and Lighthouse are both right, or they are both wrong (but in exactly the same way, which has a pretty low likelihood).
2025-01-22 18:39:12 +00:00
Radosław Kapka
a1eef44492 Update slasher service to Electra (#14812)
* Update slasher service to Electra

* Update beacon-chain/slasher/chunks.go

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>

* Update beacon-chain/slasher/chunks_test.go

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>

* Manu's review

* Manu's review again

---------

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>
2025-01-22 17:32:19 +00:00
Radosław Kapka
2845ab9365 Update proto_test.go to Electra (#14817) 2025-01-21 20:30:52 +00:00
Radosław Kapka
4f43c15ebb Update rewards API to Electra (#14816) 2025-01-21 20:13:36 +00:00
Radosław Kapka
e473d7cc4d Use SingleAttestation for Fulu in p2p attestation map (#14809)
* Use `SingleAttestation` for Fulu in p2p attestation map.

* Fix `TestExtractDataType`.

---------

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>
2025-01-20 10:35:52 +00:00
Potuz
794a05af26 Remove unused Copy() from the ReadOnlyBeaconBlock interface (#14811) 2025-01-19 19:56:14 +00:00
hidewrong
15df13c7e6 Signed-off-by: hidewrong <hidewrong@outlook.com> (#14792)
Signed-off-by: hidewrong <hidewrong@outlook.com>
2025-01-17 15:59:29 +00:00
Potuz
b76f7fed2f move credential helpers to ReadOnlyValidator methods (#14808)
* move credential helpers to ReadOnlyValidator methods

* Changelog + Gazelle

* Fix nil tests

* Fix more nil tests

* Fix another nil test
2025-01-17 12:37:08 +00:00
james-prysm
e263687ea5 Remote Signer: Electra (#14477)
* updating blockv2 to handle electra blocks

* adding aggregate attestation and proof electra

* gaz

* changelogs

* updating web3signer dependency

* test mock was flipped

* fixing hex value

* accidently checked in dependency changes

* preston feedback

* readding old metrics to not break linting

* review feedback and changelog

* gaz
2025-01-16 17:51:59 +00:00
james-prysm
0b16c79c35 fix e2e scenario evaluators (#14798)
* make fork evaluators conditional on fork for scenario tests

* changelog
2025-01-16 04:38:26 +00:00
terence
dc002c2806 Truncate ExtraData to 32 bytes to satisfy SSZ marshal (#14803) 2025-01-16 00:14:56 +00:00
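For context, the consensus SSZ schema caps `extra_data` at 32 bytes (`MAX_EXTRA_DATA_BYTES`), so longer values must be truncated before marshaling. A hedged sketch of such a guard (helper name assumed, not the actual Prysm code):

```go
package blocks

// MAX_EXTRA_DATA_BYTES in the consensus spec is 32, so the SSZ field is capped.
const maxExtraDataBytes = 32

// truncateExtraData caps extra_data at the SSZ limit before marshaling so
// over-long values no longer fail to encode.
func truncateExtraData(extra []byte) []byte {
	if len(extra) > maxExtraDataBytes {
		return extra[:maxExtraDataBytes]
	}
	return extra
}
```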
kasey
e7e48dcaf9 version pin unclog release (#14802)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2025-01-15 23:50:10 +00:00
james-prysm
8f43f6cc84 adding in error for generic check (#14801) 2025-01-15 21:36:09 +00:00
Manu NALEPA
e07341e1d5 ToBlinded: Use Fulu. (#14797)
* `ToBlinded`: Use Fulu.

* Fix Sammy's comment.

* `unmarshalState`: Use `hasFuluKey`.
2025-01-15 19:50:48 +00:00
Preston Van Loon
ef293e52f8 Use ip.addr.tools for DNS resolution test in p2p (#14800)
* Use ip.addr.tools for DNS resolution test in p2p

* Changelog fragment
2025-01-15 18:07:00 +00:00
Jun Song
72cc63a6a3 Clean TestCanUpgrade* tests (#14791)
* Clean TestCanUpgrade* tests

* Add new changelog file
2025-01-15 15:42:21 +00:00
Jun Song
34ff4c3ea9 Replace new exampleIP for example.org (#14795)
* Replace new exampleIP for example.org

* Add changelog
2025-01-15 06:14:25 +00:00
kasey
e8c968326a switch to unclog latest release instead of run artifact (#14793)
* use latest unclog release instead of run artifact

* add missing changelog entries to pre-unclog starter pack

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2025-01-14 21:47:20 +00:00
kasey
2000ef457b Move prysm to unclog for changelog management (#14782)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2025-01-14 18:22:38 +00:00
james-prysm
e36564c4d3 goethereum dependency to v1.14~ (#14351)
* updating the goethereum dependency

* fixing dependencies

* reverting workspace

* more fixes, work in progress

* trying with upgraded geth version

* fixing deprecated functions except for the time related ones on eth1 distance due to time issues

* fixing time issues

* gaz

* fixing test and upgrading some dependencies and reverting others

* Disable cgo in hid, delete old vendored usb library

* changelog

* rolling back dependencies

* fixing go mod tidy

* Geth v1.13.6

* fix tests

* Add ping interval, set to 500ms for tests. This didn't work

* Update to v1.14.8

* Spread it out to different bootnodes

* Fix it

* Remove Memsize

* Update all out of date dependencies

* Fix geth body change

* Fix Test

* Fix Build

* Fix Tests

* Fix Tests Again

* Fix Tests Again

* Fix Tests

* Fix Test

* Copy USB Package for HID

* Push it up

* Finally fix all tests with felix's changes

* updating geth dependency

* Update go-ethereum to v1.14.11

* fixing import

* reverting blob change

* fixing Implicit memory aliasing in for loop.

* WIP changes

* wip getting a little further on e2e runs

* getting a little further

* getting a little further

* setting everything to capella

* more partial fixes

* more fixes but still WIP

* fixing access list transactions"

* some cleanup

* making configs dynamic

* reverting time

* skip lower bound in builder

* updating to geth v1.14.12

* fixing verify blob to pointer

* go mod tidy

* fixing linting

* missed removing another terminal difficulty item

* another missed update

* updating more dependencies to fix cicd

* fixing holiman dependency update

* downgrading geth to 1.14.11 due to p2p loop issue

* reverting builder middleware caused by downgrade

* fixing more rollback issues

* upgrading back to 1.14.12 after discussing with preston

* mod tidy

* gofmt

* partial review feedback

* trying to start e2e from bellatrix instead

* reverting some changes

---------

Co-authored-by: Preston Van Loon <preston@pvl.dev>
Co-authored-by: nisdas <nishdas93@gmail.com>
2025-01-14 08:35:49 +00:00
terence
e5784d09f0 Update spec test to v1.5.0-beta.0 (#14788) 2025-01-13 20:05:34 +00:00
terence
e577bb0dcf Revert BlobSidecarsByRoot/Range version to v1 (#14785)
* Update blobs by range rpc topics to v1

* Update blobs by range rpc topics to v1

* RPC handler comments: Use "Added", "Modified" and "Upgraded".

- Added: no message with this message name previously existed.
- Upgraded: a message with this message name existed in the previous fork, but the schema version is upgraded in the current fork.
- Modified: the message name and schema version are the same as in the previous fork, but the implementation of the handler is modified in the current fork.

---------

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>
2025-01-13 16:58:44 +00:00
Radosław Kapka
153d1872ae Separate type for unaggregated network attestations (#14659)
* definitions and gossip

* validator

* broadcast

* broadcast the correct att depending on version

* small updates

* don't check bits after Electra

* nitpick

* tests

* changelog <3

* review

* more review

* review yet again

* try a different design

* fix gossip issues

* cleanup

* tests

* reduce cognitive complexity

* Preston's review

* move changelog entry to unreleased section

* fix pending atts pool issues

* reviews

* Potuz's comments

* test fixes
2025-01-13 16:48:20 +00:00
terence
80aa811ab9 Fix kzg commitment inclusion proof depth minimal value (#14787) 2025-01-13 15:51:30 +00:00
Bastin
39cf2c8f06 Read LC Bootstraps from DB (#14718)
* fix bootstrap handler

* add tests for get bootstrap handler

* changelog

* make CreateDefaultLightClientBootstrap private

* remove fulu test

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2025-01-10 16:51:55 +00:00
Bastin
e99df5e489 Save LC Bootstrap DB Space Optimization (#14775)
* add db optimization altair

* add db optimization

* add tests

* fix tests

* fix changelog

* remove debug code

* remove unused code

* fix comment

* remove unused code
2025-01-09 18:00:08 +00:00
oftenoccur
393e63e8e5 refactor: using slices.Contains to simplify the code (#14781)
Signed-off-by: oftenoccur <ezc5@sina.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
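For illustration, the pattern this refactor applies - `slices.Contains` (standard library since Go 1.21) in place of a hand-rolled membership loop:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	peers := []string{"peerA", "peerB", "peerC"}
	// Before: a hand-rolled for-loop membership check.
	// After: slices.Contains from the standard library.
	fmt.Println(slices.Contains(peers, "peerB")) // true
}
```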
2025-01-09 00:04:45 +00:00
Manu NALEPA
c48d40907c Add Fulu fork boilerplate (#14771)
* Prepare for future fork boilerplate.

* Implement the Fulu fork boilerplate.

* `Upgraded state to <fork>` log: Move from debug to info.

Rationale:
This log is the only one notifying the user that a new fork happened.
A new fork is always a little bit stressful for a node operator.
Having at least one log indicating that the client switched forks is useful.

* Update testing/util/helpers.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Fix Radek's comment.

* Fix Radek's comment.

* Update beacon-chain/state/state-native/state_trie.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update beacon-chain/state/state-native/state_trie.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Fix Radek's comment.

* Fix Radek's comment.

* Fix Radek's comment.

* Remove Electra struct type aliasing.

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2025-01-07 20:09:12 +00:00
Nishant Das
a21f544219 Update to v0.33 (#14780) 2025-01-07 09:20:45 +00:00
Preston Van Loon
705a9e8dcd Electra: Spec review of process_slashings (#14766)
* Electra: Review process_slashings

* Update signature not to return the state, there is no need

* Update CHANGELOG.md
2025-01-06 13:24:58 +00:00
Skylar Ray
2dcb015470 Update CHANGELOG.md (#14753) 2025-01-06 13:09:25 +00:00
Nishant Das
211e1a4b7c Close Streams For Metadata Requests Better (#14776)
* Close streams better

* Changelog
2025-01-06 11:01:58 +00:00
Nishant Das
1efca9c28d Add Ability to Trace IDONTWANT Control Messages (#14778)
* Trace IDONTWANT Requests

* Changelog
2025-01-06 10:13:22 +00:00
Nishant Das
97d7ca828b Security Update For Dependencies (#14777)
* Update Golang Crypto

* Changelog
2025-01-06 10:00:59 +00:00
Preston Van Loon
31df250496 Electra: Update spec definition for process_registry_updates (#14767)
* Electra: review process_registry_updates

* Update CHANGELOG.md
2025-01-03 16:15:02 +00:00
Manu NALEPA
8a439a6f5d Simplify next Fork boilerplate creation. (#14761)
* Simplify next Fork boilerplate creation.

* Update beacon-chain/blockchain/execution_engine.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update beacon-chain/blockchain/execution_engine.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update beacon-chain/blockchain/execution_engine.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update beacon-chain/state/state-native/spec_parameters.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update beacon-chain/state/state-native/spec_parameters.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update beacon-chain/state/state-native/spec_parameters.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update beacon-chain/state/state-native/spec_parameters.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Fix Radek's comments.

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2025-01-03 14:39:08 +00:00
Radosław Kapka
8cff9356f1 Remove duplicate imports (#14772) 2025-01-03 11:42:14 +00:00
Nishant Das
afeb05c9a1 Update Go-Libp2p-Pubsub (#14770)
* Update to Current Master Head

* Changelog

* Update CHANGELOG.md
2025-01-03 10:52:46 +00:00
Preston Van Loon
fa16232924 Electra: Update spec definition for process_epoch (#14768)
* Electra: Review process_epoch

* Update CHANGELOG.md
2025-01-03 09:40:53 +00:00
Preston Van Loon
1f720bdbf4 Fix all typos (#14769) 2025-01-03 09:40:13 +00:00
Brindrajsinh Chauhan
79ea77ff57 update go version to 1.22.10 (#14729)
* update to go 1.22.10

* update change log

---------

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2025-01-02 23:07:38 +00:00
Radosław Kapka
d35cd0788e Remove v2 protos (#14765)
* Remove v2 protos

* changelog <3
2025-01-02 19:40:07 +00:00
Preston Van Loon
093e3df80a Remove slashingMultiplier argument from ProcessSlashings (#14762)
* Remove slashingMultiplier argument from ProcessSlashings

* Update CHANGELOG.md
2025-01-01 18:21:52 +00:00
Manu NALEPA
699a3b07a7 SSZ generation: Remove the // Hash: ... header. (#14760)
* `update-go-ssz.sh`: Remove the `// Hash: ...` line from the generated files header.

* Generate SSZ files.
2024-12-31 12:07:41 +00:00
Manu NALEPA
937d441e2e registerSubscribers: Register correctly. (#14759)
Issue before this commit:
The `currentSlot` in `getSubnetsToSubscribe` is really set to the current slot. So even if the `registerSubscribers` function is called
one epoch in advance, as done in `registerForUpcomingFork`,
`currentSlot` will still be - really - the current slot.
==> The new blob subnets are subscribed only at the start of the
Electra fork, and not one epoch in advance.

With this commit:
The new blob subnets are effectively subscribed one epoch in advance.
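A minimal sketch of the idea, assuming Prysm's `params` and `slots` helpers (the function below is illustrative, not the actual fix):

```go
package sync

import (
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
)

// electraSubscriptionSlot sketches the fix: when subscriptions are registered
// one epoch before the Electra fork, derive the subnets to join from the
// fork's first slot rather than from the wall-clock "current" slot that
// getSubnetsToSubscribe would otherwise use.
func electraSubscriptionSlot() (primitives.Slot, error) {
	forkEpoch := params.BeaconConfig().ElectraForkEpoch
	return slots.EpochStart(forkEpoch)
}
```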
2024-12-31 08:42:56 +00:00
terence
ead08d56d0 Implement EIP7691: Blob throughput increase (#14750)
* Add EIP-7691 blob throughput increase

* Review feedback

* Factorize blobs subscriptions.

---------

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>
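For context, EIP-7691 raises the mainnet blob target/max from 3/6 (Deneb, EIP-4844) to 6/9 at Electra. A small illustrative sketch of a per-fork blob schedule (type and function names are assumptions, not Prysm's config API):

```go
package params

// blobLimits sketches the per-fork blob schedule; values are the public
// EIP-4844 and EIP-7691 mainnet numbers.
type blobLimits struct {
	Target uint64
	Max    uint64
}

func blobLimitsAtEpoch(epoch, electraForkEpoch uint64) blobLimits {
	if epoch >= electraForkEpoch {
		return blobLimits{Target: 6, Max: 9} // EIP-7691 (Electra)
	}
	return blobLimits{Target: 3, Max: 6} // EIP-4844 (Deneb)
}
```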
2024-12-30 19:23:56 +00:00
Savely
f55e62287a Fix Command Flag Typos and Syntax Issues (#14758)
* Update INTEROP.md

* Update INTEROP.md

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2024-12-30 12:29:43 +00:00
Manu NALEPA
c7b2838873 Re-organize the content of the *.proto files. (#14755)
* Re-organize the content of the `*.proto` files.

The content of the `*.proto` files is sorted by hard fork,
then in a top-down fashion.

Sorting first by hard fork lets the reader easily see new or modified fields.
Then, sorting in a top-down fashion lets the reader first see the big picture,
then dive into the details.

Also, the `new in <hard fork>` mentions are only written for the given hard fork.
This avoids, in the future, the majority of fields not initially
present in phase 0 carrying a `new in <hard fork>` mention.

This commit does not bring any new functional change.

* Update proto/prysm/v1alpha1/attestation.proto

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update proto/prysm/v1alpha1/beacon_state.proto

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Fix Radek's comment.

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2024-12-30 12:11:10 +00:00
539 changed files with 36727 additions and 36894 deletions

View File

@@ -1 +1 @@
7.1.0
7.4.1

View File

@@ -33,5 +33,5 @@ Fixes #
**Acknowledgements**
- [ ] I have read [CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [ ] I have made an appropriate entry to [CHANGELOG.md](https://github.com/prysmaticlabs/prysm/blob/develop/CHANGELOG.md).
- [ ] I have included a uniquely named [changelog fragment file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [ ] I have added a description to this PR with sufficient context for reviewers to understand this PR.

View File

@@ -1,4 +1,4 @@
FROM golang:1.22-alpine
FROM golang:1.23-alpine
COPY entrypoint.sh /entrypoint.sh

View File

@@ -1,33 +1,34 @@
name: CI
# This workflow will build a golang project
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-go
name: changelog
on:
pull_request:
branches:
- develop
branches: [ "develop" ]
jobs:
changed_files:
runs-on: ubuntu-latest
name: Check CHANGELOG.md
run-changelog-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: changelog modified
id: changelog-modified
- name: Checkout source code
uses: actions/checkout@v3
- name: Download unclog binary
uses: dsaltares/fetch-gh-release-asset@aa2ab1243d6e0d5b405b973c89fa4d06a2d0fff7 # 1.1.2
with:
repo: OffchainLabs/unclog
version: "tags/v0.1.3"
file: "unclog"
- name: Get new changelog files
id: new-changelog-files
uses: tj-actions/changed-files@v45
with:
files: CHANGELOG.md
files: |
changelog/**.md
- name: List all changed files
- name: Run lint command
env:
ALL_CHANGED_FILES: ${{ steps.changelog-modified.outputs.all_changed_files }}
run: |
if [[ ${ALL_CHANGED_FILES[*]} =~ (^|[[:space:]])"CHANGELOG.md"($|[[:space:]]) ]];
then
echo "CHANGELOG.md was modified.";
exit 0;
else
echo "CHANGELOG.md was not modified.";
echo "Please see CHANGELOG.md and follow the instructions to add your changes to that file."
echo "In some rare scenarios, a changelog entry is not required and this CI check can be ignored."
exit 1;
fi
ALL_ADDED_MARKDOWN: ${{ steps.new-changelog-files.outputs.added_files }}
run: chmod +x unclog && ./unclog check -fragment-env=ALL_ADDED_MARKDOWN

.github/workflows/clang-format.yml (new file, 21 lines)
View File

@@ -0,0 +1,21 @@
name: Protobuf Format
on:
push:
branches: [ '*' ]
pull_request:
branches: [ '*' ]
merge_group:
types: [checks_requested]
jobs:
clang-format-checking:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
# Is this step failing for you?
# Run: clang-format -i proto/**/*.proto
# See: https://clang.llvm.org/docs/ClangFormat.html
- uses: RafikFarhad/clang-format-github-action@v3
with:
sources: "proto/**/*.proto"

View File

@@ -16,7 +16,7 @@ jobs:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version: '1.22.3'
go-version: '1.23.5'
- id: list
uses: shogo82148/actions-go-fuzz/list@v0
with:
@@ -36,7 +36,7 @@ jobs:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version: '1.22.3'
go-version: '1.23.5'
- uses: shogo82148/actions-go-fuzz/run@v0
with:
packages: ${{ matrix.package }}

View File

@@ -28,10 +28,10 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Go 1.22
- name: Set up Go 1.23
uses: actions/setup-go@v4
with:
go-version: '1.22.6'
go-version: '1.23.5'
- name: Run Gosec Security Scanner
run: | # https://github.com/securego/gosec/issues/469
export PATH=$PATH:$(go env GOPATH)/bin
@@ -45,16 +45,16 @@ jobs:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Go 1.22
- name: Set up Go 1.23
uses: actions/setup-go@v4
with:
go-version: '1.22.6'
go-version: '1.23.5'
id: go
- name: Golangci-lint
uses: golangci/golangci-lint-action@v5
with:
version: v1.56.1
version: v1.63.4
args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number
build:
@@ -64,7 +64,7 @@ jobs:
- name: Set up Go 1.x
uses: actions/setup-go@v4
with:
go-version: '1.22.6'
go-version: '1.23.5'
id: go
- name: Check out code into the Go module directory

View File

@@ -1,28 +1,20 @@
run:
skip-files:
timeout: 10m
go: '1.23.5'
issues:
exclude-files:
- validator/web/site_data.go
- .*_test.go
skip-dirs:
exclude-dirs:
- proto
- tools/analyzers
timeout: 10m
go: '1.22.6'
linters:
enable-all: true
disable:
# Deprecated linters:
- deadcode
- exhaustivestruct
- golint
- govet
- ifshort
- interfacer
- maligned
- nosnakecase
- scopelint
- structcheck
- varcheck
# Disabled for now:
- asasalint
@@ -34,6 +26,8 @@ linters:
- dogsled
- dupl
- durationcheck
- errname
- err113
- exhaustive
- exhaustruct
- forbidigo
@@ -47,17 +41,17 @@ linters:
- gocyclo
- godot
- godox
- goerr113
- gofumpt
- gomnd
- gomoddirectives
- gosec
- inamedparam
- interfacebloat
- intrange
- ireturn
- lll
- maintidx
- makezero
- mnd
- musttag
- nakedret
- nestif
@@ -72,6 +66,7 @@ linters:
- predeclared
- promlinter
- protogetter
- recvcheck
- revive
- spancheck
- staticcheck

View File

@@ -4,44 +4,6 @@ All notable changes to this project will be documented in this file.
The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.
## [Unreleased](https://github.com/prysmaticlabs/prysm/compare/v5.2.0...HEAD)
### Added
- Added proper gas limit check for header from the builder.
- Added an error field to log `Finished building block`.
- Implemented a new `EmptyExecutionPayloadHeader` function.
- `Finished building block`: Display error only if not nil.
- Added support to update target and max blob count to different values per hard fork config.
- Log before blob filesystem cache warm-up.
- New design for the attestation pool. [PR](https://github.com/prysmaticlabs/prysm/pull/14324)
- Add field param placeholder for Electra blob target and max to pass spec tests.
### Changed
- Process light client finality updates only for new finalized epochs instead of doing it for every block.
- Refactor subnets subscriptions.
- Refactor RPC handlers subscriptions.
- Go deps upgrade, from `ioutil` to `io`
- Move successfully registered validator(s) on builder log to debug.
- Update some test files to use `crypto/rand` instead of `math/rand`
- Enforce Compound prefix (0x02) for target when processing pending consolidation request.
- Limit consolidating by validator's effective balance.
- Use 16-bit random value for proposer and sync committee selection filter.
### Deprecated
### Removed
### Fixed
- Added check to prevent nil pointer deference or out of bounds array access when validating the BLSToExecutionChange on an impossibly nil validator.
### Security
## [v5.2.0](https://github.com/prysmaticlabs/prysm/compare/v5.1.2...v5.2.0)
Updating to this release is highly recommended, especially for users running v5.1.1 or v5.1.2.
@@ -98,7 +60,7 @@ Notable features:
- Updated the default `scrape-interval` in `Client-stats` to 2 minutes to accommodate Beaconcha.in API rate limits.
- Switch to compounding when consolidating with source==target.
- Revert block db save when saving state fails.
- Return false from HasBlock if the block is being synced.
- Return false from HasBlock if the block is being synced.
- Cleanup forkchoice on failed insertions.
- Use read only validator for core processing to avoid unnecessary copying.
- Use ROBlock across block processing pipeline.
@@ -111,7 +73,7 @@ Notable features:
- Simplified `EjectedValidatorIndices`.
- `engine_newPayloadV4`,`engine_getPayloadV4` are changes due to new execution request serialization decisions, [PR](https://github.com/prysmaticlabs/prysm/pull/14580)
- Fixed various small things in state-native code.
- Use ROBlock earlier in block syncing pipeline.
- Use ROBlock earlier in block syncing pipeline.
- Changed the signature of `ProcessPayload`.
- Only Build the Protobuf state once during serialization.
- Capella blocks are execution.
@@ -144,7 +106,7 @@ Notable features:
- Removed finalized validator index cache, no longer needed.
- Removed validator queue position log on key reload and wait for activation.
- Removed outdated spectest exclusions for EIP-6110.
- Removed support for starting a beacon node with a deterministic interop genesis state via interop flags. Alteratively, create a genesis state with prysmctl and use `--genesis-state`. This removes about 9Mb (~11%) of unnecessary code and dependencies from the final production binary.
- Removed support for starting a beacon node with a deterministic interop genesis state via interop flags. Alternatively, create a genesis state with prysmctl and use `--genesis-state`. This removes about 9Mb (~11%) of unnecessary code and dependencies from the final production binary.
- Removed kzg proof check from blob reconstructor.
### Fixed
@@ -158,7 +120,7 @@ Notable features:
- Fix `--backfill-oldest-slot` handling - this flag was totally broken, the code would always backfill to the default slot [pr](https://github.com/prysmaticlabs/prysm/pull/14584)
- Fix keymanager API should return corrected error format for malformed tokens
- Fix keymanager API so that get keys returns an empty response instead of a 500 error when using an unsupported keystore.
- Small log imporvement, removing some redundant or duplicate logs
- Small log improvement, removing some redundant or duplicate logs
- EIP7521 - Fixes withdrawal bug by accounting for pending partial withdrawals and deducting already withdrawn amounts from the sweep balance. [PR](https://github.com/prysmaticlabs/prysm/pull/14578)
- unskip electra merkle spec test
- Fix panic in validator REST mode when checking status after removing all keys
@@ -177,9 +139,9 @@ Notable features:
### Security
## [v5.1.2](https://github.com/prysmaticlabs/prysm/compare/v5.1.1...v5.1.2) - 2024-10-16
## [v5.1.2](https://github.com/prysmaticlabs/prysm/compare/v5.1.1...v5.1.2) - 2024-10-16
This is a hotfix release with one change.
This is a hotfix release with one change.
Prysm v5.1.1 contains an updated implementation of the beacon api streaming events endpoint. This
new implementation contains a bug that can cause a panic in certain conditions. The issue is
@@ -188,23 +150,23 @@ meantime we are issuing a patch that recovers from the panic to prevent the node
This only impacts the v5.1.1 release beacon api event stream endpoints. This endpoint is used by the
prysm REST mode validator (a feature which requires the validator to be configured to use the beacon
api intead of prysm's stock grpc endpoints) or accessory software that connects to the events api,
api instead of prysm's stock grpc endpoints) or accessory software that connects to the events api,
like https://github.com/ethpandaops/ethereum-metrics-exporter
### Fixed
### Fixed
- Recover from panics when writing the event stream [#14545](https://github.com/prysmaticlabs/prysm/pull/14545)
## [v5.1.1](https://github.com/prysmaticlabs/prysm/compare/v5.1.0...v5.1.1) - 2024-10-15
This release has a number of features and improvements. Most notably, the feature flag
`--enable-experimental-state` has been flipped to "opt out" via `--disable-experimental-state`.
This release has a number of features and improvements. Most notably, the feature flag
`--enable-experimental-state` has been flipped to "opt out" via `--disable-experimental-state`.
The experimental state management design has shown significant improvements in memory usage at
runtime. Updates to libp2p's gossipsub have some bandwidith stability improvements with support for
IDONTWANT control messages.
IDONTWANT control messages.
The gRPC gateway has been deprecated from Prysm in this release. If you need JSON data, consider the
standardized beacon-APIs.
standardized beacon-APIs.
Updating to this release is recommended at your convenience.
@@ -246,7 +208,7 @@ Updating to this release is recommended at your convenience.
- `grpc-gateway-corsdomain` is renamed to http-cors-domain. The old name can still be used as an alias.
- `api-timeout` is changed from int flag to duration flag, default value updated.
- Light client support: abstracted out the light client headers with different versions.
- `ApplyToEveryValidator` has been changed to prevent misuse bugs, it takes a closure that takes a `ReadOnlyValidator` and returns a raw pointer to a `Validator`.
- `ApplyToEveryValidator` has been changed to prevent misuse bugs, it takes a closure that takes a `ReadOnlyValidator` and returns a raw pointer to a `Validator`.
- Removed gorilla mux library and replaced it with net/http updates in go 1.22.
- Clean up `ProposeBlock` for validator client to reduce cognitive scoring and enable further changes.
- Updated k8s-io/client-go to v0.30.4 and k8s-io/apimachinery to v0.30.4
@@ -257,7 +219,7 @@ Updating to this release is recommended at your convenience.
- Updated Sepolia bootnodes.
- Make committee aware packing the default by deprecating `--enable-committee-aware-packing`.
- Moved `ConvertKzgCommitmentToVersionedHash` to the `primitives` package.
- Updated correlation penalty for EIP-7251.
- Updated correlation penalty for EIP-7251.
### Deprecated
- `--disable-grpc-gateway` flag is deprecated due to grpc gateway removal.
@@ -431,7 +393,7 @@ Updating to this release is recommended at your earliest convenience, especially
- use time.NewTimer() to avoid possible memory leaks
- paranoid underflow protection without error handling
- Fix CommitteeAssignments to not return every validator
- Fix dependent root retrival genesis case
- Fix dependent root retrieval genesis case
- Restrict Dials From Discovery
- Always close cache warm chan to prevent blocking
- Keep only the latest value in the health channel
@@ -583,7 +545,7 @@ block profit. If you want to preserve the existing behavior, set --local-block-v
- handle special case of batch size=1
- Always Set Inprogress Boolean In Cache
- Builder APIs: adding headers to post endpoint
- Rename mispelled variable
- Rename misspelled variable
- allow blob by root within da period
- Rewrite Pruning Implementation To Handle EIP 7045
- Set default fee recipient if tracked val fails
@@ -653,7 +615,7 @@ Known Issues
- Support beacon_committee_selections
- /eth/v1/beacon/deposit_snapshot
- Docker images now have coreutils pre-installed
- da_waited_time_milliseconds tracks total time waiting for data availablity check in ReceiveBlock
- da_waited_time_milliseconds tracks total time waiting for data availability check in ReceiveBlock
- blob_written, blob_disk_count, blob_disk_bytes new metrics for tracking blobs on disk
- Backfill supports blob backfilling
- Add mainnet deneb fork epoch config
@@ -731,34 +693,34 @@ AVX support (eg Celeron) after the Deneb fork. This is not an issue for mainnet.
- Linter: Wastedassign linter enabled to improve code quality.
- API Enhancements:
- Added payload return in Wei for /eth/v3/validator/blocks.
- Added Holesky Deneb Epoch for better epoch management.
- Added payload return in Wei for /eth/v3/validator/blocks.
- Added Holesky Deneb Epoch for better epoch management.
- Testing Enhancements:
- Clear cache in tests of core helpers to ensure test reliability.
- Added Debug State Transition Method for improved debugging.
- Backfilling test: Enabled backfill in E2E tests for more comprehensive coverage.
- Clear cache in tests of core helpers to ensure test reliability.
- Added Debug State Transition Method for improved debugging.
- Backfilling test: Enabled backfill in E2E tests for more comprehensive coverage.
- API Updates: Re-enabled jwt on keymanager API for enhanced security.
- Logging Improvements: Enhanced block by root log for better traceability.
- Validator Client Improvements:
- Added Spans to Core Validator Methods for enhanced monitoring.
- Improved readability in validator client code for better maintenance (various commits).
- Added Spans to Core Validator Methods for enhanced monitoring.
- Improved readability in validator client code for better maintenance (various commits).
### Changed
- Optimizations and Refinements:
- Lowered resource usage in certain processes for efficiency.
- Moved blob rpc validation closer to peer read for optimized processing.
- Cleaned up validate beacon block code for clarity and efficiency.
- Updated Sepolia Deneb fork epoch for alignment with network changes.
- Changed blob latency metrics to milliseconds for more precise measurement.
- Altered getLegacyDatabaseLocation message for better clarity.
- Improved wait for activation method for enhanced performance.
- Capitalized Aggregated Unaggregated Attestations Log for consistency.
- Modified HistoricalRoots usage for accuracy.
- Adjusted checking of attribute emptiness for efficiency.
- Lowered resource usage in certain processes for efficiency.
- Moved blob rpc validation closer to peer read for optimized processing.
- Cleaned up validate beacon block code for clarity and efficiency.
- Updated Sepolia Deneb fork epoch for alignment with network changes.
- Changed blob latency metrics to milliseconds for more precise measurement.
- Altered getLegacyDatabaseLocation message for better clarity.
- Improved wait for activation method for enhanced performance.
- Capitalized Aggregated Unaggregated Attestations Log for consistency.
- Modified HistoricalRoots usage for accuracy.
- Adjusted checking of attribute emptiness for efficiency.
- Database Management:
- Moved --db-backup-output-dir as a deprecated flag for database management simplification.
- Added the Ability to Defragment the Beacon State for improved database performance.
- Moved --db-backup-output-dir as a deprecated flag for database management simplification.
- Added the Ability to Defragment the Beacon State for improved database performance.
- Dependency Update: Bumped quic-go version from 0.39.3 to 0.39.4 for up-to-date dependencies.
### Removed
@@ -769,12 +731,12 @@ AVX support (eg Celeron) after the Deneb fork. This is not an issue for mainnet.
### Fixed
- Bug Fixes:
- Fixed off by one error for improved accuracy.
- Resolved small typo in error messages for clarity.
- Addressed minor issue in blsToExecChange validator for better validation.
- Corrected blobsidecar json tag for commitment inclusion proof.
- Fixed ssz post-requests content type check.
- Resolved issue with port logging in bootnode.
- Fixed off by one error for improved accuracy.
- Resolved small typo in error messages for clarity.
- Addressed minor issue in blsToExecChange validator for better validation.
- Corrected blobsidecar json tag for commitment inclusion proof.
- Fixed ssz post-requests content type check.
- Resolved issue with port logging in bootnode.
- Test Fixes: Re-enabled Slasher E2E Test for more comprehensive testing.
### Security
@@ -828,7 +790,7 @@ and Raspberry Pi users.
- Add Goerli Deneb Fork Epoch
- Use deneb key for deneb state in saveStatesEfficientInternal
- Initialize Inactivity Scores Correctly
- Excluse DA wait time for chain processing time
- Excludes DA wait time for chain processing time
- Initialize sig cache for verification.Initializer
- Verify roblobs
- KZG Commitment inclusion proof verifier
@@ -861,7 +823,7 @@ and Raspberry Pi users.
- Exit early if blob by root request is empty
- Request missing blobs while processing pending queue
- Check blob exists before requesting from peer
- Passing block as arugment for sidecar validation
- Passing block as argument for sidecar validation
#### Blob Management
@@ -1158,13 +1120,13 @@ _Most of the PRs here involve shifting our http endpoints to using vanilla http
- Remove no-op cancel func
- Update Terms of Service
- fix head slot in log
- DEPRECTATION: Remove exchange transition configuration call
- DEPRECATION: Remove exchange transition configuration call
- fix segmentation fork when Capella for epoch is MaxUint64
- Return Error Gracefully When Removing 4881 Flag
- Add zero length check on indices during NextSyncCommitteeIndices
- Replace Empty Slice Literals with Nil Slices
- Refactor Error String Formatting According to Go Best Practices
- Fix redundant type converstion
- Fix redundant type conversion
- docs: fix typo
- Add Clarification To Sync Committee Cache
- Fix typos
@@ -1186,7 +1148,7 @@ small set of users.
### Security
No security issues in thsi release.
No security issues in this release.
## [v4.1.0](https://github.com/prysmaticlabs/prysm/compare/v4.0.8...v4.1.0) - 2023-08-22
@@ -1201,9 +1163,9 @@ No security issues in thsi release.
now features runtime detection, automatically enabling optimized code paths if your CPU supports it.
- **Multiarch Containers Preview Available**: multiarch (:wave: arm64 support :wave:) containers will be offered for
preview at the following locations:
- Beacon Chain: [gcr.io/prylabs-dev/prysm/beacon-chain:v4.1.0](gcr.io/prylabs-dev/prysm/beacon-chain:v4.1.0)
- Validator: [gcr.io/prylabs-dev/prysm/validator:v4.1.0](gcr.io/prylabs-dev/prysm/validator:v4.1.0)
- Please note that in the next cycle, we will exclusively use these containers at the canonical URLs.
- Beacon Chain: [gcr.io/prylabs-dev/prysm/beacon-chain:v4.1.0](gcr.io/prylabs-dev/prysm/beacon-chain:v4.1.0)
- Validator: [gcr.io/prylabs-dev/prysm/validator:v4.1.0](gcr.io/prylabs-dev/prysm/validator:v4.1.0)
- Please note that in the next cycle, we will exclusively use these containers at the canonical URLs.
### Added
@@ -1702,7 +1664,7 @@ notes [here](https://hackmd.io/TtyFurRJRKuklG3n8lMO9Q). This release is **strong
Note: The released docker images are using the portable version of the blst cryptography library. The Prysm team will
release docker images with the non-portable blst library as the default image. In the meantime, you can compile docker
images with blst non-portable locally with the `--define=blst_modern=true` bazel flag, use the "-modern-" assets
attached to releases, or set environment varaible USE_PRYSM_MODERN=true when using prysm.sh.
attached to releases, or set environment variable USE_PRYSM_MODERN=true when using prysm.sh.
### Added
@@ -2051,7 +2013,7 @@ There are some known issues with this release.
- Beacon node can bootstrap from non-genesis state (i.e bellatrix state)
- Refactor bytesutil, add support for go1.20 slice to array conversions
- Add Span information for attestation record save request
- Matric addition
- Metric addition
- Identify invalid signature within batch verification
- Support for getting consensus values from beacon config
- EIP-4881: Spec implementation
@@ -2117,7 +2079,7 @@ See [flashbots/mev-boost#404](https://github.com/flashbots/mev-boost/issues/404)
- Added more histogram metrics for block arrival latency times block_arrival_latency_milliseconds
- Priority queue RetrieveByKey now uses read lock instead of write lock
- Use custom types for certain ethclient requests. Fixes an issue when using prysm on gnosis chain.
- Updted forkchoice endpoint /eth/v1/debug/forkchoice (was /eth/v1/debug/beacon/forkchoice)
- Updated forkchoice endpoint /eth/v1/debug/forkchoice (was /eth/v1/debug/beacon/forkchoice)
- Include empty fields in builder json client.
- Computing committee assignments for slots older than the oldest historical root in the beacon state is now forbidden
@@ -2349,7 +2311,7 @@ There are no security updates in this release.
removed: `GetBeaconState`, `ProduceBlock`, `ListForkChoiceHeads`, `ListBlocks`, `SubmitValidatorRegistration`, `GetBlock`, `ProposeBlock`
- API: Forkchoice method `GetForkChoice` has been removed.
- All previously deprecated feature flags have been
removed. `--enable-active-balance-cache`, `--correctly-prune-canonical-atts`, `--correctly-insert-orphaned-atts`, `--enable-next-slot-state-cache`, `--enable-batch-gossip-verification`, `--enable-get-block-optimizations`, `--enable-balance-trie-computation`, `--disable-next-slot-state-cache`, `--attestation-aggregation-strategy`, `--attestation-aggregation-force-opt-maxcover`, `--pyrmont`, `--disable-get-block-optimizations`, `--disable-proposer-atts-selection-using-max-cover`, `--disable-optimized-balance-update`, `--disable-active-balance-cache`, `--disable-balance-trie-computation`, `--disable-batch-gossip-verification`, `--disable-correctly-prune-canonical-atts`, `--disable-correctly-insert-orphaned-atts`, `--enable-native-state`, `--enable-peer-scorer`, `--enable-gossip-batch-aggregation`, `--experimental-disable-boundry-checks`
removed. `--enable-active-balance-cache`, `--correctly-prune-canonical-atts`, `--correctly-insert-orphaned-atts`, `--enable-next-slot-state-cache`, `--enable-batch-gossip-verification`, `--enable-get-block-optimizations`, `--enable-balance-trie-computation`, `--disable-next-slot-state-cache`, `--attestation-aggregation-strategy`, `--attestation-aggregation-force-opt-maxcover`, `--pyrmont`, `--disable-get-block-optimizations`, `--disable-proposer-atts-selection-using-max-cover`, `--disable-optimized-balance-update`, `--disable-active-balance-cache`, `--disable-balance-trie-computation`, `--disable-batch-gossip-verification`, `--disable-correctly-prune-canonical-atts`, `--disable-correctly-insert-orphaned-atts`, `--enable-native-state`, `--enable-peer-scorer`, `--enable-gossip-batch-aggregation`, `--experimental-disable-boundary-checks`
- Validator Web API: Removed unused ImportAccounts and DeleteAccounts rpc options
### Fixed
@@ -2565,7 +2527,7 @@ There are two known issues with this release:
- Bellatrix support. See [kiln testnet instructions](https://hackmd.io/OqIoTiQvS9KOIataIFksBQ?view)
- Weak subjectivity sync / checkpoint sync. This is an experimental feature and may have unintended side effects for
certain operators serving historical data. See
the [documentation](https://docs.prylabs.network/docs/next/prysm-usage/checkpoint-sync) for more details.
the [documentation](https://docs.prylabs.network/docs/prysm-usage/checkpoint-sync) for more details.
- A faster build of blst for beacon chain on linux amd64. Use the environment variable `USE_PRYSM_MODERN=true` with
prysm.sh, use the "modern" binary, or bazel build with `--define=blst_modern=true`.
- Vectorized sha256. This may have performance improvements with use of the new flag `--enable-vectorized-htr`.
@@ -2746,7 +2708,7 @@ notes [here](https://github.com/prysmaticlabs/prysm-web-ui/releases/tag/v1.0.0)
- Added uint64 overflow protection
- Sync committee pool returns empty slice instead of nil on cache miss
- Improved description of datadir flag
- Simplied web password requirements
- Simplified web password requirements
- Web JWT tokens no longer expire.
- Updated keymanager protos
- Watch and update jwt secret when auth token file updated on disk.
@@ -2757,7 +2719,7 @@ notes [here](https://github.com/prysmaticlabs/prysm-web-ui/releases/tag/v1.0.0)
- Refactor for weak subjectivity sync implementation
- Update naming for Altair previous epoch attester
- Remove duplicate MerkleizeTrieLeaves method.
- Add explict error for validator flag checks on out of bound positions
- Add explicit error for validator flag checks on out of bound positions
- Simplify method to check if the beacon chain client should update the justified epoch value.
- Rename web UI performance endpoint to "summary"
- Refactor powchain service to be more functional
@@ -2783,7 +2745,7 @@ notes [here](https://github.com/prysmaticlabs/prysm-web-ui/releases/tag/v1.0.0)
Upstream go-ethereum is now used with familiar go.mod tooling.
- Removed duplicate aggregation validation p2p pipelines.
- Metrics calculation removed extra condition
- Removed superflous errors from peer scoring parameters registration
- Removed superfluous errors from peer scoring parameters registration
### Fixed
@@ -3025,4 +2987,4 @@ There are no security updates in this release.
# Older than v2.0.0
For changelog history for releases older than v2.0.0, please refer to https://github.com/prysmaticlabs/prysm/releases
For changelog history for releases older than v2.0.0, please refer to https://github.com/prysmaticlabs/prysm/releases

View File

@@ -125,7 +125,7 @@ Navigate to your fork of the repo on GitHub. On the upper left where the current
**16. Add an entry to CHANGELOG.md.**
If your change is user facing, you must include a CHANGELOG.md entry. See the [Maintaining CHANGELOG.md](#maintaining-changelogmd) section for more information.
All PRs must include a changelog fragment file in the `changelog` directory. If your change is not user-facing or should not be mentioned in the changelog for some other reason, you may use the `Ignored` changelog section in your fragment's header to satisfy this requirement without altering the final release changelog. See the [Maintaining CHANGELOG.md](#maintaining-changelogmd) section for more information.
**17. Create a pull request.**
@@ -177,16 +177,10 @@ $ git push myrepo feature-in-progress-branch -f
## Maintaining CHANGELOG.md
This project follows the changelog guidelines from [keepachangelog.com](https://keepachangelog.com/en/1.1.0/).
All PRs with user facing changes should have an entry in the CHANGELOG.md file and the change should be categorized in the appropriate category within the "Unreleased" section. The categories are:
- `Added` for new features.
- `Changed` for changes in existing functionality.
- `Deprecated` for soon-to-be removed features.
- `Removed` for now removed features.
- `Fixed` for any bug fixes.
- `Security` in case of vulnerabilities. Please see the [Security Policy](SECURITY.md) for responsible disclosure before adding a change with this category.
This project follows the changelog guidelines from [keepachangelog.com](https://keepachangelog.com/en/1.1.0/). In order to minimize conflicts and workflow headaches, we chose to implement a changelog management
strategy that uses changelog "fragment" files, managed by our changelog management tool called `unclog`. Each PR must include a new changelog fragment file in the `changelog` directory, as specified by unclog's
[README.md](https://github.com/OffchainLabs/unclog?tab=readme-ov-file#what-is-a-changelog-fragment). As the `unclog` README suggests in the [Best Practices](https://github.com/OffchainLabs/unclog?tab=readme-ov-file#best-practices) section,
the standard naming convention for your PR's fragment file, to avoid conflicting with another fragment file, is `changelog/<github user name>_<PR branch name>.md`.
### Releasing

View File

@@ -4,7 +4,7 @@ This README details how to setup Prysm for interop testing for usage with other
> [!IMPORTANT]
> This guide is likely to be outdated. The Prysm team does not have capacity to troubleshoot
> outdated interop guides or instructions. If you experience issues with this guide, please file and
> outdated interop guides or instructions. If you experience issues with this guide, please file an
> issue for visibility and propose fixes, if possible.
## Installation & Setup
@@ -74,19 +74,19 @@ bazel run //cmd/beacon-chain --config=minimal -- \
This will start the system with 256 validators. The flags used can be explained as such:
- `bazel run //cmd/beacon-chain --config=minimal` builds and runs the beacon node in minimal build configuration.
- `--` is a flag divider to distingish between bazel flags and flags that should be passed to the application. All flags and arguments after this divider are passed to the beacon chain.
- `--` is a flag divider to distinguish between bazel flags and flags that should be passed to the application. All flags and arguments after this divider are passed to the beacon chain.
- `--minimal-config` tells the beacon node to use minimal network configuration. This is different from the compile time state configuration flag `--config=minimal` and both are required.
- `--bootstrap-node=` disables the default bootstrap nodes. This prevents the client from attempting to peer with mainnet nodes.
- `--datadir=/tmp/beacon-chain-minimal-devnet` sets the data directory in a temporary location. Change this to your preferred destination.
- `--force-clear-db` will delete the beaconchain.db file without confirming with the user. This is helpful for iteratively running local devnets without changing the datadir, but less helpful for one off runs where there was no database in the data directory.
- `--min-sync-peers=0` allows the beacon node to skip initial sync without peers. This is essential because Prysm expects at least a few peers to start start the blockchain.
- `--min-sync-peers=0` allows the beacon node to skip initial sync without peers. This is essential because Prysm expects at least a few peers to start the blockchain.
- `--genesis-state=/tmp/genesis.ssz` defines the path to the generated genesis ssz file. The beacon node will use this as the initial genesis state.
- `--chain-config-file=/tmp/minimal.yaml` defines the path to the yaml file with the chain configuration.
As soon as the beacon node has started, start the validator in the other terminal window.
```
bazel run //cmd/validator --config=minimal -- --datadir=/tmp/validator --interopt-num-validators=256 --minimal-config --suggested-fee-recipient=0x8A04d14125D0FDCDc742F4A05C051De07232EDa4
bazel run //cmd/validator --config=minimal -- --datadir=/tmp/validator --interop-num-validators=256 --minimal-config --suggested-fee-recipient=0x8A04d14125D0FDCDc742F4A05C051De07232EDa4
```
This will launch and kickstart the system with your 256 validators performing their duties accordingly.

1689
MODULE.bazel.lock generated

File diff suppressed because it is too large

View File

@@ -16,6 +16,34 @@ load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies")
rules_pkg_dependencies()
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
http_archive(
name = "toolchains_protoc",
sha256 = "abb1540f8a9e045422730670ebb2f25b41fa56ca5a7cf795175a110a0a68f4ad",
strip_prefix = "toolchains_protoc-0.3.6",
url = "https://github.com/aspect-build/toolchains_protoc/releases/download/v0.3.6/toolchains_protoc-v0.3.6.tar.gz",
)
load("@toolchains_protoc//protoc:repositories.bzl", "rules_protoc_dependencies")
rules_protoc_dependencies()
load("@rules_proto//proto:repositories.bzl", "rules_proto_dependencies")
rules_proto_dependencies()
load("@bazel_features//:deps.bzl", "bazel_features_deps")
bazel_features_deps()
load("@toolchains_protoc//protoc:toolchain.bzl", "protoc_toolchains")
protoc_toolchains(
name = "protoc_toolchains",
version = "v25.3",
)
HERMETIC_CC_TOOLCHAIN_VERSION = "v3.0.1"
http_archive(
@@ -137,10 +165,10 @@ http_archive(
# Expose internals of go_test for custom build transitions.
"//third_party:io_bazel_rules_go_test.patch",
],
sha256 = "80a98277ad1311dacd837f9b16db62887702e9f1d1c4c9f796d0121a46c8e184",
sha256 = "b2038e2de2cace18f032249cb4bb0048abf583a36369fa98f687af1b3f880b26",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.46.0/rules_go-v0.46.0.zip",
"https://github.com/bazelbuild/rules_go/releases/download/v0.46.0/rules_go-v0.46.0.zip",
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.48.1/rules_go-v0.48.1.zip",
"https://github.com/bazelbuild/rules_go/releases/download/v0.48.1/rules_go-v0.48.1.zip",
],
)
@@ -182,7 +210,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
go_rules_dependencies()
go_register_toolchains(
go_version = "1.22.4",
go_version = "1.23.5",
nogo = "@//:nogo",
)
@@ -227,7 +255,7 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.5.0-alpha.10"
consensus_spec_version = "v1.5.0-beta.1"
bls_test_version = "v0.1.1"
@@ -243,7 +271,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-NtWIhbO/mVMb1edq5jqABL0o8R1tNFiuG8PCMAsUHcs=",
integrity = "sha256-R6r60geCfEjMaB1Ag3svaMFXFIgaJvkTJhfKsf76rFE=",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)
@@ -259,7 +287,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-DFlFlnzls1bBrDm+/xD8NK2ivvkhxR+rSNVLLqScVKc=",
integrity = "sha256-2Pem2gMHxW/6bBhZ2BaqkQruQSd/dTS3WMaMQO8rZ/o=",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)
@@ -275,7 +303,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-G9ENPF8udZL/BqRHbi60GhFPnZDPZAH6UjcjRiOlvbk=",
integrity = "sha256-5yP05JTV1MhcUZ2kSh+T+kXjG+uW3A5877veC5c1mD4=",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)
@@ -290,7 +318,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-ClOLKkmAcEi8/uKi6LDeqthask5+E3sgxVoA0bqmQ0c=",
integrity = "sha256-O6Rg6h19T0RsJs0sBDZ9O1k4LnCJ/gu2ilHijFBVfME=",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)

View File

@@ -13,6 +13,7 @@ go_library(
deps = [
"//api:go_default_library",
"//api/client:go_default_library",
"//api/server:go_default_library",
"//api/server/structs:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
@@ -27,6 +28,7 @@ go_library(
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",

View File

@@ -1,12 +1,12 @@
package builder
import (
"github.com/pkg/errors"
ssz "github.com/prysmaticlabs/fastssz"
consensus_types "github.com/prysmaticlabs/prysm/v5/consensus-types"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
v1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
)
@@ -22,7 +22,6 @@ type SignedBid interface {
// Bid is an interface describing the method set of a builder bid.
type Bid interface {
Header() (interfaces.ExecutionData, error)
BlobKzgCommitments() ([][]byte, error)
Value() primitives.Wei
Pubkey() []byte
Version() int
@@ -31,6 +30,18 @@ type Bid interface {
HashTreeRootWith(hh *ssz.Hasher) error
}
// BidDeneb is an interface that exposes newly added kzg commitments on top of builder bid
type BidDeneb interface {
Bid
BlobKzgCommitments() [][]byte
}
// BidElectra is an interface that exposes the newly added execution requests on top of the builder bid
type BidElectra interface {
BidDeneb
ExecutionRequests() *v1.ExecutionRequests
}
type signedBuilderBid struct {
p *ethpb.SignedBuilderBid
}
@@ -115,11 +126,6 @@ func (b builderBid) Header() (interfaces.ExecutionData, error) {
return blocks.WrappedExecutionPayloadHeader(b.p.Header)
}
// BlobKzgCommitments --
func (b builderBid) BlobKzgCommitments() ([][]byte, error) {
return [][]byte{}, errors.New("blob kzg commitments not available before Deneb")
}
// Version --
func (b builderBid) Version() int {
return version.Bellatrix
@@ -169,11 +175,6 @@ func (b builderBidCapella) Header() (interfaces.ExecutionData, error) {
return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header)
}
// BlobKzgCommitments --
func (b builderBidCapella) BlobKzgCommitments() ([][]byte, error) {
return [][]byte{}, errors.New("blob kzg commitments not available before Deneb")
}
// Version --
func (b builderBidCapella) Version() int {
return version.Capella
@@ -254,8 +255,8 @@ func (b builderBidDeneb) Header() (interfaces.ExecutionData, error) {
}
// BlobKzgCommitments --
func (b builderBidDeneb) BlobKzgCommitments() ([][]byte, error) {
return b.p.BlobKzgCommitments, nil
func (b builderBidDeneb) BlobKzgCommitments() [][]byte {
return b.p.BlobKzgCommitments
}
type signedBuilderBidDeneb struct {
@@ -290,3 +291,95 @@ func (b signedBuilderBidDeneb) Version() int {
func (b signedBuilderBidDeneb) IsNil() bool {
return b.p == nil
}
type builderBidElectra struct {
p *ethpb.BuilderBidElectra
}
// WrappedBuilderBidElectra is a constructor which wraps a protobuf bid into an interface.
func WrappedBuilderBidElectra(p *ethpb.BuilderBidElectra) (Bid, error) {
w := builderBidElectra{p: p}
if w.IsNil() {
return nil, consensus_types.ErrNilObjectWrapped
}
return w, nil
}
// Version --
func (b builderBidElectra) Version() int {
return version.Electra
}
// Value --
func (b builderBidElectra) Value() primitives.Wei {
return primitives.LittleEndianBytesToWei(b.p.Value)
}
// Pubkey --
func (b builderBidElectra) Pubkey() []byte {
return b.p.Pubkey
}
// IsNil --
func (b builderBidElectra) IsNil() bool {
return b.p == nil
}
// HashTreeRoot --
func (b builderBidElectra) HashTreeRoot() ([32]byte, error) {
return b.p.HashTreeRoot()
}
// HashTreeRootWith --
func (b builderBidElectra) HashTreeRootWith(hh *ssz.Hasher) error {
return b.p.HashTreeRootWith(hh)
}
// Header --
func (b builderBidElectra) Header() (interfaces.ExecutionData, error) {
// We have to convert big endian to little endian because the value is coming from the execution layer.
return blocks.WrappedExecutionPayloadHeaderDeneb(b.p.Header)
}
// ExecutionRequests --
func (b builderBidElectra) ExecutionRequests() *v1.ExecutionRequests {
return b.p.ExecutionRequests // does not copy
}
// BlobKzgCommitments --
func (b builderBidElectra) BlobKzgCommitments() [][]byte {
return b.p.BlobKzgCommitments
}
type signedBuilderBidElectra struct {
p *ethpb.SignedBuilderBidElectra
}
// WrappedSignedBuilderBidElectra is a constructor which wraps a protobuf signed bid into an interface.
func WrappedSignedBuilderBidElectra(p *ethpb.SignedBuilderBidElectra) (SignedBid, error) {
w := signedBuilderBidElectra{p: p}
if w.IsNil() {
return nil, consensus_types.ErrNilObjectWrapped
}
return w, nil
}
// Message --
func (b signedBuilderBidElectra) Message() (Bid, error) {
return WrappedBuilderBidElectra(b.p.Message)
}
// Signature --
func (b signedBuilderBidElectra) Signature() []byte {
return b.p.Signature
}
// Version --
func (b signedBuilderBidElectra) Version() int {
return version.Electra
}
// IsNil --
func (b signedBuilderBidElectra) IsNil() bool {
return b.p == nil
}
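For illustration, a minimal sketch of how downstream code might consume the new `BidDeneb` and `BidElectra` interfaces via type assertions. The package name `bidinspect` and the helper `inspectBid` are hypothetical; only methods shown in the diff (`Version`, `Value`, `BlobKzgCommitments`, `ExecutionRequests`) are assumed.

```go
package bidinspect

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/api/client/builder"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/runtime/version"
)

// inspectBid reports fork-specific bid fields by asserting against the
// interfaces introduced above instead of switching on concrete wrapper types.
func inspectBid(bid builder.Bid) error {
	fmt.Printf("version=%s value=%s\n",
		version.String(bid.Version()), primitives.WeiToBigInt(bid.Value()).String())

	// Deneb and later expose KZG commitments directly, without an error return.
	if db, ok := bid.(builder.BidDeneb); ok {
		fmt.Printf("kzg commitments: %d\n", len(db.BlobKzgCommitments()))
	}

	// Electra bids additionally carry execution requests.
	if eb, ok := bid.(builder.BidElectra); ok {
		reqs := eb.ExecutionRequests()
		if reqs == nil {
			return fmt.Errorf("electra bid is missing execution requests")
		}
		fmt.Printf("deposits=%d withdrawals=%d consolidations=%d\n",
			len(reqs.Deposits), len(reqs.Withdrawals), len(reqs.Consolidations))
	}
	return nil
}
```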

View File

@@ -219,8 +219,23 @@ func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash
if err := json.Unmarshal(hb, v); err != nil {
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
}
switch strings.ToLower(v.Version) {
case strings.ToLower(version.String(version.Deneb)):
ver, err := version.FromString(strings.ToLower(v.Version))
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("unsupported header version %s", strings.ToLower(v.Version)))
}
if ver >= version.Electra {
hr := &ExecHeaderResponseElectra{}
if err := json.Unmarshal(hb, hr); err != nil {
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
}
p, err := hr.ToProto()
if err != nil {
return nil, errors.Wrapf(err, "could not extract proto message from header")
}
return WrappedSignedBuilderBidElectra(p)
}
if ver >= version.Deneb {
hr := &ExecHeaderResponseDeneb{}
if err := json.Unmarshal(hb, hr); err != nil {
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
@@ -230,7 +245,8 @@ func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash
return nil, errors.Wrapf(err, "could not extract proto message from header")
}
return WrappedSignedBuilderBidDeneb(p)
case strings.ToLower(version.String(version.Capella)):
}
if ver >= version.Capella {
hr := &ExecHeaderResponseCapella{}
if err := json.Unmarshal(hb, hr); err != nil {
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
@@ -240,7 +256,8 @@ func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash
return nil, errors.Wrapf(err, "could not extract proto message from header")
}
return WrappedSignedBuilderBidCapella(p)
case strings.ToLower(version.String(version.Bellatrix)):
}
if ver >= version.Bellatrix {
hr := &ExecHeaderResponse{}
if err := json.Unmarshal(hb, hr); err != nil {
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
@@ -250,9 +267,8 @@ func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash
return nil, errors.Wrap(err, "could not extract proto message from header")
}
return WrappedSignedBuilderBid(p)
default:
return nil, fmt.Errorf("unsupported header version %s", strings.ToLower(v.Version))
}
return nil, fmt.Errorf("unsupported header version %s", strings.ToLower(v.Version))
}
// RegisterValidator encodes the SignedValidatorRegistrationV1 message to json (including hex-encoding the byte
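For illustration, a standalone sketch of the newest-first numeric dispatch that replaces the old string `switch` in `GetHeader`. The envelope type and function names here are hypothetical; only `version.FromString` and the fork constants used in the diff are assumed.

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"

	"github.com/prysmaticlabs/prysm/v5/runtime/version"
)

type versionedEnvelope struct {
	Version string `json:"version"`
}

// forkOf converts the response's version string into the ordered fork enum,
// so callers can compare with >= instead of matching exact strings per fork.
func forkOf(raw []byte) (int, error) {
	env := &versionedEnvelope{}
	if err := json.Unmarshal(raw, env); err != nil {
		return 0, err
	}
	return version.FromString(strings.ToLower(env.Version))
}

func main() {
	v, err := forkOf([]byte(`{"version":"electra"}`))
	if err != nil {
		fmt.Println("unsupported header version:", err)
		return
	}
	switch {
	case v >= version.Electra:
		fmt.Println("decode ExecHeaderResponseElectra")
	case v >= version.Deneb:
		fmt.Println("decode ExecHeaderResponseDeneb")
	case v >= version.Capella:
		fmt.Println("decode ExecHeaderResponseCapella")
	case v >= version.Bellatrix:
		fmt.Println("decode ExecHeaderResponse")
	default:
		fmt.Println("unsupported header version", v)
	}
}
```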

View File

@@ -16,7 +16,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
types "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
v1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -124,7 +123,7 @@ func TestClient_RegisterValidator(t *testing.T) {
func TestClient_GetHeader(t *testing.T) {
ctx := context.Background()
expectedPath := "/eth/v1/builder/header/23/0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2/0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
var slot types.Slot = 23
var slot primitives.Slot = 23
parentHash := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
pubkey := ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
t.Run("server error", func(t *testing.T) {
@@ -267,9 +266,9 @@ func TestClient_GetHeader(t *testing.T) {
require.NoError(t, err)
require.Equal(t, 0, value.Int.Cmp(primitives.WeiToBigInt(bid.Value())))
require.Equal(t, bidStr, primitives.WeiToBigInt(bid.Value()).String())
kcgCommitments, err := bid.BlobKzgCommitments()
require.NoError(t, err)
dbid, ok := bid.(builderBidDeneb)
require.Equal(t, true, ok)
kcgCommitments := dbid.BlobKzgCommitments()
require.Equal(t, len(kcgCommitments) > 0, true)
for i := range kcgCommitments {
require.Equal(t, len(kcgCommitments[i]) == 48, true)
@@ -293,6 +292,50 @@ func TestClient_GetHeader(t *testing.T) {
_, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
require.ErrorContains(t, "could not extract proto message from header: too many blob commitments: 7", err)
})
t.Run("electra", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, expectedPath, r.URL.Path)
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleHeaderResponseElectra)),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
h, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
require.NoError(t, err)
expectedWithdrawalsRoot := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
bid, err := h.Message()
require.NoError(t, err)
bidHeader, err := bid.Header()
require.NoError(t, err)
withdrawalsRoot, err := bidHeader.WithdrawalsRoot()
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(expectedWithdrawalsRoot, withdrawalsRoot))
bidStr := "652312848583266388373324160190187140051835877600158453279131187530910662656"
value, err := stringToUint256(bidStr)
require.NoError(t, err)
require.Equal(t, 0, value.Int.Cmp(primitives.WeiToBigInt(bid.Value())))
require.Equal(t, bidStr, primitives.WeiToBigInt(bid.Value()).String())
ebid, ok := bid.(builderBidElectra)
require.Equal(t, true, ok)
kcgCommitments := ebid.BlobKzgCommitments()
require.Equal(t, len(kcgCommitments) > 0, true)
for i := range kcgCommitments {
require.Equal(t, len(kcgCommitments[i]) == 48, true)
}
requests := ebid.ExecutionRequests()
require.Equal(t, 1, len(requests.Deposits))
require.Equal(t, 1, len(requests.Withdrawals))
require.Equal(t, 1, len(requests.Consolidations))
})
t.Run("unsupported version", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
@@ -370,7 +413,7 @@ func TestSubmitBlindedBlock(t *testing.T) {
require.NoError(t, err)
require.Equal(t, 1, len(withdrawals))
assert.Equal(t, uint64(1), withdrawals[0].Index)
assert.Equal(t, types.ValidatorIndex(1), withdrawals[0].ValidatorIndex)
assert.Equal(t, primitives.ValidatorIndex(1), withdrawals[0].ValidatorIndex)
assert.DeepEqual(t, ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943"), withdrawals[0].Address)
assert.Equal(t, uint64(1), withdrawals[0].Amount)
})
@@ -409,7 +452,7 @@ func TestSubmitBlindedBlock(t *testing.T) {
require.NoError(t, err)
require.Equal(t, 1, len(withdrawals))
assert.Equal(t, uint64(1), withdrawals[0].Index)
assert.Equal(t, types.ValidatorIndex(1), withdrawals[0].ValidatorIndex)
assert.Equal(t, primitives.ValidatorIndex(1), withdrawals[0].ValidatorIndex)
assert.DeepEqual(t, ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943"), withdrawals[0].Address)
assert.Equal(t, uint64(1), withdrawals[0].Amount)
require.NotNil(t, blobBundle)

View File

@@ -5,13 +5,15 @@ import (
"fmt"
"math/big"
"strconv"
"strings"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/api/server"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/math"
@@ -414,54 +416,10 @@ func FromProtoDeneb(payload *v1.ExecutionPayloadDeneb) (ExecutionPayloadDeneb, e
}, nil
}
var errInvalidTypeConversion = errors.New("unable to translate between api and foreign type")
// ExecutionPayloadResponseFromData converts an ExecutionData interface value to a payload response.
// This involves serializing the execution payload value so that the abstract payload envelope can be used.
func ExecutionPayloadResponseFromData(ed interfaces.ExecutionData, bundle *v1.BlobsBundle) (*ExecutionPayloadResponse, error) {
pb := ed.Proto()
var data interface{}
var err error
var ver string
switch pbStruct := pb.(type) {
case *v1.ExecutionPayload:
ver = version.String(version.Bellatrix)
data, err = FromProto(pbStruct)
if err != nil {
return nil, errors.Wrap(err, "failed to convert a Bellatrix ExecutionPayload to an API response")
}
case *v1.ExecutionPayloadCapella:
ver = version.String(version.Capella)
data, err = FromProtoCapella(pbStruct)
if err != nil {
return nil, errors.Wrap(err, "failed to convert a Capella ExecutionPayload to an API response")
}
case *v1.ExecutionPayloadDeneb:
ver = version.String(version.Deneb)
payloadStruct, err := FromProtoDeneb(pbStruct)
if err != nil {
return nil, errors.Wrap(err, "failed to convert a Deneb ExecutionPayload to an API response")
}
data = &ExecutionPayloadDenebAndBlobsBundle{
ExecutionPayload: &payloadStruct,
BlobsBundle: FromBundleProto(bundle),
}
default:
return nil, errInvalidTypeConversion
}
encoded, err := json.Marshal(data)
if err != nil {
return nil, errors.Wrapf(err, "failed to marshal execution payload version=%s", ver)
}
return &ExecutionPayloadResponse{
Version: ver,
Data: encoded,
}, nil
}
// ExecHeaderResponseCapella is the response of builder API /eth/v1/builder/header/{slot}/{parent_hash}/{pubkey} for Capella.
type ExecHeaderResponseCapella struct {
Data struct {
Version string `json:"version"`
Data struct {
Signature hexutil.Bytes `json:"signature"`
Message *BuilderBidCapella `json:"message"`
} `json:"data"`
@@ -605,17 +563,25 @@ type BlobBundler interface {
BundleProto() (*v1.BlobsBundle, error)
}
// ParsedExecutionRequests can retrieve the underlying execution requests for the given execution payload response.
type ParsedExecutionRequests interface {
ExecutionRequestsProto() (*v1.ExecutionRequests, error)
}
func (r *ExecutionPayloadResponse) ParsePayload() (ParsedPayload, error) {
var toProto ParsedPayload
switch r.Version {
case version.String(version.Bellatrix):
toProto = &ExecutionPayload{}
case version.String(version.Capella):
toProto = &ExecutionPayloadCapella{}
case version.String(version.Deneb):
v, err := version.FromString(strings.ToLower(r.Version))
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("unsupported version %s", strings.ToLower(r.Version)))
}
if v >= version.Deneb {
toProto = &ExecutionPayloadDenebAndBlobsBundle{}
default:
return nil, consensusblocks.ErrUnsupportedVersion
} else if v >= version.Capella {
toProto = &ExecutionPayloadCapella{}
} else if v >= version.Bellatrix {
toProto = &ExecutionPayload{}
} else {
return nil, fmt.Errorf("unsupported version %s", strings.ToLower(r.Version))
}
if err := json.Unmarshal(r.Data, toProto); err != nil {
@@ -990,7 +956,8 @@ func (ch *BLSToExecutionChange) MarshalJSON() ([]byte, error) {
// ExecHeaderResponseDeneb is the header response for builder API /eth/v1/builder/header/{slot}/{parent_hash}/{pubkey}.
type ExecHeaderResponseDeneb struct {
Data struct {
Version string `json:"version"`
Data struct {
Signature hexutil.Bytes `json:"signature"`
Message *BuilderBidDeneb `json:"message"`
} `json:"data"`
@@ -1307,6 +1274,208 @@ func (p *ExecutionPayloadDeneb) ToProto() (*v1.ExecutionPayloadDeneb, error) {
}, nil
}
// ExecHeaderResponseElectra is the header response for builder API /eth/v1/builder/header/{slot}/{parent_hash}/{pubkey}.
type ExecHeaderResponseElectra struct {
Version string `json:"version"`
Data struct {
Signature hexutil.Bytes `json:"signature"`
Message *BuilderBidElectra `json:"message"`
} `json:"data"`
}
// ToProto creates a SignedBuilderBidElectra Proto from ExecHeaderResponseElectra.
func (ehr *ExecHeaderResponseElectra) ToProto() (*eth.SignedBuilderBidElectra, error) {
bb, err := ehr.Data.Message.ToProto()
if err != nil {
return nil, err
}
return &eth.SignedBuilderBidElectra{
Message: bb,
Signature: bytesutil.SafeCopyBytes(ehr.Data.Signature),
}, nil
}
// ToProto creates a BuilderBidElectra Proto from BuilderBidElectra.
func (bb *BuilderBidElectra) ToProto() (*eth.BuilderBidElectra, error) {
header, err := bb.Header.ToProto()
if err != nil {
return nil, err
}
if len(bb.BlobKzgCommitments) > params.BeaconConfig().MaxBlobsPerBlockByVersion(version.Electra) {
return nil, fmt.Errorf("blob commitment count %d exceeds the maximum %d", len(bb.BlobKzgCommitments), params.BeaconConfig().MaxBlobsPerBlockByVersion(version.Electra))
}
kzgCommitments := make([][]byte, len(bb.BlobKzgCommitments))
for i, commit := range bb.BlobKzgCommitments {
if len(commit) != fieldparams.BLSPubkeyLength {
return nil, fmt.Errorf("commitment length %d is not %d", len(commit), fieldparams.BLSPubkeyLength)
}
kzgCommitments[i] = bytesutil.SafeCopyBytes(commit)
}
// Post-Electra execution requests must not be nil; reject bids that omit them.
if bb.ExecutionRequests == nil {
return nil, errors.New("bid contains nil execution requests")
}
executionRequests, err := bb.ExecutionRequests.ToProto()
if err != nil {
return nil, errors.Wrap(err, "failed to convert ExecutionRequests")
}
return &eth.BuilderBidElectra{
Header: header,
BlobKzgCommitments: kzgCommitments,
ExecutionRequests: executionRequests,
// Note that SSZBytes() reverses byte order for the little-endian representation.
// Uint256.Bytes() is big-endian, SSZBytes takes this value and reverses it.
Value: bytesutil.SafeCopyBytes(bb.Value.SSZBytes()),
Pubkey: bytesutil.SafeCopyBytes(bb.Pubkey),
}, nil
}
// ExecutionRequestsV1 is a wrapper for different execution requests
type ExecutionRequestsV1 struct {
Deposits []*DepositRequestV1 `json:"deposits"`
Withdrawals []*WithdrawalRequestV1 `json:"withdrawals"`
Consolidations []*ConsolidationRequestV1 `json:"consolidations"`
}
func (er *ExecutionRequestsV1) ToProto() (*v1.ExecutionRequests, error) {
if uint64(len(er.Deposits)) > params.BeaconConfig().MaxDepositRequestsPerPayload {
return nil, fmt.Errorf("deposit requests count %d exceeds the maximum %d", len(er.Deposits), params.BeaconConfig().MaxDepositRequestsPerPayload)
}
deposits := make([]*v1.DepositRequest, len(er.Deposits))
for i, dep := range er.Deposits {
d, err := dep.ToProto()
if err != nil {
return nil, err
}
deposits[i] = d
}
if uint64(len(er.Withdrawals)) > params.BeaconConfig().MaxWithdrawalRequestsPerPayload {
return nil, fmt.Errorf("withdrawal requests count %d exceeds the maximum %d", len(er.Withdrawals), params.BeaconConfig().MaxWithdrawalRequestsPerPayload)
}
withdrawals := make([]*v1.WithdrawalRequest, len(er.Withdrawals))
for i, wr := range er.Withdrawals {
w, err := wr.ToProto()
if err != nil {
return nil, err
}
withdrawals[i] = w
}
if uint64(len(er.Consolidations)) > params.BeaconConfig().MaxConsolidationsRequestsPerPayload {
return nil, fmt.Errorf("consolidation requests count %d exceeds the maximum %d", len(er.Consolidations), params.BeaconConfig().MaxConsolidationsRequestsPerPayload)
}
consolidations := make([]*v1.ConsolidationRequest, len(er.Consolidations))
for i, con := range er.Consolidations {
c, err := con.ToProto()
if err != nil {
return nil, err
}
consolidations[i] = c
}
return &v1.ExecutionRequests{
Deposits: deposits,
Withdrawals: withdrawals,
Consolidations: consolidations,
}, nil
}
// BuilderBidElectra is a field of ExecHeaderResponseElectra.
type BuilderBidElectra struct {
Header *ExecutionPayloadHeaderDeneb `json:"header"`
BlobKzgCommitments []hexutil.Bytes `json:"blob_kzg_commitments"`
ExecutionRequests *ExecutionRequestsV1 `json:"execution_requests"`
Value Uint256 `json:"value"`
Pubkey hexutil.Bytes `json:"pubkey"`
}
// WithdrawalRequestV1 is a field of ExecutionRequestsV1.
type WithdrawalRequestV1 struct {
SourceAddress hexutil.Bytes `json:"source_address"`
ValidatorPubkey hexutil.Bytes `json:"validator_pubkey"`
Amount Uint256 `json:"amount"`
}
func (wr *WithdrawalRequestV1) ToProto() (*v1.WithdrawalRequest, error) {
srcAddress, err := bytesutil.DecodeHexWithLength(wr.SourceAddress.String(), common.AddressLength)
if err != nil {
return nil, server.NewDecodeError(err, "source_address")
}
pubkey, err := bytesutil.DecodeHexWithLength(wr.ValidatorPubkey.String(), fieldparams.BLSPubkeyLength)
if err != nil {
return nil, server.NewDecodeError(err, "validator_pubkey")
}
return &v1.WithdrawalRequest{
SourceAddress: srcAddress,
ValidatorPubkey: pubkey,
Amount: wr.Amount.Uint64(),
}, nil
}
// DepositRequestV1 is a field of ExecutionRequestsV1.
type DepositRequestV1 struct {
PubKey hexutil.Bytes `json:"pubkey"`
// withdrawalCredentials: DATA, 32 Bytes
WithdrawalCredentials hexutil.Bytes `json:"withdrawal_credentials"`
// amount: QUANTITY, 64 Bits
Amount Uint256 `json:"amount"`
// signature: DATA, 96 Bytes
Signature hexutil.Bytes `json:"signature"`
// index: QUANTITY, 64 Bits
Index Uint256 `json:"index"`
}
func (dr *DepositRequestV1) ToProto() (*v1.DepositRequest, error) {
pubkey, err := bytesutil.DecodeHexWithLength(dr.PubKey.String(), fieldparams.BLSPubkeyLength)
if err != nil {
return nil, server.NewDecodeError(err, "pubkey")
}
wc, err := bytesutil.DecodeHexWithLength(dr.WithdrawalCredentials.String(), fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "withdrawal_credentials")
}
sig, err := bytesutil.DecodeHexWithLength(dr.Signature.String(), fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, "signature")
}
return &v1.DepositRequest{
Pubkey: pubkey,
WithdrawalCredentials: wc,
Amount: dr.Amount.Uint64(),
Signature: sig,
Index: dr.Index.Uint64(),
}, nil
}
// ConsolidationRequestV1 is a field of ExecutionRequestsV1.
type ConsolidationRequestV1 struct {
// sourceAddress: DATA, 20 Bytes
SourceAddress hexutil.Bytes `json:"source_address"`
// sourcePubkey: DATA, 48 Bytes
SourcePubkey hexutil.Bytes `json:"source_pubkey"`
// targetPubkey: DATA, 48 Bytes
TargetPubkey hexutil.Bytes `json:"target_pubkey"`
}
func (cr *ConsolidationRequestV1) ToProto() (*v1.ConsolidationRequest, error) {
srcAddress, err := bytesutil.DecodeHexWithLength(cr.SourceAddress.String(), common.AddressLength)
if err != nil {
return nil, server.NewDecodeError(err, "source_address")
}
sourcePubkey, err := bytesutil.DecodeHexWithLength(cr.SourcePubkey.String(), fieldparams.BLSPubkeyLength)
if err != nil {
return nil, server.NewDecodeError(err, "source_pubkey")
}
targetPubkey, err := bytesutil.DecodeHexWithLength(cr.TargetPubkey.String(), fieldparams.BLSPubkeyLength)
if err != nil {
return nil, server.NewDecodeError(err, "target_pubkey")
}
return &v1.ConsolidationRequest{
SourceAddress: srcAddress,
SourcePubkey: sourcePubkey,
TargetPubkey: targetPubkey,
}, nil
}
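For illustration, a minimal sketch of decoding a builder `execution_requests` object through `ExecutionRequestsV1` and converting it with `ToProto`. The `main` wrapper and the toy JSON are hypothetical; the limit and length checks rely on the default `params.BeaconConfig()` values being available, which is an assumption since configuration setup is outside this diff.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/prysmaticlabs/prysm/v5/api/client/builder"
)

func main() {
	raw := []byte(`{
	  "deposits": [],
	  "withdrawals": [{
	    "source_address": "0xabcf8e0d4e9587369b2301d0790347320302cc09",
	    "validator_pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a",
	    "amount": "1"
	  }],
	  "consolidations": []
	}`)

	reqs := &builder.ExecutionRequestsV1{}
	if err := json.Unmarshal(raw, reqs); err != nil {
		log.Fatal(err)
	}
	// ToProto enforces the per-payload maxima and the fixed field lengths
	// (20-byte addresses, 48-byte pubkeys) before building the proto message.
	pb, err := reqs.ToProto()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("withdrawal requests: %d, amount: %d\n", len(pb.Withdrawals), pb.Withdrawals[0].Amount)
}
```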
// ErrorMessage is a JSON representation of the builder API's returned error message.
type ErrorMessage struct {
Code int `json:"code"`

View File

@@ -154,6 +154,64 @@ var testExampleHeaderResponseDeneb = `{
}
}`
var testExampleHeaderResponseElectra = `{
"version": "electra",
"data": {
"message": {
"header": {
"parent_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"fee_recipient": "0xabcf8e0d4e9587369b2301d0790347320302cc09",
"state_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"receipts_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"logs_bloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"prev_randao": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"block_number": "1",
"gas_limit": "1",
"gas_used": "1",
"timestamp": "1",
"extra_data": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"base_fee_per_gas": "1",
"blob_gas_used": "1",
"excess_blob_gas": "1",
"block_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"transactions_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"withdrawals_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
},
"blob_kzg_commitments": [
"0xa94170080872584e54a1cf092d845703b13907f2e6b3b1c0ad573b910530499e3bcd48c6378846b80d2bfa58c81cf3d5"
],
"execution_requests": {
"deposits": [
{
"pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a",
"withdrawal_credentials": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"amount": "1",
"signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505",
"index": "1"
}
],
"withdrawals": [
{
"source_address": "0xabcf8e0d4e9587369b2301d0790347320302cc09",
"validator_pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a",
"amount": "1"
}
],
"consolidations": [
{
"source_address": "0xabcf8e0d4e9587369b2301d0790347320302cc09",
"source_pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a",
"target_pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
}
]
},
"value": "652312848583266388373324160190187140051835877600158453279131187530910662656",
"pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
},
"signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
}
}`
var testExampleHeaderResponseDenebNoBundle = `{
"version": "deneb",
"data": {
@@ -1924,9 +1982,9 @@ func TestEmptyResponseBody(t *testing.T) {
emptyResponse := &ExecutionPayloadResponse{}
require.NoError(t, json.Unmarshal(empty, emptyResponse))
_, err := emptyResponse.ParsePayload()
require.ErrorIs(t, err, consensusblocks.ErrUnsupportedVersion)
require.ErrorContains(t, "unsupported version", err)
})
versions := []int{version.Bellatrix, version.Capella, version.Deneb}
versions := []int{version.Bellatrix, version.Capella, version.Deneb, version.Electra}
for i := range versions {
vstr := version.String(versions[i])
t.Run("populated version without payload"+vstr, func(t *testing.T) {

View File

@@ -74,7 +74,7 @@ func AppendHeaders(parent context.Context, headers []string) context.Context {
logrus.Warnf("Incorrect gRPC header flag format. Skipping %v", keyValue[0])
continue
}
parent = metadata.AppendToOutgoingContext(parent, keyValue[0], strings.Join(keyValue[1:], "="))
parent = metadata.AppendToOutgoingContext(parent, keyValue[0], strings.Join(keyValue[1:], "=")) // nolint:fatcontext
}
}
return parent
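For illustration, a standalone sketch of why the value side is re-joined with `"="` after splitting: gRPC header flag values may themselves contain `=` (for example base64-padded tokens), and only the first `=` separates key from value. The helper below is hypothetical; only the standard `grpc/metadata` calls already used above are assumed.

```go
package main

import (
	"context"
	"fmt"
	"strings"

	"google.golang.org/grpc/metadata"
)

func appendHeader(parent context.Context, flag string) context.Context {
	keyValue := strings.Split(flag, "=")
	if len(keyValue) < 2 {
		fmt.Printf("Incorrect gRPC header flag format. Skipping %v\n", keyValue[0])
		return parent
	}
	// Everything after the first '=' belongs to the value, so re-join the remainder.
	return metadata.AppendToOutgoingContext(parent, keyValue[0], strings.Join(keyValue[1:], "="))
}

func main() {
	ctx := appendHeader(context.Background(), "authorization=Basic dXNlcjpwYXNzd29yZA==")
	md, _ := metadata.FromOutgoingContext(ctx)
	fmt.Println(md.Get("authorization")) // [Basic dXNlcjpwYXNzd29yZA==]
}
```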

View File

@@ -28,6 +28,7 @@ go_library(
"//api/server:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/validator:go_default_library",

View File

@@ -14,6 +14,10 @@ type SignedMessageJsoner interface {
SigString() string
}
// ----------------------------------------------------------------------------
// Phase 0
// ----------------------------------------------------------------------------
type SignedBeaconBlock struct {
Message *BeaconBlock `json:"message"`
Signature string `json:"signature"`
@@ -48,6 +52,29 @@ type BeaconBlockBody struct {
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
}
type SignedBeaconBlockHeaderContainer struct {
Header *SignedBeaconBlockHeader `json:"header"`
Root string `json:"root"`
Canonical bool `json:"canonical"`
}
type SignedBeaconBlockHeader struct {
Message *BeaconBlockHeader `json:"message"`
Signature string `json:"signature"`
}
type BeaconBlockHeader struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
ParentRoot string `json:"parent_root"`
StateRoot string `json:"state_root"`
BodyRoot string `json:"body_root"`
}
// ----------------------------------------------------------------------------
// Altair
// ----------------------------------------------------------------------------
type SignedBeaconBlockAltair struct {
Message *BeaconBlockAltair `json:"message"`
Signature string `json:"signature"`
@@ -83,6 +110,10 @@ type BeaconBlockBodyAltair struct {
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
}
// ----------------------------------------------------------------------------
// Bellatrix
// ----------------------------------------------------------------------------
type SignedBeaconBlockBellatrix struct {
Message *BeaconBlockBellatrix `json:"message"`
Signature string `json:"signature"`
@@ -155,6 +186,44 @@ type BlindedBeaconBlockBodyBellatrix struct {
ExecutionPayloadHeader *ExecutionPayloadHeader `json:"execution_payload_header"`
}
type ExecutionPayload struct {
ParentHash string `json:"parent_hash"`
FeeRecipient string `json:"fee_recipient"`
StateRoot string `json:"state_root"`
ReceiptsRoot string `json:"receipts_root"`
LogsBloom string `json:"logs_bloom"`
PrevRandao string `json:"prev_randao"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
Timestamp string `json:"timestamp"`
ExtraData string `json:"extra_data"`
BaseFeePerGas string `json:"base_fee_per_gas"`
BlockHash string `json:"block_hash"`
Transactions []string `json:"transactions"`
}
type ExecutionPayloadHeader struct {
ParentHash string `json:"parent_hash"`
FeeRecipient string `json:"fee_recipient"`
StateRoot string `json:"state_root"`
ReceiptsRoot string `json:"receipts_root"`
LogsBloom string `json:"logs_bloom"`
PrevRandao string `json:"prev_randao"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
Timestamp string `json:"timestamp"`
ExtraData string `json:"extra_data"`
BaseFeePerGas string `json:"base_fee_per_gas"`
BlockHash string `json:"block_hash"`
TransactionsRoot string `json:"transactions_root"`
}
// ----------------------------------------------------------------------------
// Capella
// ----------------------------------------------------------------------------
type SignedBeaconBlockCapella struct {
Message *BeaconBlockCapella `json:"message"`
Signature string `json:"signature"`
@@ -229,6 +298,46 @@ type BlindedBeaconBlockBodyCapella struct {
BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
}
type ExecutionPayloadCapella struct {
ParentHash string `json:"parent_hash"`
FeeRecipient string `json:"fee_recipient"`
StateRoot string `json:"state_root"`
ReceiptsRoot string `json:"receipts_root"`
LogsBloom string `json:"logs_bloom"`
PrevRandao string `json:"prev_randao"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
Timestamp string `json:"timestamp"`
ExtraData string `json:"extra_data"`
BaseFeePerGas string `json:"base_fee_per_gas"`
BlockHash string `json:"block_hash"`
Transactions []string `json:"transactions"`
Withdrawals []*Withdrawal `json:"withdrawals"`
}
type ExecutionPayloadHeaderCapella struct {
ParentHash string `json:"parent_hash"`
FeeRecipient string `json:"fee_recipient"`
StateRoot string `json:"state_root"`
ReceiptsRoot string `json:"receipts_root"`
LogsBloom string `json:"logs_bloom"`
PrevRandao string `json:"prev_randao"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
Timestamp string `json:"timestamp"`
ExtraData string `json:"extra_data"`
BaseFeePerGas string `json:"base_fee_per_gas"`
BlockHash string `json:"block_hash"`
TransactionsRoot string `json:"transactions_root"`
WithdrawalsRoot string `json:"withdrawals_root"`
}
// ----------------------------------------------------------------------------
// Deneb
// ----------------------------------------------------------------------------
type SignedBeaconBlockContentsDeneb struct {
SignedBlock *SignedBeaconBlockDeneb `json:"signed_block"`
KzgProofs []string `json:"kzg_proofs"`
@@ -317,6 +426,50 @@ type BlindedBeaconBlockBodyDeneb struct {
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
}
type ExecutionPayloadDeneb struct {
ParentHash string `json:"parent_hash"`
FeeRecipient string `json:"fee_recipient"`
StateRoot string `json:"state_root"`
ReceiptsRoot string `json:"receipts_root"`
LogsBloom string `json:"logs_bloom"`
PrevRandao string `json:"prev_randao"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
Timestamp string `json:"timestamp"`
ExtraData string `json:"extra_data"`
BaseFeePerGas string `json:"base_fee_per_gas"`
BlockHash string `json:"block_hash"`
Transactions []string `json:"transactions"`
Withdrawals []*Withdrawal `json:"withdrawals"`
BlobGasUsed string `json:"blob_gas_used"`
ExcessBlobGas string `json:"excess_blob_gas"`
}
type ExecutionPayloadHeaderDeneb struct {
ParentHash string `json:"parent_hash"`
FeeRecipient string `json:"fee_recipient"`
StateRoot string `json:"state_root"`
ReceiptsRoot string `json:"receipts_root"`
LogsBloom string `json:"logs_bloom"`
PrevRandao string `json:"prev_randao"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
Timestamp string `json:"timestamp"`
ExtraData string `json:"extra_data"`
BaseFeePerGas string `json:"base_fee_per_gas"`
BlockHash string `json:"block_hash"`
TransactionsRoot string `json:"transactions_root"`
WithdrawalsRoot string `json:"withdrawals_root"`
BlobGasUsed string `json:"blob_gas_used"`
ExcessBlobGas string `json:"excess_blob_gas"`
}
// ----------------------------------------------------------------------------
// Electra
// ----------------------------------------------------------------------------
type SignedBeaconBlockContentsElectra struct {
SignedBlock *SignedBeaconBlockElectra `json:"signed_block"`
KzgProofs []string `json:"kzg_proofs"`
@@ -362,7 +515,7 @@ type BeaconBlockBodyElectra struct {
Deposits []*Deposit `json:"deposits"`
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
ExecutionPayload *ExecutionPayloadElectra `json:"execution_payload"`
ExecutionPayload *ExecutionPayloadDeneb `json:"execution_payload"`
BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
ExecutionRequests *ExecutionRequests `json:"execution_requests"`
@@ -392,156 +545,119 @@ func (s *SignedBlindedBeaconBlockElectra) SigString() string {
}
type BlindedBeaconBlockBodyElectra struct {
RandaoReveal string `json:"randao_reveal"`
Eth1Data *Eth1Data `json:"eth1_data"`
Graffiti string `json:"graffiti"`
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
AttesterSlashings []*AttesterSlashingElectra `json:"attester_slashings"`
Attestations []*AttestationElectra `json:"attestations"`
Deposits []*Deposit `json:"deposits"`
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
ExecutionPayloadHeader *ExecutionPayloadHeaderElectra `json:"execution_payload_header"`
BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
ExecutionRequests *ExecutionRequests `json:"execution_requests"`
RandaoReveal string `json:"randao_reveal"`
Eth1Data *Eth1Data `json:"eth1_data"`
Graffiti string `json:"graffiti"`
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
AttesterSlashings []*AttesterSlashingElectra `json:"attester_slashings"`
Attestations []*AttestationElectra `json:"attestations"`
Deposits []*Deposit `json:"deposits"`
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
ExecutionPayloadHeader *ExecutionPayloadHeaderDeneb `json:"execution_payload_header"`
BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
ExecutionRequests *ExecutionRequests `json:"execution_requests"`
}
type SignedBeaconBlockHeaderContainer struct {
Header *SignedBeaconBlockHeader `json:"header"`
Root string `json:"root"`
Canonical bool `json:"canonical"`
type (
ExecutionRequests struct {
Deposits []*DepositRequest `json:"deposits"`
Withdrawals []*WithdrawalRequest `json:"withdrawals"`
Consolidations []*ConsolidationRequest `json:"consolidations"`
}
)
// ----------------------------------------------------------------------------
// Fulu
// ----------------------------------------------------------------------------
type SignedBeaconBlockContentsFulu struct {
SignedBlock *SignedBeaconBlockFulu `json:"signed_block"`
KzgProofs []string `json:"kzg_proofs"`
Blobs []string `json:"blobs"`
}
type SignedBeaconBlockHeader struct {
Message *BeaconBlockHeader `json:"message"`
Signature string `json:"signature"`
type BeaconBlockContentsFulu struct {
Block *BeaconBlockFulu `json:"block"`
KzgProofs []string `json:"kzg_proofs"`
Blobs []string `json:"blobs"`
}
type BeaconBlockHeader struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
ParentRoot string `json:"parent_root"`
StateRoot string `json:"state_root"`
BodyRoot string `json:"body_root"`
type SignedBeaconBlockFulu struct {
Message *BeaconBlockFulu `json:"message"`
Signature string `json:"signature"`
}
type ExecutionPayload struct {
ParentHash string `json:"parent_hash"`
FeeRecipient string `json:"fee_recipient"`
StateRoot string `json:"state_root"`
ReceiptsRoot string `json:"receipts_root"`
LogsBloom string `json:"logs_bloom"`
PrevRandao string `json:"prev_randao"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
Timestamp string `json:"timestamp"`
ExtraData string `json:"extra_data"`
BaseFeePerGas string `json:"base_fee_per_gas"`
BlockHash string `json:"block_hash"`
Transactions []string `json:"transactions"`
var _ SignedMessageJsoner = &SignedBeaconBlockFulu{}
func (s *SignedBeaconBlockFulu) MessageRawJson() ([]byte, error) {
return json.Marshal(s.Message)
}
type ExecutionPayloadHeader struct {
ParentHash string `json:"parent_hash"`
FeeRecipient string `json:"fee_recipient"`
StateRoot string `json:"state_root"`
ReceiptsRoot string `json:"receipts_root"`
LogsBloom string `json:"logs_bloom"`
PrevRandao string `json:"prev_randao"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
Timestamp string `json:"timestamp"`
ExtraData string `json:"extra_data"`
BaseFeePerGas string `json:"base_fee_per_gas"`
BlockHash string `json:"block_hash"`
TransactionsRoot string `json:"transactions_root"`
func (s *SignedBeaconBlockFulu) SigString() string {
return s.Signature
}
type ExecutionPayloadCapella struct {
ParentHash string `json:"parent_hash"`
FeeRecipient string `json:"fee_recipient"`
StateRoot string `json:"state_root"`
ReceiptsRoot string `json:"receipts_root"`
LogsBloom string `json:"logs_bloom"`
PrevRandao string `json:"prev_randao"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
Timestamp string `json:"timestamp"`
ExtraData string `json:"extra_data"`
BaseFeePerGas string `json:"base_fee_per_gas"`
BlockHash string `json:"block_hash"`
Transactions []string `json:"transactions"`
Withdrawals []*Withdrawal `json:"withdrawals"`
type BeaconBlockFulu struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
ParentRoot string `json:"parent_root"`
StateRoot string `json:"state_root"`
Body *BeaconBlockBodyFulu `json:"body"`
}
type ExecutionPayloadHeaderCapella struct {
ParentHash string `json:"parent_hash"`
FeeRecipient string `json:"fee_recipient"`
StateRoot string `json:"state_root"`
ReceiptsRoot string `json:"receipts_root"`
LogsBloom string `json:"logs_bloom"`
PrevRandao string `json:"prev_randao"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
Timestamp string `json:"timestamp"`
ExtraData string `json:"extra_data"`
BaseFeePerGas string `json:"base_fee_per_gas"`
BlockHash string `json:"block_hash"`
TransactionsRoot string `json:"transactions_root"`
WithdrawalsRoot string `json:"withdrawals_root"`
type BeaconBlockBodyFulu struct {
RandaoReveal string `json:"randao_reveal"`
Eth1Data *Eth1Data `json:"eth1_data"`
Graffiti string `json:"graffiti"`
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
AttesterSlashings []*AttesterSlashingElectra `json:"attester_slashings"`
Attestations []*AttestationElectra `json:"attestations"`
Deposits []*Deposit `json:"deposits"`
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
ExecutionPayload *ExecutionPayloadDeneb `json:"execution_payload"`
BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
ExecutionRequests *ExecutionRequests `json:"execution_requests"`
}
type ExecutionPayloadDeneb struct {
ParentHash string `json:"parent_hash"`
FeeRecipient string `json:"fee_recipient"`
StateRoot string `json:"state_root"`
ReceiptsRoot string `json:"receipts_root"`
LogsBloom string `json:"logs_bloom"`
PrevRandao string `json:"prev_randao"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
Timestamp string `json:"timestamp"`
ExtraData string `json:"extra_data"`
BaseFeePerGas string `json:"base_fee_per_gas"`
BlockHash string `json:"block_hash"`
Transactions []string `json:"transactions"`
Withdrawals []*Withdrawal `json:"withdrawals"`
BlobGasUsed string `json:"blob_gas_used"`
ExcessBlobGas string `json:"excess_blob_gas"`
type BlindedBeaconBlockFulu struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
ParentRoot string `json:"parent_root"`
StateRoot string `json:"state_root"`
Body *BlindedBeaconBlockBodyFulu `json:"body"`
}
type ExecutionPayloadElectra = ExecutionPayloadDeneb
type ExecutionPayloadHeaderDeneb struct {
ParentHash string `json:"parent_hash"`
FeeRecipient string `json:"fee_recipient"`
StateRoot string `json:"state_root"`
ReceiptsRoot string `json:"receipts_root"`
LogsBloom string `json:"logs_bloom"`
PrevRandao string `json:"prev_randao"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
Timestamp string `json:"timestamp"`
ExtraData string `json:"extra_data"`
BaseFeePerGas string `json:"base_fee_per_gas"`
BlockHash string `json:"block_hash"`
TransactionsRoot string `json:"transactions_root"`
WithdrawalsRoot string `json:"withdrawals_root"`
BlobGasUsed string `json:"blob_gas_used"`
ExcessBlobGas string `json:"excess_blob_gas"`
}
type SignedBlindedBeaconBlockFulu struct {
Message *BlindedBeaconBlockFulu `json:"message"`
Signature string `json:"signature"`
}
type ExecutionPayloadHeaderElectra = ExecutionPayloadHeaderDeneb
var _ SignedMessageJsoner = &SignedBlindedBeaconBlockFulu{}
type ExecutionRequests struct {
Deposits []*DepositRequest `json:"deposits"`
Withdrawals []*WithdrawalRequest `json:"withdrawals"`
Consolidations []*ConsolidationRequest `json:"consolidations"`
}
func (s *SignedBlindedBeaconBlockFulu) MessageRawJson() ([]byte, error) {
return json.Marshal(s.Message)
}
func (s *SignedBlindedBeaconBlockFulu) SigString() string {
return s.Signature
}
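Note: the blank-identifier assertion above ties SignedBlindedBeaconBlockFulu to the SignedMessageJsoner interface at compile time. The interface itself is not part of this diff; the sketch below is an assumption of its shape, inferred only from the two methods implemented here.

package structs // hypothetical placement; the real package name may differ

// SignedMessageJsoner is sketched from the two methods above; the actual
// interface lives elsewhere in the package and may carry more methods.
type SignedMessageJsoner interface {
	MessageRawJson() ([]byte, error)
	SigString() string
}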
type BlindedBeaconBlockBodyFulu struct {
RandaoReveal string `json:"randao_reveal"`
Eth1Data *Eth1Data `json:"eth1_data"`
Graffiti string `json:"graffiti"`
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
AttesterSlashings []*AttesterSlashingElectra `json:"attester_slashings"`
Attestations []*AttestationElectra `json:"attestations"`
Deposits []*Deposit `json:"deposits"`
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
ExecutionPayloadHeader *ExecutionPayloadHeaderDeneb `json:"execution_payload_header"`
BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
ExecutionRequests *ExecutionRequests `json:"execution_requests"`
}

View File

@@ -9,6 +9,7 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/api/server"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/consensus-types/validator"
"github.com/prysmaticlabs/prysm/v5/container/slice"
@@ -243,7 +244,7 @@ func (c *ContributionAndProof) ToConsensus() (*eth.ContributionAndProof, error)
if err != nil {
return nil, server.NewDecodeError(err, "AggregatorIndex")
}
selectionProof, err := bytesutil.DecodeHexWithLength(c.SelectionProof, 96)
selectionProof, err := bytesutil.DecodeHexWithLength(c.SelectionProof, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, "SelectionProof")
}
@@ -330,7 +331,7 @@ func (a *AggregateAttestationAndProof) ToConsensus() (*eth.AggregateAttestationA
if err != nil {
return nil, server.NewDecodeError(err, "Aggregate")
}
proof, err := bytesutil.DecodeHexWithLength(a.SelectionProof, 96)
proof, err := bytesutil.DecodeHexWithLength(a.SelectionProof, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, "SelectionProof")
}
@@ -366,7 +367,7 @@ func (a *AggregateAttestationAndProofElectra) ToConsensus() (*eth.AggregateAttes
if err != nil {
return nil, server.NewDecodeError(err, "Aggregate")
}
proof, err := bytesutil.DecodeHexWithLength(a.SelectionProof, 96)
proof, err := bytesutil.DecodeHexWithLength(a.SelectionProof, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, "SelectionProof")
}
@@ -432,6 +433,32 @@ func (a *AttestationElectra) ToConsensus() (*eth.AttestationElectra, error) {
}, nil
}
func (a *SingleAttestation) ToConsensus() (*eth.SingleAttestation, error) {
ci, err := strconv.ParseUint(a.CommitteeIndex, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "CommitteeIndex")
}
ai, err := strconv.ParseUint(a.AttesterIndex, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "AttesterIndex")
}
data, err := a.Data.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Data")
}
sig, err := bytesutil.DecodeHexWithLength(a.Signature, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, "Signature")
}
return &eth.SingleAttestation{
CommitteeId: primitives.CommitteeIndex(ci),
AttesterIndex: primitives.ValidatorIndex(ai),
Data: data,
Signature: sig,
}, nil
}
func AttElectraFromConsensus(a *eth.AttestationElectra) *AttestationElectra {
return &AttestationElectra{
AggregationBits: hexutil.Encode(a.AggregationBits),
@@ -708,6 +735,10 @@ func (s *AttesterSlashingElectra) ToConsensus() (*eth.AttesterSlashingElectra, e
}
func (a *IndexedAttestation) ToConsensus() (*eth.IndexedAttestation, error) {
if err := slice.VerifyMaxLength(a.AttestingIndices, params.BeaconConfig().MaxValidatorsPerCommittee); err != nil {
return nil, err
}
indices := make([]uint64, len(a.AttestingIndices))
var err error
for i, ix := range a.AttestingIndices {
@@ -733,6 +764,13 @@ func (a *IndexedAttestation) ToConsensus() (*eth.IndexedAttestation, error) {
}
func (a *IndexedAttestationElectra) ToConsensus() (*eth.IndexedAttestationElectra, error) {
if err := slice.VerifyMaxLength(
a.AttestingIndices,
params.BeaconConfig().MaxValidatorsPerCommittee*params.BeaconConfig().MaxCommitteesPerSlot,
); err != nil {
return nil, err
}
indices := make([]uint64, len(a.AttestingIndices))
var err error
for i, ix := range a.AttestingIndices {
@@ -1163,7 +1201,7 @@ func AttesterSlashingsElectraToConsensus(src []*AttesterSlashingElectra) ([]*eth
if src == nil {
return nil, errNilValue
}
err := slice.VerifyMaxLength(src, 2)
err := slice.VerifyMaxLength(src, fieldparams.MaxAttesterSlashingsElectra)
if err != nil {
return nil, err
}
@@ -1184,7 +1222,7 @@ func AttesterSlashingsElectraToConsensus(src []*AttesterSlashingElectra) ([]*eth
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation1.Signature", i))
}
err = slice.VerifyMaxLength(s.Attestation1.AttestingIndices, 2048)
err = slice.VerifyMaxLength(s.Attestation1.AttestingIndices, params.BeaconConfig().MaxValidatorsPerCommittee*params.BeaconConfig().MaxCommitteesPerSlot)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation1.AttestingIndices", i))
}
@@ -1204,7 +1242,7 @@ func AttesterSlashingsElectraToConsensus(src []*AttesterSlashingElectra) ([]*eth
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation2.Signature", i))
}
err = slice.VerifyMaxLength(s.Attestation2.AttestingIndices, 2048)
err = slice.VerifyMaxLength(s.Attestation2.AttestingIndices, params.BeaconConfig().MaxValidatorsPerCommittee*params.BeaconConfig().MaxCommitteesPerSlot)
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation2.AttestingIndices", i))
}

File diff suppressed because it is too large

View File

@@ -176,9 +176,9 @@ func lightClientHeaderToJSON(header interfaces.LightClientHeader) (json.RawMessa
if err != nil {
return nil, err
}
ex, ok := exInterface.Proto().(*enginev1.ExecutionPayloadHeaderElectra)
ex, ok := exInterface.Proto().(*enginev1.ExecutionPayloadHeaderDeneb)
if !ok {
return nil, fmt.Errorf("execution data is not %T", &enginev1.ExecutionPayloadHeaderElectra{})
return nil, fmt.Errorf("execution data is not %T", &enginev1.ExecutionPayloadHeaderDeneb{})
}
execution, err := ExecutionPayloadHeaderElectraFromConsensus(ex)
if err != nil {

View File

@@ -11,6 +11,10 @@ import (
var errPayloadHeaderNotFound = errors.New("expected payload header not found")
// ----------------------------------------------------------------------------
// Phase 0
// ----------------------------------------------------------------------------
func BeaconStateFromConsensus(st beaconState.BeaconState) (*BeaconState, error) {
srcBr := st.BlockRoots()
br := make([]string, len(srcBr))
@@ -97,6 +101,10 @@ func BeaconStateFromConsensus(st beaconState.BeaconState) (*BeaconState, error)
}, nil
}
// ----------------------------------------------------------------------------
// Altair
// ----------------------------------------------------------------------------
func BeaconStateAltairFromConsensus(st beaconState.BeaconState) (*BeaconStateAltair, error) {
srcBr := st.BlockRoots()
br := make([]string, len(srcBr))
@@ -202,6 +210,10 @@ func BeaconStateAltairFromConsensus(st beaconState.BeaconState) (*BeaconStateAlt
}, nil
}
// ----------------------------------------------------------------------------
// Bellatrix
// ----------------------------------------------------------------------------
func BeaconStateBellatrixFromConsensus(st beaconState.BeaconState) (*BeaconStateBellatrix, error) {
srcBr := st.BlockRoots()
br := make([]string, len(srcBr))
@@ -320,6 +332,10 @@ func BeaconStateBellatrixFromConsensus(st beaconState.BeaconState) (*BeaconState
}, nil
}
// ----------------------------------------------------------------------------
// Capella
// ----------------------------------------------------------------------------
func BeaconStateCapellaFromConsensus(st beaconState.BeaconState) (*BeaconStateCapella, error) {
srcBr := st.BlockRoots()
br := make([]string, len(srcBr))
@@ -457,6 +473,10 @@ func BeaconStateCapellaFromConsensus(st beaconState.BeaconState) (*BeaconStateCa
}, nil
}
// ----------------------------------------------------------------------------
// Deneb
// ----------------------------------------------------------------------------
func BeaconStateDenebFromConsensus(st beaconState.BeaconState) (*BeaconStateDeneb, error) {
srcBr := st.BlockRoots()
br := make([]string, len(srcBr))
@@ -594,6 +614,10 @@ func BeaconStateDenebFromConsensus(st beaconState.BeaconState) (*BeaconStateDene
}, nil
}
// ----------------------------------------------------------------------------
// Electra
// ----------------------------------------------------------------------------
func BeaconStateElectraFromConsensus(st beaconState.BeaconState) (*BeaconStateElectra, error) {
srcBr := st.BlockRoots()
br := make([]string, len(srcBr))
@@ -775,3 +799,189 @@ func BeaconStateElectraFromConsensus(st beaconState.BeaconState) (*BeaconStateEl
PendingConsolidations: PendingConsolidationsFromConsensus(pc),
}, nil
}
// ----------------------------------------------------------------------------
// Fulu
// ----------------------------------------------------------------------------
func BeaconStateFuluFromConsensus(st beaconState.BeaconState) (*BeaconStateFulu, error) {
srcBr := st.BlockRoots()
br := make([]string, len(srcBr))
for i, r := range srcBr {
br[i] = hexutil.Encode(r)
}
srcSr := st.StateRoots()
sr := make([]string, len(srcSr))
for i, r := range srcSr {
sr[i] = hexutil.Encode(r)
}
srcHr, err := st.HistoricalRoots()
if err != nil {
return nil, err
}
hr := make([]string, len(srcHr))
for i, r := range srcHr {
hr[i] = hexutil.Encode(r)
}
srcVotes := st.Eth1DataVotes()
votes := make([]*Eth1Data, len(srcVotes))
for i, e := range srcVotes {
votes[i] = Eth1DataFromConsensus(e)
}
srcVals := st.Validators()
vals := make([]*Validator, len(srcVals))
for i, v := range srcVals {
vals[i] = ValidatorFromConsensus(v)
}
srcBals := st.Balances()
bals := make([]string, len(srcBals))
for i, b := range srcBals {
bals[i] = fmt.Sprintf("%d", b)
}
srcRm := st.RandaoMixes()
rm := make([]string, len(srcRm))
for i, m := range srcRm {
rm[i] = hexutil.Encode(m)
}
srcSlashings := st.Slashings()
slashings := make([]string, len(srcSlashings))
for i, s := range srcSlashings {
slashings[i] = fmt.Sprintf("%d", s)
}
srcPrevPart, err := st.PreviousEpochParticipation()
if err != nil {
return nil, err
}
prevPart := make([]string, len(srcPrevPart))
for i, p := range srcPrevPart {
prevPart[i] = fmt.Sprintf("%d", p)
}
srcCurrPart, err := st.CurrentEpochParticipation()
if err != nil {
return nil, err
}
currPart := make([]string, len(srcCurrPart))
for i, p := range srcCurrPart {
currPart[i] = fmt.Sprintf("%d", p)
}
srcIs, err := st.InactivityScores()
if err != nil {
return nil, err
}
is := make([]string, len(srcIs))
for i, s := range srcIs {
is[i] = fmt.Sprintf("%d", s)
}
currSc, err := st.CurrentSyncCommittee()
if err != nil {
return nil, err
}
nextSc, err := st.NextSyncCommittee()
if err != nil {
return nil, err
}
execData, err := st.LatestExecutionPayloadHeader()
if err != nil {
return nil, err
}
srcPayload, ok := execData.Proto().(*enginev1.ExecutionPayloadHeaderDeneb)
if !ok {
return nil, errPayloadHeaderNotFound
}
payload, err := ExecutionPayloadHeaderElectraFromConsensus(srcPayload)
if err != nil {
return nil, err
}
srcHs, err := st.HistoricalSummaries()
if err != nil {
return nil, err
}
hs := make([]*HistoricalSummary, len(srcHs))
for i, s := range srcHs {
hs[i] = HistoricalSummaryFromConsensus(s)
}
nwi, err := st.NextWithdrawalIndex()
if err != nil {
return nil, err
}
nwvi, err := st.NextWithdrawalValidatorIndex()
if err != nil {
return nil, err
}
drsi, err := st.DepositRequestsStartIndex()
if err != nil {
return nil, err
}
dbtc, err := st.DepositBalanceToConsume()
if err != nil {
return nil, err
}
ebtc, err := st.ExitBalanceToConsume()
if err != nil {
return nil, err
}
eee, err := st.EarliestExitEpoch()
if err != nil {
return nil, err
}
cbtc, err := st.ConsolidationBalanceToConsume()
if err != nil {
return nil, err
}
ece, err := st.EarliestConsolidationEpoch()
if err != nil {
return nil, err
}
pbd, err := st.PendingDeposits()
if err != nil {
return nil, err
}
ppw, err := st.PendingPartialWithdrawals()
if err != nil {
return nil, err
}
pc, err := st.PendingConsolidations()
if err != nil {
return nil, err
}
return &BeaconStateFulu{
GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
Slot: fmt.Sprintf("%d", st.Slot()),
Fork: ForkFromConsensus(st.Fork()),
LatestBlockHeader: BeaconBlockHeaderFromConsensus(st.LatestBlockHeader()),
BlockRoots: br,
StateRoots: sr,
HistoricalRoots: hr,
Eth1Data: Eth1DataFromConsensus(st.Eth1Data()),
Eth1DataVotes: votes,
Eth1DepositIndex: fmt.Sprintf("%d", st.Eth1DepositIndex()),
Validators: vals,
Balances: bals,
RandaoMixes: rm,
Slashings: slashings,
PreviousEpochParticipation: prevPart,
CurrentEpochParticipation: currPart,
JustificationBits: hexutil.Encode(st.JustificationBits()),
PreviousJustifiedCheckpoint: CheckpointFromConsensus(st.PreviousJustifiedCheckpoint()),
CurrentJustifiedCheckpoint: CheckpointFromConsensus(st.CurrentJustifiedCheckpoint()),
FinalizedCheckpoint: CheckpointFromConsensus(st.FinalizedCheckpoint()),
InactivityScores: is,
CurrentSyncCommittee: SyncCommitteeFromConsensus(currSc),
NextSyncCommittee: SyncCommitteeFromConsensus(nextSc),
LatestExecutionPayloadHeader: payload,
NextWithdrawalIndex: fmt.Sprintf("%d", nwi),
NextWithdrawalValidatorIndex: fmt.Sprintf("%d", nwvi),
HistoricalSummaries: hs,
DepositRequestsStartIndex: fmt.Sprintf("%d", drsi),
DepositBalanceToConsume: fmt.Sprintf("%d", dbtc),
ExitBalanceToConsume: fmt.Sprintf("%d", ebtc),
EarliestExitEpoch: fmt.Sprintf("%d", eee),
ConsolidationBalanceToConsume: fmt.Sprintf("%d", cbtc),
EarliestConsolidationEpoch: fmt.Sprintf("%d", ece),
PendingDeposits: PendingDepositsFromConsensus(pbd),
PendingPartialWithdrawals: PendingPartialWithdrawalsFromConsensus(ppw),
PendingConsolidations: PendingConsolidationsFromConsensus(pc),
}, nil
}
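The Fulu converter above, like the earlier fork converters, renders every numeric field as a decimal string and every byte slice as 0x-prefixed hex, per the beacon API JSON conventions. A minimal, self-contained sketch of that convention using only the standard library (encoding/hex stands in for go-ethereum's hexutil; the struct is illustrative, not one of the API types):

package main

import (
	"encoding/hex"
	"encoding/json"
	"fmt"
)

// checkpointJSON mimics the string-encoding convention used by the converters above.
type checkpointJSON struct {
	Epoch string `json:"epoch"` // uint64 rendered as a decimal string
	Root  string `json:"root"`  // 32 bytes rendered as 0x-prefixed hex
}

func main() {
	epoch := uint64(12345)
	root := make([]byte, 32)
	cp := checkpointJSON{
		Epoch: fmt.Sprintf("%d", epoch),
		Root:  "0x" + hex.EncodeToString(root),
	}
	b, err := json.Marshal(cp)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"epoch":"12345","root":"0x00...00"}
}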

View File

@@ -36,6 +36,13 @@ type AttestationElectra struct {
CommitteeBits string `json:"committee_bits"`
}
type SingleAttestation struct {
CommitteeIndex string `json:"committee_index"`
AttesterIndex string `json:"attester_index"`
Data *AttestationData `json:"data"`
Signature string `json:"signature"`
}
type AttestationData struct {
Slot string `json:"slot"`
CommitteeIndex string `json:"index"`

View File

@@ -142,41 +142,81 @@ type BeaconStateDeneb struct {
}
type BeaconStateElectra struct {
GenesisTime string `json:"genesis_time"`
GenesisValidatorsRoot string `json:"genesis_validators_root"`
Slot string `json:"slot"`
Fork *Fork `json:"fork"`
LatestBlockHeader *BeaconBlockHeader `json:"latest_block_header"`
BlockRoots []string `json:"block_roots"`
StateRoots []string `json:"state_roots"`
HistoricalRoots []string `json:"historical_roots"`
Eth1Data *Eth1Data `json:"eth1_data"`
Eth1DataVotes []*Eth1Data `json:"eth1_data_votes"`
Eth1DepositIndex string `json:"eth1_deposit_index"`
Validators []*Validator `json:"validators"`
Balances []string `json:"balances"`
RandaoMixes []string `json:"randao_mixes"`
Slashings []string `json:"slashings"`
PreviousEpochParticipation []string `json:"previous_epoch_participation"`
CurrentEpochParticipation []string `json:"current_epoch_participation"`
JustificationBits string `json:"justification_bits"`
PreviousJustifiedCheckpoint *Checkpoint `json:"previous_justified_checkpoint"`
CurrentJustifiedCheckpoint *Checkpoint `json:"current_justified_checkpoint"`
FinalizedCheckpoint *Checkpoint `json:"finalized_checkpoint"`
InactivityScores []string `json:"inactivity_scores"`
CurrentSyncCommittee *SyncCommittee `json:"current_sync_committee"`
NextSyncCommittee *SyncCommittee `json:"next_sync_committee"`
LatestExecutionPayloadHeader *ExecutionPayloadHeaderElectra `json:"latest_execution_payload_header"`
NextWithdrawalIndex string `json:"next_withdrawal_index"`
NextWithdrawalValidatorIndex string `json:"next_withdrawal_validator_index"`
HistoricalSummaries []*HistoricalSummary `json:"historical_summaries"`
DepositRequestsStartIndex string `json:"deposit_requests_start_index"`
DepositBalanceToConsume string `json:"deposit_balance_to_consume"`
ExitBalanceToConsume string `json:"exit_balance_to_consume"`
EarliestExitEpoch string `json:"earliest_exit_epoch"`
ConsolidationBalanceToConsume string `json:"consolidation_balance_to_consume"`
EarliestConsolidationEpoch string `json:"earliest_consolidation_epoch"`
PendingDeposits []*PendingDeposit `json:"pending_deposits"`
PendingPartialWithdrawals []*PendingPartialWithdrawal `json:"pending_partial_withdrawals"`
PendingConsolidations []*PendingConsolidation `json:"pending_consolidations"`
GenesisTime string `json:"genesis_time"`
GenesisValidatorsRoot string `json:"genesis_validators_root"`
Slot string `json:"slot"`
Fork *Fork `json:"fork"`
LatestBlockHeader *BeaconBlockHeader `json:"latest_block_header"`
BlockRoots []string `json:"block_roots"`
StateRoots []string `json:"state_roots"`
HistoricalRoots []string `json:"historical_roots"`
Eth1Data *Eth1Data `json:"eth1_data"`
Eth1DataVotes []*Eth1Data `json:"eth1_data_votes"`
Eth1DepositIndex string `json:"eth1_deposit_index"`
Validators []*Validator `json:"validators"`
Balances []string `json:"balances"`
RandaoMixes []string `json:"randao_mixes"`
Slashings []string `json:"slashings"`
PreviousEpochParticipation []string `json:"previous_epoch_participation"`
CurrentEpochParticipation []string `json:"current_epoch_participation"`
JustificationBits string `json:"justification_bits"`
PreviousJustifiedCheckpoint *Checkpoint `json:"previous_justified_checkpoint"`
CurrentJustifiedCheckpoint *Checkpoint `json:"current_justified_checkpoint"`
FinalizedCheckpoint *Checkpoint `json:"finalized_checkpoint"`
InactivityScores []string `json:"inactivity_scores"`
CurrentSyncCommittee *SyncCommittee `json:"current_sync_committee"`
NextSyncCommittee *SyncCommittee `json:"next_sync_committee"`
LatestExecutionPayloadHeader *ExecutionPayloadHeaderDeneb `json:"latest_execution_payload_header"`
NextWithdrawalIndex string `json:"next_withdrawal_index"`
NextWithdrawalValidatorIndex string `json:"next_withdrawal_validator_index"`
HistoricalSummaries []*HistoricalSummary `json:"historical_summaries"`
DepositRequestsStartIndex string `json:"deposit_requests_start_index"`
DepositBalanceToConsume string `json:"deposit_balance_to_consume"`
ExitBalanceToConsume string `json:"exit_balance_to_consume"`
EarliestExitEpoch string `json:"earliest_exit_epoch"`
ConsolidationBalanceToConsume string `json:"consolidation_balance_to_consume"`
EarliestConsolidationEpoch string `json:"earliest_consolidation_epoch"`
PendingDeposits []*PendingDeposit `json:"pending_deposits"`
PendingPartialWithdrawals []*PendingPartialWithdrawal `json:"pending_partial_withdrawals"`
PendingConsolidations []*PendingConsolidation `json:"pending_consolidations"`
}
type BeaconStateFulu struct {
GenesisTime string `json:"genesis_time"`
GenesisValidatorsRoot string `json:"genesis_validators_root"`
Slot string `json:"slot"`
Fork *Fork `json:"fork"`
LatestBlockHeader *BeaconBlockHeader `json:"latest_block_header"`
BlockRoots []string `json:"block_roots"`
StateRoots []string `json:"state_roots"`
HistoricalRoots []string `json:"historical_roots"`
Eth1Data *Eth1Data `json:"eth1_data"`
Eth1DataVotes []*Eth1Data `json:"eth1_data_votes"`
Eth1DepositIndex string `json:"eth1_deposit_index"`
Validators []*Validator `json:"validators"`
Balances []string `json:"balances"`
RandaoMixes []string `json:"randao_mixes"`
Slashings []string `json:"slashings"`
PreviousEpochParticipation []string `json:"previous_epoch_participation"`
CurrentEpochParticipation []string `json:"current_epoch_participation"`
JustificationBits string `json:"justification_bits"`
PreviousJustifiedCheckpoint *Checkpoint `json:"previous_justified_checkpoint"`
CurrentJustifiedCheckpoint *Checkpoint `json:"current_justified_checkpoint"`
FinalizedCheckpoint *Checkpoint `json:"finalized_checkpoint"`
InactivityScores []string `json:"inactivity_scores"`
CurrentSyncCommittee *SyncCommittee `json:"current_sync_committee"`
NextSyncCommittee *SyncCommittee `json:"next_sync_committee"`
LatestExecutionPayloadHeader *ExecutionPayloadHeaderDeneb `json:"latest_execution_payload_header"`
NextWithdrawalIndex string `json:"next_withdrawal_index"`
NextWithdrawalValidatorIndex string `json:"next_withdrawal_validator_index"`
HistoricalSummaries []*HistoricalSummary `json:"historical_summaries"`
DepositRequestsStartIndex string `json:"deposit_requests_start_index"`
DepositBalanceToConsume string `json:"deposit_balance_to_consume"`
ExitBalanceToConsume string `json:"exit_balance_to_consume"`
EarliestExitEpoch string `json:"earliest_exit_epoch"`
ConsolidationBalanceToConsume string `json:"consolidation_balance_to_consume"`
EarliestConsolidationEpoch string `json:"earliest_consolidation_epoch"`
PendingDeposits []*PendingDeposit `json:"pending_deposits"`
PendingPartialWithdrawals []*PendingPartialWithdrawal `json:"pending_partial_withdrawals"`
PendingConsolidations []*PendingConsolidation `json:"pending_consolidations"`
}

View File

@@ -43,6 +43,7 @@ go_library(
"//beacon-chain/cache:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/electra:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",

View File

@@ -2,6 +2,7 @@ package blockchain
import (
"io"
"os"
"testing"
"github.com/sirupsen/logrus"
@@ -11,5 +12,5 @@ func TestMain(m *testing.M) {
logrus.SetLevel(logrus.DebugLevel)
logrus.SetOutput(io.Discard)
m.Run()
os.Exit(m.Run())
}

View File

@@ -268,6 +268,9 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
if err != nil {
return false, errors.Wrap(err, "could not get execution requests")
}
if requests == nil {
return false, errors.New("nil execution requests")
}
}
lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, parentRoot, requests)
@@ -362,15 +365,16 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
return emptyAttri
}
var attr payloadattribute.Attributer
switch st.Version() {
case version.Deneb, version.Electra:
v := st.Version()
if v >= version.Deneb {
withdrawals, _, err := st.ExpectedWithdrawals()
if err != nil {
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
return emptyAttri
}
attr, err = payloadattribute.New(&enginev1.PayloadAttributesV3{
attr, err := payloadattribute.New(&enginev1.PayloadAttributesV3{
Timestamp: uint64(t.Unix()),
PrevRandao: prevRando,
SuggestedFeeRecipient: val.FeeRecipient[:],
@@ -381,13 +385,18 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
log.WithError(err).Error("Could not get payload attribute")
return emptyAttri
}
case version.Capella:
return attr
}
if v >= version.Capella {
withdrawals, _, err := st.ExpectedWithdrawals()
if err != nil {
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
return emptyAttri
}
attr, err = payloadattribute.New(&enginev1.PayloadAttributesV2{
attr, err := payloadattribute.New(&enginev1.PayloadAttributesV2{
Timestamp: uint64(t.Unix()),
PrevRandao: prevRando,
SuggestedFeeRecipient: val.FeeRecipient[:],
@@ -397,8 +406,12 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
log.WithError(err).Error("Could not get payload attribute")
return emptyAttri
}
case version.Bellatrix:
attr, err = payloadattribute.New(&enginev1.PayloadAttributes{
return attr
}
if v >= version.Bellatrix {
attr, err := payloadattribute.New(&enginev1.PayloadAttributes{
Timestamp: uint64(t.Unix()),
PrevRandao: prevRando,
SuggestedFeeRecipient: val.FeeRecipient[:],
@@ -407,12 +420,12 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
log.WithError(err).Error("Could not get payload attribute")
return emptyAttri
}
default:
log.WithField("version", st.Version()).Error("Could not get payload attribute due to unknown state version")
return emptyAttri
return attr
}
return attr
log.WithField("version", version.String(st.Version())).Error("Could not get payload attribute due to unknown state version")
return emptyAttri
}
// removeInvalidBlockAndState removes the invalid block, blob and its corresponding state from the cache and DB.
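The hunk above replaces the exact-version switch with ordered >= checks and early returns, so any fork at or beyond Deneb gets the V3 attributes, Capella and later (but pre-Deneb) gets V2, and so on, without enumerating every new fork name. A compact, self-contained sketch of the same dispatch pattern (the numeric fork constants are placeholders, not Prysm's real version values):

package main

import "fmt"

const (
	bellatrix = iota + 3 // placeholder ordering only
	capella
	deneb
	electra
)

// attributesVersion picks the payload-attribute shape for a state version,
// newest fork first, mirroring the early-return structure of the hunk above.
func attributesVersion(v int) string {
	switch {
	case v >= deneb:
		return "PayloadAttributesV3"
	case v >= capella:
		return "PayloadAttributesV2"
	case v >= bellatrix:
		return "PayloadAttributes"
	default:
		return "unknown"
	}
}

func main() {
	fmt.Println(attributesVersion(electra)) // PayloadAttributesV3
}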

View File

@@ -20,16 +20,17 @@ func Verify(sidecars ...blocks.ROBlob) error {
cmts := make([]GoKZG.KZGCommitment, len(sidecars))
proofs := make([]GoKZG.KZGProof, len(sidecars))
for i, sidecar := range sidecars {
blobs[i] = bytesToBlob(sidecar.Blob)
blobs[i] = *bytesToBlob(sidecar.Blob)
cmts[i] = bytesToCommitment(sidecar.KzgCommitment)
proofs[i] = bytesToKZGProof(sidecar.KzgProof)
}
return kzgContext.VerifyBlobKZGProofBatch(blobs, cmts, proofs)
}
func bytesToBlob(blob []byte) (ret GoKZG.Blob) {
func bytesToBlob(blob []byte) *GoKZG.Blob {
var ret GoKZG.Blob
copy(ret[:], blob)
return
return &ret
}
func bytesToCommitment(commitment []byte) (ret GoKZG.KZGCommitment) {

View File

@@ -10,11 +10,11 @@ import (
)
func GenerateCommitmentAndProof(blob GoKZG.Blob) (GoKZG.KZGCommitment, GoKZG.KZGProof, error) {
commitment, err := kzgContext.BlobToKZGCommitment(blob, 0)
commitment, err := kzgContext.BlobToKZGCommitment(&blob, 0)
if err != nil {
return GoKZG.KZGCommitment{}, GoKZG.KZGProof{}, err
}
proof, err := kzgContext.ComputeBlobKZGProof(blob, commitment, 0)
proof, err := kzgContext.ComputeBlobKZGProof(&blob, commitment, 0)
if err != nil {
return GoKZG.KZGCommitment{}, GoKZG.KZGProof{}, err
}
@@ -31,7 +31,7 @@ func TestBytesToAny(t *testing.T) {
blob := GoKZG.Blob{0x01, 0x02}
commitment := GoKZG.KZGCommitment{0x01, 0x02}
proof := GoKZG.KZGProof{0x01, 0x02}
require.DeepEqual(t, blob, bytesToBlob(bytes))
require.DeepEqual(t, blob, *bytesToBlob(bytes))
require.DeepEqual(t, commitment, bytesToCommitment(bytes))
require.DeepEqual(t, proof, bytesToKZGProof(bytes))
}
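The two hunks above switch bytesToBlob from returning a GoKZG.Blob by value to returning a pointer, and adjust the caller and the test accordingly; a Blob is a large fixed-size byte array, so building it in place and handing back a pointer avoids copying it on every return. A standalone sketch of the pattern with a stand-in array type (the 131072-byte size reflects the usual 4096×32-byte blob layout but is an assumption here, not taken from this diff):

package main

import "fmt"

// blob stands in for GoKZG.Blob: a large fixed-size array that is cheap to
// point at but expensive to copy by value.
type blob [131072]byte

// bytesToBlob copies the input into a freshly allocated blob and returns a
// pointer, mirroring the shape the diff above moves to.
func bytesToBlob(b []byte) *blob {
	var ret blob
	copy(ret[:], b)
	return &ret
}

func main() {
	v := bytesToBlob([]byte{0x01, 0x02})
	fmt.Println(v[0], v[1]) // 1 2
}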

View File

@@ -69,6 +69,7 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
if features.Get().EnableLightClient && slots.ToEpoch(s.CurrentSlot()) >= params.BeaconConfig().AltairForkEpoch {
defer s.processLightClientUpdates(cfg)
defer s.saveLightClientUpdate(cfg)
defer s.saveLightClientBootstrap(cfg)
}
defer s.sendStateFeedOnBlock(cfg)
defer reportProcessingTime(startTime)

View File

@@ -1076,6 +1076,48 @@ func TestService_insertSlashingsToForkChoiceStore(t *testing.T) {
service.InsertSlashingsToForkChoiceStore(ctx, wb.Block().Body().AttesterSlashings())
}
func TestService_insertSlashingsToForkChoiceStoreElectra(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
beaconState, privKeys := util.DeterministicGenesisStateElectra(t, 100)
att1 := util.HydrateIndexedAttestationElectra(&ethpb.IndexedAttestationElectra{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 1},
},
AttestingIndices: []uint64{0, 1},
})
domain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorsRoot())
require.NoError(t, err)
signingRoot, err := signing.ComputeSigningRoot(att1.Data, domain)
assert.NoError(t, err, "Could not get signing root of beacon block header")
sig0 := privKeys[0].Sign(signingRoot[:])
sig1 := privKeys[1].Sign(signingRoot[:])
aggregateSig := bls.AggregateSignatures([]bls.Signature{sig0, sig1})
att1.Signature = aggregateSig.Marshal()
att2 := util.HydrateIndexedAttestationElectra(&ethpb.IndexedAttestationElectra{
AttestingIndices: []uint64{0, 1},
})
signingRoot, err = signing.ComputeSigningRoot(att2.Data, domain)
assert.NoError(t, err, "Could not get signing root of beacon block header")
sig0 = privKeys[0].Sign(signingRoot[:])
sig1 = privKeys[1].Sign(signingRoot[:])
aggregateSig = bls.AggregateSignatures([]bls.Signature{sig0, sig1})
att2.Signature = aggregateSig.Marshal()
slashings := []*ethpb.AttesterSlashingElectra{
{
Attestation_1: att1,
Attestation_2: att2,
},
}
b := util.NewBeaconBlockElectra()
b.Block.Body.AttesterSlashings = slashings
wb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
service.InsertSlashingsToForkChoiceStore(ctx, wb.Block().Body().AttesterSlashings())
}
func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx

View File

@@ -7,6 +7,7 @@ import (
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
@@ -468,6 +469,9 @@ func (s *Service) validateStateTransition(ctx context.Context, preState state.Be
stateTransitionStartTime := time.Now()
postState, err := transition.ExecuteStateTransition(ctx, preState, signed)
if err != nil {
if ctx.Err() != nil || electra.IsExecutionRequestError(err) {
return nil, err
}
return nil, invalidBlock{error: err}
}
stateTransitionProcessingTime.Observe(float64(time.Since(stateTransitionStartTime).Milliseconds()))
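The hunk above keeps a failed state transition from branding the block invalid when the failure is a context cancellation/deadline or an execution-request error, which are treated as retryable no-ops rather than evidence of a bad block. A self-contained sketch of that classification (the sentinel error and the invalidBlock type are stand-ins for this sketch, not Prysm's actual types):

package main

import (
	"context"
	"errors"
	"fmt"
)

// errExecutionRequest stands in for the sentinel matched by IsExecutionRequestError.
var errExecutionRequest = errors.New("execution request error")

// invalidBlock marks errors that should cause the block to be rejected permanently.
type invalidBlock struct{ error }

// classify returns the error unchanged when it is transient (context canceled
// or deadline exceeded) or an execution-request error, and wraps everything
// else as an invalid-block error, mirroring the hunk above.
func classify(ctx context.Context, err error) error {
	if err == nil {
		return nil
	}
	if ctx.Err() != nil || errors.Is(err, errExecutionRequest) {
		return err
	}
	return invalidBlock{error: err}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	fmt.Println(classify(ctx, errors.New("timeout during transition")))          // returned as-is
	fmt.Println(classify(context.Background(), errors.New("bad state root")))    // wrapped as invalidBlock
}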

View File

@@ -33,6 +33,7 @@ type MockBuilderService struct {
Bid *ethpb.SignedBuilderBid
BidCapella *ethpb.SignedBuilderBidCapella
BidDeneb *ethpb.SignedBuilderBidDeneb
BidElectra *ethpb.SignedBuilderBidElectra
RegistrationCache *cache.RegistrationCache
ErrGetHeader error
ErrRegisterValidator error
@@ -59,7 +60,7 @@ func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, b interfaces.
return nil, nil, errors.Wrap(err, "could not wrap capella payload")
}
return w, nil, s.ErrSubmitBlindedBlock
case version.Deneb:
case version.Deneb, version.Electra:
w, err := blocks.WrappedExecutionPayloadDeneb(s.PayloadDeneb)
if err != nil {
return nil, nil, errors.Wrap(err, "could not wrap deneb payload")
@@ -72,6 +73,9 @@ func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, b interfaces.
// GetHeader for mocking.
func (s *MockBuilderService) GetHeader(_ context.Context, slot primitives.Slot, _ [32]byte, _ [48]byte) (builder.SignedBid, error) {
if slots.ToEpoch(slot) >= params.BeaconConfig().ElectraForkEpoch || s.BidElectra != nil {
return builder.WrappedSignedBuilderBidElectra(s.BidElectra)
}
if slots.ToEpoch(slot) >= params.BeaconConfig().DenebForkEpoch || s.BidDeneb != nil {
return builder.WrappedSignedBuilderBidDeneb(s.BidDeneb)
}

View File

@@ -53,7 +53,7 @@ func NewAttestationCache() *AttestationCache {
//
// - For unaggregated attestations, it adds the attestation bit to attestation bits of the running aggregate,
// which is the first aggregate for the slot.
// - For aggregated attestations, it appends the attestation to the existng list of attestations for the slot.
// - For aggregated attestations, it appends the attestation to the existing list of attestations for the slot.
func (c *AttestationCache) Add(att ethpb.Att) error {
if att.IsNil() {
log.Debug("Attempted to add a nil attestation to the attestation cache")

View File

@@ -1,9 +1,10 @@
package cache
import (
"os"
"testing"
)
func TestMain(m *testing.M) {
m.Run()
os.Exit(m.Run())
}

View File

@@ -105,10 +105,9 @@ func TestProcessSlashings_NotSlashed(t *testing.T) {
}
s, err := state_native.InitializeFromProtoAltair(base)
require.NoError(t, err)
newState, err := epoch.ProcessSlashings(s, params.BeaconConfig().ProportionalSlashingMultiplierAltair)
require.NoError(t, err)
require.NoError(t, epoch.ProcessSlashings(s))
wanted := params.BeaconConfig().MaxEffectiveBalance
assert.Equal(t, wanted, newState.Balances()[0], "Unexpected slashed balance")
assert.Equal(t, wanted, s.Balances()[0], "Unexpected slashed balance")
}
func TestProcessSlashings_SlashedLess(t *testing.T) {
@@ -176,9 +175,8 @@ func TestProcessSlashings_SlashedLess(t *testing.T) {
original := proto.Clone(tt.state)
s, err := state_native.InitializeFromProtoAltair(tt.state)
require.NoError(t, err)
newState, err := epoch.ProcessSlashings(s, params.BeaconConfig().ProportionalSlashingMultiplierAltair)
require.NoError(t, err)
assert.Equal(t, tt.want, newState.Balances()[0], "ProcessSlashings({%v}) = newState; newState.Balances[0] = %d", original, newState.Balances()[0])
require.NoError(t, epoch.ProcessSlashings(s))
assert.Equal(t, tt.want, s.Balances()[0], "ProcessSlashings({%v}) = newState; newState.Balances[0] = %d", original, s.Balances()[0])
})
}
}
@@ -192,6 +190,5 @@ func TestProcessSlashings_BadValue(t *testing.T) {
}
s, err := state_native.InitializeFromProtoAltair(base)
require.NoError(t, err)
_, err = epoch.ProcessSlashings(s, params.BeaconConfig().ProportionalSlashingMultiplierAltair)
require.ErrorContains(t, "addition overflows", err)
require.ErrorContains(t, "addition overflows", epoch.ProcessSlashings(s))
}

View File

@@ -69,12 +69,7 @@ func ProcessEpoch(ctx context.Context, state state.BeaconState) error {
}
// Modified in Altair and Bellatrix.
proportionalSlashingMultiplier, err := state.ProportionalSlashingMultiplier()
if err != nil {
return err
}
state, err = e.ProcessSlashings(state, proportionalSlashingMultiplier)
if err != nil {
if err := e.ProcessSlashings(state); err != nil {
return err
}
state, err = e.ProcessEth1DataReset(state)

View File

@@ -7,12 +7,14 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
v "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/validators"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
@@ -105,293 +107,162 @@ func TestProcessAttesterSlashings_IndexedAttestationFailedToVerify(t *testing.T)
}
func TestProcessAttesterSlashings_AppliesCorrectStatus(t *testing.T) {
beaconState, privKeys := util.DeterministicGenesisState(t, 100)
for _, vv := range beaconState.Validators() {
vv.WithdrawableEpoch = primitives.Epoch(params.BeaconConfig().SlotsPerEpoch)
}
statePhase0, keysPhase0 := util.DeterministicGenesisState(t, 100)
stateAltair, keysAltair := util.DeterministicGenesisStateAltair(t, 100)
stateBellatrix, keysBellatrix := util.DeterministicGenesisStateBellatrix(t, 100)
stateCapella, keysCapella := util.DeterministicGenesisStateCapella(t, 100)
stateDeneb, keysDeneb := util.DeterministicGenesisStateDeneb(t, 100)
stateElectra, keysElectra := util.DeterministicGenesisStateElectra(t, 100)
att1 := util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
att1Phase0 := util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 1},
},
AttestingIndices: []uint64{0, 1},
})
domain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorsRoot())
require.NoError(t, err)
signingRoot, err := signing.ComputeSigningRoot(att1.Data, domain)
assert.NoError(t, err, "Could not get signing root of beacon block header")
sig0 := privKeys[0].Sign(signingRoot[:])
sig1 := privKeys[1].Sign(signingRoot[:])
aggregateSig := bls.AggregateSignatures([]bls.Signature{sig0, sig1})
att1.Signature = aggregateSig.Marshal()
att2 := util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
att2Phase0 := util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
AttestingIndices: []uint64{0, 1},
})
signingRoot, err = signing.ComputeSigningRoot(att2.Data, domain)
assert.NoError(t, err, "Could not get signing root of beacon block header")
sig0 = privKeys[0].Sign(signingRoot[:])
sig1 = privKeys[1].Sign(signingRoot[:])
aggregateSig = bls.AggregateSignatures([]bls.Signature{sig0, sig1})
att2.Signature = aggregateSig.Marshal()
slashings := []*ethpb.AttesterSlashing{
{
Attestation_1: att1,
Attestation_2: att2,
},
}
currentSlot := 2 * params.BeaconConfig().SlotsPerEpoch
require.NoError(t, beaconState.SetSlot(currentSlot))
b := util.NewBeaconBlock()
b.Block = &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
AttesterSlashings: slashings,
},
}
ss := make([]ethpb.AttSlashing, len(b.Block.Body.AttesterSlashings))
for i, s := range b.Block.Body.AttesterSlashings {
ss[i] = s
}
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, ss, v.SlashValidator)
require.NoError(t, err)
newRegistry := newState.Validators()
// Given the intersection of slashable indices is [1], only validator
// at index 1 should be slashed and exited. We confirm this below.
if newRegistry[1].ExitEpoch != beaconState.Validators()[1].ExitEpoch {
t.Errorf(
`
Expected validator at index 1's exit epoch to match
%d, received %d instead
`,
beaconState.Validators()[1].ExitEpoch,
newRegistry[1].ExitEpoch,
)
}
require.Equal(t, uint64(31750000000), newState.Balances()[1])
require.Equal(t, uint64(32000000000), newState.Balances()[2])
}
func TestProcessAttesterSlashings_AppliesCorrectStatusAltair(t *testing.T) {
beaconState, privKeys := util.DeterministicGenesisStateAltair(t, 100)
for _, vv := range beaconState.Validators() {
vv.WithdrawableEpoch = primitives.Epoch(params.BeaconConfig().SlotsPerEpoch)
}
att1 := util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
att1Electra := util.HydrateIndexedAttestationElectra(&ethpb.IndexedAttestationElectra{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 1},
},
AttestingIndices: []uint64{0, 1},
})
domain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorsRoot())
require.NoError(t, err)
signingRoot, err := signing.ComputeSigningRoot(att1.Data, domain)
assert.NoError(t, err, "Could not get signing root of beacon block header")
sig0 := privKeys[0].Sign(signingRoot[:])
sig1 := privKeys[1].Sign(signingRoot[:])
aggregateSig := bls.AggregateSignatures([]bls.Signature{sig0, sig1})
att1.Signature = aggregateSig.Marshal()
att2 := util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
att2Electra := util.HydrateIndexedAttestationElectra(&ethpb.IndexedAttestationElectra{
AttestingIndices: []uint64{0, 1},
})
signingRoot, err = signing.ComputeSigningRoot(att2.Data, domain)
assert.NoError(t, err, "Could not get signing root of beacon block header")
sig0 = privKeys[0].Sign(signingRoot[:])
sig1 = privKeys[1].Sign(signingRoot[:])
aggregateSig = bls.AggregateSignatures([]bls.Signature{sig0, sig1})
att2.Signature = aggregateSig.Marshal()
slashings := []*ethpb.AttesterSlashing{
slashingPhase0 := &ethpb.AttesterSlashing{
Attestation_1: att1Phase0,
Attestation_2: att2Phase0,
}
slashingElectra := &ethpb.AttesterSlashingElectra{
Attestation_1: att1Electra,
Attestation_2: att2Electra,
}
type testCase struct {
name string
st state.BeaconState
keys []bls.SecretKey
att1 ethpb.IndexedAtt
att2 ethpb.IndexedAtt
slashing ethpb.AttSlashing
slashedBalance uint64
}
testCases := []testCase{
{
Attestation_1: att1,
Attestation_2: att2,
name: "phase0",
st: statePhase0,
keys: keysPhase0,
att1: att1Phase0,
att2: att2Phase0,
slashing: slashingPhase0,
slashedBalance: 31750000000,
},
{
name: "altair",
st: stateAltair,
keys: keysAltair,
att1: att1Phase0,
att2: att2Phase0,
slashing: slashingPhase0,
slashedBalance: 31500000000,
},
{
name: "bellatrix",
st: stateBellatrix,
keys: keysBellatrix,
att1: att1Phase0,
att2: att2Phase0,
slashing: slashingPhase0,
slashedBalance: 31000000000,
},
{
name: "capella",
st: stateCapella,
keys: keysCapella,
att1: att1Phase0,
att2: att2Phase0,
slashing: slashingPhase0,
slashedBalance: 31000000000,
},
{
name: "deneb",
st: stateDeneb,
keys: keysDeneb,
att1: att1Phase0,
att2: att2Phase0,
slashing: slashingPhase0,
slashedBalance: 31000000000,
},
{
name: "electra",
st: stateElectra,
keys: keysElectra,
att1: att1Electra,
att2: att2Electra,
slashing: slashingElectra,
slashedBalance: 31992187500,
},
}
currentSlot := 2 * params.BeaconConfig().SlotsPerEpoch
require.NoError(t, beaconState.SetSlot(currentSlot))
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
for _, vv := range tc.st.Validators() {
vv.WithdrawableEpoch = primitives.Epoch(params.BeaconConfig().SlotsPerEpoch)
}
b := util.NewBeaconBlock()
b.Block = &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
AttesterSlashings: slashings,
},
}
domain, err := signing.Domain(tc.st.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, tc.st.GenesisValidatorsRoot())
require.NoError(t, err)
signingRoot, err := signing.ComputeSigningRoot(tc.att1.GetData(), domain)
assert.NoError(t, err, "Could not get signing root of beacon block header")
sig0 := tc.keys[0].Sign(signingRoot[:])
sig1 := tc.keys[1].Sign(signingRoot[:])
aggregateSig := bls.AggregateSignatures([]bls.Signature{sig0, sig1})
ss := make([]ethpb.AttSlashing, len(b.Block.Body.AttesterSlashings))
for i, s := range b.Block.Body.AttesterSlashings {
ss[i] = s
}
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, ss, v.SlashValidator)
require.NoError(t, err)
newRegistry := newState.Validators()
if tc.att1.Version() >= version.Electra {
tc.att1.(*ethpb.IndexedAttestationElectra).Signature = aggregateSig.Marshal()
} else {
tc.att1.(*ethpb.IndexedAttestation).Signature = aggregateSig.Marshal()
}
// Given the intersection of slashable indices is [1], only validator
// at index 1 should be slashed and exited. We confirm this below.
if newRegistry[1].ExitEpoch != beaconState.Validators()[1].ExitEpoch {
t.Errorf(
`
signingRoot, err = signing.ComputeSigningRoot(tc.att2.GetData(), domain)
assert.NoError(t, err, "Could not get signing root of beacon block header")
sig0 = tc.keys[0].Sign(signingRoot[:])
sig1 = tc.keys[1].Sign(signingRoot[:])
aggregateSig = bls.AggregateSignatures([]bls.Signature{sig0, sig1})
if tc.att2.Version() >= version.Electra {
tc.att2.(*ethpb.IndexedAttestationElectra).Signature = aggregateSig.Marshal()
} else {
tc.att2.(*ethpb.IndexedAttestation).Signature = aggregateSig.Marshal()
}
currentSlot := 2 * params.BeaconConfig().SlotsPerEpoch
require.NoError(t, tc.st.SetSlot(currentSlot))
newState, err := blocks.ProcessAttesterSlashings(context.Background(), tc.st, []ethpb.AttSlashing{tc.slashing}, v.SlashValidator)
require.NoError(t, err)
newRegistry := newState.Validators()
// Given the intersection of slashable indices is [1], only validator
// at index 1 should be slashed and exited. We confirm this below.
if newRegistry[1].ExitEpoch != tc.st.Validators()[1].ExitEpoch {
t.Errorf(
`
Expected validator at index 1's exit epoch to match
%d, received %d instead
`,
beaconState.Validators()[1].ExitEpoch,
newRegistry[1].ExitEpoch,
)
}
tc.st.Validators()[1].ExitEpoch,
newRegistry[1].ExitEpoch,
)
}
require.Equal(t, uint64(31500000000), newState.Balances()[1])
require.Equal(t, uint64(32000000000), newState.Balances()[2])
}
func TestProcessAttesterSlashings_AppliesCorrectStatusBellatrix(t *testing.T) {
beaconState, privKeys := util.DeterministicGenesisStateBellatrix(t, 100)
for _, vv := range beaconState.Validators() {
vv.WithdrawableEpoch = primitives.Epoch(params.BeaconConfig().SlotsPerEpoch)
}
att1 := util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 1},
},
AttestingIndices: []uint64{0, 1},
})
domain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorsRoot())
require.NoError(t, err)
signingRoot, err := signing.ComputeSigningRoot(att1.Data, domain)
assert.NoError(t, err, "Could not get signing root of beacon block header")
sig0 := privKeys[0].Sign(signingRoot[:])
sig1 := privKeys[1].Sign(signingRoot[:])
aggregateSig := bls.AggregateSignatures([]bls.Signature{sig0, sig1})
att1.Signature = aggregateSig.Marshal()
att2 := util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
AttestingIndices: []uint64{0, 1},
})
signingRoot, err = signing.ComputeSigningRoot(att2.Data, domain)
assert.NoError(t, err, "Could not get signing root of beacon block header")
sig0 = privKeys[0].Sign(signingRoot[:])
sig1 = privKeys[1].Sign(signingRoot[:])
aggregateSig = bls.AggregateSignatures([]bls.Signature{sig0, sig1})
att2.Signature = aggregateSig.Marshal()
slashings := []*ethpb.AttesterSlashing{
{
Attestation_1: att1,
Attestation_2: att2,
},
}
currentSlot := 2 * params.BeaconConfig().SlotsPerEpoch
require.NoError(t, beaconState.SetSlot(currentSlot))
b := util.NewBeaconBlock()
b.Block = &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
AttesterSlashings: slashings,
},
}
ss := make([]ethpb.AttSlashing, len(b.Block.Body.AttesterSlashings))
for i, s := range b.Block.Body.AttesterSlashings {
ss[i] = s
}
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, ss, v.SlashValidator)
require.NoError(t, err)
newRegistry := newState.Validators()
// Given the intersection of slashable indices is [1], only validator
// at index 1 should be slashed and exited. We confirm this below.
if newRegistry[1].ExitEpoch != beaconState.Validators()[1].ExitEpoch {
t.Errorf(
`
Expected validator at index 1's exit epoch to match
%d, received %d instead
`,
beaconState.Validators()[1].ExitEpoch,
newRegistry[1].ExitEpoch,
)
}
require.Equal(t, uint64(31000000000), newState.Balances()[1])
require.Equal(t, uint64(32000000000), newState.Balances()[2])
}
func TestProcessAttesterSlashings_AppliesCorrectStatusCapella(t *testing.T) {
beaconState, privKeys := util.DeterministicGenesisStateCapella(t, 100)
for _, vv := range beaconState.Validators() {
vv.WithdrawableEpoch = primitives.Epoch(params.BeaconConfig().SlotsPerEpoch)
}
att1 := util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 1},
},
AttestingIndices: []uint64{0, 1},
})
domain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorsRoot())
require.NoError(t, err)
signingRoot, err := signing.ComputeSigningRoot(att1.Data, domain)
assert.NoError(t, err, "Could not get signing root of beacon block header")
sig0 := privKeys[0].Sign(signingRoot[:])
sig1 := privKeys[1].Sign(signingRoot[:])
aggregateSig := bls.AggregateSignatures([]bls.Signature{sig0, sig1})
att1.Signature = aggregateSig.Marshal()
att2 := util.HydrateIndexedAttestation(&ethpb.IndexedAttestation{
AttestingIndices: []uint64{0, 1},
})
signingRoot, err = signing.ComputeSigningRoot(att2.Data, domain)
assert.NoError(t, err, "Could not get signing root of beacon block header")
sig0 = privKeys[0].Sign(signingRoot[:])
sig1 = privKeys[1].Sign(signingRoot[:])
aggregateSig = bls.AggregateSignatures([]bls.Signature{sig0, sig1})
att2.Signature = aggregateSig.Marshal()
slashings := []*ethpb.AttesterSlashing{
{
Attestation_1: att1,
Attestation_2: att2,
},
}
currentSlot := 2 * params.BeaconConfig().SlotsPerEpoch
require.NoError(t, beaconState.SetSlot(currentSlot))
b := util.NewBeaconBlock()
b.Block = &ethpb.BeaconBlock{
Body: &ethpb.BeaconBlockBody{
AttesterSlashings: slashings,
},
}
ss := make([]ethpb.AttSlashing, len(b.Block.Body.AttesterSlashings))
for i, s := range b.Block.Body.AttesterSlashings {
ss[i] = s
}
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, ss, v.SlashValidator)
require.NoError(t, err)
newRegistry := newState.Validators()
// Given the intersection of slashable indices is [1], only validator
// at index 1 should be slashed and exited. We confirm this below.
if newRegistry[1].ExitEpoch != beaconState.Validators()[1].ExitEpoch {
t.Errorf(
`
Expected validator at index 1's exit epoch to match
%d, received %d instead
`,
beaconState.Validators()[1].ExitEpoch,
newRegistry[1].ExitEpoch,
)
}
require.Equal(t, uint64(31000000000), newState.Balances()[1])
require.Equal(t, uint64(32000000000), newState.Balances()[2])
require.Equal(t, tc.slashedBalance, newState.Balances()[1])
require.Equal(t, uint64(32000000000), newState.Balances()[2])
})
}
}

View File

@@ -198,7 +198,7 @@ func NewGenesisBlockForState(ctx context.Context, st state.BeaconState) (interfa
SyncCommitteeBits: make([]byte, fieldparams.SyncCommitteeLength/8),
SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
},
ExecutionPayload: &enginev1.ExecutionPayloadElectra{
ExecutionPayload: &enginev1.ExecutionPayloadDeneb{
ParentHash: make([]byte, 32),
FeeRecipient: make([]byte, 20),
StateRoot: make([]byte, 32),

View File

@@ -1152,7 +1152,7 @@ func TestProcessWithdrawals(t *testing.T) {
}
st, err = state_native.InitializeFromProtoUnsafeElectra(spb)
require.NoError(t, err)
p, err = consensusblocks.WrappedExecutionPayloadElectra(&enginev1.ExecutionPayloadElectra{Withdrawals: test.Args.Withdrawals})
p, err = consensusblocks.WrappedExecutionPayloadDeneb(&enginev1.ExecutionPayloadDeneb{Withdrawals: test.Args.Withdrawals})
require.NoError(t, err)
default:
t.Fatalf("Add a beacon state setup for version %s", version.String(fork))

View File

@@ -8,6 +8,7 @@ go_library(
"consolidations.go",
"deposits.go",
"effective_balance_updates.go",
"error.go",
"registry_updates.go",
"transition.go",
"transition_no_verify_sig.go",
@@ -55,13 +56,16 @@ go_test(
"deposit_fuzz_test.go",
"deposits_test.go",
"effective_balance_updates_test.go",
"error_test.go",
"export_test.go",
"registry_updates_test.go",
"transition_no_verify_sig_test.go",
"transition_test.go",
"upgrade_test.go",
"validator_test.go",
"withdrawals_test.go",
],
data = glob(["testdata/**"]),
embed = [":go_default_library"],
deps = [
"//beacon-chain/core/helpers:go_default_library",
@@ -86,6 +90,7 @@ go_test(
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_google_gofuzz//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
],

View File

@@ -6,6 +6,7 @@ import (
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -129,6 +130,57 @@ func TestComputeConsolidationEpochAndUpdateChurn(t *testing.T) {
expectedEpoch: 16, // Flows into another epoch.
expectedConsolidationBalanceToConsume: 200000000000, // 200 ETH
},
{
name: "balance to consume is zero, consolidation balance at limit",
state: func(t *testing.T) state.BeaconState {
activeBal := 32000000000000000 // 32M ETH
s, err := state_native.InitializeFromProtoUnsafeElectra(&eth.BeaconStateElectra{
Slot: slots.UnsafeEpochStart(10),
EarliestConsolidationEpoch: 16,
ConsolidationBalanceToConsume: 0,
Validators: createValidatorsWithTotalActiveBalance(primitives.Gwei(activeBal)),
})
require.NoError(t, err)
return s
}(t),
consolidationBalance: helpers.ConsolidationChurnLimit(32000000000000000),
expectedEpoch: 17, // Flows into another epoch.
expectedConsolidationBalanceToConsume: 0,
},
{
name: "consolidation balance equals consolidation balance to consume",
state: func(t *testing.T) state.BeaconState {
activeBal := 32000000000000000 // 32M ETH
s, err := state_native.InitializeFromProtoUnsafeElectra(&eth.BeaconStateElectra{
Slot: slots.UnsafeEpochStart(10),
EarliestConsolidationEpoch: 16,
ConsolidationBalanceToConsume: helpers.ConsolidationChurnLimit(32000000000000000),
Validators: createValidatorsWithTotalActiveBalance(primitives.Gwei(activeBal)),
})
require.NoError(t, err)
return s
}(t),
consolidationBalance: helpers.ConsolidationChurnLimit(32000000000000000),
expectedEpoch: 16,
expectedConsolidationBalanceToConsume: 0,
},
{
name: "consolidation balance exceeds limit by one",
state: func(t *testing.T) state.BeaconState {
activeBal := 32000000000000000 // 32M ETH
s, err := state_native.InitializeFromProtoUnsafeElectra(&eth.BeaconStateElectra{
Slot: slots.UnsafeEpochStart(10),
EarliestConsolidationEpoch: 16,
ConsolidationBalanceToConsume: 0,
Validators: createValidatorsWithTotalActiveBalance(primitives.Gwei(activeBal)),
})
require.NoError(t, err)
return s
}(t),
consolidationBalance: helpers.ConsolidationChurnLimit(32000000000000000) + 1,
expectedEpoch: 18, // Flows into another epoch.
expectedConsolidationBalanceToConsume: helpers.ConsolidationChurnLimit(32000000000000000) - 1,
},
}
for _, tt := range tests {

View File

@@ -9,6 +9,7 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
@@ -184,6 +185,9 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
pcLimit := params.BeaconConfig().PendingConsolidationsLimit
for _, cr := range reqs {
if cr == nil {
return errors.New("nil consolidation request")
}
if ctx.Err() != nil {
return fmt.Errorf("cannot process consolidation requests: %w", ctx.Err())
}
@@ -233,13 +237,18 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
return fmt.Errorf("failed to fetch source validator: %w", err) // This should never happen.
}
roSrcV, err := state_native.NewValidator(srcV)
if err != nil {
return err
}
tgtV, err := st.ValidatorAtIndexReadOnly(tgtIdx)
if err != nil {
return fmt.Errorf("failed to fetch target validator: %w", err) // This should never happen.
}
// Verify source withdrawal credentials
if !helpers.HasExecutionWithdrawalCredentials(srcV) {
if !roSrcV.HasExecutionWithdrawalCredentials() {
continue
}
// Confirm source_validator.withdrawal_credentials[12:] == consolidation_request.source_address
@@ -248,7 +257,7 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
}
// Target validator must have their withdrawal credentials set appropriately.
if !helpers.HasCompoundingWithdrawalCredential(tgtV) {
if !tgtV.HasCompoundingWithdrawalCredentials() {
continue
}
@@ -256,7 +265,7 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
if !helpers.IsActiveValidator(srcV, curEpoch) || !helpers.IsActiveValidatorUsingTrie(tgtV, curEpoch) {
continue
}
// Neither validator are exiting.
// Neither validator is exiting.
if srcV.ExitEpoch != ffe || tgtV.ExitEpoch() != ffe {
continue
}
@@ -364,7 +373,7 @@ func IsValidSwitchToCompoundingRequest(st state.BeaconState, req *enginev1.Conso
return false
}
if !helpers.HasETH1WithdrawalCredential(srcV) {
if !srcV.HasETH1WithdrawalCredentials() {
return false
}

View File

@@ -209,7 +209,22 @@ func TestProcessConsolidationRequests(t *testing.T) {
state state.BeaconState
reqs []*enginev1.ConsolidationRequest
validate func(*testing.T, state.BeaconState)
wantErr bool
}{
{
name: "nil request",
state: func() state.BeaconState {
st := &eth.BeaconStateElectra{}
s, err := state_native.InitializeFromProtoElectra(st)
require.NoError(t, err)
return s
}(),
reqs: []*enginev1.ConsolidationRequest{nil},
validate: func(t *testing.T, st state.BeaconState) {
require.DeepEqual(t, st, st)
},
wantErr: true,
},
{
name: "one valid request",
state: func() state.BeaconState {
@@ -405,7 +420,13 @@ func TestProcessConsolidationRequests(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := electra.ProcessConsolidationRequests(context.TODO(), tt.state, tt.reqs)
require.NoError(t, err)
if (err != nil) != tt.wantErr {
t.Errorf("ProcessWithdrawalRequests() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !tt.wantErr {
require.NoError(t, err)
}
if tt.validate != nil {
tt.validate(t, tt.state)
}

View File

@@ -15,7 +15,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
@@ -273,7 +272,7 @@ func ProcessPendingDeposits(ctx context.Context, st state.BeaconState, activeBal
isChurnLimitReached := false
var pendingDepositsToBatchVerify []*ethpb.PendingDeposit
var pendingDepositsToPostpone []*eth.PendingDeposit
var pendingDepositsToPostpone []*ethpb.PendingDeposit
depBalToConsume, err := st.DepositBalanceToConsume()
if err != nil {
@@ -386,14 +385,8 @@ func batchProcessNewPendingDeposits(ctx context.Context, state state.BeaconState
return errors.Wrap(err, "batch signature verification failed")
}
pubKeyMap := make(map[[48]byte]struct{}, len(pendingDeposits))
// Process each deposit individually
for _, pendingDeposit := range pendingDeposits {
_, found := pubKeyMap[bytesutil.ToBytes48(pendingDeposit.PublicKey)]
if !found {
pubKeyMap[bytesutil.ToBytes48(pendingDeposit.PublicKey)] = struct{}{}
}
validSignature := allSignaturesVerified
// If batch verification failed, check the individual deposit signature
@@ -411,7 +404,8 @@ func batchProcessNewPendingDeposits(ctx context.Context, state state.BeaconState
// Add validator to the registry if the signature is valid
if validSignature {
if found {
_, has := state.ValidatorIndexByPubkey(bytesutil.ToBytes48(pendingDeposit.PublicKey))
if has {
index, _ := state.ValidatorIndexByPubkey(bytesutil.ToBytes48(pendingDeposit.PublicKey))
if err := helpers.IncreaseBalance(state, index, pendingDeposit.Amount); err != nil {
return errors.Wrap(err, "could not increase balance")
@@ -593,10 +587,10 @@ func processDepositRequest(beaconState state.BeaconState, request *enginev1.Depo
if err != nil {
return nil, errors.Wrap(err, "could not get deposit requests start index")
}
if request == nil {
return nil, errors.New("nil deposit request")
}
if requestsStartIndex == params.BeaconConfig().UnsetDepositRequestsStartIndex {
if request == nil {
return nil, errors.New("nil deposit request")
}
if err := beaconState.SetDepositRequestsStartIndex(request.Index); err != nil {
return nil, errors.Wrap(err, "could not set deposit requests start index")
}
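
The deposit-processing hunk above switches to looking the deposit's public key up in the state: when a validator index already exists for it, the deposit amount is credited to that validator's balance. A minimal sketch of that branch, using only calls that appear in the diff (the helper name is illustrative):

package example

import (
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

// topUpIfKnown credits the deposit amount to an existing validator identified
// by the deposit's public key and reports whether such a validator was found;
// when it is not found, the caller is expected to add a new registry entry.
func topUpIfKnown(st state.BeaconState, dep *ethpb.PendingDeposit) (bool, error) {
	index, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(dep.PublicKey))
	if !ok {
		return false, nil
	}
	if err := helpers.IncreaseBalance(st, index, dep.Amount); err != nil {
		return false, err
	}
	return true, nil
}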

View File

@@ -333,6 +333,7 @@ func TestProcessDepositRequests(t *testing.T) {
st, _ := util.DeterministicGenesisStateElectra(t, 1)
sk, err := bls.RandKey()
require.NoError(t, err)
require.NoError(t, st.SetDepositRequestsStartIndex(1))
t.Run("empty requests continues", func(t *testing.T) {
newSt, err := electra.ProcessDepositRequests(context.Background(), st, []*enginev1.DepositRequest{})

View File

@@ -3,7 +3,6 @@ package electra
import (
"fmt"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/params"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -40,7 +39,7 @@ func ProcessEffectiveBalanceUpdates(st state.BeaconState) error {
// Update effective balances with hysteresis.
validatorFunc := func(idx int, val state.ReadOnlyValidator) (newVal *ethpb.Validator, err error) {
if val == nil {
if val.IsNil() {
return nil, fmt.Errorf("validator %d is nil in state", idx)
}
if idx >= len(bals) {
@@ -49,7 +48,7 @@ func ProcessEffectiveBalanceUpdates(st state.BeaconState) error {
balance := bals[idx]
effectiveBalanceLimit := params.BeaconConfig().MinActivationBalance
if helpers.HasCompoundingWithdrawalCredential(val) {
if val.HasCompoundingWithdrawalCredentials() {
effectiveBalanceLimit = params.BeaconConfig().MaxEffectiveBalanceElectra
}

View File

@@ -77,7 +77,7 @@ func TestProcessEffectiveBalnceUpdates(t *testing.T) {
Validators: []*eth.Validator{
{
EffectiveBalance: params.BeaconConfig().MinActivationBalance / 2,
WithdrawalCredentials: nil,
WithdrawalCredentials: make([]byte, 32),
},
},
Balances: []uint64{

View File

@@ -0,0 +1,16 @@
package electra
import "github.com/pkg/errors"
type execReqErr struct {
error
}
// IsExecutionRequestError returns true if the error is, or wraps, an `execReqErr`.
func IsExecutionRequestError(e error) bool {
if e == nil {
return false
}
var d execReqErr
return errors.As(e, &d)
}

View File

@@ -0,0 +1,45 @@
package electra
import (
"testing"
"github.com/pkg/errors"
)
func TestIsExecutionRequestError(t *testing.T) {
tests := []struct {
name string
err error
want bool
}{
{
name: "nil error",
err: nil,
want: false,
},
{
name: "random error",
err: errors.New("some error"),
want: false,
},
{
name: "execution request error",
err: execReqErr{errors.New("execution request failed")},
want: true,
},
{
name: "wrapped execution request error",
err: errors.Wrap(execReqErr{errors.New("execution request failed")}, "wrapped"),
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := IsExecutionRequestError(tt.err)
if got != tt.want {
t.Errorf("IsExecutionRequestError(%v) = %v, want %v", tt.err, got, tt.want)
}
})
}
}

View File

@@ -18,23 +18,24 @@ import (
//
// Spec pseudocode definition:
//
// def process_registry_updates(state: BeaconState) -> None:
// # Process activation eligibility and ejections
// for index, validator in enumerate(state.validators):
// if is_eligible_for_activation_queue(validator):
// validator.activation_eligibility_epoch = get_current_epoch(state) + 1
// def process_registry_updates(state: BeaconState) -> None:
// # Process activation eligibility and ejections
// for index, validator in enumerate(state.validators):
// if is_eligible_for_activation_queue(validator): # [Modified in Electra:EIP7251]
// validator.activation_eligibility_epoch = get_current_epoch(state) + 1
//
// if (
// is_active_validator(validator, get_current_epoch(state))
// and validator.effective_balance <= EJECTION_BALANCE
// ):
// initiate_validator_exit(state, ValidatorIndex(index))
// if (
// is_active_validator(validator, get_current_epoch(state))
// and validator.effective_balance <= EJECTION_BALANCE
// ):
// initiate_validator_exit(state, ValidatorIndex(index)) # [Modified in Electra:EIP7251]
//
// # Activate all eligible validators
// activation_epoch = compute_activation_exit_epoch(get_current_epoch(state))
// for validator in state.validators:
// if is_eligible_for_activation(state, validator):
// validator.activation_epoch = activation_epoch
// # Activate all eligible validators
// # [Modified in Electra:EIP7251]
// activation_epoch = compute_activation_exit_epoch(get_current_epoch(state))
// for validator in state.validators:
// if is_eligible_for_activation(state, validator):
// validator.activation_epoch = activation_epoch
func ProcessRegistryUpdates(ctx context.Context, st state.BeaconState) error {
currentEpoch := time.CurrentEpoch(st)
ejectionBal := params.BeaconConfig().EjectionBalance
@@ -90,8 +91,8 @@ func ProcessRegistryUpdates(ctx context.Context, st state.BeaconState) error {
}
}
// Activate all eligible validators.
for _, idx := range eligibleForActivation {
// Activate all eligible validators.
v, err := st.ValidatorAtIndex(idx)
if err != nil {
return err

View File

@@ -40,14 +40,17 @@ var (
// process_justification_and_finalization(state)
// process_inactivity_updates(state)
// process_rewards_and_penalties(state)
// process_registry_updates(state)
// process_slashings(state)
// process_registry_updates(state) # [Modified in Electra:EIP7251]
// process_slashings(state) # [Modified in Electra:EIP7251]
// process_eth1_data_reset(state)
// process_pending_deposits(state) # New in EIP7251
// process_pending_consolidations(state) # New in EIP7251
// process_effective_balance_updates(state)
// process_pending_deposits(state) # [New in Electra:EIP7251]
// process_pending_consolidations(state) # [New in Electra:EIP7251]
// process_effective_balance_updates(state) # [Modified in Electra:EIP7251]
// process_slashings_reset(state)
// process_randao_mixes_reset(state)
// process_historical_summaries_update(state)
// process_participation_flag_updates(state)
// process_sync_committee_updates(state)
func ProcessEpoch(ctx context.Context, state state.BeaconState) error {
_, span := trace.StartSpan(ctx, "electra.ProcessEpoch")
defer span.End()
@@ -75,24 +78,16 @@ func ProcessEpoch(ctx context.Context, state state.BeaconState) error {
if err != nil {
return errors.Wrap(err, "could not process rewards and penalties")
}
if err := ProcessRegistryUpdates(ctx, state); err != nil {
return errors.Wrap(err, "could not process registry updates")
}
proportionalSlashingMultiplier, err := state.ProportionalSlashingMultiplier()
if err != nil {
return err
}
state, err = ProcessSlashings(state, proportionalSlashingMultiplier)
if err != nil {
if err := ProcessSlashings(state); err != nil {
return err
}
state, err = ProcessEth1DataReset(state)
if err != nil {
return err
}
if err = ProcessPendingDeposits(ctx, state, primitives.Gwei(bp.ActiveCurrentEpoch)); err != nil {
return err
}
@@ -102,7 +97,6 @@ func ProcessEpoch(ctx context.Context, state state.BeaconState) error {
if err = ProcessEffectiveBalanceUpdates(state); err != nil {
return err
}
state, err = ProcessSlashingsReset(state)
if err != nil {
return err
@@ -115,17 +109,14 @@ func ProcessEpoch(ctx context.Context, state state.BeaconState) error {
if err != nil {
return err
}
state, err = ProcessParticipationFlagUpdates(state)
if err != nil {
return err
}
_, err = ProcessSyncCommitteeUpdates(ctx, state)
if err != nil {
return err
}
return nil
}

View File

@@ -2,7 +2,6 @@ package electra
import (
"context"
"fmt"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
@@ -82,16 +81,31 @@ func ProcessOperations(
if err != nil {
return nil, errors.Wrap(err, "could not get execution requests")
}
for _, d := range requests.Deposits {
if d == nil {
return nil, errors.New("nil deposit request")
}
}
st, err = ProcessDepositRequests(ctx, st, requests.Deposits)
if err != nil {
return nil, errors.Wrap(err, "could not process deposit requests")
return nil, execReqErr{errors.Wrap(err, "could not process deposit requests")}
}
for _, w := range requests.Withdrawals {
if w == nil {
return nil, errors.New("nil withdrawal request")
}
}
st, err = ProcessWithdrawalRequests(ctx, st, requests.Withdrawals)
if err != nil {
return nil, errors.Wrap(err, "could not process withdrawal requests")
return nil, execReqErr{errors.Wrap(err, "could not process withdrawal requests")}
}
for _, c := range requests.Consolidations {
if c == nil {
return nil, errors.New("nil consolidation request")
}
}
if err := ProcessConsolidationRequests(ctx, st, requests.Consolidations); err != nil {
return nil, fmt.Errorf("could not process consolidation requests: %w", err)
return nil, execReqErr{errors.Wrap(err, "could not process consolidation requests")}
}
return st, nil
}
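
ProcessOperations now wraps failures from deposit, withdrawal, and consolidation request processing in execReqErr, so callers can tell execution-request failures apart from other state-transition errors via IsExecutionRequestError. A hedged caller-side sketch (the helper name is hypothetical, not part of this change):

package example

import (
	"log"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
)

// classifyOperationsError shows how a caller of ProcessOperations might branch
// on the error class: execution-request failures are logged distinctly before
// being returned, everything else passes through unchanged.
func classifyOperationsError(err error) error {
	if err == nil {
		return nil
	}
	if electra.IsExecutionRequestError(err) {
		// The failure originated in deposit, withdrawal, or consolidation
		// request processing rather than in another part of the transition.
		log.Printf("execution request error: %v", err)
	}
	return err
}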

View File

@@ -0,0 +1,61 @@
package electra_test
import (
"context"
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
)
func TestProcessOperationsWithNilRequests(t *testing.T) {
tests := []struct {
name string
modifyBlk func(blockElectra *ethpb.SignedBeaconBlockElectra)
errMsg string
}{
{
name: "Nil deposit request",
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
blk.Block.Body.ExecutionRequests.Deposits = []*enginev1.DepositRequest{nil}
},
errMsg: "nil deposit request",
},
{
name: "Nil withdrawal request",
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
blk.Block.Body.ExecutionRequests.Withdrawals = []*enginev1.WithdrawalRequest{nil}
},
errMsg: "nil withdrawal request",
},
{
name: "Nil consolidation request",
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
blk.Block.Body.ExecutionRequests.Consolidations = []*enginev1.ConsolidationRequest{nil}
},
errMsg: "nil consolidation request",
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
st, ks := util.DeterministicGenesisStateElectra(t, 128)
blk, err := util.GenerateFullBlockElectra(st, ks, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
tc.modifyBlk(blk)
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, st.SetSlot(1))
_, err = electra.ProcessOperations(context.Background(), st, b.Block())
require.ErrorContains(t, tc.errMsg, err)
})
}
}

View File

@@ -194,7 +194,7 @@ func UpgradeToElectra(beaconState state.BeaconState) (state.BeaconState, error)
if val.ActivationEpoch() == params.BeaconConfig().FarFutureEpoch {
preActivationIndices = append(preActivationIndices, primitives.ValidatorIndex(index))
}
if helpers.HasCompoundingWithdrawalCredential(val) {
if val.HasCompoundingWithdrawalCredentials() {
compoundWithdrawalIndices = append(compoundWithdrawalIndices, primitives.ValidatorIndex(index))
}
return nil
@@ -240,7 +240,7 @@ func UpgradeToElectra(beaconState state.BeaconState) (state.BeaconState, error)
InactivityScores: inactivityScores,
CurrentSyncCommittee: currentSyncCommittee,
NextSyncCommittee: nextSyncCommittee,
LatestExecutionPayloadHeader: &enginev1.ExecutionPayloadHeaderElectra{
LatestExecutionPayloadHeader: &enginev1.ExecutionPayloadHeaderDeneb{
ParentHash: payloadHeader.ParentHash(),
FeeRecipient: payloadHeader.FeeRecipient(),
StateRoot: payloadHeader.StateRoot(),

View File

@@ -102,7 +102,7 @@ func TestUpgradeToElectra(t *testing.T) {
header, err := mSt.LatestExecutionPayloadHeader()
require.NoError(t, err)
protoHeader, ok := header.Proto().(*enginev1.ExecutionPayloadHeaderElectra)
protoHeader, ok := header.Proto().(*enginev1.ExecutionPayloadHeaderDeneb)
require.Equal(t, true, ok)
prevHeader, err := preForkState.LatestExecutionPayloadHeader()
require.NoError(t, err)
@@ -111,7 +111,7 @@ func TestUpgradeToElectra(t *testing.T) {
wdRoot, err := prevHeader.WithdrawalsRoot()
require.NoError(t, err)
wanted := &enginev1.ExecutionPayloadHeaderElectra{
wanted := &enginev1.ExecutionPayloadHeaderDeneb{
ParentHash: prevHeader.ParentHash(),
FeeRecipient: prevHeader.FeeRecipient(),
StateRoot: prevHeader.StateRoot(),

View File

@@ -116,7 +116,7 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
return nil, err
}
// Verify withdrawal credentials
hasCorrectCredential := helpers.HasExecutionWithdrawalCredentials(validator)
hasCorrectCredential := validator.HasExecutionWithdrawalCredentials()
wc := validator.GetWithdrawalCredentials()
isCorrectSourceAddress := bytes.Equal(wc[12:], wr.SourceAddress)
if !hasCorrectCredential || !isCorrectSourceAddress {
@@ -165,7 +165,7 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
hasExcessBalance := vBal > params.BeaconConfig().MinActivationBalance+pendingBalanceToWithdraw
// Only allow partial withdrawals with compounding withdrawal credentials
if helpers.HasCompoundingWithdrawalCredential(validator) && hasSufficientEffectiveBalance && hasExcessBalance {
if validator.HasCompoundingWithdrawalCredentials() && hasSufficientEffectiveBalance && hasExcessBalance {
// Spec definition:
// to_withdraw = min(
// state.balances[index] - MIN_ACTIVATION_BALANCE - pending_balance_to_withdraw,

View File

@@ -38,6 +38,17 @@ func TestProcessWithdrawRequests(t *testing.T) {
wantFn func(t *testing.T, got state.BeaconState)
wantErr bool
}{
{
name: "nil request",
args: args{
st: func() state.BeaconState { return st }(),
wrs: []*enginev1.WithdrawalRequest{nil},
},
wantErr: true,
wantFn: func(t *testing.T, got state.BeaconState) {
require.DeepEqual(t, got, nil)
},
},
{
name: "happy path exit and withdrawal only",
args: args{

View File

@@ -141,29 +141,76 @@ func ProcessRegistryUpdates(ctx context.Context, st state.BeaconState) (state.Be
return st, nil
}
// ProcessSlashings processes the slashed validators during epoch processing,
// ProcessSlashings processes the slashed validators during epoch processing. This is a state mutating method.
//
// Electra spec definition:
//
// def process_slashings(state: BeaconState) -> None:
// epoch = get_current_epoch(state)
// total_balance = get_total_active_balance(state)
// adjusted_total_slashing_balance = min(sum(state.slashings) * PROPORTIONAL_SLASHING_MULTIPLIER, total_balance)
// if state.version == electra:
// increment = EFFECTIVE_BALANCE_INCREMENT # Factored out from total balance to avoid uint64 overflow
// penalty_per_effective_balance_increment = adjusted_total_slashing_balance // (total_balance // increment)
// for index, validator in enumerate(state.validators):
// if validator.slashed and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch:
// increment = EFFECTIVE_BALANCE_INCREMENT # Factored out from penalty numerator to avoid uint64 overflow
// penalty_numerator = validator.effective_balance // increment * adjusted_total_slashing_balance
// penalty = penalty_numerator // total_balance * increment
// if state.version == electra:
// epoch = get_current_epoch(state)
// total_balance = get_total_active_balance(state)
// adjusted_total_slashing_balance = min(
// sum(state.slashings) * PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX,
// total_balance
// )
// increment = EFFECTIVE_BALANCE_INCREMENT # Factored out from total balance to avoid uint64 overflow
// penalty_per_effective_balance_increment = adjusted_total_slashing_balance // (total_balance // increment)
// for index, validator in enumerate(state.validators):
// if validator.slashed and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch:
// effective_balance_increments = validator.effective_balance // increment
// # [Modified in Electra:EIP7251]
// penalty = penalty_per_effective_balance_increment * effective_balance_increments
// decrease_balance(state, ValidatorIndex(index), penalty)
func ProcessSlashings(st state.BeaconState, slashingMultiplier uint64) (state.BeaconState, error) {
// decrease_balance(state, ValidatorIndex(index), penalty)
//
// Bellatrix spec definition:
//
// def process_slashings(state: BeaconState) -> None:
// epoch = get_current_epoch(state)
// total_balance = get_total_active_balance(state)
// adjusted_total_slashing_balance = min(
// sum(state.slashings) * PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX, # [Modified in Bellatrix]
// total_balance
// )
// for index, validator in enumerate(state.validators):
// if validator.slashed and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch:
// increment = EFFECTIVE_BALANCE_INCREMENT # Factored out from penalty numerator to avoid uint64 overflow
// penalty_numerator = validator.effective_balance // increment * adjusted_total_slashing_balance
// penalty = penalty_numerator // total_balance * increment
// decrease_balance(state, ValidatorIndex(index), penalty)
//
// Altair spec definition:
//
// def process_slashings(state: BeaconState) -> None:
// epoch = get_current_epoch(state)
// total_balance = get_total_active_balance(state)
// adjusted_total_slashing_balance = min(sum(state.slashings) * PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR, total_balance)
// for index, validator in enumerate(state.validators):
// if validator.slashed and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch:
// increment = EFFECTIVE_BALANCE_INCREMENT # Factored out from penalty numerator to avoid uint64 overflow
// penalty_numerator = validator.effective_balance // increment * adjusted_total_slashing_balance
// penalty = penalty_numerator // total_balance * increment
// decrease_balance(state, ValidatorIndex(index), penalty)
//
// Phase0 spec definition:
//
// def process_slashings(state: BeaconState) -> None:
// epoch = get_current_epoch(state)
// total_balance = get_total_active_balance(state)
// adjusted_total_slashing_balance = min(sum(state.slashings) * PROPORTIONAL_SLASHING_MULTIPLIER, total_balance)
// for index, validator in enumerate(state.validators):
// if validator.slashed and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch:
// increment = EFFECTIVE_BALANCE_INCREMENT # Factored out from penalty numerator to avoid uint64 overflow
// penalty_numerator = validator.effective_balance // increment * adjusted_total_slashing_balance
// penalty = penalty_numerator // total_balance * increment
// decrease_balance(state, ValidatorIndex(index), penalty)
func ProcessSlashings(st state.BeaconState) error {
slashingMultiplier, err := st.ProportionalSlashingMultiplier()
if err != nil {
return errors.Wrap(err, "could not get proportional slashing multiplier")
}
currentEpoch := time.CurrentEpoch(st)
totalBalance, err := helpers.TotalActiveBalance(st)
if err != nil {
return nil, errors.Wrap(err, "could not get total active balance")
return errors.Wrap(err, "could not get total active balance")
}
// Compute slashed balances in the current epoch
@@ -175,7 +222,7 @@ func ProcessSlashings(st state.BeaconState, slashingMultiplier uint64) (state.Be
for _, slashing := range slashings {
totalSlashing, err = math.Add64(totalSlashing, slashing)
if err != nil {
return nil, err
return err
}
}
@@ -209,14 +256,14 @@ func ProcessSlashings(st state.BeaconState, slashingMultiplier uint64) (state.Be
return nil
})
if err != nil {
return nil, err
return err
}
if changed {
if err := st.SetBalances(bals); err != nil {
return nil, err
return err
}
}
return st, nil
return nil
}
// ProcessEth1DataReset processes updates to ETH1 data votes during epoch processing.
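
The Electra rule quoted in the doc comment above factors the effective-balance increment out of the total balance, so the per-validator penalty becomes a single multiplication. A minimal arithmetic sketch of that rule (inputs in Gwei, parameter names illustrative):

package example

// electraSlashingPenalty computes the penalty for one slashed validator per
// the Electra rule above: divide the adjusted total slashing balance by the
// number of effective-balance increments in the total active balance, then
// multiply by the validator's own effective-balance increments.
func electraSlashingPenalty(effectiveBalance, adjustedTotalSlashingBalance, totalBalance, increment uint64) uint64 {
	penaltyPerIncrement := adjustedTotalSlashingBalance / (totalBalance / increment)
	effectiveBalanceIncrements := effectiveBalance / increment
	return penaltyPerIncrement * effectiveBalanceIncrements
}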

View File

@@ -32,10 +32,9 @@ func TestProcessSlashings_NotSlashed(t *testing.T) {
}
s, err := state_native.InitializeFromProtoPhase0(base)
require.NoError(t, err)
newState, err := epoch.ProcessSlashings(s, params.BeaconConfig().ProportionalSlashingMultiplier)
require.NoError(t, err)
require.NoError(t, epoch.ProcessSlashings(s))
wanted := params.BeaconConfig().MaxEffectiveBalance
assert.Equal(t, wanted, newState.Balances()[0], "Unexpected slashed balance")
assert.Equal(t, wanted, s.Balances()[0], "Unexpected slashed balance")
}
func TestProcessSlashings_SlashedLess(t *testing.T) {
@@ -111,9 +110,8 @@ func TestProcessSlashings_SlashedLess(t *testing.T) {
s, err := state_native.InitializeFromProtoPhase0(tt.state)
require.NoError(t, err)
helpers.ClearCache()
newState, err := epoch.ProcessSlashings(s, params.BeaconConfig().ProportionalSlashingMultiplier)
require.NoError(t, err)
assert.Equal(t, tt.want, newState.Balances()[0], "ProcessSlashings({%v}) = newState; newState.Balances[0] = %d", original, newState.Balances()[0])
require.NoError(t, epoch.ProcessSlashings(s))
assert.Equal(t, tt.want, s.Balances()[0], "ProcessSlashings({%v}) = newState; newState.Balances[0] = %d", original, s.Balances()[0])
})
}
}
@@ -365,8 +363,7 @@ func TestProcessSlashings_BadValue(t *testing.T) {
}
s, err := state_native.InitializeFromProtoPhase0(base)
require.NoError(t, err)
_, err = epoch.ProcessSlashings(s, params.BeaconConfig().ProportionalSlashingMultiplier)
require.ErrorContains(t, "addition overflows", err)
require.ErrorContains(t, "addition overflows", epoch.ProcessSlashings(s))
}
func TestProcessHistoricalDataUpdate(t *testing.T) {
@@ -514,9 +511,8 @@ func TestProcessSlashings_SlashedElectra(t *testing.T) {
s, err := state_native.InitializeFromProtoElectra(tt.state)
require.NoError(t, err)
helpers.ClearCache()
newState, err := epoch.ProcessSlashings(s, params.BeaconConfig().ProportionalSlashingMultiplierBellatrix)
require.NoError(t, err)
assert.Equal(t, tt.want, newState.Balances()[0], "ProcessSlashings({%v}) = newState; newState.Balances[0] = %d", original, newState.Balances()[0])
require.NoError(t, epoch.ProcessSlashings(s))
assert.Equal(t, tt.want, s.Balances()[0], "ProcessSlashings({%v}); s.Balances[0] = %d", original, s.Balances()[0])
})
}
}

View File

@@ -0,0 +1,32 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["upgrade.go"],
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/fulu",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/params:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["upgrade_test.go"],
deps = [
":go_default_library",
"//beacon-chain/core/time:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
],
)

View File

@@ -0,0 +1,175 @@
package fulu
import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/config/params"
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
// UpgradeToFulu upgrades a generic pre-fork state and returns the corresponding Fulu state.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/fork.md#upgrading-the-state
func UpgradeToFulu(beaconState state.BeaconState) (state.BeaconState, error) {
currentSyncCommittee, err := beaconState.CurrentSyncCommittee()
if err != nil {
return nil, err
}
nextSyncCommittee, err := beaconState.NextSyncCommittee()
if err != nil {
return nil, err
}
prevEpochParticipation, err := beaconState.PreviousEpochParticipation()
if err != nil {
return nil, err
}
currentEpochParticipation, err := beaconState.CurrentEpochParticipation()
if err != nil {
return nil, err
}
inactivityScores, err := beaconState.InactivityScores()
if err != nil {
return nil, err
}
payloadHeader, err := beaconState.LatestExecutionPayloadHeader()
if err != nil {
return nil, err
}
txRoot, err := payloadHeader.TransactionsRoot()
if err != nil {
return nil, err
}
wdRoot, err := payloadHeader.WithdrawalsRoot()
if err != nil {
return nil, err
}
wi, err := beaconState.NextWithdrawalIndex()
if err != nil {
return nil, err
}
vi, err := beaconState.NextWithdrawalValidatorIndex()
if err != nil {
return nil, err
}
summaries, err := beaconState.HistoricalSummaries()
if err != nil {
return nil, err
}
historicalRoots, err := beaconState.HistoricalRoots()
if err != nil {
return nil, err
}
excessBlobGas, err := payloadHeader.ExcessBlobGas()
if err != nil {
return nil, err
}
blobGasUsed, err := payloadHeader.BlobGasUsed()
if err != nil {
return nil, err
}
depositBalanceToConsume, err := beaconState.DepositBalanceToConsume()
if err != nil {
return nil, err
}
exitBalanceToConsume, err := beaconState.ExitBalanceToConsume()
if err != nil {
return nil, err
}
earliestExitEpoch, err := beaconState.EarliestExitEpoch()
if err != nil {
return nil, err
}
consolidationBalanceToConsume, err := beaconState.ConsolidationBalanceToConsume()
if err != nil {
return nil, err
}
earliestConsolidationEpoch, err := beaconState.EarliestConsolidationEpoch()
if err != nil {
return nil, err
}
pendingDeposits, err := beaconState.PendingDeposits()
if err != nil {
return nil, err
}
pendingPartialWithdrawals, err := beaconState.PendingPartialWithdrawals()
if err != nil {
return nil, err
}
pendingConsolidations, err := beaconState.PendingConsolidations()
if err != nil {
return nil, err
}
s := &ethpb.BeaconStateFulu{
GenesisTime: beaconState.GenesisTime(),
GenesisValidatorsRoot: beaconState.GenesisValidatorsRoot(),
Slot: beaconState.Slot(),
Fork: &ethpb.Fork{
PreviousVersion: beaconState.Fork().CurrentVersion,
CurrentVersion: params.BeaconConfig().FuluForkVersion,
Epoch: time.CurrentEpoch(beaconState),
},
LatestBlockHeader: beaconState.LatestBlockHeader(),
BlockRoots: beaconState.BlockRoots(),
StateRoots: beaconState.StateRoots(),
HistoricalRoots: historicalRoots,
Eth1Data: beaconState.Eth1Data(),
Eth1DataVotes: beaconState.Eth1DataVotes(),
Eth1DepositIndex: beaconState.Eth1DepositIndex(),
Validators: beaconState.Validators(),
Balances: beaconState.Balances(),
RandaoMixes: beaconState.RandaoMixes(),
Slashings: beaconState.Slashings(),
PreviousEpochParticipation: prevEpochParticipation,
CurrentEpochParticipation: currentEpochParticipation,
JustificationBits: beaconState.JustificationBits(),
PreviousJustifiedCheckpoint: beaconState.PreviousJustifiedCheckpoint(),
CurrentJustifiedCheckpoint: beaconState.CurrentJustifiedCheckpoint(),
FinalizedCheckpoint: beaconState.FinalizedCheckpoint(),
InactivityScores: inactivityScores,
CurrentSyncCommittee: currentSyncCommittee,
NextSyncCommittee: nextSyncCommittee,
LatestExecutionPayloadHeader: &enginev1.ExecutionPayloadHeaderDeneb{
ParentHash: payloadHeader.ParentHash(),
FeeRecipient: payloadHeader.FeeRecipient(),
StateRoot: payloadHeader.StateRoot(),
ReceiptsRoot: payloadHeader.ReceiptsRoot(),
LogsBloom: payloadHeader.LogsBloom(),
PrevRandao: payloadHeader.PrevRandao(),
BlockNumber: payloadHeader.BlockNumber(),
GasLimit: payloadHeader.GasLimit(),
GasUsed: payloadHeader.GasUsed(),
Timestamp: payloadHeader.Timestamp(),
ExtraData: payloadHeader.ExtraData(),
BaseFeePerGas: payloadHeader.BaseFeePerGas(),
BlockHash: payloadHeader.BlockHash(),
TransactionsRoot: txRoot,
WithdrawalsRoot: wdRoot,
ExcessBlobGas: excessBlobGas,
BlobGasUsed: blobGasUsed,
},
NextWithdrawalIndex: wi,
NextWithdrawalValidatorIndex: vi,
HistoricalSummaries: summaries,
DepositRequestsStartIndex: params.BeaconConfig().UnsetDepositRequestsStartIndex,
DepositBalanceToConsume: depositBalanceToConsume,
ExitBalanceToConsume: exitBalanceToConsume,
EarliestExitEpoch: earliestExitEpoch,
ConsolidationBalanceToConsume: consolidationBalanceToConsume,
EarliestConsolidationEpoch: earliestConsolidationEpoch,
PendingDeposits: pendingDeposits,
PendingPartialWithdrawals: pendingPartialWithdrawals,
PendingConsolidations: pendingConsolidations,
}
// Initialize the native Fulu state from the proto representation so it can be used with the state helpers.
post, err := state_native.InitializeFromProtoUnsafeFulu(s)
if err != nil {
return nil, errors.Wrap(err, "failed to initialize post fulu beaconState")
}
return post, nil
}

View File

@@ -0,0 +1,185 @@
package fulu_test
import (
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/fulu"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
)
func TestUpgradeToFulu(t *testing.T) {
st, _ := util.DeterministicGenesisStateElectra(t, params.BeaconConfig().MaxValidatorsPerCommittee)
require.NoError(t, st.SetHistoricalRoots([][]byte{{1}}))
vals := st.Validators()
vals[0].ActivationEpoch = params.BeaconConfig().FarFutureEpoch
vals[1].WithdrawalCredentials = []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte}
require.NoError(t, st.SetValidators(vals))
bals := st.Balances()
bals[1] = params.BeaconConfig().MinActivationBalance + 1000
require.NoError(t, st.SetBalances(bals))
preForkState := st.Copy()
mSt, err := fulu.UpgradeToFulu(st)
require.NoError(t, err)
require.Equal(t, preForkState.GenesisTime(), mSt.GenesisTime())
require.DeepSSZEqual(t, preForkState.GenesisValidatorsRoot(), mSt.GenesisValidatorsRoot())
require.Equal(t, preForkState.Slot(), mSt.Slot())
f := mSt.Fork()
require.DeepSSZEqual(t, &ethpb.Fork{
PreviousVersion: st.Fork().CurrentVersion,
CurrentVersion: params.BeaconConfig().FuluForkVersion,
Epoch: time.CurrentEpoch(st),
}, f)
require.DeepSSZEqual(t, preForkState.LatestBlockHeader(), mSt.LatestBlockHeader())
require.DeepSSZEqual(t, preForkState.BlockRoots(), mSt.BlockRoots())
require.DeepSSZEqual(t, preForkState.StateRoots(), mSt.StateRoots())
hr1, err := preForkState.HistoricalRoots()
require.NoError(t, err)
hr2, err := mSt.HistoricalRoots()
require.NoError(t, err)
require.DeepEqual(t, hr1, hr2)
require.DeepSSZEqual(t, preForkState.Eth1Data(), mSt.Eth1Data())
require.DeepSSZEqual(t, preForkState.Eth1DataVotes(), mSt.Eth1DataVotes())
require.DeepSSZEqual(t, preForkState.Eth1DepositIndex(), mSt.Eth1DepositIndex())
require.DeepSSZEqual(t, preForkState.Validators(), mSt.Validators())
require.DeepSSZEqual(t, preForkState.Balances(), mSt.Balances())
require.DeepSSZEqual(t, preForkState.RandaoMixes(), mSt.RandaoMixes())
require.DeepSSZEqual(t, preForkState.Slashings(), mSt.Slashings())
numValidators := mSt.NumValidators()
p, err := mSt.PreviousEpochParticipation()
require.NoError(t, err)
require.DeepSSZEqual(t, make([]byte, numValidators), p)
p, err = mSt.CurrentEpochParticipation()
require.NoError(t, err)
require.DeepSSZEqual(t, make([]byte, numValidators), p)
require.DeepSSZEqual(t, preForkState.JustificationBits(), mSt.JustificationBits())
require.DeepSSZEqual(t, preForkState.PreviousJustifiedCheckpoint(), mSt.PreviousJustifiedCheckpoint())
require.DeepSSZEqual(t, preForkState.CurrentJustifiedCheckpoint(), mSt.CurrentJustifiedCheckpoint())
require.DeepSSZEqual(t, preForkState.FinalizedCheckpoint(), mSt.FinalizedCheckpoint())
s, err := mSt.InactivityScores()
require.NoError(t, err)
require.DeepSSZEqual(t, make([]uint64, numValidators), s)
csc, err := mSt.CurrentSyncCommittee()
require.NoError(t, err)
psc, err := preForkState.CurrentSyncCommittee()
require.NoError(t, err)
require.DeepSSZEqual(t, psc, csc)
nsc, err := mSt.NextSyncCommittee()
require.NoError(t, err)
psc, err = preForkState.NextSyncCommittee()
require.NoError(t, err)
require.DeepSSZEqual(t, psc, nsc)
header, err := mSt.LatestExecutionPayloadHeader()
require.NoError(t, err)
protoHeader, ok := header.Proto().(*enginev1.ExecutionPayloadHeaderDeneb)
require.Equal(t, true, ok)
prevHeader, err := preForkState.LatestExecutionPayloadHeader()
require.NoError(t, err)
txRoot, err := prevHeader.TransactionsRoot()
require.NoError(t, err)
wdRoot, err := prevHeader.WithdrawalsRoot()
require.NoError(t, err)
wanted := &enginev1.ExecutionPayloadHeaderDeneb{
ParentHash: prevHeader.ParentHash(),
FeeRecipient: prevHeader.FeeRecipient(),
StateRoot: prevHeader.StateRoot(),
ReceiptsRoot: prevHeader.ReceiptsRoot(),
LogsBloom: prevHeader.LogsBloom(),
PrevRandao: prevHeader.PrevRandao(),
BlockNumber: prevHeader.BlockNumber(),
GasLimit: prevHeader.GasLimit(),
GasUsed: prevHeader.GasUsed(),
Timestamp: prevHeader.Timestamp(),
ExtraData: prevHeader.ExtraData(),
BaseFeePerGas: prevHeader.BaseFeePerGas(),
BlockHash: prevHeader.BlockHash(),
TransactionsRoot: txRoot,
WithdrawalsRoot: wdRoot,
}
require.DeepEqual(t, wanted, protoHeader)
nwi, err := mSt.NextWithdrawalIndex()
require.NoError(t, err)
require.Equal(t, uint64(0), nwi)
lwvi, err := mSt.NextWithdrawalValidatorIndex()
require.NoError(t, err)
require.Equal(t, primitives.ValidatorIndex(0), lwvi)
summaries, err := mSt.HistoricalSummaries()
require.NoError(t, err)
require.Equal(t, 0, len(summaries))
preDepositRequestsStartIndex, err := preForkState.DepositRequestsStartIndex()
require.NoError(t, err)
postDepositRequestsStartIndex, err := mSt.DepositRequestsStartIndex()
require.NoError(t, err)
require.Equal(t, preDepositRequestsStartIndex, postDepositRequestsStartIndex)
preDepositBalanceToConsume, err := preForkState.DepositBalanceToConsume()
require.NoError(t, err)
postDepositBalanceToConsume, err := mSt.DepositBalanceToConsume()
require.NoError(t, err)
require.Equal(t, preDepositBalanceToConsume, postDepositBalanceToConsume)
preExitBalanceToConsume, err := preForkState.ExitBalanceToConsume()
require.NoError(t, err)
postExitBalanceToConsume, err := mSt.ExitBalanceToConsume()
require.NoError(t, err)
require.Equal(t, preExitBalanceToConsume, postExitBalanceToConsume)
preEarliestExitEpoch, err := preForkState.EarliestExitEpoch()
require.NoError(t, err)
postEarliestExitEpoch, err := mSt.EarliestExitEpoch()
require.NoError(t, err)
require.Equal(t, preEarliestExitEpoch, postEarliestExitEpoch)
preConsolidationBalanceToConsume, err := preForkState.ConsolidationBalanceToConsume()
require.NoError(t, err)
postConsolidationBalanceToConsume, err := mSt.ConsolidationBalanceToConsume()
require.NoError(t, err)
require.Equal(t, preConsolidationBalanceToConsume, postConsolidationBalanceToConsume)
preEarliestConsolidationEpoch, err := preForkState.EarliestConsolidationEpoch()
require.NoError(t, err)
postEarliestConsolidationEpoch, err := mSt.EarliestConsolidationEpoch()
require.NoError(t, err)
require.Equal(t, preEarliestConsolidationEpoch, postEarliestConsolidationEpoch)
prePendingDeposits, err := preForkState.PendingDeposits()
require.NoError(t, err)
postPendingDeposits, err := mSt.PendingDeposits()
require.NoError(t, err)
require.DeepSSZEqual(t, prePendingDeposits, postPendingDeposits)
prePendingPartialWithdrawals, err := preForkState.PendingPartialWithdrawals()
require.NoError(t, err)
postPendingPartialWithdrawals, err := mSt.PendingPartialWithdrawals()
require.NoError(t, err)
require.DeepSSZEqual(t, prePendingPartialWithdrawals, postPendingPartialWithdrawals)
prePendingConsolidations, err := preForkState.PendingConsolidations()
require.NoError(t, err)
postPendingConsolidations, err := mSt.PendingConsolidations()
require.NoError(t, err)
require.DeepSSZEqual(t, prePendingConsolidations, postPendingConsolidations)
}

View File

@@ -7,6 +7,7 @@ go_library(
"beacon_committee.go",
"block.go",
"genesis.go",
"legacy.go",
"metrics.go",
"randao.go",
"rewards_penalties.go",
@@ -25,7 +26,6 @@ go_library(
"//beacon-chain/state:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//container/slice:go_default_library",
"//container/trie:go_default_library",
@@ -53,6 +53,7 @@ go_test(
"attestation_test.go",
"beacon_committee_test.go",
"block_test.go",
"legacy_test.go",
"private_access_fuzz_noop_test.go", # keep
"private_access_test.go",
"randao_test.go",
@@ -87,5 +88,6 @@ go_test(
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_stretchr_testify//require:go_default_library",
],
)

View File

@@ -32,6 +32,9 @@ func ValidateNilAttestation(attestation ethpb.Att) error {
if attestation.GetData().Target == nil {
return errors.New("attestation's target can't be nil")
}
if attestation.IsSingle() {
return nil
}
if attestation.GetAggregationBits() == nil {
return errors.New("attestation's bitfield can't be nil")
}

View File

@@ -308,6 +308,16 @@ func TestValidateNilAttestation(t *testing.T) {
},
errString: "",
},
{
name: "single attestation",
attestation: &ethpb.SingleAttestation{
Data: &ethpb.AttestationData{
Target: &ethpb.Checkpoint{},
Source: &ethpb.Checkpoint{},
},
},
errString: "",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {

View File

@@ -97,6 +97,13 @@ func TestVerifyBitfieldLength_OK(t *testing.T) {
assert.NoError(t, helpers.VerifyBitfieldLength(bf, committeeSize), "Bitfield is not validated when it was supposed to be")
}
func TestVerifyBitfieldLength_Incorrect(t *testing.T) {
helpers.ClearCache()
bf := bitfield.NewBitlist(1)
require.ErrorContains(t, "wanted participants bitfield length 2, got: 1", helpers.VerifyBitfieldLength(bf, 2))
}
func TestCommitteeAssignments_CannotRetrieveFutureEpoch(t *testing.T) {
helpers.ClearCache()

View File

@@ -0,0 +1,20 @@
package helpers
import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
)
// DepositRequestsStarted determines if the deposit requests have started.
func DepositRequestsStarted(beaconState state.BeaconState) bool {
if beaconState.Version() < version.Electra {
return false
}
requestsStartIndex, err := beaconState.DepositRequestsStartIndex()
if err != nil {
return false
}
return beaconState.Eth1DepositIndex() == requestsStartIndex
}
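
DepositRequestsStarted gives other packages a single predicate for the post-Electra handover from Eth1-data deposits to deposit requests. A hypothetical call site (not part of this change) might gate legacy deposit handling on it:

package example

import (
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
)

// shouldProcessLegacyDeposits is an illustrative gate: once the deposit
// requests mechanism has taken over, legacy Eth1 deposit handling is skipped.
func shouldProcessLegacyDeposits(st state.BeaconState) bool {
	return !helpers.DepositRequestsStarted(st)
}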

View File

@@ -0,0 +1,33 @@
package helpers_test
import (
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/testing/util"
"github.com/stretchr/testify/require"
)
// TestDepositRequestHaveStarted contains several test cases for DepositRequestsStarted.
func TestDepositRequestHaveStarted(t *testing.T) {
t.Run("Version below Electra returns false", func(t *testing.T) {
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
result := helpers.DepositRequestsStarted(st)
require.False(t, result)
})
t.Run("Version is Electra or higher, no error, but Eth1DepositIndex != requestsStartIndex returns false", func(t *testing.T) {
st, _ := util.DeterministicGenesisStateElectra(t, 1)
require.NoError(t, st.SetEth1DepositIndex(1))
result := helpers.DepositRequestsStarted(st)
require.False(t, result)
})
t.Run("Version is Electra or higher, no error, and Eth1DepositIndex == requestsStartIndex returns true", func(t *testing.T) {
st, _ := util.DeterministicGenesisStateElectra(t, 1)
require.NoError(t, st.SetEth1DepositIndex(33))
require.NoError(t, st.SetDepositRequestsStartIndex(33))
result := helpers.DepositRequestsStarted(st)
require.True(t, result)
})
}

View File

@@ -22,7 +22,7 @@ import (
func BalanceChurnLimit(activeBalance primitives.Gwei) primitives.Gwei {
churn := max(
params.BeaconConfig().MinPerEpochChurnLimitElectra,
(uint64(activeBalance) / params.BeaconConfig().ChurnLimitQuotient),
uint64(activeBalance)/params.BeaconConfig().ChurnLimitQuotient,
)
return primitives.Gwei(churn - churn%params.BeaconConfig().EffectiveBalanceIncrement)
}
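
The reformatted expression above keeps the same churn rule: take the larger of the minimum Electra churn limit and activeBalance divided by the churn-limit quotient, then round down to a multiple of the effective-balance increment. A minimal sketch with the configuration values passed in explicitly (parameter names are illustrative):

package example

// balanceChurnLimit mirrors the rule above on plain uint64 inputs: the churn
// is max(minChurnLimit, activeBalance/churnLimitQuotient), rounded down to a
// multiple of the effective-balance increment.
func balanceChurnLimit(activeBalance, minChurnLimit, churnLimitQuotient, increment uint64) uint64 {
	churn := max(minChurnLimit, activeBalance/churnLimitQuotient)
	return churn - churn%increment
}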

View File

@@ -14,7 +14,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
@@ -394,9 +393,9 @@ func ComputeProposerIndex(bState state.ReadOnlyBeaconState, activeIndices []prim
effectiveBal := v.EffectiveBalance()
if bState.Version() >= version.Electra {
binary.LittleEndian.PutUint64(seedBuffer[len(seed):], i/16)
randomByte := hashFunc(seedBuffer)
randomBytes := hashFunc(seedBuffer)
offset := (i % 16) * 2
randomValue := uint64(randomByte[offset]) | uint64(randomByte[offset+1])<<8
randomValue := uint64(randomBytes[offset]) | uint64(randomBytes[offset+1])<<8
if effectiveBal*fieldparams.MaxRandomValueElectra >= beaconConfig.MaxEffectiveBalanceElectra*randomValue {
return candidateIndex, nil
@@ -515,63 +514,6 @@ func LastActivatedValidatorIndex(ctx context.Context, st state.ReadOnlyBeaconSta
return lastActivatedvalidatorIndex, nil
}
// hasETH1WithdrawalCredential returns whether the validator has an ETH1
// Withdrawal prefix. It assumes that the caller has a lock on the state
func HasETH1WithdrawalCredential(val interfaces.WithWithdrawalCredentials) bool {
if val == nil {
return false
}
return isETH1WithdrawalCredential(val.GetWithdrawalCredentials())
}
func isETH1WithdrawalCredential(creds []byte) bool {
return bytes.HasPrefix(creds, []byte{params.BeaconConfig().ETH1AddressWithdrawalPrefixByte})
}
// HasCompoundingWithdrawalCredential checks if the validator has a compounding withdrawal credential.
// New in Electra EIP-7251: https://eips.ethereum.org/EIPS/eip-7251
//
// Spec definition:
//
// def has_compounding_withdrawal_credential(validator: Validator) -> bool:
// """
// Check if ``validator`` has an 0x02 prefixed "compounding" withdrawal credential.
// """
// return is_compounding_withdrawal_credential(validator.withdrawal_credentials)
func HasCompoundingWithdrawalCredential(v interfaces.WithWithdrawalCredentials) bool {
if v == nil {
return false
}
return IsCompoundingWithdrawalCredential(v.GetWithdrawalCredentials())
}
// IsCompoundingWithdrawalCredential checks if the credentials are a compounding withdrawal credential.
//
// Spec definition:
//
// def is_compounding_withdrawal_credential(withdrawal_credentials: Bytes32) -> bool:
// return withdrawal_credentials[:1] == COMPOUNDING_WITHDRAWAL_PREFIX
func IsCompoundingWithdrawalCredential(creds []byte) bool {
return bytes.HasPrefix(creds, []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte})
}
// HasExecutionWithdrawalCredentials checks if the validator has an execution withdrawal credential or compounding credential.
// New in Electra EIP-7251: https://eips.ethereum.org/EIPS/eip-7251
//
// Spec definition:
//
// def has_execution_withdrawal_credential(validator: Validator) -> bool:
// """
// Check if ``validator`` has a 0x01 or 0x02 prefixed withdrawal credential.
// """
// return has_compounding_withdrawal_credential(validator) or has_eth1_withdrawal_credential(validator)
func HasExecutionWithdrawalCredentials(v interfaces.WithWithdrawalCredentials) bool {
if v == nil {
return false
}
return HasCompoundingWithdrawalCredential(v) || HasETH1WithdrawalCredential(v)
}
// IsSameWithdrawalCredentials returns true if both validators have the same withdrawal credentials.
//
// return a.withdrawal_credentials[12:] == b.withdrawal_credentials[12:]
@@ -606,10 +548,10 @@ func IsFullyWithdrawableValidator(val state.ReadOnlyValidator, balance uint64, e
// Electra / EIP-7251 logic
if fork >= version.Electra {
return HasExecutionWithdrawalCredentials(val) && val.WithdrawableEpoch() <= epoch
return val.HasExecutionWithdrawalCredentials() && val.WithdrawableEpoch() <= epoch
}
return HasETH1WithdrawalCredential(val) && val.WithdrawableEpoch() <= epoch
return val.HasETH1WithdrawalCredentials() && val.WithdrawableEpoch() <= epoch
}
// IsPartiallyWithdrawableValidator returns whether the validator is able to perform a
@@ -637,7 +579,7 @@ func IsPartiallyWithdrawableValidator(val state.ReadOnlyValidator, balance uint6
// """
// Check if ``validator`` is partially withdrawable.
// """
// max_effective_balance = get_validator_max_effective_balance(validator)
// max_effective_balance = get_max_effective_balance(validator)
// has_max_effective_balance = validator.effective_balance == max_effective_balance # [Modified in Electra:EIP7251]
// has_excess_balance = balance > max_effective_balance # [Modified in Electra:EIP7251]
// return (
@@ -650,7 +592,7 @@ func isPartiallyWithdrawableValidatorElectra(val state.ReadOnlyValidator, balanc
hasMaxBalance := val.EffectiveBalance() == maxEB
hasExcessBalance := balance > maxEB
return HasExecutionWithdrawalCredentials(val) &&
return val.HasExecutionWithdrawalCredentials() &&
hasMaxBalance &&
hasExcessBalance
}
@@ -670,14 +612,14 @@ func isPartiallyWithdrawableValidatorElectra(val state.ReadOnlyValidator, balanc
func isPartiallyWithdrawableValidatorCapella(val state.ReadOnlyValidator, balance uint64, epoch primitives.Epoch) bool {
hasMaxBalance := val.EffectiveBalance() == params.BeaconConfig().MaxEffectiveBalance
hasExcessBalance := balance > params.BeaconConfig().MaxEffectiveBalance
return HasETH1WithdrawalCredential(val) && hasExcessBalance && hasMaxBalance
return val.HasETH1WithdrawalCredentials() && hasExcessBalance && hasMaxBalance
}
// ValidatorMaxEffectiveBalance returns the maximum effective balance for a validator.
//
// Spec definition:
//
// def get_validator_max_effective_balance(validator: Validator) -> Gwei:
// def get_max_effective_balance(validator: Validator) -> Gwei:
// """
// Get max effective balance for ``validator``.
// """
@@ -686,7 +628,7 @@ func isPartiallyWithdrawableValidatorCapella(val state.ReadOnlyValidator, balanc
// else:
// return MIN_ACTIVATION_BALANCE
func ValidatorMaxEffectiveBalance(val state.ReadOnlyValidator) uint64 {
if HasCompoundingWithdrawalCredential(val) {
if val.HasCompoundingWithdrawalCredentials() {
return params.BeaconConfig().MaxEffectiveBalanceElectra
}
return params.BeaconConfig().MinActivationBalance
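
The Electra proposer-sampling branch above reads two bytes of the hashed seed per candidate, so one 32-byte hash supplies sixteen 16-bit random values. A small sketch of just that extraction step (the function name is illustrative):

package example

// random16 extracts the i-th 16-bit little-endian value from a 32-byte hash,
// matching the offset arithmetic in the Electra branch above.
func random16(randomBytes [32]byte, i uint64) uint64 {
	offset := (i % 16) * 2
	return uint64(randomBytes[offset]) | uint64(randomBytes[offset+1])<<8
}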

View File

@@ -910,13 +910,15 @@ func TestProposerIndexFromCheckpoint(t *testing.T) {
func TestHasETH1WithdrawalCredentials(t *testing.T) {
creds := []byte{0xFA, 0xCC}
v := &ethpb.Validator{WithdrawalCredentials: creds}
require.Equal(t, false, helpers.HasETH1WithdrawalCredential(v))
roV, err := state_native.NewValidator(v)
require.NoError(t, err)
require.Equal(t, false, roV.HasETH1WithdrawalCredentials())
creds = []byte{params.BeaconConfig().ETH1AddressWithdrawalPrefixByte, 0xCC}
v = &ethpb.Validator{WithdrawalCredentials: creds}
require.Equal(t, true, helpers.HasETH1WithdrawalCredential(v))
roV, err = state_native.NewValidator(v)
require.NoError(t, err)
require.Equal(t, true, roV.HasETH1WithdrawalCredentials())
// No Withdrawal cred
v = &ethpb.Validator{}
require.Equal(t, false, helpers.HasETH1WithdrawalCredential(v))
}
func TestHasCompoundingWithdrawalCredential(t *testing.T) {
@@ -931,11 +933,12 @@ func TestHasCompoundingWithdrawalCredential(t *testing.T) {
{"Does not have compounding withdrawal credential",
&ethpb.Validator{WithdrawalCredentials: bytesutil.PadTo([]byte{0x00}, 32)},
false},
{"Handles nil case", nil, false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assert.Equal(t, tt.want, helpers.HasCompoundingWithdrawalCredential(tt.validator))
roV, err := state_native.NewValidator(tt.validator)
require.NoError(t, err)
assert.Equal(t, tt.want, roV.HasCompoundingWithdrawalCredentials())
})
}
}
@@ -955,11 +958,12 @@ func TestHasExecutionWithdrawalCredentials(t *testing.T) {
{"Does not have compounding withdrawal credential or eth1 withdrawal credential",
&ethpb.Validator{WithdrawalCredentials: bytesutil.PadTo([]byte{0x00}, 32)},
false},
{"Handles nil case", nil, false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assert.Equal(t, tt.want, helpers.HasExecutionWithdrawalCredentials(tt.validator))
roV, err := state_native.NewValidator(tt.validator)
require.NoError(t, err)
assert.Equal(t, tt.want, roV.HasExecutionWithdrawalCredentials())
})
}
}

View File

@@ -697,7 +697,7 @@ func TestLightClient_BlockToLightClientHeader(t *testing.T) {
excessBlobGas, err := payload.ExcessBlobGas()
require.NoError(t, err)
executionHeader := &v11.ExecutionPayloadHeaderElectra{
executionHeader := &v11.ExecutionPayloadHeaderDeneb{
ParentHash: payload.ParentHash(),
FeeRecipient: payload.FeeRecipient(),
StateRoot: payload.StateRoot(),
@@ -762,7 +762,7 @@ func TestLightClient_BlockToLightClientHeader(t *testing.T) {
excessBlobGas, err := payload.ExcessBlobGas()
require.NoError(t, err)
executionHeader := &v11.ExecutionPayloadHeaderElectra{
executionHeader := &v11.ExecutionPayloadHeaderDeneb{
ParentHash: payload.ParentHash(),
FeeRecipient: payload.FeeRecipient(),
StateRoot: payload.StateRoot(),

View File

@@ -99,6 +99,15 @@ func CanUpgradeToElectra(slot primitives.Slot) bool {
return epochStart && electraEpoch
}
// CanUpgradeToFulu returns true if the input `slot` can upgrade to Fulu.
// Spec code:
// If state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == FULU_FORK_EPOCH
func CanUpgradeToFulu(slot primitives.Slot) bool {
epochStart := slots.IsEpochStart(slot)
fuluEpoch := slots.ToEpoch(slot) == params.BeaconConfig().FuluForkEpoch
return epochStart && fuluEpoch
}
// CanProcessEpoch checks the eligibility to process epoch.
// The epoch can be processed at the end of the last slot of every epoch.
//

View File

@@ -1,6 +1,7 @@
package time_test
import (
"fmt"
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
@@ -83,76 +84,6 @@ func TestNextEpoch_OK(t *testing.T) {
}
}
func TestCanUpgradeToAltair(t *testing.T) {
params.SetupTestConfigCleanup(t)
bc := params.BeaconConfig()
bc.AltairForkEpoch = 5
params.OverrideBeaconConfig(bc)
tests := []struct {
name string
slot primitives.Slot
want bool
}{
{
name: "not epoch start",
slot: 1,
want: false,
},
{
name: "not altair epoch",
slot: params.BeaconConfig().SlotsPerEpoch,
want: false,
},
{
name: "altair epoch",
slot: primitives.Slot(params.BeaconConfig().AltairForkEpoch) * params.BeaconConfig().SlotsPerEpoch,
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := time.CanUpgradeToAltair(tt.slot); got != tt.want {
t.Errorf("canUpgradeToAltair() = %v, want %v", got, tt.want)
}
})
}
}
func TestCanUpgradeBellatrix(t *testing.T) {
params.SetupTestConfigCleanup(t)
bc := params.BeaconConfig()
bc.BellatrixForkEpoch = 5
params.OverrideBeaconConfig(bc)
tests := []struct {
name string
slot primitives.Slot
want bool
}{
{
name: "not epoch start",
slot: 1,
want: false,
},
{
name: "not bellatrix epoch",
slot: params.BeaconConfig().SlotsPerEpoch,
want: false,
},
{
name: "bellatrix epoch",
slot: primitives.Slot(params.BeaconConfig().BellatrixForkEpoch) * params.BeaconConfig().SlotsPerEpoch,
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := time.CanUpgradeToBellatrix(tt.slot); got != tt.want {
t.Errorf("CanUpgradeToBellatrix() = %v, want %v", got, tt.want)
}
})
}
}
func TestCanProcessEpoch_TrueOnEpochsLastSlot(t *testing.T) {
tests := []struct {
slot primitives.Slot
@@ -264,107 +195,79 @@ func TestAltairCompatible(t *testing.T) {
}
}
func TestCanUpgradeToCapella(t *testing.T) {
params.SetupTestConfigCleanup(t)
bc := params.BeaconConfig()
bc.CapellaForkEpoch = 5
params.OverrideBeaconConfig(bc)
tests := []struct {
name string
slot primitives.Slot
want bool
}{
{
name: "not epoch start",
slot: 1,
want: false,
},
{
name: "not capella epoch",
slot: params.BeaconConfig().SlotsPerEpoch,
want: false,
},
{
name: "capella epoch",
slot: primitives.Slot(params.BeaconConfig().CapellaForkEpoch) * params.BeaconConfig().SlotsPerEpoch,
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := time.CanUpgradeToCapella(tt.slot); got != tt.want {
t.Errorf("CanUpgradeToCapella() = %v, want %v", got, tt.want)
}
})
}
}
func TestCanUpgradeTo(t *testing.T) {
beaconConfig := params.BeaconConfig()
func TestCanUpgradeToDeneb(t *testing.T) {
params.SetupTestConfigCleanup(t)
bc := params.BeaconConfig()
bc.DenebForkEpoch = 5
params.OverrideBeaconConfig(bc)
tests := []struct {
name string
slot primitives.Slot
want bool
outerTestCases := []struct {
name string
forkEpoch *primitives.Epoch
upgradeFunc func(primitives.Slot) bool
}{
{
name: "not epoch start",
slot: 1,
want: false,
name: "Altair",
forkEpoch: &beaconConfig.AltairForkEpoch,
upgradeFunc: time.CanUpgradeToAltair,
},
{
name: "not deneb epoch",
slot: params.BeaconConfig().SlotsPerEpoch,
want: false,
name: "Bellatrix",
forkEpoch: &beaconConfig.BellatrixForkEpoch,
upgradeFunc: time.CanUpgradeToBellatrix,
},
{
name: "deneb epoch",
slot: primitives.Slot(params.BeaconConfig().DenebForkEpoch) * params.BeaconConfig().SlotsPerEpoch,
want: true,
name: "Capella",
forkEpoch: &beaconConfig.CapellaForkEpoch,
upgradeFunc: time.CanUpgradeToCapella,
},
{
name: "Deneb",
forkEpoch: &beaconConfig.DenebForkEpoch,
upgradeFunc: time.CanUpgradeToDeneb,
},
{
name: "Electra",
forkEpoch: &beaconConfig.ElectraForkEpoch,
upgradeFunc: time.CanUpgradeToElectra,
},
{
name: "Fulu",
forkEpoch: &beaconConfig.FuluForkEpoch,
upgradeFunc: time.CanUpgradeToFulu,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := time.CanUpgradeToDeneb(tt.slot); got != tt.want {
t.Errorf("CanUpgradeToDeneb() = %v, want %v", got, tt.want)
}
})
}
}
func TestCanUpgradeToElectra(t *testing.T) {
params.SetupTestConfigCleanup(t)
bc := params.BeaconConfig()
bc.ElectraForkEpoch = 5
params.OverrideBeaconConfig(bc)
tests := []struct {
name string
slot primitives.Slot
want bool
}{
{
name: "not epoch start",
slot: 1,
want: false,
},
{
name: "not electra epoch",
slot: params.BeaconConfig().SlotsPerEpoch,
want: false,
},
{
name: "electra epoch",
slot: primitives.Slot(params.BeaconConfig().ElectraForkEpoch) * params.BeaconConfig().SlotsPerEpoch,
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := time.CanUpgradeToElectra(tt.slot); got != tt.want {
t.Errorf("CanUpgradeToElectra() = %v, want %v", got, tt.want)
}
})
for _, otc := range outerTestCases {
params.SetupTestConfigCleanup(t)
*otc.forkEpoch = 5
params.OverrideBeaconConfig(beaconConfig)
innerTestCases := []struct {
name string
slot primitives.Slot
want bool
}{
{
name: "not epoch start",
slot: 1,
want: false,
},
{
name: fmt.Sprintf("not %s epoch", otc.name),
slot: params.BeaconConfig().SlotsPerEpoch,
want: false,
},
{
name: fmt.Sprintf("%s epoch", otc.name),
slot: primitives.Slot(*otc.forkEpoch) * params.BeaconConfig().SlotsPerEpoch,
want: true,
},
}
for _, itc := range innerTestCases {
t.Run(fmt.Sprintf("%s-%s", otc.name, itc.name), func(t *testing.T) {
if got := otc.upgradeFunc(itc.slot); got != itc.want {
t.Errorf("CanUpgradeTo%s() = %v, want %v", otc.name, got, itc.want)
}
})
}
}
}
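The consolidated test above relies on the shared behaviour of every `CanUpgradeTo*` helper: an upgrade is only possible at the first slot of the configured fork epoch. A minimal standalone sketch of that check, with an illustrative `slotsPerEpoch` constant and a generic `canUpgrade` helper standing in for the Prysm functions:

package main

import "fmt"

const slotsPerEpoch = 32 // illustrative; mainnet uses 32 slots per epoch

// canUpgrade mirrors the CanUpgradeTo* pattern: the slot must be the first
// slot of an epoch, and that epoch must be the configured fork epoch.
func canUpgrade(slot, forkEpoch uint64) bool {
    if slot%slotsPerEpoch != 0 {
        return false // not an epoch start
    }
    return slot/slotsPerEpoch == forkEpoch
}

func main() {
    forkEpoch := uint64(5)
    fmt.Println(canUpgrade(1, forkEpoch))               // false: not an epoch start
    fmt.Println(canUpgrade(slotsPerEpoch, forkEpoch))   // false: epoch 1, not the fork epoch
    fmt.Println(canUpgrade(5*slotsPerEpoch, forkEpoch)) // true: first slot of epoch 5
}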

View File

@@ -23,6 +23,7 @@ go_library(
"//beacon-chain/core/epoch:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/execution:go_default_library",
"//beacon-chain/core/fulu:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition/interop:go_default_library",

View File

@@ -17,6 +17,7 @@ import (
e "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/epoch"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/execution"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/fulu"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/features"
@@ -298,7 +299,7 @@ func ProcessSlotsCore(ctx context.Context, span trace.Span, state state.BeaconSt
func ProcessEpoch(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
var err error
if time.CanProcessEpoch(state) {
if state.Version() == version.Electra {
if state.Version() >= version.Electra {
if err = electra.ProcessEpoch(ctx, state); err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("could not process %s epoch", version.String(state.Version())))
}
@@ -322,9 +323,11 @@ func UpgradeState(ctx context.Context, state state.BeaconState) (state.BeaconSta
defer span.End()
var err error
slot := state.Slot()
upgraded := false
if time.CanUpgradeToAltair(state.Slot()) {
if time.CanUpgradeToAltair(slot) {
state, err = altair.UpgradeToAltair(ctx, state)
if err != nil {
tracing.AnnotateError(span, err)
@@ -333,7 +336,7 @@ func UpgradeState(ctx context.Context, state state.BeaconState) (state.BeaconSta
upgraded = true
}
if time.CanUpgradeToBellatrix(state.Slot()) {
if time.CanUpgradeToBellatrix(slot) {
state, err = execution.UpgradeToBellatrix(state)
if err != nil {
tracing.AnnotateError(span, err)
@@ -342,7 +345,7 @@ func UpgradeState(ctx context.Context, state state.BeaconState) (state.BeaconSta
upgraded = true
}
if time.CanUpgradeToCapella(state.Slot()) {
if time.CanUpgradeToCapella(slot) {
state, err = capella.UpgradeToCapella(state)
if err != nil {
tracing.AnnotateError(span, err)
@@ -351,7 +354,7 @@ func UpgradeState(ctx context.Context, state state.BeaconState) (state.BeaconSta
upgraded = true
}
if time.CanUpgradeToDeneb(state.Slot()) {
if time.CanUpgradeToDeneb(slot) {
state, err = deneb.UpgradeToDeneb(state)
if err != nil {
tracing.AnnotateError(span, err)
@@ -360,7 +363,7 @@ func UpgradeState(ctx context.Context, state state.BeaconState) (state.BeaconSta
upgraded = true
}
if time.CanUpgradeToElectra(state.Slot()) {
if time.CanUpgradeToElectra(slot) {
state, err = electra.UpgradeToElectra(state)
if err != nil {
tracing.AnnotateError(span, err)
@@ -369,8 +372,17 @@ func UpgradeState(ctx context.Context, state state.BeaconState) (state.BeaconSta
upgraded = true
}
if time.CanUpgradeToFulu(slot) {
state, err = fulu.UpgradeToFulu(state)
if err != nil {
tracing.AnnotateError(span, err)
return nil, err
}
upgraded = true
}
if upgraded {
log.Debugf("upgraded state to %s", version.String(state.Version()))
log.WithField("version", version.String(state.Version())).Info("Upgraded state to")
}
return state, nil
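`UpgradeState` above is a chain of per-fork steps: check whether the slot activates the fork, apply the upgrade, and record that something changed so the new version can be logged once at the end. A reduced sketch of that control flow; the `beaconState`, `upgradeStep`, and `upgradeState` names below are illustrative placeholders, not Prysm types:

package main

import "fmt"

type beaconState struct{ version string }

type upgradeStep struct {
    canUpgrade func(slot uint64) bool
    apply      func(beaconState) (beaconState, error)
}

// upgradeState walks the fork steps in order, applies every upgrade whose
// activation slot has been reached, and reports whether anything changed.
func upgradeState(st beaconState, slot uint64, steps []upgradeStep) (beaconState, bool, error) {
    upgraded := false
    for _, step := range steps {
        if !step.canUpgrade(slot) {
            continue
        }
        var err error
        if st, err = step.apply(st); err != nil {
            return st, upgraded, err
        }
        upgraded = true
    }
    return st, upgraded, nil
}

func main() {
    atEpoch := func(epoch, slotsPerEpoch uint64) func(uint64) bool {
        return func(slot uint64) bool { return slot == epoch*slotsPerEpoch }
    }
    steps := []upgradeStep{
        {canUpgrade: atEpoch(5, 32), apply: func(beaconState) (beaconState, error) { return beaconState{"electra"}, nil }},
        {canUpgrade: atEpoch(6, 32), apply: func(beaconState) (beaconState, error) { return beaconState{"fulu"}, nil }},
    }
    st, upgraded, _ := upgradeState(beaconState{"deneb"}, 5*32, steps)
    fmt.Println(st.version, upgraded) // electra true
}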
@@ -391,11 +403,15 @@ func VerifyOperationLengths(_ context.Context, state state.BeaconState, b interf
)
}
if uint64(len(body.AttesterSlashings())) > params.BeaconConfig().MaxAttesterSlashings {
maxSlashings := params.BeaconConfig().MaxAttesterSlashings
if body.Version() >= version.Electra {
maxSlashings = params.BeaconConfig().MaxAttesterSlashingsElectra
}
if uint64(len(body.AttesterSlashings())) > maxSlashings {
return nil, fmt.Errorf(
"number of attester slashings (%d) in block body exceeds allowed threshold of %d",
len(body.AttesterSlashings()),
params.BeaconConfig().MaxAttesterSlashings,
maxSlashings,
)
}
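`VerifyOperationLengths` now selects the attester-slashing cap by block version: pre-Electra bodies keep `MaxAttesterSlashings`, while Electra and later use `MaxAttesterSlashingsElectra`. A standalone sketch of that selection and length check; the numeric limits (2 and 1) mirror the mainnet config values and the `verifySlashingsLen` helper is illustrative:

package main

import "fmt"

const (
    versionPhase0  = 0
    versionElectra = 6 // illustrative ordering; only the relative comparison matters

    maxAttesterSlashings        = 2 // pre-Electra cap (mainnet value)
    maxAttesterSlashingsElectra = 1 // Electra cap (mainnet value)
)

// verifySlashingsLen mirrors the new check: pick the cap by version, then
// reject bodies that exceed it.
func verifySlashingsLen(blockVersion, numSlashings int) error {
    limit := maxAttesterSlashings
    if blockVersion >= versionElectra {
        limit = maxAttesterSlashingsElectra
    }
    if numSlashings > limit {
        return fmt.Errorf("number of attester slashings (%d) in block body exceeds allowed threshold of %d", numSlashings, limit)
    }
    return nil
}

func main() {
    fmt.Println(verifySlashingsLen(versionPhase0, 2))  // <nil>
    fmt.Println(verifySlashingsLen(versionElectra, 2)) // exceeds the Electra threshold of 1
}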

View File

@@ -437,6 +437,25 @@ func TestProcessBlock_OverMaxAttesterSlashings(t *testing.T) {
assert.ErrorContains(t, want, err)
}
func TestProcessBlock_OverMaxAttesterSlashingsElectra(t *testing.T) {
maxSlashings := params.BeaconConfig().MaxAttesterSlashingsElectra
b := &ethpb.SignedBeaconBlockElectra{
Block: &ethpb.BeaconBlockElectra{
Body: &ethpb.BeaconBlockBodyElectra{
AttesterSlashings: make([]*ethpb.AttesterSlashingElectra, maxSlashings+1),
},
},
}
want := fmt.Sprintf("number of attester slashings (%d) in block body exceeds allowed threshold of %d",
len(b.Block.Body.AttesterSlashings), params.BeaconConfig().MaxAttesterSlashingsElectra)
s, err := state_native.InitializeFromProtoUnsafeElectra(&ethpb.BeaconStateElectra{})
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
_, err = transition.VerifyOperationLengths(context.Background(), s, wsb.Block())
assert.ErrorContains(t, want, err)
}
func TestProcessBlock_OverMaxAttestations(t *testing.T) {
b := &ethpb.SignedBeaconBlock{
Block: &ethpb.BeaconBlock{
@@ -665,6 +684,20 @@ func TestProcessSlots_ThroughElectraEpoch(t *testing.T) {
require.Equal(t, params.BeaconConfig().SlotsPerEpoch*10, st.Slot())
}
func TestProcessSlots_ThroughFuluEpoch(t *testing.T) {
transition.SkipSlotCache.Disable()
params.SetupTestConfigCleanup(t)
conf := params.BeaconConfig()
conf.FuluForkEpoch = 5
params.OverrideBeaconConfig(conf)
st, _ := util.DeterministicGenesisStateElectra(t, params.BeaconConfig().MaxValidatorsPerCommittee)
st, err := transition.ProcessSlots(context.Background(), st, params.BeaconConfig().SlotsPerEpoch*10)
require.NoError(t, err)
require.Equal(t, version.Fulu, st.Version())
require.Equal(t, params.BeaconConfig().SlotsPerEpoch*10, st.Slot())
}
func TestProcessSlotsUsingNextSlotCache(t *testing.T) {
s, _ := util.DeterministicGenesisState(t, 1)
r := []byte{'a'}

View File

@@ -9,25 +9,35 @@ import (
// SlashingParamsPerVersion returns the slashing parameters for the given state version.
func SlashingParamsPerVersion(v int) (slashingQuotient, proposerRewardQuotient, whistleblowerRewardQuotient uint64, err error) {
cfg := params.BeaconConfig()
switch v {
case version.Phase0:
slashingQuotient = cfg.MinSlashingPenaltyQuotient
proposerRewardQuotient = cfg.ProposerRewardQuotient
whistleblowerRewardQuotient = cfg.WhistleBlowerRewardQuotient
case version.Altair:
slashingQuotient = cfg.MinSlashingPenaltyQuotientAltair
proposerRewardQuotient = cfg.ProposerRewardQuotient
whistleblowerRewardQuotient = cfg.WhistleBlowerRewardQuotient
case version.Bellatrix, version.Capella, version.Deneb:
slashingQuotient = cfg.MinSlashingPenaltyQuotientBellatrix
proposerRewardQuotient = cfg.ProposerRewardQuotient
whistleblowerRewardQuotient = cfg.WhistleBlowerRewardQuotient
case version.Electra:
if v >= version.Electra {
slashingQuotient = cfg.MinSlashingPenaltyQuotientElectra
proposerRewardQuotient = cfg.ProposerRewardQuotient
whistleblowerRewardQuotient = cfg.WhistleBlowerRewardQuotientElectra
default:
err = errors.New("unknown state version")
return
}
if v >= version.Bellatrix {
slashingQuotient = cfg.MinSlashingPenaltyQuotientBellatrix
proposerRewardQuotient = cfg.ProposerRewardQuotient
whistleblowerRewardQuotient = cfg.WhistleBlowerRewardQuotient
return
}
if v >= version.Altair {
slashingQuotient = cfg.MinSlashingPenaltyQuotientAltair
proposerRewardQuotient = cfg.ProposerRewardQuotient
whistleblowerRewardQuotient = cfg.WhistleBlowerRewardQuotient
return
}
if v >= version.Phase0 {
slashingQuotient = cfg.MinSlashingPenaltyQuotient
proposerRewardQuotient = cfg.ProposerRewardQuotient
whistleblowerRewardQuotient = cfg.WhistleBlowerRewardQuotient
return
}
err = errors.Errorf("unknown state version %s", version.String(v))
return
}
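Replacing the exhaustive switch with descending `if v >= ...` checks means any future fork automatically inherits the Electra slashing parameters unless it overrides them. A self-contained sketch of that selection order; the fork constants are illustrative and the quotient values follow the spec constants named in the comments, whereas the real code reads them from `params.BeaconConfig()`:

package main

import (
    "errors"
    "fmt"
)

// Fork versions in ascending order; only the relative ordering matters here.
const (
    phase0 = iota
    altair
    bellatrix
    capella
    deneb
    electra
)

// minSlashingQuotient mirrors the descending version checks: later forks are
// matched first, and anything at or above Electra shares the Electra value.
func minSlashingQuotient(v int) (uint64, error) {
    switch {
    case v >= electra:
        return 4096, nil // MIN_SLASHING_PENALTY_QUOTIENT_ELECTRA
    case v >= bellatrix:
        return 32, nil // MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX
    case v >= altair:
        return 64, nil // MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR
    case v >= phase0:
        return 128, nil // MIN_SLASHING_PENALTY_QUOTIENT
    default:
        return 0, errors.New("unknown state version")
    }
}

func main() {
    q, _ := minSlashingQuotient(electra + 1) // a hypothetical post-Electra fork
    fmt.Println(q)                           // 4096: inherits Electra's parameters
}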

View File

@@ -75,7 +75,7 @@ func InitiateValidatorExit(ctx context.Context, s state.BeaconState, idx primiti
// Compute exit queue epoch.
if s.Version() < version.Electra {
// Relevant spec code from deneb:
// Relevant spec code from phase0:
//
// exit_epochs = [v.exit_epoch for v in state.validators if v.exit_epoch != FAR_FUTURE_EPOCH]
// exit_queue_epoch = max(exit_epochs + [compute_activation_exit_epoch(get_current_epoch(state))])

View File

@@ -101,6 +101,7 @@ type NoHeadAccessDatabase interface {
SaveLightClientBootstrap(ctx context.Context, blockRoot []byte, bootstrap interfaces.LightClientBootstrap) error
CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error
DeleteHistoricalDataBeforeSlot(ctx context.Context, slot primitives.Slot) error
}
// HeadAccessDatabase defines a struct with access to reading chain head data.

View File

@@ -41,6 +41,7 @@ go_library(
"//beacon-chain/state/genesis:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",

View File

@@ -227,10 +227,7 @@ func (s *Store) DeleteBlock(ctx context.Context, root [32]byte) error {
return ErrDeleteJustifiedAndFinalized
}
if err := tx.Bucket(blocksBucket).Delete(root[:]); err != nil {
return err
}
if err := tx.Bucket(blockParentRootIndicesBucket).Delete(root[:]); err != nil {
if err := s.deleteBlock(tx, root[:]); err != nil {
return err
}
s.blockCache.Del(string(root[:]))
@@ -238,6 +235,89 @@ func (s *Store) DeleteBlock(ctx context.Context, root [32]byte) error {
})
}
// DeleteHistoricalDataBeforeSlot deletes all blocks and states before the given slot.
// This function deletes data from the following buckets:
// - blocksBucket
// - blockParentRootIndicesBucket
// - finalizedBlockRootsIndexBucket
// - stateBucket
// - stateSummaryBucket
// - blockRootValidatorHashesBucket
// - blockSlotIndicesBucket
// - stateSlotIndicesBucket
func (s *Store) DeleteHistoricalDataBeforeSlot(ctx context.Context, cutoffSlot primitives.Slot) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.DeleteHistoricalDataBeforeSlot")
defer span.End()
// Collect the slot/root pairs in a read-only transaction first; the deletions happen in the separate write transaction below.
var (
roots [][]byte
slts []primitives.Slot
)
err := s.db.View(func(tx *bolt.Tx) error {
var err error
roots, slts, err = blockRootsBySlotRange(ctx, tx.Bucket(blockSlotIndicesBucket), primitives.Slot(0), cutoffSlot, nil, nil, nil)
if err != nil {
return errors.Wrap(err, "could not retrieve block roots")
}
return nil
})
if err != nil {
return errors.Wrap(err, "could not retrieve block roots and slots")
}
// Perform all deletions in a single transaction for atomicity
return s.db.Update(func(tx *bolt.Tx) error {
for _, root := range roots {
// Delete block
if err = s.deleteBlock(tx, root); err != nil {
return err
}
// Delete finalized block roots index
if err = tx.Bucket(finalizedBlockRootsIndexBucket).Delete(root); err != nil {
return errors.Wrap(err, "could not delete finalized block root index")
}
// Delete state
if err = tx.Bucket(stateBucket).Delete(root); err != nil {
return errors.Wrap(err, "could not delete state")
}
// Delete state summary
if err = tx.Bucket(stateSummaryBucket).Delete(root); err != nil {
return errors.Wrap(err, "could not delete state summary")
}
// Delete validator entries
if err = s.deleteValidatorHashes(tx, root); err != nil {
return errors.Wrap(err, "could not delete validators")
}
}
for _, slot := range slts {
// Delete slot indices
if err = tx.Bucket(blockSlotIndicesBucket).Delete(bytesutil.SlotToBytesBigEndian(slot)); err != nil {
return errors.Wrap(err, "could not delete block slot index")
}
if err = tx.Bucket(stateSlotIndicesBucket).Delete(bytesutil.SlotToBytesBigEndian(slot)); err != nil {
return errors.Wrap(err, "could not delete state slot index")
}
}
// Evict cache entries only after all bucket deletions above, to avoid issues in case of a transaction rollback.
for _, root := range roots {
// Delete block from cache
s.blockCache.Del(string(root))
// Delete state summary from cache
s.stateSummaryCache.delete([32]byte(root))
}
return nil
})
}
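`DeleteHistoricalDataBeforeSlot` is the single entry point the new pruner service (added later in this diff) calls: roots and slots below the cutoff are collected in a read-only transaction, then blocks, states, summaries, validator hashes, and slot indices are removed in one write transaction. A caller-side sketch against a narrowed interface; the `historyPruner` interface, `fakeStore`, and `pruneBefore` helper are illustrative, and the real method takes a `primitives.Slot` rather than a plain `uint64`:

package main

import (
    "context"
    "fmt"
)

// historyPruner is a narrowed, illustrative view of the database: only the
// method this diff introduces. Any store implementing it can be pruned.
type historyPruner interface {
    DeleteHistoricalDataBeforeSlot(ctx context.Context, cutoffSlot uint64) error
}

type fakeStore struct{ deletedBefore uint64 }

func (f *fakeStore) DeleteHistoricalDataBeforeSlot(_ context.Context, cutoffSlot uint64) error {
    f.deletedBefore = cutoffSlot
    return nil
}

// pruneBefore wraps the call with the kind of error context the pruner uses.
func pruneBefore(ctx context.Context, db historyPruner, cutoff uint64) error {
    if err := db.DeleteHistoricalDataBeforeSlot(ctx, cutoff); err != nil {
        return fmt.Errorf("could not delete history before slot %d: %w", cutoff, err)
    }
    return nil
}

func main() {
    s := &fakeStore{}
    _ = pruneBefore(context.Background(), s, 64)
    fmt.Println(s.deletedBefore) // 64
}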
// SaveBlock to the db.
func (s *Store) SaveBlock(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock) error {
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBlock")
@@ -609,7 +689,7 @@ func blockRootsByFilter(ctx context.Context, tx *bolt.Tx, f *filters.QueryFilter
// We retrieve block roots that match a filter criteria of slot ranges, if specified.
filtersMap := f.Filters()
rootsBySlotRange, err := blockRootsBySlotRange(
rootsBySlotRange, _, err := blockRootsBySlotRange(
ctx,
tx.Bucket(blockSlotIndicesBucket),
filtersMap[filters.StartSlot],
@@ -627,6 +707,7 @@ func blockRootsByFilter(ctx context.Context, tx *bolt.Tx, f *filters.QueryFilter
// that list of roots to look up the blocks. These blocks will
// meet the filter criteria.
indices := lookupValuesForIndices(ctx, indicesByBucket, tx)
keys := rootsBySlotRange
if len(indices) > 0 {
// If we have found indices that meet the filter criteria, and there are also
@@ -653,13 +734,13 @@ func blockRootsBySlotRange(
ctx context.Context,
bkt *bolt.Bucket,
startSlotEncoded, endSlotEncoded, startEpochEncoded, endEpochEncoded, slotStepEncoded interface{},
) ([][]byte, error) {
) ([][]byte, []primitives.Slot, error) {
_, span := trace.StartSpan(ctx, "BeaconDB.blockRootsBySlotRange")
defer span.End()
// Return nothing when all slot parameters are missing
if startSlotEncoded == nil && endSlotEncoded == nil && startEpochEncoded == nil && endEpochEncoded == nil {
return [][]byte{}, nil
return [][]byte{}, nil, nil
}
var startSlot, endSlot primitives.Slot
@@ -680,11 +761,11 @@ func blockRootsBySlotRange(
if startEpochOk && endEpochOk {
startSlot, err = slots.EpochStart(startEpoch)
if err != nil {
return nil, err
return nil, nil, err
}
endSlot, err = slots.EpochStart(endEpoch)
if err != nil {
return nil, err
return nil, nil, err
}
endSlot = endSlot + params.BeaconConfig().SlotsPerEpoch - 1
}
@@ -695,14 +776,15 @@ func blockRootsBySlotRange(
return key != nil && bytes.Compare(key, max) <= 0
}
if endSlot < startSlot {
return nil, errInvalidSlotRange
return nil, nil, errInvalidSlotRange
}
rootsRange := endSlot.SubSlot(startSlot).Div(step)
roots := make([][]byte, 0, rootsRange)
var slts []primitives.Slot
c := bkt.Cursor()
for k, v := c.Seek(min); conditional(k, max); k, v = c.Next() {
slot := bytesutil.BytesToSlotBigEndian(k)
if step > 1 {
slot := bytesutil.BytesToSlotBigEndian(k)
if slot.SubSlot(startSlot).Mod(step) != 0 {
continue
}
@@ -713,8 +795,9 @@ func blockRootsBySlotRange(
splitRoots = append(splitRoots, v[i:i+32])
}
roots = append(roots, splitRoots...)
slts = append(slts, slot)
}
return roots, nil
return roots, slts, nil
}
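`blockRootsBySlotRange` now returns the matching slots alongside the roots so `DeleteHistoricalDataBeforeSlot` can clear both slot-index buckets without a second scan. The underlying pattern is a bolt cursor walk over big-endian slot keys between `min` and `max`. A self-contained sketch of that pattern using bbolt directly; the bucket name and key layout are simplified stand-ins for Prysm's schema:

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"
    "log"
    "os"
    "path/filepath"

    bolt "go.etcd.io/bbolt"
)

// slotKey encodes a slot big-endian so bolt's lexicographic key order matches
// numeric slot order.
func slotKey(slot uint64) []byte {
    k := make([]byte, 8)
    binary.BigEndian.PutUint64(k, slot)
    return k
}

func main() {
    dir, err := os.MkdirTemp("", "slotidx")
    if err != nil {
        log.Fatal(err)
    }
    defer os.RemoveAll(dir)
    db, err := bolt.Open(filepath.Join(dir, "test.db"), 0600, nil)
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    bucket := []byte("slot-index") // illustrative bucket name
    // Seed a few slot -> root entries.
    if err := db.Update(func(tx *bolt.Tx) error {
        bkt, err := tx.CreateBucketIfNotExists(bucket)
        if err != nil {
            return err
        }
        for slot := uint64(1); slot <= 5; slot++ {
            if err := bkt.Put(slotKey(slot), []byte(fmt.Sprintf("root-%d", slot))); err != nil {
                return err
            }
        }
        return nil
    }); err != nil {
        log.Fatal(err)
    }

    // Walk keys in [min, max] with a cursor, reading roots and slots in one
    // pass, mirroring the two-value return added in this diff.
    min, max := slotKey(2), slotKey(4)
    if err := db.View(func(tx *bolt.Tx) error {
        c := tx.Bucket(bucket).Cursor()
        for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() {
            fmt.Printf("slot %d -> %s\n", binary.BigEndian.Uint64(k), v)
        }
        return nil
    }); err != nil {
        log.Fatal(err)
    }
}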
// blockRootsBySlot retrieves the block roots by slot
@@ -813,9 +896,9 @@ func unmarshalBlock(_ context.Context, enc []byte) (interfaces.ReadOnlySignedBea
if err := rawBlock.UnmarshalSSZ(enc[len(denebBlindKey):]); err != nil {
return nil, errors.Wrap(err, "could not unmarshal blinded Deneb block")
}
case hasElectraKey(enc):
case HasElectraKey(enc):
rawBlock = &ethpb.SignedBeaconBlockElectra{}
if err := rawBlock.UnmarshalSSZ(enc[len(electraKey):]); err != nil {
if err := rawBlock.UnmarshalSSZ(enc[len(ElectraKey):]); err != nil {
return nil, errors.Wrap(err, "could not unmarshal Electra block")
}
case hasElectraBlindKey(enc):
@@ -823,6 +906,16 @@ func unmarshalBlock(_ context.Context, enc []byte) (interfaces.ReadOnlySignedBea
if err := rawBlock.UnmarshalSSZ(enc[len(electraBlindKey):]); err != nil {
return nil, errors.Wrap(err, "could not unmarshal blinded Electra block")
}
case hasFuluKey(enc):
rawBlock = &ethpb.SignedBeaconBlockFulu{}
if err := rawBlock.UnmarshalSSZ(enc[len(fuluKey):]); err != nil {
return nil, errors.Wrap(err, "could not unmarshal Fulu block")
}
case hasFuluBlindKey(enc):
rawBlock = &ethpb.SignedBlindedBeaconBlockFulu{}
if err := rawBlock.UnmarshalSSZ(enc[len(fuluBlindKey):]); err != nil {
return nil, errors.Wrap(err, "could not unmarshal blinded Fulu block")
}
default:
// Marshal block bytes to phase 0 beacon block.
rawBlock = &ethpb.SignedBeaconBlock{}
@@ -851,32 +944,79 @@ func encodeBlock(blk interfaces.ReadOnlySignedBeaconBlock) ([]byte, error) {
}
func keyForBlock(blk interfaces.ReadOnlySignedBeaconBlock) ([]byte, error) {
switch blk.Version() {
case version.Electra:
v := blk.Version()
if v >= version.Fulu {
if blk.IsBlinded() {
return fuluBlindKey, nil
}
return fuluKey, nil
}
if v >= version.Electra {
if blk.IsBlinded() {
return electraBlindKey, nil
}
return electraKey, nil
case version.Deneb:
return ElectraKey, nil
}
if v >= version.Deneb {
if blk.IsBlinded() {
return denebBlindKey, nil
}
return denebKey, nil
case version.Capella:
}
if v >= version.Capella {
if blk.IsBlinded() {
return capellaBlindKey, nil
}
return capellaKey, nil
case version.Bellatrix:
}
if v >= version.Bellatrix {
if blk.IsBlinded() {
return bellatrixBlindKey, nil
}
return bellatrixKey, nil
case version.Altair:
return altairKey, nil
case version.Phase0:
return nil, nil
default:
return nil, fmt.Errorf("unsupported block version: %v", blk.Version())
}
if v >= version.Altair {
return altairKey, nil
}
if v >= version.Phase0 {
return nil, nil
}
return nil, fmt.Errorf("unsupported block version: %v", blk.Version())
}
func (s *Store) deleteBlock(tx *bolt.Tx, root []byte) error {
if err := tx.Bucket(blocksBucket).Delete(root); err != nil {
return errors.Wrap(err, "could not delete block")
}
if err := tx.Bucket(blockParentRootIndicesBucket).Delete(root); err != nil {
return errors.Wrap(err, "could not delete block parent indices")
}
return nil
}
func (s *Store) deleteValidatorHashes(tx *bolt.Tx, root []byte) error {
ok, err := s.isStateValidatorMigrationOver()
if err != nil {
return err
}
if !ok {
return nil
}
// Delete the validator hash index
if err = tx.Bucket(blockRootValidatorHashesBucket).Delete(root); err != nil {
return errors.Wrap(err, "could not delete validator index")
}
return nil
}

View File

@@ -2,9 +2,13 @@ package kv
import (
"context"
"fmt"
bolt "go.etcd.io/bbolt"
"testing"
"time"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filters"
@@ -353,6 +357,189 @@ func TestStore_DeleteFinalizedBlock(t *testing.T) {
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteJustifiedAndFinalized)
}
func TestStore_HistoricalDataBeforeSlot(t *testing.T) {
slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
db := setupDB(t)
ctx := context.Background()
// Save genesis block root
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
// Create and save blocks for 4 epochs
blks := makeBlocks(t, 0, slotsPerEpoch*4, genesisBlockRoot)
require.NoError(t, db.SaveBlocks(ctx, blks))
// Mark state validator migration as complete
err := db.db.Update(func(tx *bolt.Tx) error {
return tx.Bucket(migrationsBucket).Put(migrationStateValidatorsKey, migrationCompleted)
})
require.NoError(t, err)
migrated, err := db.isStateValidatorMigrationOver()
require.NoError(t, err)
require.Equal(t, true, migrated)
// Create state summaries and states for each block
ss := make([]*ethpb.StateSummary, len(blks))
states := make([]state.BeaconState, len(blks))
for i, blk := range blks {
slot := blk.Block().Slot()
r, err := blk.Block().HashTreeRoot()
require.NoError(t, err)
// Create and save state summary
ss[i] = &ethpb.StateSummary{
Slot: slot,
Root: r[:],
}
// Create and save state with validator entries
vals := make([]*ethpb.Validator, 2)
for j := range vals {
vals[j] = &ethpb.Validator{
PublicKey: bytesutil.PadTo([]byte{byte(i*j + 1)}, 48),
WithdrawalCredentials: bytesutil.PadTo([]byte{byte(i*j + 2)}, 32),
}
}
st, err := util.NewBeaconState(func(state *ethpb.BeaconState) error {
state.Validators = vals
state.Slot = slot
return nil
})
require.NoError(t, err)
require.NoError(t, db.SaveState(ctx, st, r))
states[i] = st
// Verify validator entries are saved to db
valsActual, err := db.validatorEntries(ctx, r)
require.NoError(t, err)
for j, val := range valsActual {
require.DeepEqual(t, vals[j], val)
}
}
require.NoError(t, db.SaveStateSummaries(ctx, ss))
// Verify slot indices exist before deletion
err = db.db.View(func(tx *bolt.Tx) error {
blockSlotBkt := tx.Bucket(blockSlotIndicesBucket)
stateSlotBkt := tx.Bucket(stateSlotIndicesBucket)
for i := uint64(0); i < slotsPerEpoch; i++ {
slot := bytesutil.SlotToBytesBigEndian(primitives.Slot(i + 1))
assert.NotNil(t, blockSlotBkt.Get(slot), "Expected block slot index to exist")
assert.NotNil(t, stateSlotBkt.Get(slot), "Expected state slot index to exist", i)
}
return nil
})
require.NoError(t, err)
// Delete data before slot at epoch 1
require.NoError(t, db.DeleteHistoricalDataBeforeSlot(ctx, primitives.Slot(slotsPerEpoch)))
// Verify blocks from epoch 0 are deleted
for i := uint64(0); i < slotsPerEpoch; i++ {
root, err := blks[i].Block().HashTreeRoot()
require.NoError(t, err)
// Check block is deleted
retrievedBlocks, err := db.BlocksBySlot(ctx, primitives.Slot(i))
require.NoError(t, err)
assert.Equal(t, 0, len(retrievedBlocks))
// Verify block does not exist
assert.Equal(t, false, db.HasBlock(ctx, root))
// Verify block parent root does not exist
err = db.db.View(func(tx *bolt.Tx) error {
require.Equal(t, 0, len(tx.Bucket(blockParentRootIndicesBucket).Get(root[:])))
return nil
})
require.NoError(t, err)
// Verify state is deleted
hasState := db.HasState(ctx, root)
assert.Equal(t, false, hasState)
// Verify state summary is deleted
hasSummary := db.HasStateSummary(ctx, root)
assert.Equal(t, false, hasSummary)
// Verify validator hashes for block roots are deleted
err = db.db.View(func(tx *bolt.Tx) error {
assert.Equal(t, 0, len(tx.Bucket(blockRootValidatorHashesBucket).Get(root[:])))
return nil
})
require.NoError(t, err)
}
// Verify slot indices are deleted
err = db.db.View(func(tx *bolt.Tx) error {
blockSlotBkt := tx.Bucket(blockSlotIndicesBucket)
stateSlotBkt := tx.Bucket(stateSlotIndicesBucket)
for i := uint64(0); i < slotsPerEpoch; i++ {
slot := bytesutil.SlotToBytesBigEndian(primitives.Slot(i + 1))
assert.Equal(t, 0, len(blockSlotBkt.Get(slot)), fmt.Sprintf("Expected block slot index to be deleted, slot: %d", slot))
assert.Equal(t, 0, len(stateSlotBkt.Get(slot)), fmt.Sprintf("Expected state slot index to be deleted, slot: %d", slot))
}
return nil
})
require.NoError(t, err)
// Verify blocks from epochs 1-3 still exist
for i := slotsPerEpoch; i < slotsPerEpoch*4; i++ {
root, err := blks[i].Block().HashTreeRoot()
require.NoError(t, err)
// Verify block exists
assert.Equal(t, true, db.HasBlock(ctx, root))
// Verify the remaining block parent-root index entries exist, except for the last slot, since the index only stores the parent root of each saved block.
if i < slotsPerEpoch*4-1 {
err = db.db.View(func(tx *bolt.Tx) error {
require.NotNil(t, tx.Bucket(blockParentRootIndicesBucket).Get(root[:]), fmt.Sprintf("Expected block parent index to exist, slot: %d", i))
return nil
})
require.NoError(t, err)
}
// Verify state exists
hasState := db.HasState(ctx, root)
assert.Equal(t, true, hasState)
// Verify state summary exists
hasSummary := db.HasStateSummary(ctx, root)
assert.Equal(t, true, hasSummary)
// Verify slot indices still exist
err = db.db.View(func(tx *bolt.Tx) error {
blockSlotBkt := tx.Bucket(blockSlotIndicesBucket)
stateSlotBkt := tx.Bucket(stateSlotIndicesBucket)
slot := bytesutil.SlotToBytesBigEndian(primitives.Slot(i + 1))
assert.NotNil(t, blockSlotBkt.Get(slot), "Expected block slot index to exist")
assert.NotNil(t, stateSlotBkt.Get(slot), "Expected state slot index to exist")
return nil
})
require.NoError(t, err)
// Verify validator entries still exist
valsActual, err := db.validatorEntries(ctx, root)
require.NoError(t, err)
assert.NotNil(t, valsActual)
// Verify remaining validator hashes for block roots exists
err = db.db.View(func(tx *bolt.Tx) error {
assert.NotNil(t, tx.Bucket(blockRootValidatorHashesBucket).Get(root[:]))
return nil
})
require.NoError(t, err)
}
}
func TestStore_GenesisBlock(t *testing.T) {
db := setupDB(t)
ctx := context.Background()

View File

@@ -52,11 +52,12 @@ func hasDenebBlindKey(enc []byte) bool {
return bytes.Equal(enc[:len(denebBlindKey)], denebBlindKey)
}
func hasElectraKey(enc []byte) bool {
if len(electraKey) >= len(enc) {
// HasElectraKey reports whether the encoding is Electra compatible.
func HasElectraKey(enc []byte) bool {
if len(ElectraKey) >= len(enc) {
return false
}
return bytes.Equal(enc[:len(electraKey)], electraKey)
return bytes.Equal(enc[:len(ElectraKey)], ElectraKey)
}
func hasElectraBlindKey(enc []byte) bool {
@@ -65,3 +66,17 @@ func hasElectraBlindKey(enc []byte) bool {
}
return bytes.Equal(enc[:len(electraBlindKey)], electraBlindKey)
}
func hasFuluKey(enc []byte) bool {
if len(fuluKey) >= len(enc) {
return false
}
return bytes.Equal(enc[:len(fuluKey)], fuluKey)
}
func hasFuluBlindKey(enc []byte) bool {
if len(fuluBlindKey) >= len(enc) {
return false
}
return bytes.Equal(enc[:len(fuluBlindKey)], fuluBlindKey)
}

View File

@@ -109,6 +109,7 @@ var Buckets = [][]byte{
stateValidatorsBucket,
lightClientUpdatesBucket,
lightClientBootstrapBucket,
lightClientSyncCommitteeBucket,
// Indices buckets.
blockSlotIndicesBucket,
stateSlotIndicesBucket,

View File

@@ -7,6 +7,8 @@ import (
"github.com/golang/snappy"
"github.com/pkg/errors"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
light_client "github.com/prysmaticlabs/prysm/v5/consensus-types/light-client"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
@@ -35,9 +37,35 @@ func (s *Store) SaveLightClientBootstrap(ctx context.Context, blockRoot []byte,
_, span := trace.StartSpan(ctx, "BeaconDB.SaveLightClientBootstrap")
defer span.End()
bootstrapCopy, err := light_client.NewWrappedBootstrap(proto.Clone(bootstrap.Proto()))
if err != nil {
return errors.Wrap(err, "could not clone light client bootstrap")
}
syncCommitteeHash, err := bootstrapCopy.CurrentSyncCommittee().HashTreeRoot()
if err != nil {
return errors.Wrap(err, "could not hash current sync committee")
}
return s.db.Update(func(tx *bolt.Tx) error {
syncCommitteeBucket := tx.Bucket(lightClientSyncCommitteeBucket)
syncCommitteeAlreadyExists := syncCommitteeBucket.Get(syncCommitteeHash[:]) != nil
if !syncCommitteeAlreadyExists {
enc, err := bootstrapCopy.CurrentSyncCommittee().MarshalSSZ()
if err != nil {
return errors.Wrap(err, "could not marshal current sync committee")
}
if err := syncCommitteeBucket.Put(syncCommitteeHash[:], enc); err != nil {
return errors.Wrap(err, "could not save current sync committee")
}
}
err = bootstrapCopy.SetCurrentSyncCommittee(createEmptySyncCommittee())
if err != nil {
return errors.Wrap(err, "could not set current sync committee to zero while saving")
}
bkt := tx.Bucket(lightClientBootstrapBucket)
enc, err := encodeLightClientBootstrap(bootstrap)
enc, err := encodeLightClientBootstrap(bootstrapCopy, syncCommitteeHash)
if err != nil {
return err
}
@@ -50,20 +78,49 @@ func (s *Store) LightClientBootstrap(ctx context.Context, blockRoot []byte) (int
defer span.End()
var bootstrap interfaces.LightClientBootstrap
var syncCommitteeHash []byte
err := s.db.View(func(tx *bolt.Tx) error {
bkt := tx.Bucket(lightClientBootstrapBucket)
syncCommitteeBucket := tx.Bucket(lightClientSyncCommitteeBucket)
enc := bkt.Get(blockRoot)
if enc == nil {
return nil
}
var err error
bootstrap, err = decodeLightClientBootstrap(enc)
bootstrap, syncCommitteeHash, err = decodeLightClientBootstrap(enc)
if err != nil {
return errors.Wrap(err, "could not decode light client bootstrap")
}
var syncCommitteeBytes = syncCommitteeBucket.Get(syncCommitteeHash)
if syncCommitteeBytes == nil {
return errors.New("sync committee not found")
}
syncCommittee := &ethpb.SyncCommittee{}
if err := syncCommittee.UnmarshalSSZ(syncCommitteeBytes); err != nil {
return errors.Wrap(err, "could not unmarshal sync committee")
}
err = bootstrap.SetCurrentSyncCommittee(syncCommittee)
if err != nil {
return errors.Wrap(err, "could not set current sync committee while retrieving")
}
return err
})
return bootstrap, err
}
func encodeLightClientBootstrap(bootstrap interfaces.LightClientBootstrap) ([]byte, error) {
func createEmptySyncCommittee() *ethpb.SyncCommittee {
syncCom := make([][]byte, params.BeaconConfig().SyncCommitteeSize)
for i := 0; uint64(i) < params.BeaconConfig().SyncCommitteeSize; i++ {
syncCom[i] = make([]byte, fieldparams.BLSPubkeyLength)
}
return &ethpb.SyncCommittee{
Pubkeys: syncCom,
AggregatePubkey: make([]byte, fieldparams.BLSPubkeyLength),
}
}
func encodeLightClientBootstrap(bootstrap interfaces.LightClientBootstrap, syncCommitteeHash [32]byte) ([]byte, error) {
key, err := keyForLightClientUpdate(bootstrap.Version())
if err != nil {
return nil, err
@@ -72,48 +129,56 @@ func encodeLightClientBootstrap(bootstrap interfaces.LightClientBootstrap) ([]by
if err != nil {
return nil, errors.Wrap(err, "could not marshal light client bootstrap")
}
fullEnc := make([]byte, len(key)+len(enc))
fullEnc := make([]byte, len(key)+32+len(enc))
copy(fullEnc, key)
copy(fullEnc[len(key):], enc)
return snappy.Encode(nil, fullEnc), nil
copy(fullEnc[len(key):len(key)+32], syncCommitteeHash[:])
copy(fullEnc[len(key)+32:], enc)
compressedEnc := snappy.Encode(nil, fullEnc)
return compressedEnc, nil
}
func decodeLightClientBootstrap(enc []byte) (interfaces.LightClientBootstrap, error) {
func decodeLightClientBootstrap(enc []byte) (interfaces.LightClientBootstrap, []byte, error) {
var err error
enc, err = snappy.Decode(nil, enc)
if err != nil {
return nil, errors.Wrap(err, "could not snappy decode light client bootstrap")
return nil, nil, errors.Wrap(err, "could not snappy decode light client bootstrap")
}
var m proto.Message
var syncCommitteeHash []byte
switch {
case hasAltairKey(enc):
bootstrap := &ethpb.LightClientBootstrapAltair{}
if err := bootstrap.UnmarshalSSZ(enc[len(altairKey):]); err != nil {
return nil, errors.Wrap(err, "could not unmarshal Altair light client bootstrap")
if err := bootstrap.UnmarshalSSZ(enc[len(altairKey)+32:]); err != nil {
return nil, nil, errors.Wrap(err, "could not unmarshal Altair light client bootstrap")
}
m = bootstrap
syncCommitteeHash = enc[len(altairKey) : len(altairKey)+32]
case hasCapellaKey(enc):
bootstrap := &ethpb.LightClientBootstrapCapella{}
if err := bootstrap.UnmarshalSSZ(enc[len(capellaKey):]); err != nil {
return nil, errors.Wrap(err, "could not unmarshal Capella light client bootstrap")
if err := bootstrap.UnmarshalSSZ(enc[len(capellaKey)+32:]); err != nil {
return nil, nil, errors.Wrap(err, "could not unmarshal Capella light client bootstrap")
}
m = bootstrap
syncCommitteeHash = enc[len(capellaKey) : len(capellaKey)+32]
case hasDenebKey(enc):
bootstrap := &ethpb.LightClientBootstrapDeneb{}
if err := bootstrap.UnmarshalSSZ(enc[len(denebKey):]); err != nil {
return nil, errors.Wrap(err, "could not unmarshal Deneb light client bootstrap")
if err := bootstrap.UnmarshalSSZ(enc[len(denebKey)+32:]); err != nil {
return nil, nil, errors.Wrap(err, "could not unmarshal Deneb light client bootstrap")
}
m = bootstrap
case hasElectraKey(enc):
syncCommitteeHash = enc[len(denebKey) : len(denebKey)+32]
case HasElectraKey(enc):
bootstrap := &ethpb.LightClientBootstrapElectra{}
if err := bootstrap.UnmarshalSSZ(enc[len(electraKey):]); err != nil {
return nil, errors.Wrap(err, "could not unmarshal Electra light client bootstrap")
if err := bootstrap.UnmarshalSSZ(enc[len(ElectraKey)+32:]); err != nil {
return nil, nil, errors.Wrap(err, "could not unmarshal Electra light client bootstrap")
}
m = bootstrap
syncCommitteeHash = enc[len(ElectraKey) : len(ElectraKey)+32]
default:
return nil, errors.New("decoding of saved light client bootstrap is unsupported")
return nil, nil, errors.New("decoding of saved light client bootstrap is unsupported")
}
return light_client.NewWrappedBootstrap(m)
bootstrap, err := light_client.NewWrappedBootstrap(m)
return bootstrap, syncCommitteeHash, err
}
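With this change each distinct sync committee is stored once under its hash tree root, and a bootstrap record becomes the fork key, followed by the 32-byte sync committee hash, followed by the SSZ of the bootstrap with its committee zeroed, all snappy-compressed. A simplified sketch of that layout; the fixed-size fields stand in for SSZ and the real committee type, and the hash here is a plain SHA-256 rather than an SSZ hash tree root:

package main

import (
    "crypto/sha256"
    "fmt"

    "github.com/golang/snappy"
)

var denebKey = []byte("deneb") // illustrative fork prefix

// encodeBootstrap mirrors the new layout: fork prefix, then the 32-byte sync
// committee hash, then the bootstrap bytes with the committee stripped out.
func encodeBootstrap(key []byte, committeeHash [32]byte, bootstrapSSZ []byte) []byte {
    full := make([]byte, 0, len(key)+32+len(bootstrapSSZ))
    full = append(full, key...)
    full = append(full, committeeHash[:]...)
    full = append(full, bootstrapSSZ...)
    return snappy.Encode(nil, full)
}

// decodeBootstrap reverses it, returning the committee hash so the caller can
// look the committee up in its dedicated bucket.
func decodeBootstrap(enc, key []byte) (committeeHash [32]byte, bootstrapSSZ []byte, err error) {
    raw, err := snappy.Decode(nil, enc)
    if err != nil {
        return committeeHash, nil, err
    }
    if len(raw) < len(key)+32 {
        return committeeHash, nil, fmt.Errorf("encoding too short: %d bytes", len(raw))
    }
    copy(committeeHash[:], raw[len(key):len(key)+32])
    return committeeHash, raw[len(key)+32:], nil
}

func main() {
    committee := []byte("pretend sync committee bytes")
    hash := sha256.Sum256(committee) // stand-in for the SSZ hash tree root
    enc := encodeBootstrap(denebKey, hash, []byte("pretend bootstrap ssz"))

    gotHash, gotSSZ, err := decodeBootstrap(enc, denebKey)
    fmt.Println(gotHash == hash, string(gotSSZ), err) // true pretend bootstrap ssz <nil>
}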
func (s *Store) LightClientUpdates(ctx context.Context, startPeriod, endPeriod uint64) (map[uint64]interfaces.LightClientUpdate, error) {
@@ -212,9 +277,9 @@ func decodeLightClientUpdate(enc []byte) (interfaces.LightClientUpdate, error) {
return nil, errors.Wrap(err, "could not unmarshal Deneb light client update")
}
m = update
case hasElectraKey(enc):
case HasElectraKey(enc):
update := &ethpb.LightClientUpdateElectra{}
if err := update.UnmarshalSSZ(enc[len(electraKey):]); err != nil {
if err := update.UnmarshalSSZ(enc[len(ElectraKey):]); err != nil {
return nil, errors.Wrap(err, "could not unmarshal Electra light client update")
}
m = update
@@ -227,7 +292,7 @@ func decodeLightClientUpdate(enc []byte) (interfaces.LightClientUpdate, error) {
func keyForLightClientUpdate(v int) ([]byte, error) {
switch v {
case version.Electra:
return electraKey, nil
return ElectraKey, nil
case version.Deneb:
return denebKey, nil
case version.Capella:

View File

@@ -18,6 +18,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
"github.com/prysmaticlabs/prysm/v5/time/slots"
bolt "go.etcd.io/bbolt"
"google.golang.org/protobuf/proto"
)
@@ -122,7 +123,7 @@ func createUpdate(t *testing.T, v int) (interfaces.LightClientUpdate, error) {
StateRoot: sampleRoot,
BodyRoot: sampleRoot,
},
Execution: &enginev1.ExecutionPayloadHeaderElectra{
Execution: &enginev1.ExecutionPayloadHeaderDeneb{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
@@ -140,6 +141,34 @@ func createUpdate(t *testing.T, v int) (interfaces.LightClientUpdate, error) {
require.NoError(t, err)
st, err = util.NewBeaconStateElectra()
require.NoError(t, err)
case version.Fulu:
slot = primitives.Slot(config.FuluForkEpoch * primitives.Epoch(config.SlotsPerEpoch)).Add(1)
header, err = light_client.NewWrappedHeader(&pb.LightClientHeaderDeneb{
Beacon: &pb.BeaconBlockHeader{
Slot: 1,
ProposerIndex: primitives.ValidatorIndex(rand.Int()),
ParentRoot: sampleRoot,
StateRoot: sampleRoot,
BodyRoot: sampleRoot,
},
Execution: &enginev1.ExecutionPayloadHeaderDeneb{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
ExtraData: make([]byte, 0),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
TransactionsRoot: make([]byte, fieldparams.RootLength),
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
},
ExecutionBranch: sampleExecutionBranch,
})
require.NoError(t, err)
st, err = util.NewBeaconStateFulu()
require.NoError(t, err)
default:
return nil, fmt.Errorf("unsupported version %s", version.String(v))
}
@@ -167,6 +196,7 @@ func TestStore_LightClientUpdate_CanSaveRetrieve(t *testing.T) {
cfg.CapellaForkEpoch = 1
cfg.DenebForkEpoch = 2
cfg.ElectraForkEpoch = 3
cfg.FuluForkEpoch = 3
params.OverrideBeaconConfig(cfg)
db := setupDB(t)
@@ -213,6 +243,17 @@ func TestStore_LightClientUpdate_CanSaveRetrieve(t *testing.T) {
err = db.SaveLightClientUpdate(ctx, period, update)
require.NoError(t, err)
retrievedUpdate, err := db.LightClientUpdate(ctx, period)
require.NoError(t, err)
require.DeepEqual(t, update, retrievedUpdate, "retrieved update does not match saved update")
})
t.Run("Fulu", func(t *testing.T) {
update, err := createUpdate(t, version.Fulu)
require.NoError(t, err)
period := uint64(1)
err = db.SaveLightClientUpdate(ctx, period, update)
require.NoError(t, err)
retrievedUpdate, err := db.LightClientUpdate(ctx, period)
require.NoError(t, err)
require.DeepEqual(t, update, retrievedUpdate, "retrieved update does not match saved update")
@@ -571,7 +612,13 @@ func TestStore_LightClientBootstrap_CanSaveRetrieve(t *testing.T) {
retrievedBootstrap, err := db.LightClientBootstrap(ctx, []byte("blockRootAltair"))
require.NoError(t, err)
require.DeepEqual(t, bootstrap, retrievedBootstrap, "retrieved bootstrap does not match saved bootstrap")
require.DeepEqual(t, bootstrap.Header(), retrievedBootstrap.Header(), "retrieved bootstrap header does not match saved bootstrap header")
require.DeepEqual(t, bootstrap.CurrentSyncCommittee(), retrievedBootstrap.CurrentSyncCommittee(), "retrieved bootstrap sync committee does not match saved bootstrap sync committee")
savedBranch, err := bootstrap.CurrentSyncCommitteeBranch()
require.NoError(t, err)
retrievedBranch, err := retrievedBootstrap.CurrentSyncCommitteeBranch()
require.NoError(t, err)
require.DeepEqual(t, savedBranch, retrievedBranch, "retrieved bootstrap sync committee branch does not match saved bootstrap sync committee branch")
})
t.Run("Capella", func(t *testing.T) {
@@ -586,7 +633,13 @@ func TestStore_LightClientBootstrap_CanSaveRetrieve(t *testing.T) {
retrievedBootstrap, err := db.LightClientBootstrap(ctx, []byte("blockRootCapella"))
require.NoError(t, err)
require.DeepEqual(t, bootstrap, retrievedBootstrap, "retrieved bootstrap does not match saved bootstrap")
require.DeepEqual(t, bootstrap.Header(), retrievedBootstrap.Header(), "retrieved bootstrap header does not match saved bootstrap header")
require.DeepEqual(t, bootstrap.CurrentSyncCommittee(), retrievedBootstrap.CurrentSyncCommittee(), "retrieved bootstrap sync committee does not match saved bootstrap sync committee")
savedBranch, err := bootstrap.CurrentSyncCommitteeBranch()
require.NoError(t, err)
retrievedBranch, err := retrievedBootstrap.CurrentSyncCommitteeBranch()
require.NoError(t, err)
require.DeepEqual(t, savedBranch, retrievedBranch, "retrieved bootstrap sync committee branch does not match saved bootstrap sync committee branch")
})
t.Run("Deneb", func(t *testing.T) {
@@ -601,7 +654,13 @@ func TestStore_LightClientBootstrap_CanSaveRetrieve(t *testing.T) {
retrievedBootstrap, err := db.LightClientBootstrap(ctx, []byte("blockRootDeneb"))
require.NoError(t, err)
require.DeepEqual(t, bootstrap, retrievedBootstrap, "retrieved bootstrap does not match saved bootstrap")
require.DeepEqual(t, bootstrap.Header(), retrievedBootstrap.Header(), "retrieved bootstrap header does not match saved bootstrap header")
require.DeepEqual(t, bootstrap.CurrentSyncCommittee(), retrievedBootstrap.CurrentSyncCommittee(), "retrieved bootstrap sync committee does not match saved bootstrap sync committee")
savedBranch, err := bootstrap.CurrentSyncCommitteeBranch()
require.NoError(t, err)
retrievedBranch, err := retrievedBootstrap.CurrentSyncCommitteeBranch()
require.NoError(t, err)
require.DeepEqual(t, savedBranch, retrievedBranch, "retrieved bootstrap sync committee branch does not match saved bootstrap sync committee branch")
})
t.Run("Electra", func(t *testing.T) {
@@ -616,10 +675,138 @@ func TestStore_LightClientBootstrap_CanSaveRetrieve(t *testing.T) {
retrievedBootstrap, err := db.LightClientBootstrap(ctx, []byte("blockRootElectra"))
require.NoError(t, err)
require.DeepEqual(t, bootstrap, retrievedBootstrap, "retrieved bootstrap does not match saved bootstrap")
require.DeepEqual(t, bootstrap.Header(), retrievedBootstrap.Header(), "retrieved bootstrap header does not match saved bootstrap header")
require.DeepEqual(t, bootstrap.CurrentSyncCommittee(), retrievedBootstrap.CurrentSyncCommittee(), "retrieved bootstrap sync committee does not match saved bootstrap sync committee")
savedBranch, err := bootstrap.CurrentSyncCommitteeBranchElectra()
require.NoError(t, err)
retrievedBranch, err := retrievedBootstrap.CurrentSyncCommitteeBranchElectra()
require.NoError(t, err)
require.DeepEqual(t, savedBranch, retrievedBranch, "retrieved bootstrap sync committee branch does not match saved bootstrap sync committee branch")
})
}
func TestStore_LightClientBootstrap_MultipleBootstrapsWithSameSyncCommittee(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.AltairForkEpoch = 0
cfg.CapellaForkEpoch = 1
cfg.DenebForkEpoch = 2
cfg.ElectraForkEpoch = 3
cfg.EpochsPerSyncCommitteePeriod = 1
params.OverrideBeaconConfig(cfg)
db := setupDB(t)
ctx := context.Background()
bootstrap1, err := createDefaultLightClientBootstrap(primitives.Slot(uint64(params.BeaconConfig().AltairForkEpoch) * uint64(params.BeaconConfig().SlotsPerEpoch)))
require.NoError(t, err)
bootstrap2, err := createDefaultLightClientBootstrap(primitives.Slot(uint64(params.BeaconConfig().AltairForkEpoch) * uint64(params.BeaconConfig().SlotsPerEpoch)))
require.NoError(t, err)
randomSyncCommittee := createRandomSyncCommittee()
err = bootstrap1.SetCurrentSyncCommittee(randomSyncCommittee)
require.NoError(t, err)
err = bootstrap2.SetCurrentSyncCommittee(randomSyncCommittee)
require.NoError(t, err)
err = db.SaveLightClientBootstrap(ctx, []byte("blockRootAltair1"), bootstrap1)
require.NoError(t, err)
err = db.SaveLightClientBootstrap(ctx, []byte("blockRootAltair2"), bootstrap2)
require.NoError(t, err)
retrievedBootstrap1, err := db.LightClientBootstrap(ctx, []byte("blockRootAltair1"))
require.NoError(t, err)
retrievedBootstrap2, err := db.LightClientBootstrap(ctx, []byte("blockRootAltair2"))
require.NoError(t, err)
require.DeepEqual(t, bootstrap1.Header(), retrievedBootstrap1.Header(), "retrieved bootstrap1 header does not match saved bootstrap1 header")
require.DeepEqual(t, randomSyncCommittee, retrievedBootstrap1.CurrentSyncCommittee(), "retrieved bootstrap1 sync committee does not match saved bootstrap1 sync committee")
savedBranch, err := bootstrap1.CurrentSyncCommitteeBranch()
require.NoError(t, err)
retrievedBranch, err := retrievedBootstrap1.CurrentSyncCommitteeBranch()
require.NoError(t, err)
require.DeepEqual(t, savedBranch, retrievedBranch, "retrieved bootstrap1 sync committee branch does not match saved bootstrap1 sync committee branch")
require.DeepEqual(t, bootstrap2.Header(), retrievedBootstrap2.Header(), "retrieved bootstrap2 header does not match saved bootstrap2 header")
require.DeepEqual(t, randomSyncCommittee, retrievedBootstrap2.CurrentSyncCommittee(), "retrieved bootstrap2 sync committee does not match saved bootstrap2 sync committee")
savedBranch2, err := bootstrap2.CurrentSyncCommitteeBranch()
require.NoError(t, err)
retrievedBranch2, err := retrievedBootstrap2.CurrentSyncCommitteeBranch()
require.NoError(t, err)
require.DeepEqual(t, savedBranch2, retrievedBranch2, "retrieved bootstrap2 sync committee branch does not match saved bootstrap2 sync committee branch")
// Ensure that the sync committee is only stored once
err = db.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket(lightClientSyncCommitteeBucket)
require.NotNil(t, bucket)
count := bucket.Stats().KeyN
require.Equal(t, 1, count)
return nil
})
require.NoError(t, err)
}
func TestStore_LightClientBootstrap_MultipleBootstrapsWithDifferentSyncCommittees(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.AltairForkEpoch = 0
cfg.CapellaForkEpoch = 1
cfg.DenebForkEpoch = 2
cfg.ElectraForkEpoch = 3
cfg.EpochsPerSyncCommitteePeriod = 1
params.OverrideBeaconConfig(cfg)
db := setupDB(t)
ctx := context.Background()
bootstrap1, err := createDefaultLightClientBootstrap(primitives.Slot(uint64(params.BeaconConfig().AltairForkEpoch) * uint64(params.BeaconConfig().SlotsPerEpoch)))
require.NoError(t, err)
bootstrap2, err := createDefaultLightClientBootstrap(primitives.Slot(uint64(params.BeaconConfig().AltairForkEpoch) * uint64(params.BeaconConfig().SlotsPerEpoch)))
require.NoError(t, err)
err = bootstrap1.SetCurrentSyncCommittee(createRandomSyncCommittee())
require.NoError(t, err)
err = bootstrap2.SetCurrentSyncCommittee(createRandomSyncCommittee())
require.NoError(t, err)
err = db.SaveLightClientBootstrap(ctx, []byte("blockRootAltair1"), bootstrap1)
require.NoError(t, err)
err = db.SaveLightClientBootstrap(ctx, []byte("blockRootAltair2"), bootstrap2)
require.NoError(t, err)
retrievedBootstrap1, err := db.LightClientBootstrap(ctx, []byte("blockRootAltair1"))
require.NoError(t, err)
retrievedBootstrap2, err := db.LightClientBootstrap(ctx, []byte("blockRootAltair2"))
require.NoError(t, err)
require.DeepEqual(t, bootstrap1.Header(), retrievedBootstrap1.Header(), "retrieved bootstrap1 header does not match saved bootstrap1 header")
require.DeepEqual(t, bootstrap1.CurrentSyncCommittee(), retrievedBootstrap1.CurrentSyncCommittee(), "retrieved bootstrap1 sync committee does not match saved bootstrap1 sync committee")
savedBranch, err := bootstrap1.CurrentSyncCommitteeBranch()
require.NoError(t, err)
retrievedBranch, err := retrievedBootstrap1.CurrentSyncCommitteeBranch()
require.NoError(t, err)
require.DeepEqual(t, savedBranch, retrievedBranch, "retrieved bootstrap1 sync committee branch does not match saved bootstrap1 sync committee branch")
require.DeepEqual(t, bootstrap2.Header(), retrievedBootstrap2.Header(), "retrieved bootstrap2 header does not match saved bootstrap2 header")
require.DeepEqual(t, bootstrap2.CurrentSyncCommittee(), retrievedBootstrap2.CurrentSyncCommittee(), "retrieved bootstrap2 sync committee does not match saved bootstrap2 sync committee")
savedBranch2, err := bootstrap2.CurrentSyncCommitteeBranch()
require.NoError(t, err)
retrievedBranch2, err := retrievedBootstrap2.CurrentSyncCommitteeBranch()
require.NoError(t, err)
require.DeepEqual(t, savedBranch2, retrievedBranch2, "retrieved bootstrap2 sync committee branch does not match saved bootstrap2 sync committee branch")
// Ensure that the sync committee is stored twice
err = db.db.View(func(tx *bolt.Tx) error {
bucket := tx.Bucket(lightClientSyncCommitteeBucket)
require.NotNil(t, bucket)
count := bucket.Stats().KeyN
require.Equal(t, 2, count)
return nil
})
require.NoError(t, err)
}
func createDefaultLightClientBootstrap(currentSlot primitives.Slot) (interfaces.LightClientBootstrap, error) {
currentEpoch := slots.ToEpoch(currentSlot)
syncCommitteeSize := params.BeaconConfig().SyncCommitteeSize

View File

@@ -18,8 +18,9 @@ var (
registrationBucket = []byte("registration")
// Light Client Updates Bucket
lightClientUpdatesBucket = []byte("light-client-updates")
lightClientBootstrapBucket = []byte("light-client-bootstrap")
lightClientUpdatesBucket = []byte("light-client-updates")
lightClientBootstrapBucket = []byte("light-client-bootstrap")
lightClientSyncCommitteeBucket = []byte("light-client-sync-committee")
// Deprecated: This bucket was migrated in PR 6461. Do not use, except for migrations.
slotsHasObjectBucket = []byte("slots-has-objects")
@@ -52,8 +53,10 @@ var (
saveBlindedBeaconBlocksKey = []byte("save-blinded-beacon-blocks")
denebKey = []byte("deneb")
denebBlindKey = []byte("blind-deneb")
electraKey = []byte("electra")
ElectraKey = []byte("electra")
electraBlindKey = []byte("blind-electra")
fuluKey = []byte("fulu")
fuluBlindKey = []byte("blind-fulu")
// block root included in the beacon state used by weak subjectivity initial sync
originCheckpointBlockRootKey = []byte("origin-checkpoint-block-root")

View File

@@ -357,7 +357,7 @@ func (s *Store) processElectra(ctx context.Context, pbState *ethpb.BeaconStateEl
if err != nil {
return err
}
encodedState := snappy.Encode(nil, append(electraKey, rawObj...))
encodedState := snappy.Encode(nil, append(ElectraKey, rawObj...))
if err := bucket.Put(rootHash, encodedState); err != nil {
return err
}
@@ -517,9 +517,22 @@ func (s *Store) unmarshalState(_ context.Context, enc []byte, validatorEntries [
}
switch {
case hasElectraKey(enc):
case hasFuluKey(enc):
protoState := &ethpb.BeaconStateFulu{}
if err := protoState.UnmarshalSSZ(enc[len(fuluKey):]); err != nil {
return nil, errors.Wrap(err, "failed to unmarshal encoding for Electra")
}
ok, err := s.isStateValidatorMigrationOver()
if err != nil {
return nil, err
}
if ok {
protoState.Validators = validatorEntries
}
return statenative.InitializeFromProtoUnsafeFulu(protoState)
case HasElectraKey(enc):
protoState := &ethpb.BeaconStateElectra{}
if err := protoState.UnmarshalSSZ(enc[len(electraKey):]); err != nil {
if err := protoState.UnmarshalSSZ(enc[len(ElectraKey):]); err != nil {
return nil, errors.Wrap(err, "failed to unmarshal encoding for Electra")
}
ok, err := s.isStateValidatorMigrationOver()
@@ -675,7 +688,20 @@ func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, er
if err != nil {
return nil, err
}
return snappy.Encode(nil, append(electraKey, rawObj...)), nil
return snappy.Encode(nil, append(ElectraKey, rawObj...)), nil
case version.Fulu:
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateFulu)
if !ok {
return nil, errors.New("non valid inner state")
}
if rState == nil {
return nil, errors.New("nil state")
}
rawObj, err := rState.MarshalSSZ()
if err != nil {
return nil, err
}
return snappy.Encode(nil, append(fuluKey, rawObj...)), nil
default:
return nil, errors.New("invalid inner state")
}
@@ -699,7 +725,7 @@ func (s *Store) validatorEntries(ctx context.Context, blockRoot [32]byte) ([]*et
idxBkt := tx.Bucket(blockRootValidatorHashesBucket)
valKey := idxBkt.Get(blockRoot[:])
if len(valKey) == 0 {
return errors.Errorf("invalid compressed validator keys length")
return errors.Errorf("validator keys not found for given block root: %x", blockRoot)
}
// decompress the keys and check if they are of proper length.

View File

@@ -139,7 +139,7 @@ func TestState_CanSaveRetrieve(t *testing.T) {
st, err := util.NewBeaconStateElectra()
require.NoError(t, err)
require.NoError(t, st.SetSlot(100))
p, err := blocks.WrappedExecutionPayloadHeaderElectra(&enginev1.ExecutionPayloadHeaderElectra{
p, err := blocks.WrappedExecutionPayloadHeaderDeneb(&enginev1.ExecutionPayloadHeaderDeneb{
ParentHash: make([]byte, 32),
FeeRecipient: make([]byte, 20),
StateRoot: make([]byte, 32),

View File

@@ -0,0 +1,38 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["pruner.go"],
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/pruner",
visibility = [
"//beacon-chain:__subpackages__",
],
deps = [
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/iface:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["pruner_test.go"],
embed = [":go_default_library"],
deps = [
"//beacon-chain/db/testing:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots/testing:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
],
)

View File

@@ -0,0 +1,174 @@
package pruner
import (
"context"
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/iface"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
)
var log = logrus.WithField("prefix", "db-pruner")
type ServiceOption func(*Service)
// WithRetentionPeriod allows the user to specify a different data retention period than the spec default.
// The retention period is specified in epochs, and must be >= MIN_EPOCHS_FOR_BLOCK_REQUESTS.
func WithRetentionPeriod(retentionEpochs primitives.Epoch) ServiceOption {
return func(s *Service) {
defaultRetentionEpochs := helpers.MinEpochsForBlockRequests() + 1
if retentionEpochs < defaultRetentionEpochs {
log.WithField("userEpochs", retentionEpochs).
WithField("minRequired", defaultRetentionEpochs).
Warn("Retention period too low, using minimum required value")
retentionEpochs = defaultRetentionEpochs
}
s.ps = pruneStartSlotFunc(retentionEpochs)
}
}
func WithSlotTicker(slotTicker slots.Ticker) ServiceOption {
return func(s *Service) {
s.slotTicker = slotTicker
}
}
// Service defines a service that prunes beacon chain DB based on MIN_EPOCHS_FOR_BLOCK_REQUESTS.
type Service struct {
ctx context.Context
db db.Database
ps func(current primitives.Slot) primitives.Slot
prunedUpto primitives.Slot
done chan struct{}
slotTicker slots.Ticker
backfillWaiter func() error
initSyncWaiter func() error
}
func New(ctx context.Context, db iface.Database, genesisTime uint64, initSyncWaiter, backfillWaiter func() error, opts ...ServiceOption) (*Service, error) {
p := &Service{
ctx: ctx,
db: db,
ps: pruneStartSlotFunc(helpers.MinEpochsForBlockRequests() + 1), // The default retention window is MIN_EPOCHS_FOR_BLOCK_REQUESTS + 1 epochs behind the current slot.
done: make(chan struct{}),
slotTicker: slots.NewSlotTicker(slots.StartTime(genesisTime, 0), params.BeaconConfig().SecondsPerSlot),
initSyncWaiter: initSyncWaiter,
backfillWaiter: backfillWaiter,
}
for _, o := range opts {
o(p)
}
return p, nil
}
func (p *Service) Start() {
log.Info("Starting Beacon DB pruner service")
p.run()
}
func (p *Service) Stop() error {
log.Info("Stopping Beacon DB pruner service")
close(p.done)
return nil
}
func (p *Service) Status() error {
return nil
}
func (p *Service) run() {
if p.initSyncWaiter != nil {
log.Info("Waiting for initial sync service to complete before starting pruner")
if err := p.initSyncWaiter(); err != nil {
log.WithError(err).Error("Failed to start database pruner, error waiting for initial sync completion")
return
}
}
if p.backfillWaiter != nil {
log.Info("Waiting for backfill service to complete before starting pruner")
if err := p.backfillWaiter(); err != nil {
log.WithError(err).Error("Failed to start database pruner, error waiting for backfill completion")
return
}
}
defer p.slotTicker.Done()
for {
select {
case <-p.ctx.Done():
log.Debug("Stopping Beacon DB pruner service", "prunedUpto", p.prunedUpto)
return
case <-p.done:
log.Debug("Stopping Beacon DB pruner service", "prunedUpto", p.prunedUpto)
return
case slot := <-p.slotTicker.C():
// Prune at the middle of every epoch since we do a lot of things around epoch boundaries.
if slots.SinceEpochStarts(slot) != (params.BeaconConfig().SlotsPerEpoch / 2) {
continue
}
if err := p.prune(slot); err != nil {
log.WithError(err).Error("Failed to prune database")
}
}
}
}
// prune deletes historical chain data older than the cutoff slot derived from the given slot.
func (p *Service) prune(slot primitives.Slot) error {
// Prune everything up to this slot (inclusive).
pruneUpto := p.ps(slot)
// Can't prune beyond genesis.
if pruneUpto == 0 {
return nil
}
// Skip if already pruned up to this slot.
if pruneUpto <= p.prunedUpto {
return nil
}
log.WithFields(logrus.Fields{
"pruneUpto": pruneUpto,
}).Debug("Pruning chain data")
tt := time.Now()
if err := p.db.DeleteHistoricalDataBeforeSlot(p.ctx, pruneUpto); err != nil {
return errors.Wrapf(err, "could not delete upto slot %d", pruneUpto)
}
log.WithFields(logrus.Fields{
"prunedUpto": pruneUpto,
"duration": time.Since(tt),
"currentSlot": slot,
}).Debug("Successfully pruned chain data")
// Update pruning checkpoint.
p.prunedUpto = pruneUpto
return nil
}
// pruneStartSlotFunc returns a function that maps the current slot to the cutoff slot below which historical data should be pruned.
func pruneStartSlotFunc(retentionEpochs primitives.Epoch) func(primitives.Slot) primitives.Slot {
return func(current primitives.Slot) primitives.Slot {
if retentionEpochs > slots.MaxSafeEpoch() {
retentionEpochs = slots.MaxSafeEpoch()
}
offset := slots.UnsafeEpochStart(retentionEpochs)
if offset >= current {
return 0
}
return current - offset
}
}
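`pruneStartSlotFunc` turns a retention window in epochs into a cutoff slot: the cutoff is the current slot minus retentionEpochs times SlotsPerEpoch, clamped to zero near genesis. A small worked sketch of that arithmetic, using the mainnet 32-slot epoch and the same slot-80, 2-epoch values as the pruner test below, which expects everything before slot 16 to be pruned:

package main

import "fmt"

const slotsPerEpoch = 32 // mainnet value, used here for illustration

// pruneStart mirrors the retention arithmetic: subtract the retention window
// (in slots) from the current slot, never going below genesis.
func pruneStart(current, retentionEpochs uint64) uint64 {
    offset := retentionEpochs * slotsPerEpoch
    if offset >= current {
        return 0 // nothing old enough to prune yet
    }
    return current - offset
}

func main() {
    // With a 2-epoch retention window and the ticker firing mid-epoch at slot
    // 80, everything before slot 16 becomes eligible for deletion.
    fmt.Println(pruneStart(80, 2)) // 16
    fmt.Println(pruneStart(10, 2)) // 0: still inside the retention window
}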

View File

@@ -0,0 +1,135 @@
package pruner
import (
"context"
"testing"
"time"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/util"
slottest "github.com/prysmaticlabs/prysm/v5/time/slots/testing"
"github.com/sirupsen/logrus"
dbtest "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/testing/require"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func TestPruner_PruningConditions(t *testing.T) {
	tests := []struct {
		name              string
		synced            bool
		backfillCompleted bool
		expectedLog       string
	}{
		{
			name:              "Not synced",
			synced:            false,
			backfillCompleted: true,
			expectedLog:       "Waiting for initial sync service to complete before starting pruner",
		},
		{
			name:              "Backfill incomplete",
			synced:            true,
			backfillCompleted: false,
			expectedLog:       "Waiting for backfill service to complete before starting pruner",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			logrus.SetLevel(logrus.DebugLevel)
			hook := logTest.NewGlobal()
			ctx, cancel := context.WithCancel(context.Background())
			beaconDB := dbtest.SetupDB(t)
			slotTicker := &slottest.MockTicker{Channel: make(chan primitives.Slot)}

			waitChan := make(chan struct{})
			waiter := func() error {
				close(waitChan)
				return nil
			}
			var initSyncWaiter, backfillWaiter func() error
			if !tt.synced {
				initSyncWaiter = waiter
			}
			if !tt.backfillCompleted {
				backfillWaiter = waiter
			}

			p, err := New(ctx, beaconDB, uint64(time.Now().Unix()), initSyncWaiter, backfillWaiter, WithSlotTicker(slotTicker))
			require.NoError(t, err)

			go p.Start()
			<-waitChan
			cancel()

			if tt.expectedLog != "" {
				require.LogsContain(t, hook, tt.expectedLog)
			}
			require.NoError(t, p.Stop())
		})
	}
}

func TestPruner_PruneSuccess(t *testing.T) {
	ctx := context.Background()
	beaconDB := dbtest.SetupDB(t)

	// Create and save some blocks at different slots.
	var blks []*eth.SignedBeaconBlock
	for slot := primitives.Slot(1); slot <= 32; slot++ {
		blk := util.NewBeaconBlock()
		blk.Block.Slot = slot
		wsb, err := blocks.NewSignedBeaconBlock(blk)
		require.NoError(t, err)
		require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
		blks = append(blks, blk)
	}

	// Create a pruner with a retention period of 2 epochs (64 slots).
	retentionEpochs := primitives.Epoch(2)
	slotTicker := &slottest.MockTicker{Channel: make(chan primitives.Slot)}
	p, err := New(
		ctx,
		beaconDB,
		uint64(time.Now().Unix()),
		nil,
		nil,
		WithSlotTicker(slotTicker),
	)
	require.NoError(t, err)
	p.ps = func(current primitives.Slot) primitives.Slot {
		return current - primitives.Slot(retentionEpochs)*params.BeaconConfig().SlotsPerEpoch
	}

	// Start the pruner and trigger it at the middle of the 3rd epoch (slot 80).
	go p.Start()
	currentSlot := primitives.Slot(80) // middle of the 3rd epoch
	slotTicker.Channel <- currentSlot
	// Send the same slot again to ensure the first pruning operation has completed
	// before asserting on the database contents.
	slotTicker.Channel <- currentSlot

	for slot := primitives.Slot(1); slot <= 32; slot++ {
		root, err := blks[slot-1].Block.HashTreeRoot()
		require.NoError(t, err)
		present := beaconDB.HasBlock(ctx, root)
		if slot <= 16 { // These should be pruned.
			require.Equal(t, false, present, "Expected block at slot %d to be pruned", slot)
		} else { // These should remain.
			require.Equal(t, true, present, "Expected block at slot %d to exist", slot)
		}
	}
	require.NoError(t, p.Stop())
}


@@ -15,6 +15,7 @@ go_library(
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//beacon-chain/db/iface:go_default_library",
"//beacon-chain/db/kv:go_default_library",
"//beacon-chain/slasher/types:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
@@ -22,6 +23,7 @@ go_library(
"//io/file:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_golang_snappy//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
@@ -50,6 +52,7 @@ go_test(
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//testing/require:go_default_library",
"//time/slots:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",


@@ -8,6 +8,7 @@ import (
slashertypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/slasher/types"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/time/slots"
logTest "github.com/sirupsen/logrus/hooks/test"
@@ -177,8 +178,8 @@ func TestStore_PruneAttestations_OK(t *testing.T) {
if i > 0 {
source = target - 1
}
-att1 := createAttestationWrapper(source, target, []uint64{attester1}, []byte{0})
-att2 := createAttestationWrapper(source, target, []uint64{attester2}, []byte{1})
+att1 := createAttestationWrapper(version.Phase0, source, target, []uint64{attester1}, []byte{0})
+att2 := createAttestationWrapper(version.Phase0, source, target, []uint64{attester2}, []byte{1})
attestations = append(attestations, att1, att2)
}
}

Some files were not shown because too many files have changed in this diff.