Compare commits

...

88 Commits

Author SHA1 Message Date
Manu NALEPA
7fcd5a5460 PeerDAS: Generate private key to match subnets. 2024-06-11 10:46:44 +02:00
Manu NALEPA
d0f2789e25 CustodyColumnSubnets: Returns a slice instead of a map 2024-06-10 22:02:28 +02:00
Manu NALEPA
fe7cb7e5e2 PeerDAS: Remove unused ComputeExtendedMatrix and RecoverMatrix functions. 2024-06-10 22:02:24 +02:00
Manu NALEPA
0f74569012 peerDAS: Move functions in file. Add documentation. 2024-06-10 14:25:44 +02:00
Manu NALEPA
55e436eceb privKey: Improve logs. 2024-06-10 14:25:31 +02:00
Manu NALEPA
a6496b9408 PeerDAS: Gossip the reconstructed columns (#14079)
* PeerDAS: Broadcast not seen via gossip but reconstructed data columns.

* Address Nishant's comment.
2024-06-06 12:28:21 +02:00
Manu NALEPA
3b62c7bb4a PeerDAS: Only saved custodied columns even after reconstruction. (#14083) 2024-06-06 10:35:12 +02:00
Manu NALEPA
c109f28bdc recoverBlobs: Cover the 0 < blobsCount < fieldparams.MaxBlobsPerBlock case. (#14066)
* `recoverBlobs`: Cover the `0 < blobsCount < fieldparams.MaxBlobsPerBlock` case.

* Fix Nishant's comment.
2024-06-04 17:08:02 +08:00
Manu NALEPA
3f6e9c8420 PeerDAS: Withhold data on purpose. (#14076)
* Introduce hidden flag `data-columns-withhold-count`.

* Address Nishant's comment.
2024-06-04 10:38:59 +02:00
Manu NALEPA
54f2d91ef0 PeerDAS: Implement / use data column feed from database. (#14062)
* Remove some `_` identifiers.

* Blob storage: Implement a notifier system for data columns.

* `dataColumnSidecarByRootRPCHandler`: Remove ugly `time.Sleep(100 * time.Millisecond)`.

* Address Nishant's comment.
2024-06-04 09:52:25 +02:00
Manu NALEPA
b692050079 PeerDAS: Implement reconstruction. (#14036)
* Wrap errors, add logs.

* `missingColumnRequest`: Fix blobs <-> data columns mix.

* `ColumnIndices`: Return `map[uint64]bool` instead of `[fieldparams.NumberOfColumns]bool`.

* `DataColumnSidecars`: `interfaces.SignedBeaconBlock` ==> `interfaces.ReadOnlySignedBeaconBlock`.

We don't need any of the non read-only methods.

* Fix comments.

* `handleUnblidedBlock` ==> `handleUnblindedBlock`.

* `SaveDataColumn`: Move log from debug to trace.

If we attempt to save an already existing data column sidecar,
a debug log was printed.

This case could be quite common now with the data column reconstruction enabled.

* `sampling_data_columns.go` --> `data_columns_sampling.go`.

* Reconstruct data columns.
2024-05-30 10:00:09 +02:00
Nishant Das
ac7e5b6618 Fix Custody Columns (#14021) 2024-05-30 10:00:09 +02:00
Nishant Das
e82a2488ee Disable Evaluators For E2E (#14019)
* Hack E2E

* Fix it For Real

* Gofmt

* Remove
2024-05-30 10:00:09 +02:00
Nishant Das
ce1e6be98d Request Data Columns When Fetching Pending Blocks (#14007)
* Support Data Columns For By Root Requests

* Revert Config Changes

* Fix Panic

* Fix Process Block

* Fix Flags

* Lint

* Support Checkpoint Sync

* Manu's Review

* Add Support For Columns in Remaining Methods

* Unmarshal Uncorrectly
2024-05-30 10:00:09 +02:00
Manu NALEPA
463d45970b Fix CustodyColumns to comply with alpha-2 spectests. (#14008)
* Adding error wrapping

* Fix `CustodyColumnSubnets` tests.
2024-05-30 10:00:09 +02:00
Manu NALEPA
fbb07226f6 Fix beacon chain config. (#14017) 2024-05-30 10:00:09 +02:00
Nishant Das
f10805fe4b Adding Back Generated Objects (#14009) 2024-05-30 10:00:09 +02:00
Nishant Das
812dbc9eb9 Set Custody Count Correctly (#14004)
* Set Custody Count Correctly

* Fix Discovery Count
2024-05-30 10:00:09 +02:00
Manu NALEPA
de7e5deca5 Sample from peers some data columns. (#13980)
* PeerDAS: Implement sampling.

* `TestNewRateLimiter`: Fix with the new number of expected registered topics.
2024-05-30 10:00:09 +02:00
Nishant Das
8b40a6a591 Implement Data Columns By Range Request And Response Methods (#13972)
* Add Data Structure for New Request Type

* Add Data Column By Range Handler

* Add Data Column Request Methods

* Add new validation for columns by range requests

* Fix Build

* Allow Prysm Node To Fetch Data Columns

* Allow Prysm Node To Fetch Data Columns And Sync

* Bug Fixes For Interop

* GoFmt

* Use different var

* Manu's Review
2024-05-30 10:00:09 +02:00
Nishant Das
c49863b8c3 Enable E2E For PeerDAS (#13945)
* Enable E2E And Add Fixes

* Register Same Topic For Data Columns

* Initialize Capacity Of Slice

* Fix Initialization of Data Column Receiver

* Remove Mix In From Merkle Proof

* E2E: Subscribe to all subnets.

* Remove Index Check

* Remaining Bug Fixes to Get It Working

* Change Evaluator to Allow Test to Finish

* Fix Build

* Add Data Column Verification

* Fix LoopVar Bug

* Do Not Allocate Memory

* Update beacon-chain/blockchain/process_block.go

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>

* Update beacon-chain/core/peerdas/helpers.go

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>

* Update beacon-chain/core/peerdas/helpers.go

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>

* Gofmt

* Fix It Again

* Fix Test Setup

* Fix Build

* Fix Trusted Setup panic

* Fix Trusted Setup panic

* Use New Test

---------

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>
2024-05-30 10:00:09 +02:00
Justin Traglia
23b8bb0802 [PeerDAS] Upgrade c-kzg-4844 package (#13967)
* Upgrade c-kzg-4844 package

* Upgrade bazel deps
2024-05-30 10:00:09 +02:00
Manu NALEPA
1aca805036 SendDataColumnSidecarByRoot: Return RODataColumn instead of ROBlob. (#13957)
* `SendDataColumnSidecarByRoot`: Return `RODataColumn` instead of `ROBlob`.

* Make deepsource happier.
2024-05-30 10:00:09 +02:00
Manu NALEPA
559d7eae0f Spectests (#13940)
* Update `consensus_spec_version` to `v1.5.0-alpha.1`.

* `CustodyColumns`: Fix and implement spec tests.

* Make deepsource happy.

* `^uint64(0)` => `math.MaxUint64`.

* Fix `TestLoadConfigFile` test.
2024-05-30 10:00:09 +02:00
Nishant Das
ba74366801 Add DA Check For Data Columns (#13938)
* Add new DA check

* Exit early in the event no commitments exist.

* Gazelle

* Fix Mock Broadcaster

* Fix Test Setup

* Update beacon-chain/blockchain/process_block.go

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>

* Manu's Review

* Fix Build

---------

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>
2024-05-30 10:00:09 +02:00
Manu NALEPA
63bd276c48 Implement peer DAS proposer RPC (#13922)
* Remove capital letter from error messages.

* `[4]byte` => `[fieldparams.VersionLength]byte`.

* Prometheus: Remove extra `committee`.

They are probably due to a bad copy/paste.

Note: The name of the probe itself is remaining,
to ensure backward compatibility.

* Implement Proposer RPC for data columns.

* Fix TestProposer_ProposeBlock_OK test.

* Remove default peerDAS activation.

* `validateDataColumn`: Workaround to return a `VerifiedRODataColumn`
2024-05-30 10:00:09 +02:00
Nishant Das
0cd2786153 Update .bazelrc (#13931) 2024-05-30 10:00:09 +02:00
Manu NALEPA
32b2991dd1 Implement custody_subnet_count ENR field. (#13915)
https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/p2p-interface.md#the-discovery-domain-discv5
2024-05-30 10:00:09 +02:00
Manu NALEPA
5cfa5f3950 Peer das core (#13877)
* Bump `c-kzg-4844` lib to the `das` branch.

* Implement `MerkleProofKZGCommitments`.

* Implement `das-core.md`.

* Use `peerdas.CustodyColumnSubnets` and `peerdas.CustodyColumns`.

* `CustodyColumnSubnets`: Include `i` in the for loop.

* Remove `computeSubscribedColumnSubnet`.

* Remove `peerdas.CustodyColumns` out of the for loop.
2024-05-30 10:00:09 +02:00
Nishant Das
e7672c7801 Add Request And Response RPC Methods For Data Columns (#13909)
* Add RPC Handler

* Add Column Requests

* Update beacon-chain/db/filesystem/blob.go

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>

* Update beacon-chain/p2p/rpc_topic_mappings.go

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>

* Manu's Review

* Manu's Review

* Interface Fixes

* mock manager

---------

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>
2024-05-30 10:00:09 +02:00
Nishant Das
67c648bcad Add Data Column Gossip Handlers (#13894)
* Add Data Column Subscriber

* Add Data Column Validator

* Wire all Handlers In

* Fix Build

* Fix Test

* Fix IP in Test

* Fix IP in Test
2024-05-30 10:00:09 +02:00
Nishant Das
76cf2c3e0e Add Support For Discovery Of Column Subnets (#13883)
* Add Support For Discovery Of Column Subnets

* Lint for SubnetsPerNode

* Manu's Review

* Change to a better name
2024-05-30 10:00:09 +02:00
Nishant Das
b753b11356 add in networking params (#13866) 2024-05-30 10:00:09 +02:00
Nishant Das
acd93daf00 add it (#13865) 2024-05-30 10:00:09 +02:00
Nishant Das
57c9d59996 Add in column sidecars protos (#13862) 2024-05-30 10:00:09 +02:00
Nishant Das
7a4ecb6060 Restrict Dials From Discovery (#14052)
* Fix Excessive Subnet Dials

* Handle backoff in Iterator

* Slow Down Lookups

* Add Flag To Configure Dials

* Preston's Review

* Update cmd/beacon-chain/flags/base.go

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* Reduce polling period

* Manu's Review

---------

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2024-05-30 06:57:26 +00:00
terence
82f0ea5b11 Fix dependent root retrieval for first epoch (#14059) 2024-05-30 04:15:00 +00:00
Sammy Rosso
6fddd13cb2 Multiple BN HTTP resolver (#13433)
* http resolver

* Redo

* Revert "Redo"

This reverts commit 5437c44ac2.

* Revert "http resolver"

This reverts commit 206207b530.

* Add host change to ValidatorClient + Validator

* Update mockgen

* Tidy

* Add mock validator

* Update gomock

* Gaz

* Solve interface issues

* Fix host

* Fix test

* Add tests

* Add endpoint change log

* Fix log

* Gen mock

* Fix test

* Fix deepsource

* Lint + deepsource

* Move to healthCheckRoutine

* Fix build errors

* Switch host to string

* Forgot a couple

* Radek' review

* Add PushProposerSettings to goroutine

* Radek' review

* James' review + test fix

* Radek' suggestion

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Check if new node is healthy

* Fix linter errors

* Add host switch logic to ChangeHost

* Lint + comment

* Fix messy merge

* rename ChangeHost to SetHost

* improve log

* remove log

* switch one node

* rename param

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: rkapka <radoslaw.kapka@gmail.com>
2024-05-29 01:43:06 +00:00
terence
43c7659d18 Fix dependent root retrieval genesis case (#14053)
* Fix dependent root retrieval genesis case

* Remove print
2024-05-28 16:50:41 +00:00
Radosław Kapka
2d15e53dab Eip 7549 core (#14037)
* interfaces move

* build fix

* remove annoying warning

* more build fixes

* review

* core code

* tests part 1

* tests part 2

* TranslateParticipation doesn't need Electra

* remove unused function

* pending atts don't need Electra

* tests part 3

* build fixes

* review

* remove newline

* review

* fix test
2024-05-28 13:56:36 +00:00
Radosław Kapka
2f2152e039 Only log error when aggregator check fails (#14046)
* Only log error when aggregator check fails

* review
2024-05-27 18:05:46 +00:00
Preston Van Loon
2542189efc eip-7251: process_effective_balance_updates (#14003)
* eip-7251: process_effective_balance_updates

Spectests for process_effective_balance_updates

process_effective_balance_updates unit tests

* PR feedback from the amazing @rkapka

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2024-05-25 00:12:38 +00:00
terence
8e6d39a44b Support electra blob type for by range request (#14047)
Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2024-05-24 18:56:40 +00:00
terence
c35889d4c6 Fix CommitteeAssignments to not return every validator (#14039)
* Rewrite CommitteeAssignments to not return every validator

* Potuz's feedback
2024-05-24 16:56:42 +00:00
Sammy Rosso
10dedd5ced Fix race conditions + cleanup (#14041) 2024-05-23 22:07:16 +00:00
james-prysm
d2966a4c5b Electra core transition operations (#14001)
* adding electra operations

* enabling spec tests

* adding electra process epoch

* skipping spec tests for now

* Update testing/spectest/minimal/electra/fork_transition/BUILD.bazel

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* fixing naming

* gaz

* fixing more bazel build stuff

---------

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2024-05-23 18:01:38 +00:00
kasey
62b5c43d87 paranoid underflow protection without error handling (#14044)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-05-23 17:57:53 +00:00
kasey
b04baa93cd convert ZeroWei to a func to avoid shared pointer (#14043)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-05-23 15:15:50 +00:00
james-prysm
2e84208169 WebFix develop (#14040)
* fixing issues introduced by PR 13593

* missed setting db

* linting
2024-05-23 14:07:30 +00:00
kasey
2265af58ae Unwrap payload bid (#14035)
* engine GetPayload returns entire response

* deprecate PayloadValueTo(Gwei|Wei)

* return entire bid from builder getter

* get bid value from api bid (not ExecutionData)

* plumb bid and bundle through BuildBlockParallel

* rm ValueInGwei

* removing wei/gwei fields from the payload wrappers

* commentary around the little-endian situation

* finish the job in BuildBlockParallel

* light self-review cleanup

* fix spectest mock

* restore engine timeout

* lint fixes

* de-duplicate imports

* remove errant comment

* James feedback

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-05-22 22:16:32 +00:00
Sammy Rosso
4d190c41cc Fix TestNodeHealth_Concurrently race condition (#14033) 2024-05-22 18:25:42 +00:00
Radosław Kapka
0fbb27d8e3 EIP-7549: Add aggregate attestation interfaces (#14029)
* interfaces move

* build fix

* remove annoying warning

* more build fixes

* review
2024-05-22 16:14:08 +00:00
Bhargava Shastry
3df3e84270 Fuzz ssz round trip (#14006)
* Initial commit

* Fuzz all generated ssz en/decoders.

* Make proto message comparison aware of nested proto messages and differing unknown fields therein.

* Don't name receiver variable if not necessary.

* Run gazelle
2024-05-21 20:09:49 +00:00
Radosław Kapka
30cc23c5de Substantial VC cleanup (#13593)
* Cleanup part 1

* Cleanup part 2

* Cleanup part 3

* remove lock field init

* doc for SignerConfig

* remove vars

* use full Keymanager word in function

* revert interface rename

* linter

* fix build issues

* review
2024-05-21 16:39:00 +00:00
kasey
9befb6bd06 g/wei math->primitives, some new helpers (#14026)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-05-20 15:40:46 +00:00
Radosław Kapka
8a12b78684 Remove Beacon API Postman collection (#14014) 2024-05-17 20:40:13 +00:00
kasey
46168607e8 Electra payload body engine methods (#14000)
* Combined v1/v2 payload body handling

* prevent overflows when dealing with electra fixture

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-05-17 20:31:07 +00:00
Preston Van Loon
1272b9e186 eip-7251: process_pending_balance_deposits (#14002)
* eip-7251: process_pending_balance_deposits

* Update beacon-chain/core/electra/balance_deposits_test.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Remove defensive check. A unit test shows nothing bad happens

* Safe sub to protect from underflow

* Use @kasey's idea for safer subtraction

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2024-05-15 17:29:38 +00:00
Preston Van Loon
fcbe19445a eip-7251: process consolidations (#13983)
* eip-7251: process_pending_consolidations and process_consolidations

* Consolidate unit tests + spectests

Fix failing spectest //testing/spectest/minimal/electra/operations:go_default_test

* Unskip consolidation processing for minimal spectests

* PR feedback

* Update beacon-chain/core/electra/consolidations_test.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update beacon-chain/core/electra/consolidations_test.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Move consolidation limit check outside of the loop

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2024-05-15 13:50:33 +00:00
terence
2b4dffa87d Support Electra for WriteBlockChunk (#13999) 2024-05-15 02:52:42 +00:00
Preston Van Loon
49a6d02e12 Enable experimental_remote_downloader in CI. (#13996) 2024-05-14 18:04:39 +00:00
Preston Van Loon
2b06dfd4a3 Debugging attestation bitlist issues in minimal spec (#13997) 2024-05-14 13:56:00 +00:00
terence
6e81b4e84b Correctly return electra attestations for block getter (#13993) 2024-05-14 03:33:01 +00:00
terence
0de1282e1c Support version Electra for ForkchoiceUpdated (#13994)
* Support version Electra for ForkchoiceUpdated

* Update PbV3 version check
2024-05-13 22:21:18 +00:00
terence
e3db52ca1f Fix GetPayloadMethodV4 endpoint string (#13992) 2024-05-13 22:21:07 +00:00
Preston Van Loon
c5a36d4c70 eip-7251: queue_entire_balance_and_reset_validator, queue_excess_active_balance, and switch_to_compounding_validator with tests (#13982) 2024-05-13 18:12:38 +00:00
Preston Van Loon
e28b6695ba eip-7251: compute_consolidation_epoch_and_update_churn with tests (#13981)
tests for compute_consolidation_epoch_and_update_churn

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2024-05-13 14:25:07 +00:00
kasey
de177f74fb electra engine api support (#13978)
* electra engine api support

* add marshaling support for ExecutionPayloadElectra

* add receipts to json tests

* deep source

* simplify slice handling

* deep source lint about type/method order

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-05-12 20:19:08 +00:00
terence
e4310aef73 Update interop genesis for Electra (#13991) 2024-05-12 16:34:02 +00:00
terence
d71079e1d8 Add proposer support for Electra (#13987) 2024-05-12 13:55:01 +00:00
terence
c08d2f36b0 Add p2p support for Electra (#13985)
* Add p2p support for Electra

* Fix TestGossipTopicMappings_CorrectBlockType
2024-05-11 18:27:16 +00:00
terence
839a80e339 Add proposer gRPC support for Electra (#13984)
* Add proposer RPC support for Electra

* Kasey's feedback
2024-05-11 12:59:24 +00:00
Radosław Kapka
a35535043e Update state readme (#13890)
* README.md for the state package

* Update beacon-chain/state/state-native/README.md

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>

* Revert "Update beacon-chain/state/state-native/README.md"

This reverts commit 6a4be3bae5.

---------

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>
2024-05-10 16:49:02 +00:00
Preston Van Loon
323dd7b22d Electra: (partial) spectests (#13977)
* Electra epoch transition, re-exports only

* Buildable electra spectests

Fix minimal tests that are passing on mainnet

* Skip failing tests
2024-05-10 14:09:09 +00:00
terence
102128ca2e Add electra DB (#13975)
* Add electra DB

* Fix typo

* Revert deep ssz change

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2024-05-09 21:49:17 +00:00
Preston Van Loon
f3dd75a2c4 eip-7251: updated slash_validator with tests (#13976)
Tests for updated slash_validator
2024-05-09 20:40:54 +00:00
Preston Van Loon
0869814a0e eip-7251: updated initiate_validator_exit with tests (#13974)
eip-7251: tests for initiate_validator_exit
2024-05-09 15:21:22 +00:00
Brandon Liu
41edee9fe9 use time.NewTimer() to avoid possible memory leaks (#13800)
Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2024-05-08 17:41:47 +00:00
james-prysm
2fa3694746 Electra: upgrade (#13933)
* wip fork logic upgrade

* fixing replay and fork.go

* improving process function and adding tests for transition

* updating unit tests and temporarily removing some fields on state_trie.go

* updating state

* wip adding upgrade to electra code

* adding some comments

* adding spec tests

* fixing values used in state transition logic

* updating upgrade test

* gofmt

* avoid dup word linting

* fixing spec tests for fork

* gaz

* fixing tests

* improving unit test with new getters

* fixing bazel for minimal fork test

* adding bazel file

* Update beacon-chain/core/electra/upgrade.go

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* addressing some comments and adding more tests

* addressing more feedback

* one more feedback

* changing value to interface after talking to preston

* adding missed review feedback

* fixing linting

* noticed I was using the wrong function in the state upgrade

* fixing and ignoring some deepsource issues

* moving core electra validator functions to helper to remove circular dependencies in other PRs

* missed deepsource complaint

* Update upgrade.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update testing/util/electra_state.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update testing/util/electra_state.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* addressing feedback

* removing deepsoure ignore comments

---------

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2024-05-08 15:16:31 +00:00
Nishant Das
e9606b3635 Handle Each Blob In Its Own Goroutine (#13959) 2024-05-07 22:21:27 +00:00
dependabot[bot]
ed7c4bb6a7 Bump golang.org/x/net from 0.21.0 to 0.23.0 (#13895)
Bumps [golang.org/x/net](https://github.com/golang/net) from 0.21.0 to 0.23.0.
- [Commits](https://github.com/golang/net/compare/v0.21.0...v0.23.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-07 21:57:45 +00:00
Preston Van Loon
c93fea4ec4 Update spectests to v1.5.0-alpha.2 (#13961) 2024-05-07 20:56:08 +00:00
Nishant Das
aa847991e0 Update Libp2p Dependencies (#13960)
* Update Libp2p

* Update Go Sum
2024-05-07 15:10:18 +00:00
Radosław Kapka
5f1b903bdf EIP-7549 beacon spec (#13946)
* EIP-7549 beacon spec

* reviews

* change signature of AttestingIndices
2024-05-07 13:48:23 +00:00
terence
49f3531aed Remove unused validator map copy method (#13954) 2024-05-07 00:47:05 +00:00
Preston Van Loon
9b2934f1f6 Electra: BeaconState implementation (#13919)
* Electra: Beacon State

* Electra: Beacon state fixes from PR 13919

* Add missing tests - part 1

* Split eip_7251_root.go into different files and reuse/share code with historical state summaries root. It's identical!

* Add missing tests - part 2

* deposit receipts start index getters and setters (#13947)

* adding in getters and setters for deposit receipts start index

* adding tests

* gaz

* Add missing tests - part 3 of 3

Update the electra withdrawal example with a ssz state containing pending partial withdrawals

* add tests for beacon-chain/state/state-native/getters_balance_deposits.go

* Add electra field to testing/util/block.go execution payload

* godoc commentary on public methods

* Fix failing test

* Add balances index out of bounds check and relevant tests.

* Revert switch case electra

* Instead of copying spectest data into testdata, use the spectest dependency

* Deepsource fixes

* Address @rkapka PR feedback

* s/MaxPendingPartialsPerWithdrawalSweep/MaxPendingPartialsPerWithdrawalsSweep/

* Use multivalue slice compatible accessors for validator and balance in ActiveBalanceAtIndex

* More @rkapka feedback. What a great reviewer!

* More tests for branching logic in ExitEpochAndUpdateChurn

* fix build

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2024-05-06 18:04:33 +00:00
Preston Van Loon
26355768a0 Spectest: v1.5.0-alpha.1 (#13934)
* Update spectests to v1.5.0-alpha.1

* Add PeerDAS config change
2024-05-03 18:53:46 +00:00
kasey
80bff0dc2d Fork-specific consensus-types interfaces (#13948)
* fork-specific interface for electra

* add electra to wrapped payload switch

* use electra body in block factory

* deepsource

* rm pb getters from electra payload

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-05-03 17:12:27 +00:00
615 changed files with 23674 additions and 8378 deletions

View File

@@ -22,6 +22,7 @@ coverage --define=coverage_enabled=1
build --workspace_status_command=./hack/workspace_status.sh
build --define blst_disabled=false
build --compilation_mode=opt
run --define blst_disabled=false
build:blst_disabled --define blst_disabled=true

View File

@@ -12,8 +12,7 @@
build:remote-cache --remote_download_minimal
build:remote-cache --remote_build_event_upload=minimal
build:remote-cache --remote_cache=grpc://bazel-remote-cache:9092
# Does not work with rules_oci. See https://github.com/bazel-contrib/rules_oci/issues/292
#build:remote-cache --experimental_remote_downloader=grpc://bazel-remote-cache:9092
build:remote-cache --experimental_remote_downloader=grpc://bazel-remote-cache:9092
build:remote-cache --remote_local_fallback
build:remote-cache --experimental_remote_cache_async
build:remote-cache --experimental_remote_merkle_tree_cache

View File

@@ -227,7 +227,7 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.5.0-alpha.0"
consensus_spec_version = "v1.5.0-alpha.2"
bls_test_version = "v0.1.1"
@@ -243,7 +243,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "33c5547772b6d8d6f041dff7e7d26b0358c2392daed34394a3aa81147812a81c",
integrity = "sha256-NNXBa7SZ2sFb68HPNahgu1p0yDBpjuKJuLfRCl7vvoQ=",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)
@@ -259,7 +259,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "06f286199cf2fedd4700487fb8feb0904e0ae18daaa4b3f70ea430ca9c388167",
integrity = "sha256-7BnlBvGWU92iAB100cMaAXVQhRrqpMQbavgrI+/paCw=",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)
@@ -275,7 +275,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "5f2a4452b323075eba6bf950003f7d91fd04ebcbde5bd087beafb5d6f6325ad4",
integrity = "sha256-VCHhcNt+fynf/sHK11qbRBAy608u9T1qAafvAGfxQhA=",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)
@@ -290,7 +290,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "fd7e83e8cbeb3e297f2aeb93776305f7d606272c97834d8d9be673984501ed36",
integrity = "sha256-a2aCNFyFkYLtf6QSwGOHdx7xXHjA2NNT8x8ZuxB0aes=",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
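
Note on the WORKSPACE hunks above: they bump consensus_spec_version from v1.5.0-alpha.0 to v1.5.0-alpha.2 and switch the spectest archives from hex sha256 checksums to Bazel's integrity attribute, which takes a Subresource Integrity string of the form "sha256-" + base64(digest bytes) rather than the hex-encoded digest.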

View File

@@ -36,19 +36,19 @@ func (n *NodeHealthTracker) IsHealthy() bool {
}
func (n *NodeHealthTracker) CheckHealth(ctx context.Context) bool {
n.RLock()
n.Lock()
defer n.Unlock()
newStatus := n.node.IsHealthy(ctx)
if n.isHealthy == nil {
n.isHealthy = &newStatus
}
isStatusChanged := newStatus != *n.isHealthy
n.RUnlock()
isStatusChanged := newStatus != *n.isHealthy
if isStatusChanged {
n.Lock()
// Double-check the condition to ensure it hasn't changed since the first check.
// Update the health status
n.isHealthy = &newStatus
n.Unlock() // It's better to unlock as soon as the protected section is over.
// Send the new status to the health channel
n.healthChan <- newStatus
}
return newStatus
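
The hunk above replaces the earlier read-lock/double-check sequence in CheckHealth with a single exclusive lock, so reading the cached status, comparing it with the fresh result, and updating it can no longer interleave between goroutines. A minimal self-contained sketch of the resulting pattern follows; the type names and channel handling are illustrative, not Prysm's actual implementation (this sketch drops the notification instead of blocking while the lock is held).

package health

import (
	"context"
	"sync"
)

// Checker reports whether the underlying beacon node is healthy.
type Checker interface {
	IsHealthy(ctx context.Context) bool
}

// Tracker caches the last observed health status and publishes changes.
// Illustrative only; field and type names do not match Prysm's.
type Tracker struct {
	sync.Mutex
	node       Checker
	isHealthy  *bool
	healthChan chan bool // buffered so publishing never blocks under the lock
}

func NewTracker(node Checker) *Tracker {
	return &Tracker{node: node, healthChan: make(chan bool, 1)}
}

// CheckHealth queries the node and, under one exclusive lock, compares the
// fresh status with the cached one, records any change, and publishes it.
func (t *Tracker) CheckHealth(ctx context.Context) bool {
	t.Lock()
	defer t.Unlock()

	newStatus := t.node.IsHealthy(ctx)
	if t.isHealthy == nil {
		t.isHealthy = &newStatus
	}

	if newStatus != *t.isHealthy {
		t.isHealthy = &newStatus
		select {
		case t.healthChan <- newStatus:
		default: // drop the notification rather than block while locked
		}
	}
	return newStatus
}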

View File

@@ -99,9 +99,9 @@ func TestNodeHealth_Concurrency(t *testing.T) {
for i := 0; i < numGoroutines; i++ {
go func() {
defer wg.Done()
client.EXPECT().IsHealthy(gomock.Any()).Return(false)
client.EXPECT().IsHealthy(gomock.Any()).Return(false).Times(1)
n.CheckHealth(context.Background())
client.EXPECT().IsHealthy(gomock.Any()).Return(true)
client.EXPECT().IsHealthy(gomock.Any()).Return(true).Times(1)
n.CheckHealth(context.Background())
}()
}

View File

@@ -6,6 +6,7 @@ import (
consensus_types "github.com/prysmaticlabs/prysm/v5/consensus-types"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
)
@@ -22,7 +23,7 @@ type SignedBid interface {
type Bid interface {
Header() (interfaces.ExecutionData, error)
BlobKzgCommitments() ([][]byte, error)
Value() []byte
Value() primitives.Wei
Pubkey() []byte
Version() int
IsNil() bool
@@ -125,8 +126,8 @@ func (b builderBid) Version() int {
}
// Value --
func (b builderBid) Value() []byte {
return b.p.Value
func (b builderBid) Value() primitives.Wei {
return primitives.LittleEndianBytesToWei(b.p.Value)
}
// Pubkey --
@@ -165,7 +166,7 @@ func WrappedBuilderBidCapella(p *ethpb.BuilderBidCapella) (Bid, error) {
// Header returns the execution data interface.
func (b builderBidCapella) Header() (interfaces.ExecutionData, error) {
// We have to convert big endian to little endian because the value is coming from the execution layer.
return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header, blocks.PayloadValueToWei(b.p.Value))
return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header)
}
// BlobKzgCommitments --
@@ -179,8 +180,8 @@ func (b builderBidCapella) Version() int {
}
// Value --
func (b builderBidCapella) Value() []byte {
return b.p.Value
func (b builderBidCapella) Value() primitives.Wei {
return primitives.LittleEndianBytesToWei(b.p.Value)
}
// Pubkey --
@@ -222,8 +223,8 @@ func (b builderBidDeneb) Version() int {
}
// Value --
func (b builderBidDeneb) Value() []byte {
return b.p.Value
func (b builderBidDeneb) Value() primitives.Wei {
return primitives.LittleEndianBytesToWei(b.p.Value)
}
// Pubkey --
@@ -249,7 +250,7 @@ func (b builderBidDeneb) HashTreeRootWith(hh *ssz.Hasher) error {
// Header --
func (b builderBidDeneb) Header() (interfaces.ExecutionData, error) {
// We have to convert big endian to little endian because the value is coming from the execution layer.
return blocks.WrappedExecutionPayloadHeaderDeneb(b.p.Header, blocks.PayloadValueToWei(b.p.Value))
return blocks.WrappedExecutionPayloadHeaderDeneb(b.p.Header)
}
// BlobKzgCommitments --

View File

@@ -330,7 +330,7 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
if err != nil {
return nil, nil, err
}
ed, err := blocks.NewWrappedExecutionData(pb, nil)
ed, err := blocks.NewWrappedExecutionData(pb)
if err != nil {
return nil, nil, err
}

View File

@@ -6,7 +6,6 @@ import (
"encoding/json"
"fmt"
"io"
"math/big"
"net/http"
"net/url"
"strconv"
@@ -16,6 +15,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
types "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
v1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
@@ -198,12 +198,12 @@ func TestClient_GetHeader(t *testing.T) {
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(expectedTxRoot, withdrawalsRoot))
require.Equal(t, uint64(1), bidHeader.GasUsed())
value, err := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
// this matches the value in the testExampleHeaderResponse
bidStr := "652312848583266388373324160190187140051835877600158453279131187530910662656"
value, err := stringToUint256(bidStr)
require.NoError(t, err)
require.Equal(t, fmt.Sprintf("%#x", value.SSZBytes()), fmt.Sprintf("%#x", bid.Value()))
bidValue := bytesutil.ReverseByteOrder(bid.Value())
require.DeepEqual(t, bidValue, value.Bytes())
require.DeepEqual(t, big.NewInt(0).SetBytes(bidValue), value.Int)
require.Equal(t, 0, value.Int.Cmp(primitives.WeiToBigInt(bid.Value())))
require.Equal(t, bidStr, primitives.WeiToBigInt(bid.Value()).String())
})
t.Run("capella", func(t *testing.T) {
hc := &http.Client{
@@ -230,12 +230,11 @@ func TestClient_GetHeader(t *testing.T) {
withdrawalsRoot, err := bidHeader.WithdrawalsRoot()
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(expectedWithdrawalsRoot, withdrawalsRoot))
value, err := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
bidStr := "652312848583266388373324160190187140051835877600158453279131187530910662656"
value, err := stringToUint256(bidStr)
require.NoError(t, err)
require.Equal(t, fmt.Sprintf("%#x", value.SSZBytes()), fmt.Sprintf("%#x", bid.Value()))
bidValue := bytesutil.ReverseByteOrder(bid.Value())
require.DeepEqual(t, bidValue, value.Bytes())
require.DeepEqual(t, big.NewInt(0).SetBytes(bidValue), value.Int)
require.Equal(t, 0, value.Int.Cmp(primitives.WeiToBigInt(bid.Value())))
require.Equal(t, bidStr, primitives.WeiToBigInt(bid.Value()).String())
})
t.Run("deneb", func(t *testing.T) {
hc := &http.Client{
@@ -262,12 +261,13 @@ func TestClient_GetHeader(t *testing.T) {
withdrawalsRoot, err := bidHeader.WithdrawalsRoot()
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(expectedWithdrawalsRoot, withdrawalsRoot))
value, err := stringToUint256("652312848583266388373324160190187140051835877600158453279131187530910662656")
bidStr := "652312848583266388373324160190187140051835877600158453279131187530910662656"
value, err := stringToUint256(bidStr)
require.NoError(t, err)
require.Equal(t, fmt.Sprintf("%#x", value.SSZBytes()), fmt.Sprintf("%#x", bid.Value()))
bidValue := bytesutil.ReverseByteOrder(bid.Value())
require.DeepEqual(t, bidValue, value.Bytes())
require.DeepEqual(t, big.NewInt(0).SetBytes(bidValue), value.Int)
require.Equal(t, 0, value.Int.Cmp(primitives.WeiToBigInt(bid.Value())))
require.Equal(t, bidStr, primitives.WeiToBigInt(bid.Value()).String())
kcgCommitments, err := bid.BlobKzgCommitments()
require.NoError(t, err)
require.Equal(t, len(kcgCommitments) > 0, true)

View File

@@ -156,6 +156,8 @@ func (bb *BuilderBid) ToProto() (*eth.BuilderBid, error) {
}
return &eth.BuilderBid{
Header: header,
// Note that SSZBytes() reverses byte order for the little-endian representation.
// Uint256.Bytes() is big-endian, SSZBytes takes this value and reverses it.
Value: bb.Value.SSZBytes(),
Pubkey: bb.Pubkey,
}, nil
@@ -484,6 +486,8 @@ func (bb *BuilderBidCapella) ToProto() (*eth.BuilderBidCapella, error) {
}
return &eth.BuilderBidCapella{
Header: header,
// Note that SSZBytes() reverses byte order for the little-endian representation.
// Uint256.Bytes() is big-endian, SSZBytes takes this value and reverses it.
Value: bytesutil.SafeCopyBytes(bb.Value.SSZBytes()),
Pubkey: bytesutil.SafeCopyBytes(bb.Pubkey),
}, nil
@@ -1022,8 +1026,10 @@ func (bb *BuilderBidDeneb) ToProto() (*eth.BuilderBidDeneb, error) {
return &eth.BuilderBidDeneb{
Header: header,
BlobKzgCommitments: kzgCommitments,
Value: bytesutil.SafeCopyBytes(bb.Value.SSZBytes()),
Pubkey: bytesutil.SafeCopyBytes(bb.Pubkey),
// Note that SSZBytes() reverses byte order for the little-endian representation.
// Uint256.Bytes() is big-endian, SSZBytes takes this value and reverses it.
Value: bytesutil.SafeCopyBytes(bb.Value.SSZBytes()),
Pubkey: bytesutil.SafeCopyBytes(bb.Pubkey),
}, nil
}
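
The comments added above capture the byte-order convention for bid values: Uint256.Bytes() is big-endian (the form the builder API and execution layer use), while SSZ serializes a uint256 as 32 little-endian bytes, so SSZBytes() reverses the order. A small standalone illustration of that reversal, with a hypothetical reverse helper:

package main

import (
	"fmt"
	"math/big"
)

// reverse returns a copy of b with its byte order flipped
// (big-endian <-> little-endian).
func reverse(b []byte) []byte {
	out := make([]byte, len(b))
	for i, v := range b {
		out[len(b)-1-i] = v
	}
	return out
}

func main() {
	// 1 ETH in wei as a big integer.
	wei := new(big.Int).SetUint64(1_000_000_000_000_000_000)

	// big.Int.Bytes() is big-endian with leading zeros trimmed.
	be := wei.Bytes()

	// SSZ represents a uint256 as 32 little-endian bytes: reverse the
	// big-endian bytes and leave the high-order end zero-padded.
	le := make([]byte, 32)
	copy(le, reverse(be))

	fmt.Printf("big-endian:   %#x\n", be)
	fmt.Printf("ssz (little): %#x\n", le)
}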

View File

@@ -4,6 +4,6 @@ const (
WebUrlPrefix = "/v2/validator/"
WebApiUrlPrefix = "/api/v2/validator/"
KeymanagerApiPrefix = "/eth/v1"
AuthTokenFileName = "auth-token"
SystemLogsPrefix = "health/logs"
AuthTokenFileName = "auth-token"
)

View File

@@ -26,6 +26,7 @@ go_library(
"receive_attestation.go",
"receive_blob.go",
"receive_block.go",
"receive_data_column.go",
"service.go",
"tracked_proposer.go",
"weak_subjectivity_checks.go",
@@ -48,6 +49,7 @@ go_library(
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition:go_default_library",
@@ -67,6 +69,7 @@ go_library(
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
@@ -157,6 +160,7 @@ go_test(
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/p2p/testing:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",

View File

@@ -33,6 +33,7 @@ var (
)
var errMaxBlobsExceeded = errors.New("Expected commitments in block exceeds MAX_BLOBS_PER_BLOCK")
var errMaxDataColumnsExceeded = errors.New("Expected data columns for node exceeds NUMBER_OF_COLUMNS")
// An invalid block is the block that fails state transition based on the core protocol rules.
// The beacon node shall not be accepting nor building blocks that branch off from an invalid block.

View File

@@ -325,7 +325,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
var attr payloadattribute.Attributer
switch st.Version() {
case version.Deneb:
withdrawals, err := st.ExpectedWithdrawals()
withdrawals, _, err := st.ExpectedWithdrawals()
if err != nil {
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
return emptyAttri
@@ -342,7 +342,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
return emptyAttri
}
case version.Capella:
withdrawals, err := st.ExpectedWithdrawals()
withdrawals, _, err := st.ExpectedWithdrawals()
if err != nil {
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
return emptyAttri

View File

@@ -13,7 +13,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/blstoexec"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpbv1 "github.com/prysmaticlabs/prysm/v5/proto/eth/v1"
@@ -313,7 +312,7 @@ func TestSaveOrphanedAtts(t *testing.T) {
require.NoError(t, service.saveOrphanedOperations(ctx, r3, r4))
require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
wantAtts := []interfaces.Attestation{
wantAtts := []ethpb.Att{
blk3.Block.Body.Attestations[0],
blk2.Block.Body.Attestations[0],
blk1.Block.Body.Attestations[0],
@@ -390,7 +389,7 @@ func TestSaveOrphanedOps(t *testing.T) {
require.NoError(t, service.saveOrphanedOperations(ctx, r3, r4))
require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
wantAtts := []interfaces.Attestation{
wantAtts := []ethpb.Att{
blk3.Block.Body.Attestations[0],
blk2.Block.Body.Attestations[0],
blk1.Block.Body.Attestations[0],
@@ -518,7 +517,7 @@ func TestSaveOrphanedAtts_DoublyLinkedTrie(t *testing.T) {
require.NoError(t, service.saveOrphanedOperations(ctx, r3, r4))
require.Equal(t, 3, service.cfg.AttPool.AggregatedAttestationCount())
wantAtts := []interfaces.Attestation{
wantAtts := []ethpb.Att{
blk3.Block.Body.Attestations[0],
blk2.Block.Body.Attestations[0],
blk1.Block.Body.Attestations[0],

View File

@@ -12,6 +12,8 @@ go_library(
deps = [
"//consensus-types/blocks:go_default_library",
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
"@com_github_ethereum_c_kzg_4844//bindings/go:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

View File

@@ -5,6 +5,8 @@ import (
"encoding/json"
GoKZG "github.com/crate-crypto/go-kzg-4844"
CKZG "github.com/ethereum/c-kzg-4844/bindings/go"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
)
@@ -12,17 +14,38 @@ var (
//go:embed trusted_setup.json
embeddedTrustedSetup []byte // 1.2Mb
kzgContext *GoKZG.Context
kzgLoaded bool
)
func Start() error {
parsedSetup := GoKZG.JSONTrustedSetup{}
err := json.Unmarshal(embeddedTrustedSetup, &parsedSetup)
parsedSetup := &GoKZG.JSONTrustedSetup{}
err := json.Unmarshal(embeddedTrustedSetup, parsedSetup)
if err != nil {
return errors.Wrap(err, "could not parse trusted setup JSON")
}
kzgContext, err = GoKZG.NewContext4096(&parsedSetup)
kzgContext, err = GoKZG.NewContext4096(parsedSetup)
if err != nil {
return errors.Wrap(err, "could not initialize go-kzg context")
}
g1Lagrange := &parsedSetup.SetupG1Lagrange
// Length of a G1 point, converted from hex to binary.
g1s := make([]byte, len(g1Lagrange)*(len(g1Lagrange[0])-2)/2)
for i, g1 := range g1Lagrange {
copy(g1s[i*(len(g1)-2)/2:], hexutil.MustDecode(g1))
}
// Length of a G2 point, converted from hex to binary.
g2s := make([]byte, len(parsedSetup.SetupG2)*(len(parsedSetup.SetupG2[0])-2)/2)
for i, g2 := range parsedSetup.SetupG2 {
copy(g2s[i*(len(g2)-2)/2:], hexutil.MustDecode(g2))
}
if !kzgLoaded {
// Free the current trusted setup before running this method. CKZG
// panics if the same setup is run multiple times.
if err = CKZG.LoadTrustedSetup(g1s, g2s); err != nil {
panic(err)
}
}
kzgLoaded = true
return nil
}
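
The size arithmetic above works because each setup point is stored as "0x"-prefixed hex, so its byte length is (len(hexString) - 2) / 2. Assuming the JSON stores compressed BLS12-381 points, as the standard KZG trusted setup does, a G1 point is 48 bytes (96 hex characters plus the prefix, so (98 - 2) / 2 = 48) and a G2 point is 96 bytes (192 hex characters plus the prefix).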

View File

@@ -118,9 +118,9 @@ func WithBLSToExecPool(p blstoexec.PoolManager) Option {
}
// WithP2PBroadcaster to broadcast messages after appropriate processing.
func WithP2PBroadcaster(p p2p.Broadcaster) Option {
func WithP2PBroadcaster(p p2p.Acceser) Option {
return func(s *Service) error {
s.cfg.P2p = p
s.cfg.P2P = p
return nil
}
}

View File

@@ -6,7 +6,6 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
@@ -37,7 +36,7 @@ import (
//
// # Update latest messages for attesting indices
// update_latest_messages(store, indexed_attestation.attesting_indices, attestation)
func (s *Service) OnAttestation(ctx context.Context, a interfaces.Attestation, disparity time.Duration) error {
func (s *Service) OnAttestation(ctx context.Context, a ethpb.Att, disparity time.Duration) error {
ctx, span := trace.StartSpan(ctx, "blockChain.onAttestation")
defer span.End()
@@ -81,11 +80,11 @@ func (s *Service) OnAttestation(ctx context.Context, a interfaces.Attestation, d
}
// Use the target state to verify attesting indices are valid.
committee, err := helpers.BeaconCommitteeFromState(ctx, baseState, a.GetData().Slot, a.GetData().CommitteeIndex)
committees, err := helpers.AttestationCommittees(ctx, baseState, a)
if err != nil {
return err
}
indexedAtt, err := attestation.ConvertToIndexed(ctx, a, committee)
indexedAtt, err := attestation.ConvertToIndexed(ctx, a, committees...)
if err != nil {
return err
}

View File

@@ -7,10 +7,11 @@ import (
"time"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
@@ -74,7 +75,7 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
tests := []struct {
name string
a interfaces.Attestation
a ethpb.Att
wantedErr string
}{
{
@@ -126,25 +127,36 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
}
func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
eval := func(ctx context.Context, service *Service, genesisState state.BeaconState, pks []bls.SecretKey) {
service.SetGenesisTime(time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0))
require.NoError(t, service.saveGenesisData(ctx, genesisState))
att, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
require.NoError(t, err)
tRoot := bytesutil.ToBytes32(att[0].GetData().Target.Root)
copied := genesisState.Copy()
copied, err = transition.ProcessSlots(ctx, copied, 1)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
ojc := &ethpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
ofc := &ethpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
require.NoError(t, service.OnAttestation(ctx, att[0], 0))
}
genesisState, pks := util.DeterministicGenesisState(t, 64)
service.SetGenesisTime(time.Unix(time.Now().Unix()-int64(params.BeaconConfig().SecondsPerSlot), 0))
require.NoError(t, service.saveGenesisData(ctx, genesisState))
att, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
require.NoError(t, err)
tRoot := bytesutil.ToBytes32(att[0].Data.Target.Root)
copied := genesisState.Copy()
copied, err = transition.ProcessSlots(ctx, copied, 1)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
ojc := &ethpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
ofc := &ethpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
require.NoError(t, service.OnAttestation(ctx, att[0], 0))
t.Run("pre-Electra", func(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
genesisState, pks := util.DeterministicGenesisState(t, 64)
eval(ctx, service, genesisState, pks)
})
t.Run("post-Electra", func(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
genesisState, pks := util.DeterministicGenesisStateElectra(t, 64)
eval(ctx, service, genesisState, pks)
})
}
func TestService_GetRecentPreState(t *testing.T) {

View File

@@ -6,6 +6,8 @@ import (
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
@@ -366,11 +368,11 @@ func (s *Service) handleEpochBoundary(ctx context.Context, slot primitives.Slot,
func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.ReadOnlyBeaconBlock, st state.BeaconState) error {
// Feed in block's attestations to fork choice store.
for _, a := range blk.Body().Attestations() {
committee, err := helpers.BeaconCommitteeFromState(ctx, st, a.GetData().Slot, a.GetData().CommitteeIndex)
committees, err := helpers.AttestationCommittees(ctx, st, a)
if err != nil {
return err
}
indices, err := attestation.AttestingIndices(a.GetAggregationBits(), committee)
indices, err := attestation.AttestingIndices(a, committees...)
if err != nil {
return err
}
@@ -387,7 +389,7 @@ func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.Re
// InsertSlashingsToForkChoiceStore inserts attester slashing indices to fork choice store.
// To call this function, it's caller's responsibility to ensure the slashing object is valid.
// This function requires a write lock on forkchoice.
func (s *Service) InsertSlashingsToForkChoiceStore(ctx context.Context, slashings []interfaces.AttesterSlashing) {
func (s *Service) InsertSlashingsToForkChoiceStore(ctx context.Context, slashings []ethpb.AttSlashing) {
for _, slashing := range slashings {
indices := blocks.SlashableAttesterIndices(slashing)
for _, index := range indices {
@@ -500,7 +502,7 @@ func missingIndices(bs *filesystem.BlobStorage, root [32]byte, expected [][]byte
}
indices, err := bs.Indices(root)
if err != nil {
return nil, err
return nil, errors.Wrap(err, "indices")
}
missing := make(map[uint64]struct{}, len(expected))
for i := range expected {
@@ -514,12 +516,35 @@ func missingIndices(bs *filesystem.BlobStorage, root [32]byte, expected [][]byte
return missing, nil
}
func missingDataColumns(bs *filesystem.BlobStorage, root [32]byte, expected map[uint64]bool) (map[uint64]bool, error) {
if len(expected) == 0 {
return nil, nil
}
if len(expected) > int(params.BeaconConfig().NumberOfColumns) {
return nil, errMaxDataColumnsExceeded
}
indices, err := bs.ColumnIndices(root)
if err != nil {
return nil, err
}
missing := make(map[uint64]bool, len(expected))
for col := range expected {
if !indices[col] {
missing[col] = true
}
}
return missing, nil
}
// isDataAvailable blocks until all BlobSidecars committed to in the block are available,
// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
// The function will first check the database to see if all sidecars have been persisted. If any
// sidecars are missing, it will then read from the blobNotifier channel for the given root until the channel is
// closed, the context hits cancellation/timeout, or notifications have been received for all the missing sidecars.
func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
if features.Get().EnablePeerDAS {
return s.isDataAvailableDataColumns(ctx, root, signed)
}
if signed.Version() < version.Deneb {
return nil
}
@@ -549,7 +574,7 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
// get a map of BlobSidecar indices that are not currently available.
missing, err := missingIndices(s.blobStorage, root, kzgCommitments)
if err != nil {
return err
return errors.Wrap(err, "missing indices")
}
// If there are no missing indices, all BlobSidecars are available.
if len(missing) == 0 {
@@ -568,8 +593,13 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
if len(missing) == 0 {
return
}
log.WithFields(daCheckLogFields(root, signed.Block().Slot(), expected, len(missing))).
Error("Still waiting for DA check at slot end.")
log.WithFields(logrus.Fields{
"slot": signed.Block().Slot(),
"root": fmt.Sprintf("%#x", root),
"blobsExpected": expected,
"blobsWaiting": len(missing),
}).Error("Still waiting for blobs DA check at slot end.")
})
defer nst.Stop()
}
@@ -591,12 +621,104 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
}
}
func daCheckLogFields(root [32]byte, slot primitives.Slot, expected, missing int) logrus.Fields {
return logrus.Fields{
"slot": slot,
"root": fmt.Sprintf("%#x", root),
"blobsExpected": expected,
"blobsWaiting": missing,
func (s *Service) isDataAvailableDataColumns(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
if signed.Version() < version.Deneb {
return nil
}
block := signed.Block()
if block == nil {
return errors.New("invalid nil beacon block")
}
// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
if !params.WithinDAPeriod(slots.ToEpoch(block.Slot()), slots.ToEpoch(s.CurrentSlot())) {
return nil
}
body := block.Body()
if body == nil {
return errors.New("invalid nil beacon block body")
}
kzgCommitments, err := body.BlobKzgCommitments()
if err != nil {
return errors.Wrap(err, "could not get KZG commitments")
}
// If block has not commitments there is nothing to wait for.
if len(kzgCommitments) == 0 {
return nil
}
custodiedSubnetCount := params.BeaconConfig().CustodyRequirement
if flags.Get().SubscribeToAllSubnets {
custodiedSubnetCount = params.BeaconConfig().DataColumnSidecarSubnetCount
}
colMap, err := peerdas.CustodyColumns(s.cfg.P2P.NodeID(), custodiedSubnetCount)
if err != nil {
return err
}
// Expected is the number of custodied data columnns a node is expected to have.
expected := len(colMap)
if expected == 0 {
return nil
}
// Subscribe to newsly data columns stored in the database.
rootIndexChan := make(chan filesystem.RootIndexPair)
subscription := s.blobStorage.DataColumnFeed.Subscribe(rootIndexChan)
defer subscription.Unsubscribe()
// Get a map of data column indices that are not currently available.
missing, err := missingDataColumns(s.blobStorage, root, colMap)
if err != nil {
return err
}
// If there are no missing indices, all data column sidecars are available.
// This is the happy path.
if len(missing) == 0 {
return nil
}
// Log for DA checks that cross over into the next slot; helpful for debugging.
nextSlot := slots.BeginsAt(signed.Block().Slot()+1, s.genesisTime)
// Avoid logging if DA check is called after next slot start.
if nextSlot.After(time.Now()) {
nst := time.AfterFunc(time.Until(nextSlot), func() {
if len(missing) == 0 {
return
}
log.WithFields(logrus.Fields{
"slot": signed.Block().Slot(),
"root": fmt.Sprintf("%#x", root),
"columnsExpected": expected,
"columnsWaiting": len(missing),
}).Error("Still waiting for data columns DA check at slot end.")
})
defer nst.Stop()
}
for {
select {
case rootIndex := <-rootIndexChan:
if rootIndex.Root != root {
// This is not the root we are looking for.
continue
}
// Remove the index from the missing map.
delete(missing, rootIndex.Index)
// Exit if there is no more missing data columns.
if len(missing) == 0 {
return nil
}
case <-ctx.Done():
missingIndexes := make([]uint64, 0, len(missing))
for val := range missing {
copiedVal := val
missingIndexes = append(missingIndexes, copiedVal)
}
return errors.Wrapf(ctx.Err(), "context deadline waiting for data column sidecars slot: %d, BlockRoot: %#x, missing %v", block.Slot(), root, missingIndexes)
}
}
}
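
The data-column DA check above derives the node's custody set with peerdas.CustodyColumns(s.cfg.P2P.NodeID(), custodiedSubnetCount) and then waits until every custodied column index for the block root has been persisted. A rough sketch of the subnet-to-column expansion behind such a custody set, using illustrative constants rather than the real beacon chain config values:

package main

import "fmt"

// Illustrative constants; the real values come from the beacon chain config
// (NumberOfColumns, DataColumnSidecarSubnetCount).
const (
	numberOfColumns   = uint64(128)
	dataColumnSubnets = uint64(32)
)

// columnsForSubnets expands a set of custodied subnet ids into the data column
// indices belonging to those subnets, using the mapping
// subnet = columnIndex % dataColumnSubnets.
func columnsForSubnets(subnets map[uint64]bool) map[uint64]bool {
	columns := make(map[uint64]bool)
	for col := uint64(0); col < numberOfColumns; col++ {
		if subnets[col%dataColumnSubnets] {
			columns[col] = true
		}
	}
	return columns
}

func main() {
	// Suppose the node custodies subnets 3 and 17. In Prysm these subnet ids
	// are derived deterministically from the node ID and the custody count.
	custody := columnsForSubnets(map[uint64]bool{3: true, 17: true})
	fmt.Println(len(custody), "columns custodied") // 8 with these constants
	for col := range custody {
		fmt.Println("column", col)
	}
}

With these illustrative numbers each subnet owns 128/32 = 4 columns, so a node custodying two subnets would wait on 8 column indices in the loop above.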

View File

@@ -824,7 +824,7 @@ func TestRemoveBlockAttestationsInPool(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: r[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, r))
atts := make([]interfaces.Attestation, len(b.Block.Body.Attestations))
atts := make([]ethpb.Att, len(b.Block.Body.Attestations))
for i, a := range b.Block.Body.Attestations {
atts[i] = a
}
@@ -1963,68 +1963,130 @@ func TestNoViableHead_Reboot(t *testing.T) {
}
func TestOnBlock_HandleBlockAttestations(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
t.Run("pre-Electra", func(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
st, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := st.HashTreeRoot(ctx)
require.NoError(t, err, "Could not hash genesis state")
st, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := st.HashTreeRoot(ctx)
require.NoError(t, err, "Could not hash genesis state")
require.NoError(t, service.saveGenesisData(ctx, st))
require.NoError(t, service.saveGenesisData(ctx, st))
genesis := blocks.NewGenesisBlock(stateRoot[:])
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
parentRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err, "Could not get signing root")
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
genesis := blocks.NewGenesisBlock(stateRoot[:])
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
parentRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err, "Could not get signing root")
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
st, err = service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
st, err = service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
st, err = service.HeadState(ctx)
require.NoError(t, err)
b, err = util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
st, err = service.HeadState(ctx)
require.NoError(t, err)
b, err = util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
// prepare another block that is not inserted
st3, err := transition.ExecuteStateTransition(ctx, st, wsb)
require.NoError(t, err)
b3, err := util.GenerateFullBlock(st3, keys, util.DefaultBlockGenConfig(), 3)
require.NoError(t, err)
wsb3, err := consensusblocks.NewSignedBeaconBlock(b3)
require.NoError(t, err)
// prepare another block that is not inserted
st3, err := transition.ExecuteStateTransition(ctx, st, wsb)
require.NoError(t, err)
b3, err := util.GenerateFullBlock(st3, keys, util.DefaultBlockGenConfig(), 3)
require.NoError(t, err)
wsb3, err := consensusblocks.NewSignedBeaconBlock(b3)
require.NoError(t, err)
require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
a := wsb.Block().Body().Attestations()[0]
r := bytesutil.ToBytes32(a.GetData().BeaconBlockRoot)
require.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r))
require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
a := wsb.Block().Body().Attestations()[0]
r := bytesutil.ToBytes32(a.GetData().BeaconBlockRoot)
require.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r))
require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
a3 := wsb3.Block().Body().Attestations()[0]
r3 := bytesutil.ToBytes32(a3.GetData().BeaconBlockRoot)
require.Equal(t, false, service.cfg.ForkChoiceStore.HasNode(r3))
require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
a3 := wsb3.Block().Body().Attestations()[0]
r3 := bytesutil.ToBytes32(a3.GetData().BeaconBlockRoot)
require.Equal(t, false, service.cfg.ForkChoiceStore.HasNode(r3))
require.NoError(t, service.handleBlockAttestations(ctx, wsb.Block(), st)) // fine to use the same committee as st
require.Equal(t, 0, service.cfg.AttPool.ForkchoiceAttestationCount())
require.NoError(t, service.handleBlockAttestations(ctx, wsb3.Block(), st3)) // fine to use the same committee as st
require.Equal(t, 1, len(service.cfg.AttPool.BlockAttestations()))
require.NoError(t, service.handleBlockAttestations(ctx, wsb.Block(), st)) // fine to use the same committee as st
require.Equal(t, 0, service.cfg.AttPool.ForkchoiceAttestationCount())
require.NoError(t, service.handleBlockAttestations(ctx, wsb3.Block(), st3)) // fine to use the same committee as st
require.Equal(t, 1, len(service.cfg.AttPool.BlockAttestations()))
})
t.Run("post-Electra", func(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
st, keys := util.DeterministicGenesisStateElectra(t, 64)
require.NoError(t, service.saveGenesisData(ctx, st))
genesis, err := blocks.NewGenesisBlockForState(ctx, st)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, genesis), "Could not save genesis block")
parentRoot, err := genesis.Block().HashTreeRoot()
require.NoError(t, err, "Could not get signing root")
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
st, err = service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlockElectra(st, keys, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
st, err = service.HeadState(ctx)
require.NoError(t, err)
b, err = util.GenerateFullBlockElectra(st, keys, util.DefaultBlockGenConfig(), 2)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
// prepare another block that is not inserted
st3, err := transition.ExecuteStateTransition(ctx, st, wsb)
require.NoError(t, err)
b3, err := util.GenerateFullBlockElectra(st3, keys, util.DefaultBlockGenConfig(), 3)
require.NoError(t, err)
wsb3, err := consensusblocks.NewSignedBeaconBlock(b3)
require.NoError(t, err)
require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
a := wsb.Block().Body().Attestations()[0]
r := bytesutil.ToBytes32(a.GetData().BeaconBlockRoot)
require.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r))
require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
a3 := wsb3.Block().Body().Attestations()[0]
r3 := bytesutil.ToBytes32(a3.GetData().BeaconBlockRoot)
require.Equal(t, false, service.cfg.ForkChoiceStore.HasNode(r3))
require.NoError(t, service.handleBlockAttestations(ctx, wsb.Block(), st)) // fine to use the same committee as st
require.Equal(t, 0, service.cfg.AttPool.ForkchoiceAttestationCount())
require.NoError(t, service.handleBlockAttestations(ctx, wsb3.Block(), st3)) // fine to use the same committee as st
require.Equal(t, 1, len(service.cfg.AttPool.BlockAttestations()))
})
}
func TestFillMissingBlockPayloadId_DiffSlotExitEarly(t *testing.T) {

View File

@@ -10,7 +10,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -32,7 +31,7 @@ type AttestationStateFetcher interface {
// AttestationReceiver interface defines the methods of the chain service for receiving and processing new attestations.
type AttestationReceiver interface {
AttestationStateFetcher
VerifyLmdFfgConsistency(ctx context.Context, att interfaces.Attestation) error
VerifyLmdFfgConsistency(ctx context.Context, att ethpb.Att) error
InForkchoice([32]byte) bool
}
@@ -52,7 +51,7 @@ func (s *Service) AttestationTargetState(ctx context.Context, target *ethpb.Chec
}
// VerifyLmdFfgConsistency verifies that the attestation's LMD and FFG votes are consistent with each other.
func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a interfaces.Attestation) error {
func (s *Service) VerifyLmdFfgConsistency(ctx context.Context, a ethpb.Att) error {
r, err := s.TargetRootForEpoch([32]byte(a.GetData().BeaconBlockRoot), a.GetData().Target.Epoch)
if err != nil {
return err
@@ -207,7 +206,7 @@ func (s *Service) processAttestations(ctx context.Context, disparity time.Durati
// 1. Validate attestation, update validator's latest vote
// 2. Apply fork choice to the processed attestation
// 3. Save latest head info
func (s *Service) receiveAttestationNoPubsub(ctx context.Context, att interfaces.Attestation, disparity time.Duration) error {
func (s *Service) receiveAttestationNoPubsub(ctx context.Context, att ethpb.Att, disparity time.Duration) error {
ctx, span := trace.StartSpan(ctx, "beacon-chain.blockchain.receiveAttestationNoPubsub")
defer span.End()

View File

@@ -10,7 +10,6 @@ import (
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -74,7 +73,7 @@ func TestProcessAttestations_Ok(t *testing.T) {
require.NoError(t, service.saveGenesisData(ctx, genesisState))
atts, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
require.NoError(t, err)
tRoot := bytesutil.ToBytes32(atts[0].Data.Target.Root)
tRoot := bytesutil.ToBytes32(atts[0].GetData().Target.Root)
copied := genesisState.Copy()
copied, err = transition.ProcessSlots(ctx, copied, 1)
require.NoError(t, err)
@@ -84,7 +83,7 @@ func TestProcessAttestations_Ok(t *testing.T) {
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
attsToSave := make([]interfaces.Attestation, len(atts))
attsToSave := make([]ethpb.Att, len(atts))
for i, a := range atts {
attsToSave[i] = a
}
@@ -126,14 +125,14 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
// Generate attestations for this block in Slot 1
atts, err := util.GenerateAttestations(copied, pks, 1, 1, false)
require.NoError(t, err)
attsToSave := make([]interfaces.Attestation, len(atts))
attsToSave := make([]ethpb.Att, len(atts))
for i, a := range atts {
attsToSave[i] = a
}
require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(attsToSave))
// Verify the target is in forkchoice
require.Equal(t, true, fcs.HasNode(bytesutil.ToBytes32(atts[0].Data.BeaconBlockRoot)))
require.Equal(t, tRoot, bytesutil.ToBytes32(atts[0].Data.BeaconBlockRoot))
require.Equal(t, true, fcs.HasNode(bytesutil.ToBytes32(atts[0].GetData().BeaconBlockRoot)))
require.Equal(t, tRoot, bytesutil.ToBytes32(atts[0].GetData().BeaconBlockRoot))
require.Equal(t, true, fcs.HasNode(service.originBlockRoot))
// Insert a new block to forkchoice

View File

@@ -50,9 +50,15 @@ type BlobReceiver interface {
ReceiveBlob(context.Context, blocks.VerifiedROBlob) error
}
// DataColumnReceiver interface defines the methods of chain service for receiving new
// data columns
type DataColumnReceiver interface {
ReceiveDataColumn(context.Context, blocks.VerifiedRODataColumn) error
}
// SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
type SlashingReceiver interface {
ReceiveAttesterSlashing(ctx context.Context, slashing interfaces.AttesterSlashing)
ReceiveAttesterSlashing(ctx context.Context, slashing ethpb.AttSlashing)
}
// ReceiveBlock is a function that defines the operations (minus pubsub)
@@ -295,10 +301,10 @@ func (s *Service) HasBlock(ctx context.Context, root [32]byte) bool {
}
// ReceiveAttesterSlashing receives an attester slashing and inserts it to forkchoice
func (s *Service) ReceiveAttesterSlashing(ctx context.Context, slashing interfaces.AttesterSlashing) {
func (s *Service) ReceiveAttesterSlashing(ctx context.Context, slashing ethpb.AttSlashing) {
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
s.InsertSlashingsToForkChoiceStore(ctx, []interfaces.AttesterSlashing{slashing})
s.InsertSlashingsToForkChoiceStore(ctx, []ethpb.AttSlashing{slashing})
}
// prunePostBlockOperationPools only runs on a new head; otherwise it returns nil.
@@ -479,12 +485,12 @@ func (s *Service) sendBlockAttestationsToSlasher(signed interfaces.ReadOnlySigne
// is done in the background to avoid adding more load to this critical code path.
ctx := context.TODO()
for _, att := range signed.Block().Body().Attestations() {
committee, err := helpers.BeaconCommitteeFromState(ctx, preState, att.GetData().Slot, att.GetData().CommitteeIndex)
committees, err := helpers.AttestationCommittees(ctx, preState, att)
if err != nil {
log.WithError(err).Error("Could not get attestation committee")
log.WithError(err).Error("Could not get attestation committees")
return
}
indexedAtt, err := attestation.ConvertToIndexed(ctx, att, committee)
indexedAtt, err := attestation.ConvertToIndexed(ctx, att, committees...)
if err != nil {
log.WithError(err).Error("Could not convert to indexed attestation")
return
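This hunk swaps the single helpers.BeaconCommitteeFromState lookup for helpers.AttestationCommittees and passes the resulting committees variadically into attestation.ConvertToIndexed. Judging by the Electra branch shown later in this diff, the helper presumably returns one committee (looked up by committee_index) before Electra and one committee per set bit of committee_bits afterwards. The sketch below illustrates that assumed shape only; the parameter layout and committeeAt lookup are hypothetical, not Prysm's API.

package main

import "fmt"

// ValidatorIndex is a simplified stand-in for Prysm's primitives.ValidatorIndex.
type ValidatorIndex uint64

// attestationCommittees sketches what a helper like helpers.AttestationCommittees
// presumably does: one committee selected by committee_index before Electra, and
// one committee per set bit of committee_bits from Electra onwards.
func attestationCommittees(
	postElectra bool,
	committeeIndex uint64,
	committeeBitIndices []int,
	committeeAt func(uint64) []ValidatorIndex,
) [][]ValidatorIndex {
	if !postElectra {
		return [][]ValidatorIndex{committeeAt(committeeIndex)}
	}
	committees := make([][]ValidatorIndex, 0, len(committeeBitIndices))
	for _, ci := range committeeBitIndices {
		committees = append(committees, committeeAt(uint64(ci)))
	}
	return committees
}

func main() {
	committeeAt := func(idx uint64) []ValidatorIndex {
		base := ValidatorIndex(idx * 3) // dummy committees of three validators each
		return []ValidatorIndex{base, base + 1, base + 2}
	}
	fmt.Println(attestationCommittees(true, 0, []int{0, 2}, committeeAt))
	// Output: [[0 1 2] [6 7 8]]
}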

View File

@@ -0,0 +1,16 @@
package blockchain
import (
"context"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
)
func (s *Service) ReceiveDataColumn(ctx context.Context, ds blocks.VerifiedRODataColumn) error {
if err := s.blobStorage.SaveDataColumn(ds); err != nil {
return errors.Wrap(err, "save data column")
}
return nil
}
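The new ReceiveDataColumn is a thin wrapper that hands a verified column to blobStorage.SaveDataColumn. The sketch below shows the shape of a caller, for example a gossip pipeline, handing an already-verified column to such a receiver. The DataColumnReceiver interface mirrors the hunk above; the VerifiedColumn placeholder and storeReceiver are illustrative only.

package main

import (
	"context"
	"fmt"
)

// VerifiedColumn is a placeholder for blocks.VerifiedRODataColumn.
type VerifiedColumn struct{ Index uint64 }

// DataColumnReceiver mirrors the interface added in this change set.
type DataColumnReceiver interface {
	ReceiveDataColumn(context.Context, VerifiedColumn) error
}

// storeReceiver records what it saw, standing in for the chain service's
// call into blob storage.
type storeReceiver struct{ saved []uint64 }

func (s *storeReceiver) ReceiveDataColumn(_ context.Context, c VerifiedColumn) error {
	s.saved = append(s.saved, c.Index)
	return nil
}

// handleGossipColumn shows the caller side: validation happens first (elided
// here), then the verified column is passed to the receiver.
func handleGossipColumn(ctx context.Context, r DataColumnReceiver, c VerifiedColumn) error {
	if err := r.ReceiveDataColumn(ctx, c); err != nil {
		return fmt.Errorf("receive data column %d: %w", c.Index, err)
	}
	return nil
}

func main() {
	r := &storeReceiver{}
	_ = handleGossipColumn(context.Background(), r, VerifiedColumn{Index: 7})
	fmt.Println(r.saved) // [7]
}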

View File

@@ -82,7 +82,7 @@ type config struct {
ExitPool voluntaryexits.PoolManager
SlashingPool slashings.PoolManager
BLSToExecPool blstoexec.PoolManager
P2p p2p.Broadcaster
P2P p2p.Acceser
MaxRoutines int
StateNotifier statefeed.Notifier
ForkChoiceStore f.ForkChoicer
@@ -107,15 +107,17 @@ var ErrMissingClockSetter = errors.New("blockchain Service initialized without a
type blobNotifierMap struct {
sync.RWMutex
notifiers map[[32]byte]chan uint64
seenIndex map[[32]byte][fieldparams.MaxBlobsPerBlock]bool
seenIndex map[[32]byte][fieldparams.NumberOfColumns]bool
}
// notifyIndex notifies a blob by its index for a given root.
// It uses internal maps to keep track of seen indices and notifier channels.
func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64) {
if idx >= fieldparams.MaxBlobsPerBlock {
return
}
// TODO: Separate Data Columns from blobs
/*
if idx >= fieldparams.MaxBlobsPerBlock {
return
}*/
bn.Lock()
seen := bn.seenIndex[root]
@@ -129,7 +131,7 @@ func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64) {
// Retrieve or create the notifier channel for the given root.
c, ok := bn.notifiers[root]
if !ok {
c = make(chan uint64, fieldparams.MaxBlobsPerBlock)
c = make(chan uint64, fieldparams.NumberOfColumns)
bn.notifiers[root] = c
}
@@ -143,7 +145,7 @@ func (bn *blobNotifierMap) forRoot(root [32]byte) chan uint64 {
defer bn.Unlock()
c, ok := bn.notifiers[root]
if !ok {
c = make(chan uint64, fieldparams.MaxBlobsPerBlock)
c = make(chan uint64, fieldparams.NumberOfColumns)
bn.notifiers[root] = c
}
return c
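Both notifyIndex and forRoot now size the per-root channel to fieldparams.NumberOfColumns, so a producer can notify up to one index per column without blocking, while seenIndex suppresses duplicate notifications. A reduced sketch of that producer-side pattern follows; 128 columns is assumed for the constant, and the fixed-size seen array keyed by block root matches the structure shown above.

package main

import (
	"fmt"
	"sync"
)

const numberOfColumns = 128 // assumed value of fieldparams.NumberOfColumns

type notifierMap struct {
	sync.Mutex
	notifiers map[[32]byte]chan uint64
	seen      map[[32]byte][numberOfColumns]bool
}

// notifyIndex sends idx on the root's channel exactly once per (root, idx) pair.
func (n *notifierMap) notifyIndex(root [32]byte, idx uint64) {
	if idx >= numberOfColumns {
		return
	}
	n.Lock()
	defer n.Unlock()
	seen := n.seen[root]
	if seen[idx] {
		return
	}
	seen[idx] = true
	n.seen[root] = seen
	c, ok := n.notifiers[root]
	if !ok {
		// Buffered to the column count so sends never block the producer.
		c = make(chan uint64, numberOfColumns)
		n.notifiers[root] = c
	}
	c <- idx
}

func main() {
	n := &notifierMap{
		notifiers: make(map[[32]byte]chan uint64),
		seen:      make(map[[32]byte][numberOfColumns]bool),
	}
	var root [32]byte
	n.notifyIndex(root, 5)
	n.notifyIndex(root, 5) // duplicate, suppressed
	fmt.Println(len(n.notifiers[root])) // 1
}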
@@ -169,7 +171,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
ctx, cancel := context.WithCancel(ctx)
bn := &blobNotifierMap{
notifiers: make(map[[32]byte]chan uint64),
seenIndex: make(map[[32]byte][fieldparams.MaxBlobsPerBlock]bool),
seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
}
srv := &Service{
ctx: ctx,

View File

@@ -95,7 +95,7 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
WithAttestationPool(attestations.NewPool()),
WithSlashingPool(slashings.NewPool()),
WithExitPool(voluntaryexits.NewPool()),
WithP2PBroadcaster(&mockBroadcaster{}),
WithP2PBroadcaster(&mockAccesser{}),
WithStateNotifier(&mockBeaconNode{}),
WithForkChoiceStore(fc),
WithAttestationService(attService),
@@ -518,7 +518,7 @@ func (s *MockClockSetter) SetClock(g *startup.Clock) error {
func TestNotifyIndex(t *testing.T) {
// Initialize a blobNotifierMap
bn := &blobNotifierMap{
seenIndex: make(map[[32]byte][fieldparams.MaxBlobsPerBlock]bool),
seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
notifiers: make(map[[32]byte]chan uint64),
}

View File

@@ -13,14 +13,15 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
testDB "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
mockExecution "github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice"
doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/blstoexec"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
p2pTesting "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"google.golang.org/protobuf/proto"
@@ -45,12 +46,17 @@ type mockBroadcaster struct {
broadcastCalled bool
}
type mockAccesser struct {
mockBroadcaster
p2pTesting.MockPeerManager
}
func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error {
mb.broadcastCalled = true
return nil
}
func (mb *mockBroadcaster) BroadcastAttestation(_ context.Context, _ uint64, _ interfaces.Attestation) error {
func (mb *mockBroadcaster) BroadcastAttestation(_ context.Context, _ uint64, _ ethpb.Att) error {
mb.broadcastCalled = true
return nil
}
@@ -65,6 +71,11 @@ func (mb *mockBroadcaster) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.B
return nil
}
func (mb *mockBroadcaster) BroadcastDataColumn(_ context.Context, _ uint64, _ *ethpb.DataColumnSidecar) error {
mb.broadcastCalled = true
return nil
}
func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.SignedBLSToExecutionChange) {
}
@@ -121,6 +132,7 @@ func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceReq
WithTrackedValidatorsCache(cache.NewTrackedValidatorsCache()),
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)),
WithSyncChecker(mock.MockChecker{}),
WithExecutionEngineCaller(&mockExecution.EngineClient{}),
}
// append the variadic opts so they override the defaults by being processed afterwards
opts = append(defOpts, opts...)

View File

@@ -414,7 +414,7 @@ func (*ChainService) HeadGenesisValidatorsRoot() [32]byte {
}
// VerifyLmdFfgConsistency mocks VerifyLmdFfgConsistency and always returns nil.
func (*ChainService) VerifyLmdFfgConsistency(_ context.Context, a interfaces.Attestation) error {
func (*ChainService) VerifyLmdFfgConsistency(_ context.Context, a ethpb.Att) error {
if !bytes.Equal(a.GetData().BeaconBlockRoot, a.GetData().Target.Root) {
return errors.New("LMD and FFG miss matched")
}
@@ -495,7 +495,7 @@ func (s *ChainService) UpdateHead(ctx context.Context, slot primitives.Slot) {
}
// ReceiveAttesterSlashing mocks the same method in the chain service.
func (*ChainService) ReceiveAttesterSlashing(context.Context, interfaces.AttesterSlashing) {}
func (*ChainService) ReceiveAttesterSlashing(context.Context, ethpb.AttSlashing) {}
// IsFinalized mocks the same method in the chain service.
func (s *ChainService) IsFinalized(_ context.Context, blockRoot [32]byte) bool {
@@ -628,6 +628,11 @@ func (c *ChainService) ReceiveBlob(_ context.Context, b blocks.VerifiedROBlob) e
return nil
}
// ReceiveDataColumn implements the same method in chain service
func (c *ChainService) ReceiveDataColumn(_ context.Context, _ blocks.VerifiedRODataColumn) error {
return nil
}
// TargetRootForEpoch mocks the same method in the chain service
func (c *ChainService) TargetRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
return c.TargetRoot, nil

View File

@@ -2,7 +2,6 @@ package testing
import (
"context"
"math/big"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/api/client/builder"
@@ -55,13 +54,13 @@ func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, b interfaces.
}
return w, nil, s.ErrSubmitBlindedBlock
case version.Capella:
w, err := blocks.WrappedExecutionPayloadCapella(s.PayloadCapella, big.NewInt(0))
w, err := blocks.WrappedExecutionPayloadCapella(s.PayloadCapella)
if err != nil {
return nil, nil, errors.Wrap(err, "could not wrap capella payload")
}
return w, nil, s.ErrSubmitBlindedBlock
case version.Deneb:
w, err := blocks.WrappedExecutionPayloadDeneb(s.PayloadDeneb, big.NewInt(0))
w, err := blocks.WrappedExecutionPayloadDeneb(s.PayloadDeneb)
if err != nil {
return nil, nil, errors.Wrap(err, "could not wrap deneb payload")
}

View File

@@ -8,6 +8,7 @@ go_library(
"attestation_data.go",
"balance_cache_key.go",
"checkpoint_state.go",
"column_subnet_ids.go",
"committee.go",
"committee_disabled.go", # keep
"committees.go",

beacon-chain/cache/column_subnet_ids.go (new file)
View File

@@ -0,0 +1,65 @@
package cache
import (
"sync"
"time"
"github.com/patrickmn/go-cache"
"github.com/prysmaticlabs/prysm/v5/config/params"
)
type columnSubnetIDs struct {
colSubCache *cache.Cache
colSubLock sync.RWMutex
}
// ColumnSubnetIDs for column subnet participants
var ColumnSubnetIDs = newColumnSubnetIDs()
const columnKey = "columns"
func newColumnSubnetIDs() *columnSubnetIDs {
epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
// Set the default duration of a column subnet subscription as the column expiry period.
subLength := epochDuration * time.Duration(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
persistentCache := cache.New(subLength*time.Second, epochDuration*time.Second)
return &columnSubnetIDs{colSubCache: persistentCache}
}
// GetColumnSubnets retrieves the data column subnets.
func (s *columnSubnetIDs) GetColumnSubnets() ([]uint64, bool, time.Time) {
s.colSubLock.RLock()
defer s.colSubLock.RUnlock()
id, duration, ok := s.colSubCache.GetWithExpiration(columnKey)
if !ok {
return nil, false, time.Time{}
}
// Retrieve indices from the cache.
idxs, ok := id.([]uint64)
if !ok {
return nil, false, time.Time{}
}
return idxs, ok, duration
}
// AddColumnSubnets adds the relevant data column subnets.
func (s *columnSubnetIDs) AddColumnSubnets(colIdx []uint64) {
s.colSubLock.Lock()
defer s.colSubLock.Unlock()
s.colSubCache.Set(columnKey, colIdx, 0)
}
// EmptyAllCaches empties out all the related caches and flushes any stored
// entries on them. This should only ever be used for testing; in normal
// production, handling of the relevant subnets for each role is done
// separately.
func (s *columnSubnetIDs) EmptyAllCaches() {
// Clear the cache.
s.colSubLock.Lock()
defer s.colSubLock.Unlock()
s.colSubCache.Flush()
}
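A short usage sketch of the new cache follows: a caller records the column subnets it subscribed to and later reads them back together with the entry's expiration time. The API is exactly the one defined in the file above; the subnet values are arbitrary.

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
)

func main() {
	// Record the column subnets this node currently custodies (arbitrary values).
	cache.ColumnSubnetIDs.AddColumnSubnets([]uint64{3, 17, 42})

	// Later, read them back along with the entry's expiration time.
	subnets, ok, expiry := cache.ColumnSubnetIDs.GetColumnSubnets()
	if !ok {
		fmt.Println("no column subnets cached")
		return
	}
	fmt.Println(subnets, "expire at", expiry)
}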

View File

@@ -48,7 +48,7 @@ func ProcessAttestationsNoVerifySignature(
func ProcessAttestationNoVerifySignature(
ctx context.Context,
beaconState state.BeaconState,
att interfaces.Attestation,
att ethpb.Att,
totalBalance uint64,
) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "altair.ProcessAttestationNoVerifySignature")
@@ -66,11 +66,11 @@ func ProcessAttestationNoVerifySignature(
if err != nil {
return nil, err
}
committee, err := helpers.BeaconCommitteeFromState(ctx, beaconState, att.GetData().Slot, att.GetData().CommitteeIndex)
committees, err := helpers.AttestationCommittees(ctx, beaconState, att)
if err != nil {
return nil, err
}
indices, err := attestation.AttestingIndices(att.GetAggregationBits(), committee)
indices, err := attestation.AttestingIndices(att, committees...)
if err != nil {
return nil, err
}
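With attestation.AttestingIndices now taking the attestation plus a variadic list of committees, the aggregation bitlist spans the concatenation of the participating committees. The sketch below shows how set bits presumably map back to validator indices in that layout; it is simplified (plain bool slice instead of bitfield.Bitlist, no error handling) and is not the real helper.

package main

import "fmt"

// attestingIndices maps set aggregation bits onto the concatenation of the
// supplied committees: bit i selects the i-th validator of that concatenation.
func attestingIndices(aggBits []bool, committees ...[]uint64) []uint64 {
	var indices []uint64
	offset := 0
	for _, committee := range committees {
		for i, val := range committee {
			if offset+i < len(aggBits) && aggBits[offset+i] {
				indices = append(indices, val)
			}
		}
		offset += len(committee)
	}
	return indices
}

func main() {
	committeeA := []uint64{10, 11, 12}
	committeeB := []uint64{20, 21}
	// Bits 1 and 3 are set: validator 11 (committee A) and validator 20 (committee B).
	bits := []bool{false, true, false, true, false}
	fmt.Println(attestingIndices(bits, committeeA, committeeB)) // [11 20]
}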

View File

@@ -195,47 +195,95 @@ func TestProcessAttestations_InvalidAggregationBitsLength(t *testing.T) {
}
func TestProcessAttestations_OK(t *testing.T) {
beaconState, privKeys := util.DeterministicGenesisStateAltair(t, 100)
t.Run("pre-Electra", func(t *testing.T) {
beaconState, privKeys := util.DeterministicGenesisStateAltair(t, 100)
aggBits := bitfield.NewBitlist(3)
aggBits.SetBitAt(0, true)
var mockRoot [32]byte
copy(mockRoot[:], "hello-world")
att := util.HydrateAttestation(&ethpb.Attestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Root: mockRoot[:]},
Target: &ethpb.Checkpoint{Root: mockRoot[:]},
},
AggregationBits: aggBits,
aggBits := bitfield.NewBitlist(3)
aggBits.SetBitAt(0, true)
var mockRoot [32]byte
copy(mockRoot[:], "hello-world")
att := util.HydrateAttestation(&ethpb.Attestation{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Root: mockRoot[:]},
Target: &ethpb.Checkpoint{Root: mockRoot[:]},
},
AggregationBits: aggBits,
})
cfc := beaconState.CurrentJustifiedCheckpoint()
cfc.Root = mockRoot[:]
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cfc))
committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att.Data.Slot, 0)
require.NoError(t, err)
attestingIndices, err := attestation.AttestingIndices(att, committee)
require.NoError(t, err)
sigs := make([]bls.Signature, len(attestingIndices))
for i, indice := range attestingIndices {
sb, err := signing.ComputeDomainAndSign(beaconState, 0, att.Data, params.BeaconConfig().DomainBeaconAttester, privKeys[indice])
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
sigs[i] = sig
}
att.Signature = bls.AggregateSignatures(sigs).Marshal()
block := util.NewBeaconBlockAltair()
block.Block.Body.Attestations = []*ethpb.Attestation{att}
err = beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)
require.NoError(t, err)
wsb, err := blocks.NewSignedBeaconBlock(block)
require.NoError(t, err)
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block())
require.NoError(t, err)
})
t.Run("post-Electra", func(t *testing.T) {
beaconState, privKeys := util.DeterministicGenesisStateElectra(t, 100)
cfc := beaconState.CurrentJustifiedCheckpoint()
cfc.Root = mockRoot[:]
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cfc))
aggBits := bitfield.NewBitlist(3)
aggBits.SetBitAt(0, true)
committeeBits := primitives.NewAttestationCommitteeBits()
committeeBits.SetBitAt(0, true)
var mockRoot [32]byte
copy(mockRoot[:], "hello-world")
att := util.HydrateAttestationElectra(&ethpb.AttestationElectra{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Root: mockRoot[:]},
Target: &ethpb.Checkpoint{Root: mockRoot[:]},
},
AggregationBits: aggBits,
CommitteeBits: committeeBits,
})
committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att.Data.Slot, att.Data.CommitteeIndex)
require.NoError(t, err)
attestingIndices, err := attestation.AttestingIndices(att.AggregationBits, committee)
require.NoError(t, err)
sigs := make([]bls.Signature, len(attestingIndices))
for i, indice := range attestingIndices {
sb, err := signing.ComputeDomainAndSign(beaconState, 0, att.Data, params.BeaconConfig().DomainBeaconAttester, privKeys[indice])
cfc := beaconState.CurrentJustifiedCheckpoint()
cfc.Root = mockRoot[:]
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(cfc))
committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att.Data.Slot, 0)
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
attestingIndices, err := attestation.AttestingIndices(att, committee)
require.NoError(t, err)
sigs[i] = sig
}
att.Signature = bls.AggregateSignatures(sigs).Marshal()
sigs := make([]bls.Signature, len(attestingIndices))
for i, indice := range attestingIndices {
sb, err := signing.ComputeDomainAndSign(beaconState, 0, att.Data, params.BeaconConfig().DomainBeaconAttester, privKeys[indice])
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
sigs[i] = sig
}
att.Signature = bls.AggregateSignatures(sigs).Marshal()
block := util.NewBeaconBlockAltair()
block.Block.Body.Attestations = []*ethpb.Attestation{att}
block := util.NewBeaconBlockElectra()
block.Block.Body.Attestations = []*ethpb.AttestationElectra{att}
err = beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)
require.NoError(t, err)
wsb, err := blocks.NewSignedBeaconBlock(block)
require.NoError(t, err)
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block())
require.NoError(t, err)
err = beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)
require.NoError(t, err)
wsb, err := blocks.NewSignedBeaconBlock(block)
require.NoError(t, err)
_, err = altair.ProcessAttestationsNoVerifySignature(context.Background(), beaconState, wsb.Block())
require.NoError(t, err)
})
}
func TestProcessAttestationNoVerify_SourceTargetHead(t *testing.T) {
@@ -273,7 +321,7 @@ func TestProcessAttestationNoVerify_SourceTargetHead(t *testing.T) {
committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att.Data.Slot, att.Data.CommitteeIndex)
require.NoError(t, err)
indices, err := attestation.AttestingIndices(att.AggregationBits, committee)
indices, err := attestation.AttestingIndices(att, committee)
require.NoError(t, err)
for _, index := range indices {
has, err := altair.HasValidatorFlag(p[index], params.BeaconConfig().TimelyHeadFlagIndex)

View File

@@ -154,11 +154,11 @@ func TranslateParticipation(ctx context.Context, state state.BeaconState, atts [
if err != nil {
return nil, err
}
committee, err := helpers.BeaconCommitteeFromState(ctx, state, att.Data.Slot, att.Data.CommitteeIndex)
committee, err := helpers.BeaconCommitteeFromState(ctx, state, att.GetData().Slot, att.GetData().CommitteeIndex)
if err != nil {
return nil, err
}
indices, err := attestation.AttestingIndices(att.AggregationBits, committee)
indices, err := attestation.AttestingIndices(att, committee)
if err != nil {
return nil, err
}

View File

@@ -55,7 +55,7 @@ func TestTranslateParticipation(t *testing.T) {
committee, err := helpers.BeaconCommitteeFromState(ctx, s, pendingAtts[0].Data.Slot, pendingAtts[0].Data.CommitteeIndex)
require.NoError(t, err)
indices, err := attestation.AttestingIndices(pendingAtts[0].AggregationBits, committee)
indices, err := attestation.AttestingIndices(pendingAtts[0], committee)
require.NoError(t, err)
for _, index := range indices {
has, err := altair.HasValidatorFlag(participation[index], params.BeaconConfig().TimelySourceFlagIndex)

View File

@@ -46,7 +46,7 @@ func ProcessAttestationsNoVerifySignature(
func VerifyAttestationNoVerifySignature(
ctx context.Context,
beaconState state.ReadOnlyBeaconState,
att interfaces.Attestation,
att ethpb.Att,
) error {
ctx, span := trace.StartSpan(ctx, "core.VerifyAttestationNoVerifySignature")
defer span.End()
@@ -107,22 +107,53 @@ func VerifyAttestationNoVerifySignature(
return err
}
c := helpers.SlotCommitteeCount(activeValidatorCount)
if uint64(att.GetData().CommitteeIndex) >= c {
return fmt.Errorf("committee index %d >= committee count %d", att.GetData().CommitteeIndex, c)
}
if err := helpers.VerifyAttestationBitfieldLengths(ctx, beaconState, att); err != nil {
return errors.Wrap(err, "could not verify attestation bitfields")
}
var indexedAtt ethpb.IndexedAtt
// Verify attesting indices are correct.
committee, err := helpers.BeaconCommitteeFromState(ctx, beaconState, att.GetData().Slot, att.GetData().CommitteeIndex)
if err != nil {
return err
}
indexedAtt, err := attestation.ConvertToIndexed(ctx, att, committee)
if err != nil {
return err
if att.Version() < version.Electra {
if uint64(att.GetData().CommitteeIndex) >= c {
return fmt.Errorf("committee index %d >= committee count %d", att.GetData().CommitteeIndex, c)
}
if err = helpers.VerifyAttestationBitfieldLengths(ctx, beaconState, att); err != nil {
return errors.Wrap(err, "could not verify attestation bitfields")
}
// Verify attesting indices are correct.
committee, err := helpers.BeaconCommitteeFromState(ctx, beaconState, att.GetData().Slot, att.GetData().CommitteeIndex)
if err != nil {
return err
}
indexedAtt, err = attestation.ConvertToIndexed(ctx, att, committee)
if err != nil {
return err
}
} else {
if att.GetData().CommitteeIndex != 0 {
return errors.New("committee index must be 0 post-Electra")
}
committeeIndices := att.CommitteeBitsVal().BitIndices()
committees := make([][]primitives.ValidatorIndex, len(committeeIndices))
participantsCount := 0
var err error
for i, ci := range committeeIndices {
if uint64(ci) >= c {
return fmt.Errorf("committee index %d >= committee count %d", ci, c)
}
committees[i], err = helpers.BeaconCommitteeFromState(ctx, beaconState, att.GetData().Slot, primitives.CommitteeIndex(ci))
if err != nil {
return err
}
participantsCount += len(committees[i])
}
if att.GetAggregationBits().Len() != uint64(participantsCount) {
return fmt.Errorf("aggregation bits count %d is different than participant count %d", att.GetAggregationBits().Len(), participantsCount)
}
indexedAtt, err = attestation.ConvertToIndexed(ctx, att, committees...)
if err != nil {
return err
}
}
return attestation.IsValidAttestationIndices(ctx, indexedAtt)
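For post-Electra attestations the branch above enforces three shape checks before conversion to an indexed attestation: committee_index must be zero, every set committee bit must reference an existing committee, and the aggregation bitlist length must equal the summed size of the selected committees. A reduced sketch of those checks follows; committee sizes are supplied directly here, whereas the real code looks them up from state.

package main

import (
	"errors"
	"fmt"
)

// verifyElectraAttestationShape condenses the post-Electra checks shown above.
func verifyElectraAttestationShape(
	committeeIndex uint64,
	committeeBitIndices []int,
	committeeSizes map[int]int, // committee index -> number of members
	committeeCount uint64,
	aggregationBitsLen uint64,
) error {
	if committeeIndex != 0 {
		return errors.New("committee index must be 0 post-Electra")
	}
	participants := uint64(0)
	for _, ci := range committeeBitIndices {
		if uint64(ci) >= committeeCount {
			return fmt.Errorf("committee index %d >= committee count %d", ci, committeeCount)
		}
		participants += uint64(committeeSizes[ci])
	}
	if aggregationBitsLen != participants {
		return fmt.Errorf("aggregation bits count %d is different than participant count %d",
			aggregationBitsLen, participants)
	}
	return nil
}

func main() {
	sizes := map[int]int{0: 3, 1: 4}
	fmt.Println(verifyElectraAttestationShape(0, []int{0, 1}, sizes, 2, 7)) // <nil>
	fmt.Println(verifyElectraAttestationShape(0, []int{0, 1}, sizes, 2, 6)) // length mismatch
}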
@@ -133,7 +164,7 @@ func VerifyAttestationNoVerifySignature(
func ProcessAttestationNoVerifySignature(
ctx context.Context,
beaconState state.BeaconState,
att interfaces.Attestation,
att ethpb.Att,
) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "core.ProcessAttestationNoVerifySignature")
defer span.End()
@@ -169,23 +200,6 @@ func ProcessAttestationNoVerifySignature(
return beaconState, nil
}
// VerifyAttestationSignature converts an attestation into an indexed attestation and verifies
// the signature in that attestation.
func VerifyAttestationSignature(ctx context.Context, beaconState state.ReadOnlyBeaconState, att interfaces.Attestation) error {
if err := helpers.ValidateNilAttestation(att); err != nil {
return err
}
committee, err := helpers.BeaconCommitteeFromState(ctx, beaconState, att.GetData().Slot, att.GetData().CommitteeIndex)
if err != nil {
return err
}
indexedAtt, err := attestation.ConvertToIndexed(ctx, att, committee)
if err != nil {
return err
}
return VerifyIndexedAttestation(ctx, beaconState, indexedAtt)
}
// VerifyIndexedAttestation determines the validity of an indexed attestation.
//
// Spec pseudocode definition:

View File

@@ -11,7 +11,6 @@ import (
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
@@ -45,7 +44,7 @@ func TestProcessAggregatedAttestation_OverlappingBits(t *testing.T) {
committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att1.Data.Slot, att1.Data.CommitteeIndex)
require.NoError(t, err)
attestingIndices1, err := attestation.AttestingIndices(att1.AggregationBits, committee)
attestingIndices1, err := attestation.AttestingIndices(att1, committee)
require.NoError(t, err)
sigs := make([]bls.Signature, len(attestingIndices1))
for i, indice := range attestingIndices1 {
@@ -67,7 +66,7 @@ func TestProcessAggregatedAttestation_OverlappingBits(t *testing.T) {
committee, err = helpers.BeaconCommitteeFromState(context.Background(), beaconState, att2.Data.Slot, att2.Data.CommitteeIndex)
require.NoError(t, err)
attestingIndices2, err := attestation.AttestingIndices(att2.AggregationBits, committee)
attestingIndices2, err := attestation.AttestingIndices(att2, committee)
require.NoError(t, err)
sigs = make([]bls.Signature, len(attestingIndices2))
for i, indice := range attestingIndices2 {
@@ -222,6 +221,83 @@ func TestVerifyAttestationNoVerifySignature_BadAttIdx(t *testing.T) {
require.ErrorContains(t, "committee index 100 >= committee count 1", err)
}
func TestVerifyAttestationNoVerifySignature_Electra(t *testing.T) {
var mockRoot [32]byte
copy(mockRoot[:], "hello-world")
var zeroSig [fieldparams.BLSSignatureLength]byte
beaconState, _ := util.DeterministicGenesisState(t, 100)
err := beaconState.SetSlot(beaconState.Slot() + params.BeaconConfig().MinAttestationInclusionDelay)
require.NoError(t, err)
ckp := beaconState.CurrentJustifiedCheckpoint()
copy(ckp.Root, "hello-world")
require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(ckp))
require.NoError(t, beaconState.AppendCurrentEpochAttestations(&ethpb.PendingAttestation{}))
t.Run("ok", func(t *testing.T) {
aggBits := bitfield.NewBitlist(3)
aggBits.SetBitAt(1, true)
committeeBits := bitfield.NewBitvector64()
committeeBits.SetBitAt(0, true)
att := &ethpb.AttestationElectra{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0, Root: mockRoot[:]},
Target: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
},
AggregationBits: aggBits,
CommitteeBits: committeeBits,
}
att.Signature = zeroSig[:]
err = blocks.VerifyAttestationNoVerifySignature(context.TODO(), beaconState, att)
assert.NoError(t, err)
})
t.Run("non-zero committee index", func(t *testing.T) {
att := &ethpb.AttestationElectra{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0, Root: mockRoot[:]},
Target: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
CommitteeIndex: 1,
},
AggregationBits: bitfield.NewBitlist(1),
CommitteeBits: bitfield.NewBitvector64(),
}
err = blocks.VerifyAttestationNoVerifySignature(context.TODO(), beaconState, att)
assert.ErrorContains(t, "committee index must be 0 post-Electra", err)
})
t.Run("index of committee too big", func(t *testing.T) {
aggBits := bitfield.NewBitlist(3)
committeeBits := bitfield.NewBitvector64()
committeeBits.SetBitAt(63, true)
att := &ethpb.AttestationElectra{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0, Root: mockRoot[:]},
Target: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
},
AggregationBits: aggBits,
CommitteeBits: committeeBits,
}
att.Signature = zeroSig[:]
err = blocks.VerifyAttestationNoVerifySignature(context.TODO(), beaconState, att)
assert.ErrorContains(t, "committee index 63 >= committee count 1", err)
})
t.Run("wrong aggregation bits count", func(t *testing.T) {
aggBits := bitfield.NewBitlist(123)
committeeBits := bitfield.NewBitvector64()
committeeBits.SetBitAt(0, true)
att := &ethpb.AttestationElectra{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 0, Root: mockRoot[:]},
Target: &ethpb.Checkpoint{Epoch: 0, Root: make([]byte, 32)},
},
AggregationBits: aggBits,
CommitteeBits: committeeBits,
}
att.Signature = zeroSig[:]
err = blocks.VerifyAttestationNoVerifySignature(context.TODO(), beaconState, att)
assert.ErrorContains(t, "aggregation bits count 123 is different than participant count 3", err)
})
}
func TestConvertToIndexed_OK(t *testing.T) {
helpers.ClearCache()
validators := make([]*ethpb.Validator, 2*params.BeaconConfig().SlotsPerEpoch)
@@ -387,7 +463,7 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) {
sig := keys[0].Sign([]byte{'t', 'e', 's', 't'})
list := bitfield.Bitlist{0b11111}
var atts []interfaces.Attestation
var atts []ethpb.Att
for i := uint64(0); i < 1000; i++ {
atts = append(atts, &ethpb.Attestation{
Data: &ethpb.AttestationData{
@@ -403,7 +479,7 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) {
_, err := blocks.AttestationSignatureBatch(context.Background(), beaconState, atts)
assert.ErrorContains(t, want, err)
atts = []interfaces.Attestation{}
atts = []ethpb.Att{}
list = bitfield.Bitlist{0b10000}
for i := uint64(0); i < 1000; i++ {
atts = append(atts, &ethpb.Attestation{
@@ -502,53 +578,109 @@ func TestRetrieveAttestationSignatureSet_VerifiesMultipleAttestations(t *testing
}
}
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetSlot(5))
require.NoError(t, st.SetValidators(validators))
t.Run("pre-Electra", func(t *testing.T) {
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetSlot(5))
require.NoError(t, st.SetValidators(validators))
comm1, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1 /*slot*/, 0 /*committeeIndex*/)
require.NoError(t, err)
att1 := util.HydrateAttestation(&ethpb.Attestation{
AggregationBits: bitfield.NewBitlist(uint64(len(comm1))),
Data: &ethpb.AttestationData{
Slot: 1,
},
comm1, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1 /*slot*/, 0 /*committeeIndex*/)
require.NoError(t, err)
att1 := util.HydrateAttestation(&ethpb.Attestation{
AggregationBits: bitfield.NewBitlist(uint64(len(comm1))),
Data: &ethpb.AttestationData{
Slot: 1,
},
})
domain, err := signing.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorsRoot())
require.NoError(t, err)
root, err := signing.ComputeSigningRoot(att1.Data, domain)
require.NoError(t, err)
var sigs []bls.Signature
for i, u := range comm1 {
att1.AggregationBits.SetBitAt(uint64(i), true)
sigs = append(sigs, keys[u].Sign(root[:]))
}
att1.Signature = bls.AggregateSignatures(sigs).Marshal()
comm2, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1 /*slot*/, 1 /*committeeIndex*/)
require.NoError(t, err)
att2 := util.HydrateAttestation(&ethpb.Attestation{
AggregationBits: bitfield.NewBitlist(uint64(len(comm2))),
Data: &ethpb.AttestationData{
Slot: 1,
CommitteeIndex: 1,
},
})
root, err = signing.ComputeSigningRoot(att2.Data, domain)
require.NoError(t, err)
sigs = nil
for i, u := range comm2 {
att2.AggregationBits.SetBitAt(uint64(i), true)
sigs = append(sigs, keys[u].Sign(root[:]))
}
att2.Signature = bls.AggregateSignatures(sigs).Marshal()
set, err := blocks.AttestationSignatureBatch(ctx, st, []ethpb.Att{att1, att2})
require.NoError(t, err)
verified, err := set.Verify()
require.NoError(t, err)
assert.Equal(t, true, verified, "Multiple signatures were unable to be verified.")
})
domain, err := signing.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorsRoot())
require.NoError(t, err)
root, err := signing.ComputeSigningRoot(att1.Data, domain)
require.NoError(t, err)
var sigs []bls.Signature
for i, u := range comm1 {
att1.AggregationBits.SetBitAt(uint64(i), true)
sigs = append(sigs, keys[u].Sign(root[:]))
}
att1.Signature = bls.AggregateSignatures(sigs).Marshal()
t.Run("post-Electra", func(t *testing.T) {
st, err := util.NewBeaconStateElectra()
require.NoError(t, err)
require.NoError(t, st.SetSlot(5))
require.NoError(t, st.SetValidators(validators))
comm2, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1 /*slot*/, 1 /*committeeIndex*/)
require.NoError(t, err)
att2 := util.HydrateAttestation(&ethpb.Attestation{
AggregationBits: bitfield.NewBitlist(uint64(len(comm2))),
Data: &ethpb.AttestationData{
Slot: 1,
CommitteeIndex: 1,
},
comm1, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1 /*slot*/, 0 /*committeeIndex*/)
require.NoError(t, err)
commBits1 := primitives.NewAttestationCommitteeBits()
commBits1.SetBitAt(0, true)
att1 := util.HydrateAttestationElectra(&ethpb.AttestationElectra{
AggregationBits: bitfield.NewBitlist(uint64(len(comm1))),
CommitteeBits: commBits1,
Data: &ethpb.AttestationData{
Slot: 1,
},
})
domain, err := signing.Domain(st.Fork(), st.Fork().Epoch, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorsRoot())
require.NoError(t, err)
root, err := signing.ComputeSigningRoot(att1.Data, domain)
require.NoError(t, err)
var sigs []bls.Signature
for i, u := range comm1 {
att1.AggregationBits.SetBitAt(uint64(i), true)
sigs = append(sigs, keys[u].Sign(root[:]))
}
att1.Signature = bls.AggregateSignatures(sigs).Marshal()
comm2, err := helpers.BeaconCommitteeFromState(context.Background(), st, 1 /*slot*/, 1 /*committeeIndex*/)
require.NoError(t, err)
commBits2 := primitives.NewAttestationCommitteeBits()
commBits2.SetBitAt(1, true)
att2 := util.HydrateAttestationElectra(&ethpb.AttestationElectra{
AggregationBits: bitfield.NewBitlist(uint64(len(comm2))),
CommitteeBits: commBits2,
Data: &ethpb.AttestationData{
Slot: 1,
},
})
root, err = signing.ComputeSigningRoot(att2.Data, domain)
require.NoError(t, err)
sigs = nil
for i, u := range comm2 {
att2.AggregationBits.SetBitAt(uint64(i), true)
sigs = append(sigs, keys[u].Sign(root[:]))
}
att2.Signature = bls.AggregateSignatures(sigs).Marshal()
set, err := blocks.AttestationSignatureBatch(ctx, st, []ethpb.Att{att1, att2})
require.NoError(t, err)
verified, err := set.Verify()
require.NoError(t, err)
assert.Equal(t, true, verified, "Multiple signatures were unable to be verified.")
})
root, err = signing.ComputeSigningRoot(att2.Data, domain)
require.NoError(t, err)
sigs = nil
for i, u := range comm2 {
att2.AggregationBits.SetBitAt(uint64(i), true)
sigs = append(sigs, keys[u].Sign(root[:]))
}
att2.Signature = bls.AggregateSignatures(sigs).Marshal()
set, err := blocks.AttestationSignatureBatch(ctx, st, []interfaces.Attestation{att1, att2})
require.NoError(t, err)
verified, err := set.Verify()
require.NoError(t, err)
assert.Equal(t, true, verified, "Multiple signatures were unable to be verified.")
}
func TestRetrieveAttestationSignatureSet_AcrossFork(t *testing.T) {
@@ -608,6 +740,6 @@ func TestRetrieveAttestationSignatureSet_AcrossFork(t *testing.T) {
}
att2.Signature = bls.AggregateSignatures(sigs).Marshal()
_, err = blocks.AttestationSignatureBatch(ctx, st, []interfaces.Attestation{att1, att2})
_, err = blocks.AttestationSignatureBatch(ctx, st, []ethpb.Att{att1, att2})
require.NoError(t, err)
}

View File

@@ -7,14 +7,11 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/container/slice"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/slashings"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
@@ -41,7 +38,7 @@ import (
func ProcessAttesterSlashings(
ctx context.Context,
beaconState state.BeaconState,
slashings []interfaces.AttesterSlashing,
slashings []ethpb.AttSlashing,
slashFunc slashValidatorFunc,
) (state.BeaconState, error) {
var err error
@@ -58,7 +55,7 @@ func ProcessAttesterSlashings(
func ProcessAttesterSlashing(
ctx context.Context,
beaconState state.BeaconState,
slashing interfaces.AttesterSlashing,
slashing ethpb.AttSlashing,
slashFunc slashValidatorFunc,
) (state.BeaconState, error) {
if err := VerifyAttesterSlashing(ctx, beaconState, slashing); err != nil {
@@ -78,19 +75,7 @@ func ProcessAttesterSlashing(
return nil, err
}
if helpers.IsSlashableValidator(val.ActivationEpoch(), val.WithdrawableEpoch(), val.Slashed(), currentEpoch) {
cfg := params.BeaconConfig()
var slashingQuotient uint64
switch {
case beaconState.Version() == version.Phase0:
slashingQuotient = cfg.MinSlashingPenaltyQuotient
case beaconState.Version() == version.Altair:
slashingQuotient = cfg.MinSlashingPenaltyQuotientAltair
case beaconState.Version() >= version.Bellatrix:
slashingQuotient = cfg.MinSlashingPenaltyQuotientBellatrix
default:
return nil, errors.New("unknown state version")
}
beaconState, err = slashFunc(ctx, beaconState, primitives.ValidatorIndex(validatorIndex), slashingQuotient, cfg.ProposerRewardQuotient)
beaconState, err = slashFunc(ctx, beaconState, primitives.ValidatorIndex(validatorIndex))
if err != nil {
return nil, errors.Wrapf(err, "could not slash validator index %d",
validatorIndex)
@@ -105,18 +90,18 @@ func ProcessAttesterSlashing(
}
// VerifyAttesterSlashing validates the attestation data in both attestations in the slashing object.
func VerifyAttesterSlashing(ctx context.Context, beaconState state.ReadOnlyBeaconState, slashing interfaces.AttesterSlashing) error {
func VerifyAttesterSlashing(ctx context.Context, beaconState state.ReadOnlyBeaconState, slashing ethpb.AttSlashing) error {
if slashing == nil {
return errors.New("nil slashing")
}
if slashing.GetFirstAttestation() == nil || slashing.GetSecondAttestation() == nil {
if slashing.FirstAttestation() == nil || slashing.SecondAttestation() == nil {
return errors.New("nil attestation")
}
if slashing.GetFirstAttestation().GetData() == nil || slashing.GetSecondAttestation().GetData() == nil {
if slashing.FirstAttestation().GetData() == nil || slashing.SecondAttestation().GetData() == nil {
return errors.New("nil attestation data")
}
att1 := slashing.GetFirstAttestation()
att2 := slashing.GetSecondAttestation()
att1 := slashing.FirstAttestation()
att2 := slashing.SecondAttestation()
data1 := att1.GetData()
data2 := att2.GetData()
if !IsSlashableAttestationData(data1, data2) {
@@ -158,11 +143,11 @@ func IsSlashableAttestationData(data1, data2 *ethpb.AttestationData) bool {
}
// SlashableAttesterIndices returns the intersection of attester indices from both attestations in this slashing.
func SlashableAttesterIndices(slashing interfaces.AttesterSlashing) []uint64 {
if slashing == nil || slashing.GetFirstAttestation() == nil || slashing.GetSecondAttestation() == nil {
func SlashableAttesterIndices(slashing ethpb.AttSlashing) []uint64 {
if slashing == nil || slashing.FirstAttestation() == nil || slashing.SecondAttestation() == nil {
return nil
}
indices1 := slashing.GetFirstAttestation().GetAttestingIndices()
indices2 := slashing.GetSecondAttestation().GetAttestingIndices()
indices1 := slashing.FirstAttestation().GetAttestingIndices()
indices2 := slashing.SecondAttestation().GetAttestingIndices()
return slice.IntersectionUint64(indices1, indices2)
}
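SlashableAttesterIndices intersects the attesting index lists of the two conflicting attestations; validators in that intersection are the ones eligible for slashing. The small illustration below computes the intersection of two sorted index lists (indexed attestations keep attesting_indices sorted); the real code delegates to slice.IntersectionUint64, which this sketch merely approximates.

package main

import "fmt"

// intersectSorted returns the values present in both sorted uint64 slices.
func intersectSorted(a, b []uint64) []uint64 {
	var out []uint64
	i, j := 0, 0
	for i < len(a) && j < len(b) {
		switch {
		case a[i] == b[j]:
			out = append(out, a[i])
			i++
			j++
		case a[i] < b[j]:
			i++
		default:
			j++
		}
	}
	return out
}

func main() {
	att1Indices := []uint64{2, 5, 9, 14}
	att2Indices := []uint64{5, 9, 21}
	fmt.Println(intersectSorted(att1Indices, att2Indices)) // [5 9]
}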

View File

@@ -9,7 +9,6 @@ import (
v "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/validators"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
@@ -58,7 +57,7 @@ func TestProcessAttesterSlashings_DataNotSlashable(t *testing.T) {
AttesterSlashings: slashings,
},
}
ss := make([]interfaces.AttesterSlashing, len(b.Block.Body.AttesterSlashings))
ss := make([]ethpb.AttSlashing, len(b.Block.Body.AttesterSlashings))
for i, s := range b.Block.Body.AttesterSlashings {
ss[i] = s
}
@@ -97,7 +96,7 @@ func TestProcessAttesterSlashings_IndexedAttestationFailedToVerify(t *testing.T)
},
}
ss := make([]interfaces.AttesterSlashing, len(b.Block.Body.AttesterSlashings))
ss := make([]ethpb.AttSlashing, len(b.Block.Body.AttesterSlashings))
for i, s := range b.Block.Body.AttesterSlashings {
ss[i] = s
}
@@ -153,7 +152,7 @@ func TestProcessAttesterSlashings_AppliesCorrectStatus(t *testing.T) {
},
}
ss := make([]interfaces.AttesterSlashing, len(b.Block.Body.AttesterSlashings))
ss := make([]ethpb.AttSlashing, len(b.Block.Body.AttesterSlashings))
for i, s := range b.Block.Body.AttesterSlashings {
ss[i] = s
}
@@ -226,7 +225,7 @@ func TestProcessAttesterSlashings_AppliesCorrectStatusAltair(t *testing.T) {
},
}
ss := make([]interfaces.AttesterSlashing, len(b.Block.Body.AttesterSlashings))
ss := make([]ethpb.AttSlashing, len(b.Block.Body.AttesterSlashings))
for i, s := range b.Block.Body.AttesterSlashings {
ss[i] = s
}
@@ -299,7 +298,7 @@ func TestProcessAttesterSlashings_AppliesCorrectStatusBellatrix(t *testing.T) {
},
}
ss := make([]interfaces.AttesterSlashing, len(b.Block.Body.AttesterSlashings))
ss := make([]ethpb.AttSlashing, len(b.Block.Body.AttesterSlashings))
for i, s := range b.Block.Body.AttesterSlashings {
ss[i] = s
}
@@ -372,7 +371,7 @@ func TestProcessAttesterSlashings_AppliesCorrectStatusCapella(t *testing.T) {
},
}
ss := make([]interfaces.AttesterSlashing, len(b.Block.Body.AttesterSlashings))
ss := make([]ethpb.AttSlashing, len(b.Block.Body.AttesterSlashings))
for i, s := range b.Block.Body.AttesterSlashings {
ss[i] = s
}

View File

@@ -10,7 +10,6 @@ import (
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/require"
@@ -217,7 +216,7 @@ func TestFuzzProcessAttesterSlashings_10000(t *testing.T) {
fuzzer.Fuzz(a)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
require.NoError(t, err)
r, err := ProcessAttesterSlashings(ctx, s, []interfaces.AttesterSlashing{a}, v.SlashValidator)
r, err := ProcessAttesterSlashings(ctx, s, []ethpb.AttSlashing{a}, v.SlashValidator)
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and slashing: %v", r, err, state, a)
}
@@ -298,21 +297,6 @@ func TestFuzzVerifyIndexedAttestationn_10000(t *testing.T) {
}
}
func TestFuzzVerifyAttestation_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethpb.BeaconState{}
attestation := &ethpb.Attestation{}
ctx := context.Background()
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(attestation)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
require.NoError(t, err)
err = VerifyAttestationSignature(ctx, s, attestation)
_ = err
}
}
func TestFuzzProcessDeposits_10000(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
state := &ethpb.BeaconState{}

View File

@@ -8,7 +8,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
v "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/validators"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -92,7 +91,7 @@ func TestProcessAttesterSlashings_RegressionSlashableIndices(t *testing.T) {
},
}
ss := make([]interfaces.AttesterSlashing, len(b.Block.Body.AttesterSlashings))
ss := make([]ethpb.AttSlashing, len(b.Block.Body.AttesterSlashings))
for i, s := range b.Block.Body.AttesterSlashings {
ss[i] = s
}

View File

@@ -135,8 +135,6 @@ func TestProcessVoluntaryExits_AppliesCorrectStatus(t *testing.T) {
}
func TestVerifyExitAndSignature(t *testing.T) {
undo := util.HackDenebMaxuint(t)
defer undo()
denebSlot, err := slots.EpochStart(params.BeaconConfig().DenebForkEpoch)
require.NoError(t, err)
tests := []struct {

View File

@@ -163,7 +163,42 @@ func NewGenesisBlockForState(ctx context.Context, st state.BeaconState) (interfa
SyncCommitteeBits: make([]byte, fieldparams.SyncCommitteeLength/8),
SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
},
ExecutionPayload: &enginev1.ExecutionPayloadDeneb{ // Deneb difference.
ExecutionPayload: &enginev1.ExecutionPayloadDeneb{
ParentHash: make([]byte, 32),
FeeRecipient: make([]byte, 20),
StateRoot: make([]byte, 32),
ReceiptsRoot: make([]byte, 32),
LogsBloom: make([]byte, 256),
PrevRandao: make([]byte, 32),
ExtraData: make([]byte, 0),
BaseFeePerGas: make([]byte, 32),
BlockHash: make([]byte, 32),
Transactions: make([][]byte, 0),
Withdrawals: make([]*enginev1.Withdrawal, 0),
},
BlsToExecutionChanges: make([]*ethpb.SignedBLSToExecutionChange, 0),
BlobKzgCommitments: make([][]byte, 0),
},
},
Signature: params.BeaconConfig().EmptySignature[:],
})
case *ethpb.BeaconStateElectra:
return blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockElectra{
Block: &ethpb.BeaconBlockElectra{
ParentRoot: params.BeaconConfig().ZeroHash[:],
StateRoot: root[:],
Body: &ethpb.BeaconBlockBodyElectra{
RandaoReveal: make([]byte, 96),
Eth1Data: &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
},
Graffiti: make([]byte, 32),
SyncAggregate: &ethpb.SyncAggregate{
SyncCommitteeBits: make([]byte, fieldparams.SyncCommitteeLength/8),
SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
},
ExecutionPayload: &enginev1.ExecutionPayloadElectra{
ParentHash: make([]byte, 32),
FeeRecipient: make([]byte, 20),
StateRoot: make([]byte, 32),

View File

@@ -1,7 +1,6 @@
package blocks_test
import (
"math/big"
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
@@ -610,7 +609,7 @@ func Test_ProcessPayloadCapella(t *testing.T) {
random, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
require.NoError(t, err)
payload.PrevRandao = random
wrapped, err := consensusblocks.WrappedExecutionPayloadCapella(payload, big.NewInt(0))
wrapped, err := consensusblocks.WrappedExecutionPayloadCapella(payload)
require.NoError(t, err)
_, err = blocks.ProcessPayload(st, wrapped)
require.NoError(t, err)
@@ -874,7 +873,7 @@ func emptyPayloadHeaderCapella() (interfaces.ExecutionData, error) {
BlockHash: make([]byte, fieldparams.RootLength),
TransactionsRoot: make([]byte, fieldparams.RootLength),
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
}, big.NewInt(0))
})
}
func emptyPayload() *enginev1.ExecutionPayload {

View File

@@ -12,12 +12,14 @@ import (
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"google.golang.org/protobuf/proto"
)
type slashValidatorFunc func(ctx context.Context, st state.BeaconState, vid primitives.ValidatorIndex, penaltyQuotient, proposerRewardQuotient uint64) (state.BeaconState, error)
type slashValidatorFunc func(
ctx context.Context,
st state.BeaconState,
vid primitives.ValidatorIndex) (state.BeaconState, error)
// ProcessProposerSlashings is one of the operations performed
// on each processed beacon block to slash proposers based on
@@ -75,19 +77,7 @@ func ProcessProposerSlashing(
if err = VerifyProposerSlashing(beaconState, slashing); err != nil {
return nil, errors.Wrap(err, "could not verify proposer slashing")
}
cfg := params.BeaconConfig()
var slashingQuotient uint64
switch {
case beaconState.Version() == version.Phase0:
slashingQuotient = cfg.MinSlashingPenaltyQuotient
case beaconState.Version() == version.Altair:
slashingQuotient = cfg.MinSlashingPenaltyQuotientAltair
case beaconState.Version() >= version.Bellatrix:
slashingQuotient = cfg.MinSlashingPenaltyQuotientBellatrix
default:
return nil, errors.New("unknown state version")
}
beaconState, err = slashFunc(ctx, beaconState, slashing.Header_1.Header.ProposerIndex, slashingQuotient, cfg.ProposerRewardQuotient)
beaconState, err = slashFunc(ctx, beaconState, slashing.Header_1.Header.ProposerIndex)
if err != nil {
return nil, errors.Wrapf(err, "could not slash proposer index %d", slashing.Header_1.Header.ProposerIndex)
}

View File

@@ -96,6 +96,24 @@ func VerifyBlockHeaderSignature(beaconState state.BeaconState, header *ethpb.Sig
return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
}
func VerifyBlockHeaderSignatureUsingCurrentFork(beaconState state.BeaconState, header *ethpb.SignedBeaconBlockHeader) error {
currentEpoch := slots.ToEpoch(header.Header.Slot)
fork, err := forks.Fork(currentEpoch)
if err != nil {
return err
}
domain, err := signing.Domain(fork, currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
if err != nil {
return err
}
proposer, err := beaconState.ValidatorAtIndex(header.Header.ProposerIndex)
if err != nil {
return err
}
proposerPubKey := proposer.PublicKey
return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
}
// VerifyBlockSignatureUsingCurrentFork verifies the proposer signature of a beacon block. This differs
// from the above method by not using fork data from the state and instead retrieving it
// via the respective epoch.
@@ -179,7 +197,7 @@ func randaoSigningData(ctx context.Context, beaconState state.ReadOnlyBeaconStat
func createAttestationSignatureBatch(
ctx context.Context,
beaconState state.ReadOnlyBeaconState,
atts []interfaces.Attestation,
atts []ethpb.Att,
domain []byte,
) (*bls.SignatureBatch, error) {
if len(atts) == 0 {
@@ -192,11 +210,11 @@ func createAttestationSignatureBatch(
descs := make([]string, len(atts))
for i, a := range atts {
sigs[i] = a.GetSignature()
c, err := helpers.BeaconCommitteeFromState(ctx, beaconState, a.GetData().Slot, a.GetData().CommitteeIndex)
committees, err := helpers.AttestationCommittees(ctx, beaconState, a)
if err != nil {
return nil, err
}
ia, err := attestation.ConvertToIndexed(ctx, a, c)
ia, err := attestation.ConvertToIndexed(ctx, a, committees...)
if err != nil {
return nil, err
}
@@ -233,7 +251,7 @@ func createAttestationSignatureBatch(
// AttestationSignatureBatch retrieves all the related attestation signature data such as the relevant public keys,
// signatures and attestation signing data and collate it into a signature batch object.
func AttestationSignatureBatch(ctx context.Context, beaconState state.ReadOnlyBeaconState, atts []interfaces.Attestation) (*bls.SignatureBatch, error) {
func AttestationSignatureBatch(ctx context.Context, beaconState state.ReadOnlyBeaconState, atts []ethpb.Att) (*bls.SignatureBatch, error) {
if len(atts) == 0 {
return bls.NewSet(), nil
}
@@ -243,8 +261,8 @@ func AttestationSignatureBatch(ctx context.Context, beaconState state.ReadOnlyBe
dt := params.BeaconConfig().DomainBeaconAttester
// Split attestations by fork. Note: the signature domain will differ based on the fork.
var preForkAtts []interfaces.Attestation
var postForkAtts []interfaces.Attestation
var preForkAtts []ethpb.Att
var postForkAtts []ethpb.Att
for _, a := range atts {
if slots.ToEpoch(a.GetData().Slot) < fork.Epoch {
preForkAtts = append(preForkAtts, a)

View File
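A minimal, self-contained sketch (not Prysm's actual types or helpers) of the fork split shown above: attestations are bucketed by whether their slot's epoch falls before the fork epoch, because each bucket must be verified against a different signature domain. The att struct here is illustrative; the only real constant assumed is mainnet's SLOTS_PER_EPOCH.

package main

import "fmt"

const slotsPerEpoch = 32 // mainnet SLOTS_PER_EPOCH

type att struct{ slot uint64 }

// splitByFork groups attestations into pre-fork and post-fork slices so each
// group can be signature-verified under its own domain.
func splitByFork(atts []att, forkEpoch uint64) (pre, post []att) {
	for _, a := range atts {
		if a.slot/slotsPerEpoch < forkEpoch {
			pre = append(pre, a)
		} else {
			post = append(post, a)
		}
	}
	return pre, post
}

func main() {
	pre, post := splitByFork([]att{{slot: 10}, {slot: 31}, {slot: 32}}, 1)
	fmt.Println(len(pre), len(post)) // 2 1
}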

@@ -145,7 +145,7 @@ func ValidateBLSToExecutionChange(st state.ReadOnlyBeaconState, signed *ethpb.Si
// next_validator_index = ValidatorIndex(next_index % len(state.validators))
// state.next_withdrawal_validator_index = next_validator_index
func ProcessWithdrawals(st state.BeaconState, executionData interfaces.ExecutionData) (state.BeaconState, error) {
expectedWithdrawals, err := st.ExpectedWithdrawals()
expectedWithdrawals, _, err := st.ExpectedWithdrawals()
if err != nil {
return nil, errors.Wrap(err, "could not get expected withdrawals")
}

View File

@@ -1,7 +1,6 @@
package blocks_test
import (
"math/big"
"math/rand"
"testing"
@@ -643,10 +642,7 @@ func TestProcessBlindWithdrawals(t *testing.T) {
require.NoError(t, err)
wdRoot, err := ssz.WithdrawalSliceRoot(test.Args.Withdrawals, fieldparams.MaxWithdrawalsPerPayload)
require.NoError(t, err)
p, err := consensusblocks.WrappedExecutionPayloadHeaderCapella(
&enginev1.ExecutionPayloadHeaderCapella{WithdrawalsRoot: wdRoot[:]},
big.NewInt(0),
)
p, err := consensusblocks.WrappedExecutionPayloadHeaderCapella(&enginev1.ExecutionPayloadHeaderCapella{WithdrawalsRoot: wdRoot[:]})
require.NoError(t, err)
post, err := blocks.ProcessWithdrawals(st, p)
if test.Control.ExpectedError {
@@ -1064,7 +1060,7 @@ func TestProcessWithdrawals(t *testing.T) {
}
st, err := prepareValidators(spb, test.Args)
require.NoError(t, err)
p, err := consensusblocks.WrappedExecutionPayloadCapella(&enginev1.ExecutionPayloadCapella{Withdrawals: test.Args.Withdrawals}, big.NewInt(0))
p, err := consensusblocks.WrappedExecutionPayloadCapella(&enginev1.ExecutionPayloadCapella{Withdrawals: test.Args.Withdrawals})
require.NoError(t, err)
post, err := blocks.ProcessWithdrawals(st, p)
if test.Control.ExpectedError {

View File

@@ -0,0 +1,70 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"churn.go",
"consolidations.go",
"deposits.go",
"effective_balance_updates.go",
"registry_updates.go",
"transition.go",
"upgrade.go",
"validator.go",
"withdrawals.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/epoch:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"churn_test.go",
"consolidations_test.go",
"deposits_test.go",
"effective_balance_updates_test.go",
"upgrade_test.go",
"validator_test.go",
],
deps = [
":go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls/blst:go_default_library",
"//crypto/bls/common:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/interop:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
],
)

View File

@@ -0,0 +1,85 @@
package electra
import (
"context"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/math"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
// ComputeConsolidationEpochAndUpdateChurn fulfills the consensus spec definition below. This method
// makes mutating calls to the beacon state.
//
// Spec definition:
//
// def compute_consolidation_epoch_and_update_churn(state: BeaconState, consolidation_balance: Gwei) -> Epoch:
// earliest_consolidation_epoch = max(
// state.earliest_consolidation_epoch, compute_activation_exit_epoch(get_current_epoch(state)))
// per_epoch_consolidation_churn = get_consolidation_churn_limit(state)
// # New epoch for consolidations.
// if state.earliest_consolidation_epoch < earliest_consolidation_epoch:
// consolidation_balance_to_consume = per_epoch_consolidation_churn
// else:
// consolidation_balance_to_consume = state.consolidation_balance_to_consume
//
// # Consolidation doesn't fit in the current earliest epoch.
// if consolidation_balance > consolidation_balance_to_consume:
// balance_to_process = consolidation_balance - consolidation_balance_to_consume
// additional_epochs = (balance_to_process - 1) // per_epoch_consolidation_churn + 1
// earliest_consolidation_epoch += additional_epochs
// consolidation_balance_to_consume += additional_epochs * per_epoch_consolidation_churn
//
// # Consume the balance and update state variables.
// state.consolidation_balance_to_consume = consolidation_balance_to_consume - consolidation_balance
// state.earliest_consolidation_epoch = earliest_consolidation_epoch
//
// return state.earliest_consolidation_epoch
func ComputeConsolidationEpochAndUpdateChurn(ctx context.Context, s state.BeaconState, consolidationBalance primitives.Gwei) (primitives.Epoch, error) {
earliestEpoch, err := s.EarliestConsolidationEpoch()
if err != nil {
return 0, err
}
earliestConsolidationEpoch := max(earliestEpoch, helpers.ActivationExitEpoch(slots.ToEpoch(s.Slot())))
activeBal, err := helpers.TotalActiveBalance(s)
if err != nil {
return 0, err
}
perEpochConsolidationChurn := helpers.ConsolidationChurnLimit(primitives.Gwei(activeBal))
// New epoch for consolidations.
var consolidationBalanceToConsume primitives.Gwei
if earliestEpoch < earliestConsolidationEpoch {
consolidationBalanceToConsume = perEpochConsolidationChurn
} else {
consolidationBalanceToConsume, err = s.ConsolidationBalanceToConsume()
if err != nil {
return 0, err
}
}
// Consolidation doesn't fit in the current earliest epoch.
if consolidationBalance > consolidationBalanceToConsume {
balanceToProcess := consolidationBalance - consolidationBalanceToConsume
// additional_epochs = (balance_to_process - 1) // per_epoch_consolidation_churn + 1
additionalEpochs, err := math.Div64(uint64(balanceToProcess-1), uint64(perEpochConsolidationChurn))
if err != nil {
return 0, err
}
additionalEpochs++
earliestConsolidationEpoch += primitives.Epoch(additionalEpochs)
consolidationBalanceToConsume += primitives.Gwei(additionalEpochs) * perEpochConsolidationChurn
}
// Consume the balance and update state variables.
if err := s.SetConsolidationBalanceToConsume(consolidationBalanceToConsume - consolidationBalance); err != nil {
return 0, err
}
if err := s.SetEarliestConsolidationEpoch(earliestConsolidationEpoch); err != nil {
return 0, err
}
return earliestConsolidationEpoch, nil
}

View File
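A small, self-contained sketch of the churn arithmetic implemented above, using the 32M-ETH state (232 ETH of consolidation churn per epoch) exercised by the tests in the next file. The function and constant names are illustrative, not Prysm's API.

package main

import "fmt"

// 232 ETH in gwei: the per-epoch consolidation churn for roughly 32M ETH of active balance.
const perEpochConsolidationChurn = uint64(232_000_000_000)

// consolidationEpoch mirrors the spec arithmetic: if the requested balance does not fit
// in what is left for the earliest epoch, push the epoch forward by whole churn increments
// and grow the balance to consume accordingly, then subtract the consumed balance.
func consolidationEpoch(earliestEpoch, balanceToConsume, consolidationBalance uint64) (uint64, uint64) {
	if consolidationBalance > balanceToConsume {
		balanceToProcess := consolidationBalance - balanceToConsume
		additionalEpochs := (balanceToProcess-1)/perEpochConsolidationChurn + 1
		earliestEpoch += additionalEpochs
		balanceToConsume += additionalEpochs * perEpochConsolidationChurn
	}
	return earliestEpoch, balanceToConsume - consolidationBalance
}

func main() {
	// Consolidating 235 ETH against a fresh epoch with 232 ETH of churn flows into
	// the next epoch and leaves 2*232-235 = 229 ETH to consume.
	epoch, remaining := consolidationEpoch(15, perEpochConsolidationChurn, 235_000_000_000)
	fmt.Println(epoch, remaining) // 16 229000000000
}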

@@ -0,0 +1,141 @@
package electra_test
import (
"context"
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
func createValidatorsWithTotalActiveBalance(totalBal primitives.Gwei) []*eth.Validator {
num := totalBal / primitives.Gwei(params.BeaconConfig().MinActivationBalance)
vals := make([]*eth.Validator, num)
for i := range vals {
vals[i] = &eth.Validator{
ActivationEpoch: primitives.Epoch(0),
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
}
}
if totalBal%primitives.Gwei(params.BeaconConfig().MinActivationBalance) != 0 {
vals = append(vals, &eth.Validator{
ActivationEpoch: primitives.Epoch(0),
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: uint64(totalBal) % params.BeaconConfig().MinActivationBalance,
})
}
return vals
}
func TestComputeConsolidationEpochAndUpdateChurn(t *testing.T) {
// Test setup: create a state with 32M ETH total active balance.
// In this state, the consolidation churn limit is expected to be 232 ETH per epoch.
tests := []struct {
name string
state state.BeaconState
consolidationBalance primitives.Gwei
expectedEpoch primitives.Epoch
expectedConsolidationBalanceToConsume primitives.Gwei
}{
{
name: "compute consolidation with no consolidation balance",
state: func(t *testing.T) state.BeaconState {
s, err := state_native.InitializeFromProtoUnsafeElectra(&eth.BeaconStateElectra{
Slot: slots.UnsafeEpochStart(10),
EarliestConsolidationEpoch: 9,
Validators: createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
})
require.NoError(t, err)
return s
}(t),
consolidationBalance: 0, // 0 ETH
expectedEpoch: 15, // current epoch + 1 + MaxSeedLookahead
expectedConsolidationBalanceToConsume: 232000000000, // 232 ETH
},
{
name: "new epoch for consolidations",
state: func(t *testing.T) state.BeaconState {
s, err := state_native.InitializeFromProtoUnsafeElectra(&eth.BeaconStateElectra{
Slot: slots.UnsafeEpochStart(10),
EarliestConsolidationEpoch: 9,
Validators: createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
})
require.NoError(t, err)
return s
}(t),
consolidationBalance: 32000000000, // 32 ETH
expectedEpoch: 15, // current epoch + 1 + MaxSeedLookahead
expectedConsolidationBalanceToConsume: 200000000000, // 200 ETH
},
{
name: "flows into another epoch",
state: func(t *testing.T) state.BeaconState {
s, err := state_native.InitializeFromProtoUnsafeElectra(&eth.BeaconStateElectra{
Slot: slots.UnsafeEpochStart(10),
EarliestConsolidationEpoch: 9,
Validators: createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
})
require.NoError(t, err)
return s
}(t),
consolidationBalance: 235000000000, // 235 ETH
expectedEpoch: 16, // Flows into another epoch.
expectedConsolidationBalanceToConsume: 229000000000, // 229 ETH
},
{
name: "not a new epoch, fits in remaining balance of current epoch",
state: func(t *testing.T) state.BeaconState {
s, err := state_native.InitializeFromProtoUnsafeElectra(&eth.BeaconStateElectra{
Slot: slots.UnsafeEpochStart(10),
EarliestConsolidationEpoch: 15,
ConsolidationBalanceToConsume: 200000000000, // 200 ETH
Validators: createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
})
require.NoError(t, err)
return s
}(t),
consolidationBalance: 32000000000, // 32 ETH
expectedEpoch: 15, // Fits into current earliest consolidation epoch.
expectedConsolidationBalanceToConsume: 168000000000, // 168 ETH
},
{
name: "not a new epoch, consolidation does not fit in remaining balance of current epoch",
state: func(t *testing.T) state.BeaconState {
s, err := state_native.InitializeFromProtoUnsafeElectra(&eth.BeaconStateElectra{
Slot: slots.UnsafeEpochStart(10),
EarliestConsolidationEpoch: 15,
ConsolidationBalanceToConsume: 200000000000, // 200 ETH
Validators: createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
})
require.NoError(t, err)
return s
}(t),
consolidationBalance: 232000000000, // 232 ETH
expectedEpoch: 16, // Flows into another epoch.
expectedConsolidationBalanceToConsume: 200000000000, // 200 ETH
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
gotEpoch, err := electra.ComputeConsolidationEpochAndUpdateChurn(context.TODO(), tt.state, tt.consolidationBalance)
require.NoError(t, err)
require.Equal(t, tt.expectedEpoch, gotEpoch)
// Check consolidation balance to consume is set on the state.
cbtc, err := tt.state.ConsolidationBalanceToConsume()
require.NoError(t, err)
require.Equal(t, tt.expectedConsolidationBalanceToConsume, cbtc)
// Check earliest consolidation epoch was set on the state.
gotEpoch, err = tt.state.EarliestConsolidationEpoch()
require.NoError(t, err)
require.Equal(t, tt.expectedEpoch, gotEpoch)
})
}
}

View File

@@ -0,0 +1,258 @@
package electra
import (
"context"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"go.opencensus.io/trace"
)
// ProcessPendingConsolidations implements the spec definition below. This method makes mutating
// calls to the beacon state.
//
// Spec definition:
//
// def process_pending_consolidations(state: BeaconState) -> None:
// next_pending_consolidation = 0
// for pending_consolidation in state.pending_consolidations:
// source_validator = state.validators[pending_consolidation.source_index]
// if source_validator.slashed:
// next_pending_consolidation += 1
// continue
// if source_validator.withdrawable_epoch > get_current_epoch(state):
// break
//
// # Churn any target excess active balance of target and raise its max
// switch_to_compounding_validator(state, pending_consolidation.target_index)
// # Move active balance to target. Excess balance is withdrawable.
// active_balance = get_active_balance(state, pending_consolidation.source_index)
// decrease_balance(state, pending_consolidation.source_index, active_balance)
// increase_balance(state, pending_consolidation.target_index, active_balance)
// next_pending_consolidation += 1
//
// state.pending_consolidations = state.pending_consolidations[next_pending_consolidation:]
func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) error {
ctx, span := trace.StartSpan(ctx, "electra.ProcessPendingConsolidations")
defer span.End()
if st == nil || st.IsNil() {
return errors.New("nil state")
}
currentEpoch := slots.ToEpoch(st.Slot())
var nextPendingConsolidation uint64
pendingConsolidations, err := st.PendingConsolidations()
if err != nil {
return err
}
for _, pc := range pendingConsolidations {
sourceValidator, err := st.ValidatorAtIndex(pc.SourceIndex)
if err != nil {
return err
}
if sourceValidator.Slashed {
nextPendingConsolidation++
continue
}
if sourceValidator.WithdrawableEpoch > currentEpoch {
break
}
if err := SwitchToCompoundingValidator(ctx, st, pc.TargetIndex); err != nil {
return err
}
activeBalance, err := st.ActiveBalanceAtIndex(pc.SourceIndex)
if err != nil {
return err
}
if err := helpers.DecreaseBalance(st, pc.SourceIndex, activeBalance); err != nil {
return err
}
if err := helpers.IncreaseBalance(st, pc.TargetIndex, activeBalance); err != nil {
return err
}
nextPendingConsolidation++
}
if nextPendingConsolidation > 0 {
return st.SetPendingConsolidations(pendingConsolidations[nextPendingConsolidation:])
}
return nil
}
// ProcessConsolidations implements the spec definition below. This method makes mutating calls to
// the beacon state.
//
// Spec definition:
//
// def process_consolidation(state: BeaconState, signed_consolidation: SignedConsolidation) -> None:
// # If the pending consolidations queue is full, no consolidations are allowed in the block
// assert len(state.pending_consolidations) < PENDING_CONSOLIDATIONS_LIMIT
// # If there is too little available consolidation churn limit, no consolidations are allowed in the block
// assert get_consolidation_churn_limit(state) > MIN_ACTIVATION_BALANCE
// consolidation = signed_consolidation.message
// # Verify that source != target, so a consolidation cannot be used as an exit.
// assert consolidation.source_index != consolidation.target_index
//
// source_validator = state.validators[consolidation.source_index]
// target_validator = state.validators[consolidation.target_index]
// # Verify the source and the target are active
// current_epoch = get_current_epoch(state)
// assert is_active_validator(source_validator, current_epoch)
// assert is_active_validator(target_validator, current_epoch)
// # Verify exits for source and target have not been initiated
// assert source_validator.exit_epoch == FAR_FUTURE_EPOCH
// assert target_validator.exit_epoch == FAR_FUTURE_EPOCH
// # Consolidations must specify an epoch when they become valid; they are not valid before then
// assert current_epoch >= consolidation.epoch
//
// # Verify the source and the target have Execution layer withdrawal credentials
// assert has_execution_withdrawal_credential(source_validator)
// assert has_execution_withdrawal_credential(target_validator)
// # Verify the same withdrawal address
// assert source_validator.withdrawal_credentials[12:] == target_validator.withdrawal_credentials[12:]
//
// # Verify consolidation is signed by the source and the target
// domain = compute_domain(DOMAIN_CONSOLIDATION, genesis_validators_root=state.genesis_validators_root)
// signing_root = compute_signing_root(consolidation, domain)
// pubkeys = [source_validator.pubkey, target_validator.pubkey]
// assert bls.FastAggregateVerify(pubkeys, signing_root, signed_consolidation.signature)
//
// # Initiate source validator exit and append pending consolidation
// source_validator.exit_epoch = compute_consolidation_epoch_and_update_churn(
// state, source_validator.effective_balance)
// source_validator.withdrawable_epoch = Epoch(
// source_validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY
// )
// state.pending_consolidations.append(PendingConsolidation(
// source_index=consolidation.source_index,
// target_index=consolidation.target_index
// ))
func ProcessConsolidations(ctx context.Context, st state.BeaconState, cs []*ethpb.SignedConsolidation) error {
_, span := trace.StartSpan(ctx, "electra.ProcessConsolidations")
defer span.End()
if st == nil || st.IsNil() {
return errors.New("nil state")
}
if len(cs) == 0 {
return nil // Nothing to process.
}
domain, err := signing.ComputeDomain(
params.BeaconConfig().DomainConsolidation,
nil, // Use genesis fork version
st.GenesisValidatorsRoot(),
)
if err != nil {
return err
}
totalBalance, err := helpers.TotalActiveBalance(st)
if err != nil {
return err
}
if helpers.ConsolidationChurnLimit(primitives.Gwei(totalBalance)) <= primitives.Gwei(params.BeaconConfig().MinActivationBalance) {
return errors.New("too little available consolidation churn limit")
}
currentEpoch := slots.ToEpoch(st.Slot())
for _, c := range cs {
if c == nil || c.Message == nil {
return errors.New("nil consolidation")
}
if n, err := st.NumPendingConsolidations(); err != nil {
return err
} else if n >= params.BeaconConfig().PendingConsolidationsLimit {
return errors.New("pending consolidations queue is full")
}
if c.Message.SourceIndex == c.Message.TargetIndex {
return errors.New("source and target index are the same")
}
source, err := st.ValidatorAtIndex(c.Message.SourceIndex)
if err != nil {
return err
}
target, err := st.ValidatorAtIndex(c.Message.TargetIndex)
if err != nil {
return err
}
if !helpers.IsActiveValidator(source, currentEpoch) {
return errors.New("source is not active")
}
if !helpers.IsActiveValidator(target, currentEpoch) {
return errors.New("target is not active")
}
if source.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
return errors.New("source exit epoch has been initiated")
}
if target.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
return errors.New("target exit epoch has been initiated")
}
if currentEpoch < c.Message.Epoch {
return errors.New("consolidation is not valid yet")
}
if !helpers.HasExecutionWithdrawalCredentials(source) {
return errors.New("source does not have execution withdrawal credentials")
}
if !helpers.HasExecutionWithdrawalCredentials(target) {
return errors.New("target does not have execution withdrawal credentials")
}
if !helpers.IsSameWithdrawalCredentials(source, target) {
return errors.New("source and target have different withdrawal credentials")
}
sr, err := signing.ComputeSigningRoot(c.Message, domain)
if err != nil {
return err
}
sourcePk, err := bls.PublicKeyFromBytes(source.PublicKey)
if err != nil {
return errors.Wrap(err, "could not convert source public key bytes to bls public key")
}
targetPk, err := bls.PublicKeyFromBytes(target.PublicKey)
if err != nil {
return errors.Wrap(err, "could not convert target public key bytes to bls public key")
}
sig, err := bls.SignatureFromBytes(c.Signature)
if err != nil {
return errors.Wrap(err, "could not convert bytes to signature")
}
if !sig.FastAggregateVerify([]bls.PublicKey{sourcePk, targetPk}, sr) {
return errors.New("consolidation signature verification failed")
}
sEE, err := ComputeConsolidationEpochAndUpdateChurn(ctx, st, primitives.Gwei(source.EffectiveBalance))
if err != nil {
return err
}
source.ExitEpoch = sEE
source.WithdrawableEpoch = sEE + params.BeaconConfig().MinValidatorWithdrawabilityDelay
if err := st.UpdateValidatorAtIndex(c.Message.SourceIndex, source); err != nil {
return err
}
if err := st.AppendPendingConsolidation(c.Message.ToPendingConsolidation()); err != nil {
return err
}
}
return nil
}

View File
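A minimal sketch of the pending-consolidation queue walk in ProcessPendingConsolidations, with hypothetical types and plain uint64 balances: slashed sources are skipped, the walk stops at the first source that is not yet withdrawable, and the processed prefix is dropped from the queue. It deliberately ignores the compounding-credential switch and the active-balance cap applied by the real helpers.

package main

import "fmt"

type pendingConsolidation struct{ source, target int }

type validator struct {
	slashed           bool
	withdrawableEpoch uint64
}

// processPending walks the queue in order and returns the unprocessed tail.
func processPending(queue []pendingConsolidation, vals []validator, balances []uint64, currentEpoch uint64) []pendingConsolidation {
	next := 0
	for _, pc := range queue {
		src := vals[pc.source]
		if src.slashed {
			next++ // slashed sources are simply skipped
			continue
		}
		if src.withdrawableEpoch > currentEpoch {
			break // everything from here on stays queued for a later epoch
		}
		balances[pc.target] += balances[pc.source] // move balance to the target
		balances[pc.source] = 0
		next++
	}
	return queue[next:]
}

func main() {
	vals := []validator{{slashed: true}, {}, {}, {}}
	balances := []uint64{32, 32, 32, 32}
	queue := []pendingConsolidation{{source: 0, target: 1}, {source: 2, target: 3}}
	rest := processPending(queue, vals, balances, 10)
	fmt.Println(balances, len(rest)) // [32 32 0 64] 0
}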

@@ -0,0 +1,441 @@
package electra_test
import (
"context"
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/crypto/bls/blst"
"github.com/prysmaticlabs/prysm/v5/crypto/bls/common"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/interop"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
)
func TestProcessPendingConsolidations(t *testing.T) {
tests := []struct {
name string
state state.BeaconState
check func(*testing.T, state.BeaconState)
wantErr bool
}{
{
name: "nil state",
state: nil,
wantErr: true,
},
{
name: "no pending consolidations",
state: func() state.BeaconState {
pb := &eth.BeaconStateElectra{}
st, err := state_native.InitializeFromProtoUnsafeElectra(pb)
require.NoError(t, err)
return st
}(),
wantErr: false,
},
{
name: "processes pending consolidation successfully",
state: func() state.BeaconState {
pb := &eth.BeaconStateElectra{
Validators: []*eth.Validator{
{
WithdrawalCredentials: []byte{0x01, 0xFF},
},
{
WithdrawalCredentials: []byte{0x01, 0xAB},
},
},
Balances: []uint64{
params.BeaconConfig().MinActivationBalance,
params.BeaconConfig().MinActivationBalance,
},
PendingConsolidations: []*eth.PendingConsolidation{
{
SourceIndex: 0,
TargetIndex: 1,
},
},
}
st, err := state_native.InitializeFromProtoUnsafeElectra(pb)
require.NoError(t, err)
return st
}(),
check: func(t *testing.T, st state.BeaconState) {
// Balances are transferred from v0 to v1.
bal0, err := st.BalanceAtIndex(0)
require.NoError(t, err)
require.Equal(t, uint64(0), bal0)
bal1, err := st.BalanceAtIndex(1)
require.NoError(t, err)
require.Equal(t, 2*params.BeaconConfig().MinActivationBalance, bal1)
// The pending consolidation is removed from the list.
num, err := st.NumPendingConsolidations()
require.NoError(t, err)
require.Equal(t, uint64(0), num)
// v1 is switched to compounding validator.
v1, err := st.ValidatorAtIndex(1)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().CompoundingWithdrawalPrefixByte, v1.WithdrawalCredentials[0])
},
wantErr: false,
},
{
name: "stop processing when a source val withdrawable epoch is in the future",
state: func() state.BeaconState {
pb := &eth.BeaconStateElectra{
Validators: []*eth.Validator{
{
WithdrawalCredentials: []byte{0x01, 0xFF},
WithdrawableEpoch: 100,
},
{
WithdrawalCredentials: []byte{0x01, 0xAB},
},
},
Balances: []uint64{
params.BeaconConfig().MinActivationBalance,
params.BeaconConfig().MinActivationBalance,
},
PendingConsolidations: []*eth.PendingConsolidation{
{
SourceIndex: 0,
TargetIndex: 1,
},
},
}
st, err := state_native.InitializeFromProtoUnsafeElectra(pb)
require.NoError(t, err)
return st
}(),
check: func(t *testing.T, st state.BeaconState) {
// No balances are transferred from v0 to v1.
bal0, err := st.BalanceAtIndex(0)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance, bal0)
bal1, err := st.BalanceAtIndex(1)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance, bal1)
// The pending consolidation is still in the list.
num, err := st.NumPendingConsolidations()
require.NoError(t, err)
require.Equal(t, uint64(1), num)
},
wantErr: false,
},
{
name: "slashed validator is not consolidated",
state: func() state.BeaconState {
pb := &eth.BeaconStateElectra{
Validators: []*eth.Validator{
{
WithdrawalCredentials: []byte{0x01, 0xFF},
},
{
WithdrawalCredentials: []byte{0x01, 0xAB},
},
{
Slashed: true,
},
{
WithdrawalCredentials: []byte{0x01, 0xCC},
},
},
Balances: []uint64{
params.BeaconConfig().MinActivationBalance,
params.BeaconConfig().MinActivationBalance,
params.BeaconConfig().MinActivationBalance,
params.BeaconConfig().MinActivationBalance,
},
PendingConsolidations: []*eth.PendingConsolidation{
{
SourceIndex: 2,
TargetIndex: 3,
},
{
SourceIndex: 0,
TargetIndex: 1,
},
},
}
st, err := state_native.InitializeFromProtoUnsafeElectra(pb)
require.NoError(t, err)
return st
}(),
check: func(t *testing.T, st state.BeaconState) {
// No balances are transferred from v2 to v3.
bal0, err := st.BalanceAtIndex(2)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance, bal0)
bal1, err := st.BalanceAtIndex(3)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance, bal1)
// No pending consolidation remaining.
num, err := st.NumPendingConsolidations()
require.NoError(t, err)
require.Equal(t, uint64(0), num)
},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := electra.ProcessPendingConsolidations(context.TODO(), tt.state)
require.Equal(t, tt.wantErr, err != nil)
if tt.check != nil {
tt.check(t, tt.state)
}
})
}
}
func stateWithActiveBalanceETH(t *testing.T, balETH uint64) state.BeaconState {
gwei := balETH * 1_000_000_000
balPerVal := params.BeaconConfig().MinActivationBalance
numVals := gwei / balPerVal
vals := make([]*eth.Validator, numVals)
bals := make([]uint64, numVals)
for i := uint64(0); i < numVals; i++ {
wc := make([]byte, 32)
wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
wc[31] = byte(i)
vals[i] = &eth.Validator{
ActivationEpoch: 0,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: balPerVal,
WithdrawalCredentials: wc,
}
bals[i] = balPerVal
}
st, err := state_native.InitializeFromProtoUnsafeElectra(&eth.BeaconStateElectra{
Slot: 10 * params.BeaconConfig().SlotsPerEpoch,
Validators: vals,
Balances: bals,
Fork: &eth.Fork{
CurrentVersion: params.BeaconConfig().ElectraForkVersion,
},
})
require.NoError(t, err)
return st
}
func TestProcessConsolidations(t *testing.T) {
secretKeys, publicKeys, err := interop.DeterministicallyGenerateKeys(0, 2)
require.NoError(t, err)
genesisValidatorRoot := bytesutil.PadTo([]byte("genesisValidatorRoot"), fieldparams.RootLength)
_ = secretKeys
tests := []struct {
name string
state state.BeaconState
scs []*eth.SignedConsolidation
check func(*testing.T, state.BeaconState)
wantErr string
}{
{
name: "nil state",
scs: make([]*eth.SignedConsolidation, 10),
wantErr: "nil state",
},
{
name: "nil consolidation in slice",
state: stateWithActiveBalanceETH(t, 19_000_000),
scs: []*eth.SignedConsolidation{nil, nil},
wantErr: "nil consolidation",
},
{
name: "state is 100% full of pending consolidations",
state: func() state.BeaconState {
st := stateWithActiveBalanceETH(t, 19_000_000)
pc := make([]*eth.PendingConsolidation, params.BeaconConfig().PendingConsolidationsLimit)
require.NoError(t, st.SetPendingConsolidations(pc))
return st
}(),
scs: []*eth.SignedConsolidation{{Message: &eth.Consolidation{}}},
wantErr: "pending consolidations queue is full",
},
{
name: "state has too little consolidation churn limit available to process a consolidation",
state: func() state.BeaconState {
st, _ := util.DeterministicGenesisStateElectra(t, 1)
return st
}(),
scs: []*eth.SignedConsolidation{{Message: &eth.Consolidation{}}},
wantErr: "too little available consolidation churn limit",
},
{
name: "consolidation with source and target as the same index is rejected",
state: stateWithActiveBalanceETH(t, 19_000_000),
scs: []*eth.SignedConsolidation{{Message: &eth.Consolidation{SourceIndex: 100, TargetIndex: 100}}},
wantErr: "source and target index are the same",
},
{
name: "consolidation with inactive source is rejected",
state: func() state.BeaconState {
st := stateWithActiveBalanceETH(t, 19_000_000)
val, err := st.ValidatorAtIndex(25)
require.NoError(t, err)
val.ActivationEpoch = params.BeaconConfig().FarFutureEpoch
require.NoError(t, st.UpdateValidatorAtIndex(25, val))
return st
}(),
scs: []*eth.SignedConsolidation{{Message: &eth.Consolidation{SourceIndex: 25, TargetIndex: 100}}},
wantErr: "source is not active",
},
{
name: "consolidation with inactive target is rejected",
state: func() state.BeaconState {
st := stateWithActiveBalanceETH(t, 19_000_000)
val, err := st.ValidatorAtIndex(25)
require.NoError(t, err)
val.ActivationEpoch = params.BeaconConfig().FarFutureEpoch
require.NoError(t, st.UpdateValidatorAtIndex(25, val))
return st
}(),
scs: []*eth.SignedConsolidation{{Message: &eth.Consolidation{SourceIndex: 100, TargetIndex: 25}}},
wantErr: "target is not active",
},
{
name: "consolidation with exiting source is rejected",
state: func() state.BeaconState {
st := stateWithActiveBalanceETH(t, 19_000_000)
val, err := st.ValidatorAtIndex(25)
require.NoError(t, err)
val.ExitEpoch = 256
require.NoError(t, st.UpdateValidatorAtIndex(25, val))
return st
}(),
scs: []*eth.SignedConsolidation{{Message: &eth.Consolidation{SourceIndex: 25, TargetIndex: 100}}},
wantErr: "source exit epoch has been initiated",
},
{
name: "consolidation with exiting target is rejected",
state: func() state.BeaconState {
st := stateWithActiveBalanceETH(t, 19_000_000)
val, err := st.ValidatorAtIndex(25)
require.NoError(t, err)
val.ExitEpoch = 256
require.NoError(t, st.UpdateValidatorAtIndex(25, val))
return st
}(),
scs: []*eth.SignedConsolidation{{Message: &eth.Consolidation{SourceIndex: 100, TargetIndex: 25}}},
wantErr: "target exit epoch has been initiated",
},
{
name: "consolidation with future epoch is rejected",
state: stateWithActiveBalanceETH(t, 19_000_000),
scs: []*eth.SignedConsolidation{{Message: &eth.Consolidation{SourceIndex: 100, TargetIndex: 25, Epoch: 55}}},
wantErr: "consolidation is not valid yet",
},
{
name: "source validator without withdrawal credentials is rejected",
state: func() state.BeaconState {
st := stateWithActiveBalanceETH(t, 19_000_000)
val, err := st.ValidatorAtIndex(25)
require.NoError(t, err)
val.WithdrawalCredentials = []byte{}
require.NoError(t, st.UpdateValidatorAtIndex(25, val))
return st
}(),
scs: []*eth.SignedConsolidation{{Message: &eth.Consolidation{SourceIndex: 25, TargetIndex: 100}}},
wantErr: "source does not have execution withdrawal credentials",
},
{
name: "target validator without withdrawal credentials is rejected",
state: func() state.BeaconState {
st := stateWithActiveBalanceETH(t, 19_000_000)
val, err := st.ValidatorAtIndex(25)
require.NoError(t, err)
val.WithdrawalCredentials = []byte{}
require.NoError(t, st.UpdateValidatorAtIndex(25, val))
return st
}(),
scs: []*eth.SignedConsolidation{{Message: &eth.Consolidation{SourceIndex: 100, TargetIndex: 25}}},
wantErr: "target does not have execution withdrawal credentials",
},
{
name: "source and target with different withdrawal credentials is rejected",
state: stateWithActiveBalanceETH(t, 19_000_000),
scs: []*eth.SignedConsolidation{{Message: &eth.Consolidation{SourceIndex: 100, TargetIndex: 25}}},
wantErr: "source and target have different withdrawal credentials",
},
{
name: "consolidation with valid signatures is OK",
state: func() state.BeaconState {
st := stateWithActiveBalanceETH(t, 19_000_000)
require.NoError(t, st.SetGenesisValidatorsRoot(genesisValidatorRoot))
source, err := st.ValidatorAtIndex(100)
require.NoError(t, err)
target, err := st.ValidatorAtIndex(25)
require.NoError(t, err)
source.PublicKey = publicKeys[0].Marshal()
source.WithdrawalCredentials = target.WithdrawalCredentials
require.NoError(t, st.UpdateValidatorAtIndex(100, source))
target.PublicKey = publicKeys[1].Marshal()
require.NoError(t, st.UpdateValidatorAtIndex(25, target))
return st
}(),
scs: func() []*eth.SignedConsolidation {
sc := &eth.SignedConsolidation{Message: &eth.Consolidation{SourceIndex: 100, TargetIndex: 25, Epoch: 8}}
domain, err := signing.ComputeDomain(
params.BeaconConfig().DomainConsolidation,
nil,
genesisValidatorRoot,
)
require.NoError(t, err)
sr, err := signing.ComputeSigningRoot(sc.Message, domain)
require.NoError(t, err)
sig0 := secretKeys[0].Sign(sr[:])
sig1 := secretKeys[1].Sign(sr[:])
sc.Signature = blst.AggregateSignatures([]common.Signature{sig0, sig1}).Marshal()
return []*eth.SignedConsolidation{sc}
}(),
check: func(t *testing.T, st state.BeaconState) {
source, err := st.ValidatorAtIndex(100)
require.NoError(t, err)
// The consolidated validator is exiting.
require.Equal(t, primitives.Epoch(15), source.ExitEpoch) // 15 = state.Epoch(10) + MAX_SEED_LOOKAHEAD(4) + 1
require.Equal(t, primitives.Epoch(15+params.BeaconConfig().MinValidatorWithdrawabilityDelay), source.WithdrawableEpoch)
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := electra.ProcessConsolidations(context.TODO(), tt.state, tt.scs)
if len(tt.wantErr) > 0 {
require.ErrorContains(t, tt.wantErr, err)
} else {
require.NoError(t, err)
}
if tt.check != nil {
tt.check(t, tt.state)
}
})
}
}

View File

@@ -0,0 +1,87 @@
package electra
import (
"context"
"errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
"go.opencensus.io/trace"
)
// ProcessPendingBalanceDeposits implements the spec definition below. This method mutates the state.
//
// Spec definition:
//
// def process_pending_balance_deposits(state: BeaconState) -> None:
// available_for_processing = state.deposit_balance_to_consume + get_activation_exit_churn_limit(state)
// processed_amount = 0
// next_deposit_index = 0
//
// for deposit in state.pending_balance_deposits:
// if processed_amount + deposit.amount > available_for_processing:
// break
// increase_balance(state, deposit.index, deposit.amount)
// processed_amount += deposit.amount
// next_deposit_index += 1
//
// state.pending_balance_deposits = state.pending_balance_deposits[next_deposit_index:]
//
// if len(state.pending_balance_deposits) == 0:
// state.deposit_balance_to_consume = Gwei(0)
// else:
// state.deposit_balance_to_consume = available_for_processing - processed_amount
func ProcessPendingBalanceDeposits(ctx context.Context, st state.BeaconState, activeBalance primitives.Gwei) error {
_, span := trace.StartSpan(ctx, "electra.ProcessPendingBalanceDeposits")
defer span.End()
if st == nil || st.IsNil() {
return errors.New("nil state")
}
depBalToConsume, err := st.DepositBalanceToConsume()
if err != nil {
return err
}
availableForProcessing := depBalToConsume + helpers.ActivationExitChurnLimit(activeBalance)
nextDepositIndex := 0
deposits, err := st.PendingBalanceDeposits()
if err != nil {
return err
}
for _, deposit := range deposits {
if primitives.Gwei(deposit.Amount) > availableForProcessing {
break
}
if err := helpers.IncreaseBalance(st, deposit.Index, deposit.Amount); err != nil {
return err
}
availableForProcessing -= primitives.Gwei(deposit.Amount)
nextDepositIndex++
}
deposits = deposits[nextDepositIndex:]
if err := st.SetPendingBalanceDeposits(deposits); err != nil {
return err
}
if len(deposits) == 0 {
return st.SetDepositBalanceToConsume(0)
} else {
return st.SetDepositBalanceToConsume(availableForProcessing)
}
}
// ProcessDepositReceipts processes execution layer deposit receipts as part of Electra.
func ProcessDepositReceipts(ctx context.Context, beaconState state.BeaconState, receipts []*enginev1.DepositReceipt) (state.BeaconState, error) {
_, span := trace.StartSpan(ctx, "electra.ProcessDepositReceipts")
defer span.End()
// TODO: replace with 6110 logic
// return b.ProcessDepositReceipts(beaconState, receipts)
return beaconState, nil
}

View File
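A minimal sketch of the deposit-churn loop in ProcessPendingBalanceDeposits, with hypothetical types and plain gwei amounts: deposits are applied in order until one no longer fits in the remaining churn, and leftover churn is only carried over while deposits remain queued.

package main

import "fmt"

type pendingDeposit struct {
	index  int
	amount uint64
}

// applyDeposits returns the unprocessed deposits and the deposit balance carried
// over to the next epoch (zero when the queue has been fully drained).
func applyDeposits(deps []pendingDeposit, balances []uint64, available uint64) ([]pendingDeposit, uint64) {
	next := 0
	for _, d := range deps {
		if d.amount > available {
			break // this deposit (and everything after it) waits for more churn
		}
		balances[d.index] += d.amount
		available -= d.amount
		next++
	}
	remaining := deps[next:]
	if len(remaining) == 0 {
		return remaining, 0
	}
	return remaining, available
}

func main() {
	balances := []uint64{0, 0, 0}
	deps := []pendingDeposit{{0, 10}, {1, 10}, {2, 10}}
	rest, carry := applyDeposits(deps, balances, 25) // only the first two deposits fit
	fmt.Println(balances, len(rest), carry)          // [10 10 0] 1 5
}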

@@ -0,0 +1,128 @@
package electra_test
import (
"context"
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func TestProcessPendingBalanceDeposits(t *testing.T) {
tests := []struct {
name string
state state.BeaconState
wantErr bool
check func(*testing.T, state.BeaconState)
}{
{
name: "nil state fails",
state: nil,
wantErr: true,
},
{
name: "no deposits resets balance to consume",
state: func() state.BeaconState {
st := stateWithActiveBalanceETH(t, 1_000)
require.NoError(t, st.SetDepositBalanceToConsume(100))
return st
}(),
check: func(t *testing.T, st state.BeaconState) {
res, err := st.DepositBalanceToConsume()
require.NoError(t, err)
require.Equal(t, primitives.Gwei(0), res)
},
},
{
name: "more deposits than balance to consume processes partial deposits",
state: func() state.BeaconState {
st := stateWithActiveBalanceETH(t, 1_000)
require.NoError(t, st.SetDepositBalanceToConsume(100))
amountAvailForProcessing := helpers.ActivationExitChurnLimit(1_000 * 1e9)
deps := make([]*eth.PendingBalanceDeposit, 20)
for i := 0; i < len(deps); i += 1 {
deps[i] = &eth.PendingBalanceDeposit{
Amount: uint64(amountAvailForProcessing) / 10,
Index: primitives.ValidatorIndex(i),
}
}
require.NoError(t, st.SetPendingBalanceDeposits(deps))
return st
}(),
check: func(t *testing.T, st state.BeaconState) {
amountAvailForProcessing := helpers.ActivationExitChurnLimit(1_000 * 1e9)
res, err := st.DepositBalanceToConsume()
require.NoError(t, err)
require.Equal(t, primitives.Gwei(100), res)
// Validators 0..9 should have their balance increased
for i := primitives.ValidatorIndex(0); i < 10; i++ {
b, err := st.BalanceAtIndex(i)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance+uint64(amountAvailForProcessing)/10, b)
}
// Half of the balance deposits should have been processed.
remaining, err := st.PendingBalanceDeposits()
require.NoError(t, err)
require.Equal(t, 10, len(remaining))
},
},
{
name: "less deposits than balance to consume processes all deposits",
state: func() state.BeaconState {
st := stateWithActiveBalanceETH(t, 1_000)
require.NoError(t, st.SetDepositBalanceToConsume(0))
amountAvailForProcessing := helpers.ActivationExitChurnLimit(1_000 * 1e9)
deps := make([]*eth.PendingBalanceDeposit, 5)
for i := 0; i < len(deps); i += 1 {
deps[i] = &eth.PendingBalanceDeposit{
Amount: uint64(amountAvailForProcessing) / 5,
Index: primitives.ValidatorIndex(i),
}
}
require.NoError(t, st.SetPendingBalanceDeposits(deps))
return st
}(),
check: func(t *testing.T, st state.BeaconState) {
amountAvailForProcessing := helpers.ActivationExitChurnLimit(1_000 * 1e9)
res, err := st.DepositBalanceToConsume()
require.NoError(t, err)
require.Equal(t, primitives.Gwei(0), res)
// Validators 0..4 should have their balance increased
for i := primitives.ValidatorIndex(0); i < 5; i++ {
b, err := st.BalanceAtIndex(i)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance+uint64(amountAvailForProcessing)/5, b)
}
// All of the balance deposits should have been processed.
remaining, err := st.PendingBalanceDeposits()
require.NoError(t, err)
require.Equal(t, 0, len(remaining))
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var tab uint64
var err error
if tt.state != nil {
// The caller of this method would normally have the precomputed balance values for the total
// active balance for this epoch. For ease of test setup, we will compute total active
// balance from the given state.
tab, err = helpers.TotalActiveBalance(tt.state)
}
require.NoError(t, err)
err = electra.ProcessPendingBalanceDeposits(context.TODO(), tt.state, primitives.Gwei(tab))
require.Equal(t, tt.wantErr, err != nil, "wantErr=%v, got err=%s", tt.wantErr, err)
if tt.check != nil {
tt.check(t, tt.state)
}
})
}
}

View File

@@ -0,0 +1,65 @@
package electra
import (
"fmt"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/params"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
// ProcessEffectiveBalanceUpdates processes effective balance updates during epoch processing.
//
// Spec pseudocode definition:
//
// def process_effective_balance_updates(state: BeaconState) -> None:
// # Update effective balances with hysteresis
// for index, validator in enumerate(state.validators):
// balance = state.balances[index]
// HYSTERESIS_INCREMENT = uint64(EFFECTIVE_BALANCE_INCREMENT // HYSTERESIS_QUOTIENT)
// DOWNWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_DOWNWARD_MULTIPLIER
// UPWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_UPWARD_MULTIPLIER
// EFFECTIVE_BALANCE_LIMIT = (
// MAX_EFFECTIVE_BALANCE_EIP7251 if has_compounding_withdrawal_credential(validator)
// else MIN_ACTIVATION_BALANCE
// )
//
// if (
// balance + DOWNWARD_THRESHOLD < validator.effective_balance
// or validator.effective_balance + UPWARD_THRESHOLD < balance
// ):
// validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, EFFECTIVE_BALANCE_LIMIT)
func ProcessEffectiveBalanceUpdates(state state.BeaconState) error {
effBalanceInc := params.BeaconConfig().EffectiveBalanceIncrement
hysteresisInc := effBalanceInc / params.BeaconConfig().HysteresisQuotient
downwardThreshold := hysteresisInc * params.BeaconConfig().HysteresisDownwardMultiplier
upwardThreshold := hysteresisInc * params.BeaconConfig().HysteresisUpwardMultiplier
bals := state.Balances()
// Update effective balances with hysteresis.
validatorFunc := func(idx int, val *ethpb.Validator) (bool, *ethpb.Validator, error) {
if val == nil {
return false, nil, fmt.Errorf("validator %d is nil in state", idx)
}
if idx >= len(bals) {
return false, nil, fmt.Errorf("validator index exceeds validator length in state %d >= %d", idx, len(state.Balances()))
}
balance := bals[idx]
effectiveBalanceLimit := params.BeaconConfig().MinActivationBalance
if helpers.HasCompoundingWithdrawalCredential(val) {
effectiveBalanceLimit = params.BeaconConfig().MaxEffectiveBalanceElectra
}
if balance+downwardThreshold < val.EffectiveBalance || val.EffectiveBalance+upwardThreshold < balance {
effectiveBal := min(balance-balance%effBalanceInc, effectiveBalanceLimit)
val.EffectiveBalance = effectiveBal
return true, val, nil
}
return false, val, nil
}
return state.ApplyToEveryValidator(validatorFunc)
}

View File
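A self-contained sketch of the hysteresis rule above using the mainnet constants (EFFECTIVE_BALANCE_INCREMENT = 1 ETH, HYSTERESIS_QUOTIENT = 4, downward/upward multipliers 1 and 5): the effective balance only moves once the actual balance leaves the [EB - 0.25 ETH, EB + 1.25 ETH] band, and is then snapped down to a whole increment and capped. The helper below is illustrative, not Prysm's API.

package main

import "fmt"

const (
	gwei               = uint64(1_000_000_000)
	effBalanceInc      = 1 * gwei // EFFECTIVE_BALANCE_INCREMENT (1 ETH)
	hysteresisQuotient = 4
	downwardMult       = 1
	upwardMult         = 5
)

// newEffectiveBalance applies the hysteresis update for a single validator.
func newEffectiveBalance(balance, effective, limit uint64) uint64 {
	hysteresisInc := effBalanceInc / hysteresisQuotient // 0.25 ETH
	down := hysteresisInc * downwardMult                // 0.25 ETH
	up := hysteresisInc * upwardMult                    // 1.25 ETH
	if balance+down < effective || effective+up < balance {
		return min(balance-balance%effBalanceInc, limit)
	}
	return effective
}

func main() {
	limit := 32 * gwei // MIN_ACTIVATION_BALANCE for a non-compounding validator
	fmt.Println(newEffectiveBalance(31*gwei+800_000_000, 32*gwei, limit) / gwei) // 32: still inside the band
	fmt.Println(newEffectiveBalance(31*gwei+700_000_000, 32*gwei, limit) / gwei) // 31: dropped below 31.75 ETH
}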

@@ -0,0 +1,144 @@
package electra_test
import (
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/config/params"
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func TestProcessEffectiveBalanceUpdates(t *testing.T) {
effBalanceInc := params.BeaconConfig().EffectiveBalanceIncrement
hysteresisInc := effBalanceInc / params.BeaconConfig().HysteresisQuotient
downwardThreshold := hysteresisInc * params.BeaconConfig().HysteresisDownwardMultiplier
upwardThreshold := hysteresisInc * params.BeaconConfig().HysteresisUpwardMultiplier
tests := []struct {
name string
state state.BeaconState
wantErr bool
check func(*testing.T, state.BeaconState)
}{
{
name: "validator with compounding withdrawal credentials updates effective balance",
state: func() state.BeaconState {
pb := &eth.BeaconStateElectra{
Validators: []*eth.Validator{
{
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
WithdrawalCredentials: []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte, 0x11},
},
},
Balances: []uint64{
params.BeaconConfig().MaxEffectiveBalanceElectra * 2,
},
}
st, err := state_native.InitializeFromProtoElectra(pb)
require.NoError(t, err)
return st
}(),
check: func(t *testing.T, bs state.BeaconState) {
val, err := bs.ValidatorAtIndex(0)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MaxEffectiveBalanceElectra, val.EffectiveBalance)
},
},
{
name: "validator without compounding withdrawal credentials updates effective balance",
state: func() state.BeaconState {
pb := &eth.BeaconStateElectra{
Validators: []*eth.Validator{
{
EffectiveBalance: params.BeaconConfig().MinActivationBalance / 2,
WithdrawalCredentials: nil,
},
},
Balances: []uint64{
params.BeaconConfig().MaxEffectiveBalanceElectra,
},
}
st, err := state_native.InitializeFromProtoElectra(pb)
require.NoError(t, err)
return st
}(),
check: func(t *testing.T, bs state.BeaconState) {
val, err := bs.ValidatorAtIndex(0)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance, val.EffectiveBalance)
},
},
{
name: "validator effective balance moves only when outside of threshold",
state: func() state.BeaconState {
pb := &eth.BeaconStateElectra{
Validators: []*eth.Validator{
{
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
WithdrawalCredentials: []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte, 0x11},
},
{
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
WithdrawalCredentials: []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte, 0x11},
},
{
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
WithdrawalCredentials: []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte, 0x11},
},
{
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
WithdrawalCredentials: []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte, 0x11},
},
},
Balances: []uint64{
params.BeaconConfig().MinActivationBalance - downwardThreshold - 1, // beyond downward threshold
params.BeaconConfig().MinActivationBalance - downwardThreshold + 1, // within downward threshold
params.BeaconConfig().MinActivationBalance + upwardThreshold + 1, // beyond upward threshold
params.BeaconConfig().MinActivationBalance + upwardThreshold - 1, // within upward threshold
},
}
st, err := state_native.InitializeFromProtoElectra(pb)
require.NoError(t, err)
return st
}(),
check: func(t *testing.T, bs state.BeaconState) {
// validator 0 has a balance diff exceeding the threshold so a diff should be applied to
// effective balance and it moves by effective balance increment.
val, err := bs.ValidatorAtIndex(0)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance-params.BeaconConfig().EffectiveBalanceIncrement, val.EffectiveBalance)
// validator 1 has a balance diff within the threshold so the effective balance should not
// have changed.
val, err = bs.ValidatorAtIndex(1)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance, val.EffectiveBalance)
// Validator 2 has a balance diff exceeding the threshold so a diff should be applied to the
// effective balance and it moves by effective balance increment.
val, err = bs.ValidatorAtIndex(2)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance+params.BeaconConfig().EffectiveBalanceIncrement, val.EffectiveBalance)
// Validator 3 has a balance diff within the threshold so the effective balance should not
// have changed.
val, err = bs.ValidatorAtIndex(3)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance, val.EffectiveBalance)
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := electra.ProcessEffectiveBalanceUpdates(tt.state)
require.Equal(t, tt.wantErr, err != nil, "wantErr=%v, got err=%v", tt.wantErr, err)
if tt.check != nil {
tt.check(t, tt.state)
}
})
}
}

View File

@@ -0,0 +1,34 @@
package electra
import (
"context"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
)
// ProcessRegistryUpdates rotates validators in and out of the active pool.
// The number of validators rotated is determined by the churn limit.
//
// Spec pseudocode definition:
//
// def process_registry_updates(state: BeaconState) -> None:
// # Process activation eligibility and ejections
// for index, validator in enumerate(state.validators):
// if is_eligible_for_activation_queue(validator):
// validator.activation_eligibility_epoch = get_current_epoch(state) + 1
//
// if (
// is_active_validator(validator, get_current_epoch(state))
// and validator.effective_balance <= EJECTION_BALANCE
// ):
// initiate_validator_exit(state, ValidatorIndex(index))
//
// # Activate all eligible validators
// activation_epoch = compute_activation_exit_epoch(get_current_epoch(state))
// for validator in state.validators:
// if is_eligible_for_activation(state, validator):
// validator.activation_epoch = activation_epoch
func ProcessRegistryUpdates(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
// TODO: replace with real implementation
return state, nil
}

View File
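Since the body above is still a TODO, the following is a hedged, self-contained sketch of the spec pseudocode it documents, with hypothetical types. It omits exit-churn accounting and keeps only a simplified eligibility/finality check, so it illustrates the loop structure rather than the eventual Prysm implementation.

package main

import "fmt"

const (
	farFutureEpoch   = ^uint64(0)
	ejectionBalance  = uint64(16_000_000_000) // EJECTION_BALANCE (16 ETH)
	minActivationBal = uint64(32_000_000_000) // MIN_ACTIVATION_BALANCE (32 ETH)
	maxSeedLookahead = 4
)

type validator struct {
	activationEligibilityEpoch uint64
	activationEpoch            uint64
	exitEpoch                  uint64
	effectiveBalance           uint64
}

// processRegistryUpdates marks newly eligible validators, ejects active validators whose
// effective balance fell to EJECTION_BALANCE, then activates validators whose eligibility
// epoch has been finalized. A real implementation would also respect the exit churn.
func processRegistryUpdates(vals []validator, currentEpoch, finalizedEpoch uint64) {
	activationEpoch := currentEpoch + 1 + maxSeedLookahead // compute_activation_exit_epoch
	for i := range vals {
		v := &vals[i]
		if v.activationEligibilityEpoch == farFutureEpoch && v.effectiveBalance >= minActivationBal {
			v.activationEligibilityEpoch = currentEpoch + 1
		}
		isActive := v.activationEpoch <= currentEpoch && currentEpoch < v.exitEpoch
		if isActive && v.effectiveBalance <= ejectionBalance {
			v.exitEpoch = activationEpoch // simplified ejection
		}
		if v.activationEpoch == farFutureEpoch && v.activationEligibilityEpoch <= finalizedEpoch {
			v.activationEpoch = activationEpoch
		}
	}
}

func main() {
	vals := []validator{{
		activationEligibilityEpoch: 5,
		activationEpoch:            farFutureEpoch,
		exitEpoch:                  farFutureEpoch,
		effectiveBalance:           minActivationBal,
	}}
	processRegistryUpdates(vals, 10, 9)
	fmt.Println(vals[0].activationEpoch) // 15 = 10 + 1 + MAX_SEED_LOOKAHEAD
}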

@@ -0,0 +1,125 @@
package electra
import (
"context"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
e "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/epoch"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"go.opencensus.io/trace"
)
// Re-exports for methods that haven't changed in Electra.
var (
InitializePrecomputeValidators = altair.InitializePrecomputeValidators
ProcessEpochParticipation = altair.ProcessEpochParticipation
ProcessInactivityScores = altair.ProcessInactivityScores
ProcessRewardsAndPenaltiesPrecompute = altair.ProcessRewardsAndPenaltiesPrecompute
ProcessSlashings = e.ProcessSlashings
ProcessEth1DataReset = e.ProcessEth1DataReset
ProcessSlashingsReset = e.ProcessSlashingsReset
ProcessRandaoMixesReset = e.ProcessRandaoMixesReset
ProcessHistoricalDataUpdate = e.ProcessHistoricalDataUpdate
ProcessParticipationFlagUpdates = altair.ProcessParticipationFlagUpdates
ProcessSyncCommitteeUpdates = altair.ProcessSyncCommitteeUpdates
AttestationsDelta = altair.AttestationsDelta
ProcessSyncAggregate = altair.ProcessSyncAggregate
)
// ProcessEpoch describes the per epoch operations that are performed on the beacon state.
// It's optimized by precomputing validator attested info and epoch total/attested balances upfront.
//
// Spec definition:
//
// def process_epoch(state: BeaconState) -> None:
// process_justification_and_finalization(state)
// process_inactivity_updates(state)
// process_rewards_and_penalties(state)
// process_registry_updates(state)
// process_slashings(state)
// process_eth1_data_reset(state)
// process_pending_balance_deposits(state) # New in EIP7251
// process_pending_consolidations(state) # New in EIP7251
// process_effective_balance_updates(state)
// process_slashings_reset(state)
// process_randao_mixes_reset(state)
func ProcessEpoch(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
_, span := trace.StartSpan(ctx, "electra.ProcessEpoch")
defer span.End()
if state == nil || state.IsNil() {
return nil, errors.New("nil state")
}
vp, bp, err := InitializePrecomputeValidators(ctx, state)
if err != nil {
return nil, err
}
vp, bp, err = ProcessEpochParticipation(ctx, state, bp, vp)
if err != nil {
return nil, err
}
state, err = precompute.ProcessJustificationAndFinalizationPreCompute(state, bp)
if err != nil {
return nil, errors.Wrap(err, "could not process justification")
}
state, vp, err = ProcessInactivityScores(ctx, state, vp)
if err != nil {
return nil, errors.Wrap(err, "could not process inactivity updates")
}
state, err = ProcessRewardsAndPenaltiesPrecompute(state, bp, vp)
if err != nil {
return nil, errors.Wrap(err, "could not process rewards and penalties")
}
state, err = ProcessRegistryUpdates(ctx, state)
if err != nil {
return nil, errors.Wrap(err, "could not process registry updates")
}
proportionalSlashingMultiplier, err := state.ProportionalSlashingMultiplier()
if err != nil {
return nil, err
}
state, err = ProcessSlashings(state, proportionalSlashingMultiplier)
if err != nil {
return nil, err
}
state, err = ProcessEth1DataReset(state)
if err != nil {
return nil, err
}
if err = ProcessPendingBalanceDeposits(ctx, state, primitives.Gwei(bp.ActiveCurrentEpoch)); err != nil {
return nil, err
}
if err := ProcessPendingConsolidations(ctx, state); err != nil {
return nil, err
}
if err := ProcessEffectiveBalanceUpdates(state); err != nil {
return nil, err
}
state, err = ProcessSlashingsReset(state)
if err != nil {
return nil, err
}
state, err = ProcessRandaoMixesReset(state)
if err != nil {
return nil, err
}
state, err = ProcessHistoricalDataUpdate(state)
if err != nil {
return nil, err
}
state, err = ProcessParticipationFlagUpdates(state)
if err != nil {
return nil, err
}
state, err = ProcessSyncCommitteeUpdates(ctx, state)
if err != nil {
return nil, err
}
return state, nil
}

View File

@@ -0,0 +1,311 @@
package electra
import (
"sort"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
// UpgradeToElectra upgrades the given (Deneb) beacon state and returns the resulting Electra beacon state.
// def upgrade_to_electra(pre: deneb.BeaconState) -> BeaconState:
//
// epoch = deneb.get_current_epoch(pre)
// latest_execution_payload_header = ExecutionPayloadHeader(
// parent_hash=pre.latest_execution_payload_header.parent_hash,
// fee_recipient=pre.latest_execution_payload_header.fee_recipient,
// state_root=pre.latest_execution_payload_header.state_root,
// receipts_root=pre.latest_execution_payload_header.receipts_root,
// logs_bloom=pre.latest_execution_payload_header.logs_bloom,
// prev_randao=pre.latest_execution_payload_header.prev_randao,
// block_number=pre.latest_execution_payload_header.block_number,
// gas_limit=pre.latest_execution_payload_header.gas_limit,
// gas_used=pre.latest_execution_payload_header.gas_used,
// timestamp=pre.latest_execution_payload_header.timestamp,
// extra_data=pre.latest_execution_payload_header.extra_data,
// base_fee_per_gas=pre.latest_execution_payload_header.base_fee_per_gas,
// block_hash=pre.latest_execution_payload_header.block_hash,
// transactions_root=pre.latest_execution_payload_header.transactions_root,
// withdrawals_root=pre.latest_execution_payload_header.withdrawals_root,
// blob_gas_used=pre.latest_execution_payload_header.blob_gas_used,
// excess_blob_gas=pre.latest_execution_payload_header.excess_blob_gas,
// deposit_receipts_root=Root(), # [New in Electra:EIP6110]
// withdrawal_requests_root=Root(), # [New in Electra:EIP7002],
// )
//
// exit_epochs = [v.exit_epoch for v in pre.validators if v.exit_epoch != FAR_FUTURE_EPOCH]
// if not exit_epochs:
// exit_epochs = [get_current_epoch(pre)]
// earliest_exit_epoch = max(exit_epochs) + 1
//
// post = BeaconState(
// # Versioning
// genesis_time=pre.genesis_time,
// genesis_validators_root=pre.genesis_validators_root,
// slot=pre.slot,
// fork=Fork(
// previous_version=pre.fork.current_version,
// current_version=ELECTRA_FORK_VERSION, # [Modified in Electra:EIP6110]
// epoch=epoch,
// ),
// # History
// latest_block_header=pre.latest_block_header,
// block_roots=pre.block_roots,
// state_roots=pre.state_roots,
// historical_roots=pre.historical_roots,
// # Eth1
// eth1_data=pre.eth1_data,
// eth1_data_votes=pre.eth1_data_votes,
// eth1_deposit_index=pre.eth1_deposit_index,
// # Registry
// validators=pre.validators,
// balances=pre.balances,
// # Randomness
// randao_mixes=pre.randao_mixes,
// # Slashings
// slashings=pre.slashings,
// # Participation
// previous_epoch_participation=pre.previous_epoch_participation,
// current_epoch_participation=pre.current_epoch_participation,
// # Finality
// justification_bits=pre.justification_bits,
// previous_justified_checkpoint=pre.previous_justified_checkpoint,
// current_justified_checkpoint=pre.current_justified_checkpoint,
// finalized_checkpoint=pre.finalized_checkpoint,
// # Inactivity
// inactivity_scores=pre.inactivity_scores,
// # Sync
// current_sync_committee=pre.current_sync_committee,
// next_sync_committee=pre.next_sync_committee,
// # Execution-layer
// latest_execution_payload_header=latest_execution_payload_header, # [Modified in Electra:EIP6110:EIP7002]
// # Withdrawals
// next_withdrawal_index=pre.next_withdrawal_index,
// next_withdrawal_validator_index=pre.next_withdrawal_validator_index,
// # Deep history valid from Capella onwards
// historical_summaries=pre.historical_summaries,
// # [New in Electra:EIP6110]
// deposit_receipts_start_index=UNSET_DEPOSIT_RECEIPTS_START_INDEX,
// # [New in Electra:EIP7251]
// deposit_balance_to_consume=0,
// exit_balance_to_consume=0,
// earliest_exit_epoch=earliest_exit_epoch,
// consolidation_balance_to_consume=0,
// earliest_consolidation_epoch=compute_activation_exit_epoch(get_current_epoch(pre)),
// pending_balance_deposits=[],
// pending_partial_withdrawals=[],
// pending_consolidations=[],
// )
//
// post.exit_balance_to_consume = get_activation_exit_churn_limit(post)
// post.consolidation_balance_to_consume = get_consolidation_churn_limit(post)
//
// # [New in Electra:EIP7251]
// # add validators that are not yet active to pending balance deposits
// pre_activation = sorted([
// index for index, validator in enumerate(post.validators)
// if validator.activation_epoch == FAR_FUTURE_EPOCH
// ], key=lambda index: (
// post.validators[index].activation_eligibility_epoch,
// index
// ))
//
// for index in pre_activation:
// queue_entire_balance_and_reset_validator(post, ValidatorIndex(index))
//
// # Ensure early adopters of compounding credentials go through the activation churn
// for index, validator in enumerate(post.validators):
// if has_compounding_withdrawal_credential(validator):
// queue_excess_active_balance(post, ValidatorIndex(index))
//
// return post
func UpgradeToElectra(beaconState state.BeaconState) (state.BeaconState, error) {
currentSyncCommittee, err := beaconState.CurrentSyncCommittee()
if err != nil {
return nil, err
}
nextSyncCommittee, err := beaconState.NextSyncCommittee()
if err != nil {
return nil, err
}
prevEpochParticipation, err := beaconState.PreviousEpochParticipation()
if err != nil {
return nil, err
}
currentEpochParticipation, err := beaconState.CurrentEpochParticipation()
if err != nil {
return nil, err
}
inactivityScores, err := beaconState.InactivityScores()
if err != nil {
return nil, err
}
payloadHeader, err := beaconState.LatestExecutionPayloadHeader()
if err != nil {
return nil, err
}
txRoot, err := payloadHeader.TransactionsRoot()
if err != nil {
return nil, err
}
wdRoot, err := payloadHeader.WithdrawalsRoot()
if err != nil {
return nil, err
}
wi, err := beaconState.NextWithdrawalIndex()
if err != nil {
return nil, err
}
vi, err := beaconState.NextWithdrawalValidatorIndex()
if err != nil {
return nil, err
}
summaries, err := beaconState.HistoricalSummaries()
if err != nil {
return nil, err
}
historicalRoots, err := beaconState.HistoricalRoots()
if err != nil {
return nil, err
}
excessBlobGas, err := payloadHeader.ExcessBlobGas()
if err != nil {
return nil, err
}
blobGasUsed, err := payloadHeader.BlobGasUsed()
if err != nil {
return nil, err
}
// [New in Electra:EIP7251]
earliestExitEpoch := time.CurrentEpoch(beaconState)
preActivationIndices := make([]primitives.ValidatorIndex, 0)
compoundWithdrawalIndices := make([]primitives.ValidatorIndex, 0)
if err = beaconState.ReadFromEveryValidator(func(index int, val state.ReadOnlyValidator) error {
if val.ExitEpoch() != params.BeaconConfig().FarFutureEpoch && val.ExitEpoch() > earliestExitEpoch {
earliestExitEpoch = val.ExitEpoch()
}
if val.ActivationEpoch() == params.BeaconConfig().FarFutureEpoch {
preActivationIndices = append(preActivationIndices, primitives.ValidatorIndex(index))
}
if helpers.HasCompoundingWithdrawalCredential(val) {
compoundWithdrawalIndices = append(compoundWithdrawalIndices, primitives.ValidatorIndex(index))
}
return nil
}); err != nil {
return nil, err
}
earliestExitEpoch++ // Increment to find the earliest possible exit epoch.
// Note: the total active balance is the same in the pre-state and the post beaconState.
// We deviate slightly from the spec here, which computes it on the post beaconState.
tab, err := helpers.TotalActiveBalance(beaconState)
if err != nil {
return nil, errors.Wrap(err, "failed to get total active balance")
}
s := &ethpb.BeaconStateElectra{
GenesisTime: beaconState.GenesisTime(),
GenesisValidatorsRoot: beaconState.GenesisValidatorsRoot(),
Slot: beaconState.Slot(),
Fork: &ethpb.Fork{
PreviousVersion: beaconState.Fork().CurrentVersion,
CurrentVersion: params.BeaconConfig().ElectraForkVersion,
Epoch: time.CurrentEpoch(beaconState),
},
LatestBlockHeader: beaconState.LatestBlockHeader(),
BlockRoots: beaconState.BlockRoots(),
StateRoots: beaconState.StateRoots(),
HistoricalRoots: historicalRoots,
Eth1Data: beaconState.Eth1Data(),
Eth1DataVotes: beaconState.Eth1DataVotes(),
Eth1DepositIndex: beaconState.Eth1DepositIndex(),
Validators: beaconState.Validators(),
Balances: beaconState.Balances(),
RandaoMixes: beaconState.RandaoMixes(),
Slashings: beaconState.Slashings(),
PreviousEpochParticipation: prevEpochParticipation,
CurrentEpochParticipation: currentEpochParticipation,
JustificationBits: beaconState.JustificationBits(),
PreviousJustifiedCheckpoint: beaconState.PreviousJustifiedCheckpoint(),
CurrentJustifiedCheckpoint: beaconState.CurrentJustifiedCheckpoint(),
FinalizedCheckpoint: beaconState.FinalizedCheckpoint(),
InactivityScores: inactivityScores,
CurrentSyncCommittee: currentSyncCommittee,
NextSyncCommittee: nextSyncCommittee,
LatestExecutionPayloadHeader: &enginev1.ExecutionPayloadHeaderElectra{
ParentHash: payloadHeader.ParentHash(),
FeeRecipient: payloadHeader.FeeRecipient(),
StateRoot: payloadHeader.StateRoot(),
ReceiptsRoot: payloadHeader.ReceiptsRoot(),
LogsBloom: payloadHeader.LogsBloom(),
PrevRandao: payloadHeader.PrevRandao(),
BlockNumber: payloadHeader.BlockNumber(),
GasLimit: payloadHeader.GasLimit(),
GasUsed: payloadHeader.GasUsed(),
Timestamp: payloadHeader.Timestamp(),
ExtraData: payloadHeader.ExtraData(),
BaseFeePerGas: payloadHeader.BaseFeePerGas(),
BlockHash: payloadHeader.BlockHash(),
TransactionsRoot: txRoot,
WithdrawalsRoot: wdRoot,
ExcessBlobGas: excessBlobGas,
BlobGasUsed: blobGasUsed,
DepositReceiptsRoot: bytesutil.Bytes32(0), // [New in Electra:EIP6110]
WithdrawalRequestsRoot: bytesutil.Bytes32(0), // [New in Electra:EIP7002]
},
NextWithdrawalIndex: wi,
NextWithdrawalValidatorIndex: vi,
HistoricalSummaries: summaries,
DepositReceiptsStartIndex: params.BeaconConfig().UnsetDepositReceiptsStartIndex,
DepositBalanceToConsume: 0,
ExitBalanceToConsume: helpers.ActivationExitChurnLimit(primitives.Gwei(tab)),
EarliestExitEpoch: earliestExitEpoch,
ConsolidationBalanceToConsume: helpers.ConsolidationChurnLimit(primitives.Gwei(tab)),
EarliestConsolidationEpoch: helpers.ActivationExitEpoch(slots.ToEpoch(beaconState.Slot())),
PendingBalanceDeposits: make([]*ethpb.PendingBalanceDeposit, 0),
PendingPartialWithdrawals: make([]*ethpb.PendingPartialWithdrawal, 0),
PendingConsolidations: make([]*ethpb.PendingConsolidation, 0),
}
// Sort preActivationIndices by activation eligibility epoch, breaking ties by validator index.
sort.Slice(preActivationIndices, func(i, j int) bool {
// Comparing based on ActivationEligibilityEpoch and then by index if the epochs are the same
if s.Validators[preActivationIndices[i]].ActivationEligibilityEpoch == s.Validators[preActivationIndices[j]].ActivationEligibilityEpoch {
return preActivationIndices[i] < preActivationIndices[j]
}
return s.Validators[preActivationIndices[i]].ActivationEligibilityEpoch < s.Validators[preActivationIndices[j]].ActivationEligibilityEpoch
})
// Convert to the native state representation so it can be used with the helper functions below.
post, err := state_native.InitializeFromProtoUnsafeElectra(s)
if err != nil {
return nil, errors.Wrap(err, "failed to initialize post electra beaconState")
}
for _, index := range preActivationIndices {
if err := helpers.QueueEntireBalanceAndResetValidator(post, index); err != nil {
return nil, errors.Wrap(err, "failed to queue entire balance and reset validator")
}
}
// Ensure early adopters of compounding credentials go through the activation churn
for _, index := range compoundWithdrawalIndices {
if err := helpers.QueueExcessActiveBalance(post, index); err != nil {
return nil, errors.Wrap(err, "failed to queue excess active balance")
}
}
return post, nil
}
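
For context, a hypothetical call site for the upgrade could look like the fragment below. It assumes the beacon config exposes an ElectraForkEpoch field; it is only a sketch, since Prysm's real fork-transition wiring lives elsewhere.

package electrafork

import (
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
)

// maybeUpgradeToElectra swaps the state for its upgraded version once the
// state's slot has crossed into the (assumed) Electra fork epoch.
func maybeUpgradeToElectra(st state.BeaconState) (state.BeaconState, error) {
	if slots.ToEpoch(st.Slot()) == params.BeaconConfig().ElectraForkEpoch {
		return electra.UpgradeToElectra(st)
	}
	return st, nil
}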

View File

@@ -0,0 +1,188 @@
package electra_test
import (
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
func TestUpgradeToElectra(t *testing.T) {
st, _ := util.DeterministicGenesisStateDeneb(t, params.BeaconConfig().MaxValidatorsPerCommittee)
require.NoError(t, st.SetHistoricalRoots([][]byte{{1}}))
vals := st.Validators()
vals[0].ActivationEpoch = params.BeaconConfig().FarFutureEpoch
vals[1].WithdrawalCredentials = []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte}
require.NoError(t, st.SetValidators(vals))
bals := st.Balances()
bals[1] = params.BeaconConfig().MinActivationBalance + 1000
require.NoError(t, st.SetBalances(bals))
preForkState := st.Copy()
mSt, err := electra.UpgradeToElectra(st)
require.NoError(t, err)
require.Equal(t, preForkState.GenesisTime(), mSt.GenesisTime())
require.DeepSSZEqual(t, preForkState.GenesisValidatorsRoot(), mSt.GenesisValidatorsRoot())
require.Equal(t, preForkState.Slot(), mSt.Slot())
require.DeepSSZEqual(t, preForkState.LatestBlockHeader(), mSt.LatestBlockHeader())
require.DeepSSZEqual(t, preForkState.BlockRoots(), mSt.BlockRoots())
require.DeepSSZEqual(t, preForkState.StateRoots(), mSt.StateRoots())
require.DeepSSZEqual(t, preForkState.Validators()[2:], mSt.Validators()[2:])
require.DeepSSZEqual(t, preForkState.Balances()[2:], mSt.Balances()[2:])
require.DeepSSZEqual(t, preForkState.Eth1Data(), mSt.Eth1Data())
require.DeepSSZEqual(t, preForkState.Eth1DataVotes(), mSt.Eth1DataVotes())
require.DeepSSZEqual(t, preForkState.Eth1DepositIndex(), mSt.Eth1DepositIndex())
require.DeepSSZEqual(t, preForkState.RandaoMixes(), mSt.RandaoMixes())
require.DeepSSZEqual(t, preForkState.Slashings(), mSt.Slashings())
require.DeepSSZEqual(t, preForkState.JustificationBits(), mSt.JustificationBits())
require.DeepSSZEqual(t, preForkState.PreviousJustifiedCheckpoint(), mSt.PreviousJustifiedCheckpoint())
require.DeepSSZEqual(t, preForkState.CurrentJustifiedCheckpoint(), mSt.CurrentJustifiedCheckpoint())
require.DeepSSZEqual(t, preForkState.FinalizedCheckpoint(), mSt.FinalizedCheckpoint())
require.Equal(t, len(preForkState.Validators()), len(mSt.Validators()))
preVal, err := preForkState.ValidatorAtIndex(0)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MaxEffectiveBalance, preVal.EffectiveBalance)
preVal2, err := preForkState.ValidatorAtIndex(1)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MaxEffectiveBalance, preVal2.EffectiveBalance)
mVal, err := mSt.ValidatorAtIndex(0)
require.NoError(t, err)
require.Equal(t, uint64(0), mVal.EffectiveBalance)
mVal2, err := mSt.ValidatorAtIndex(1)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance, mVal2.EffectiveBalance)
numValidators := mSt.NumValidators()
p, err := mSt.PreviousEpochParticipation()
require.NoError(t, err)
require.DeepSSZEqual(t, make([]byte, numValidators), p)
p, err = mSt.CurrentEpochParticipation()
require.NoError(t, err)
require.DeepSSZEqual(t, make([]byte, numValidators), p)
s, err := mSt.InactivityScores()
require.NoError(t, err)
require.DeepSSZEqual(t, make([]uint64, numValidators), s)
hr1, err := preForkState.HistoricalRoots()
require.NoError(t, err)
hr2, err := mSt.HistoricalRoots()
require.NoError(t, err)
require.DeepEqual(t, hr1, hr2)
f := mSt.Fork()
require.DeepSSZEqual(t, &ethpb.Fork{
PreviousVersion: st.Fork().CurrentVersion,
CurrentVersion: params.BeaconConfig().ElectraForkVersion,
Epoch: time.CurrentEpoch(st),
}, f)
csc, err := mSt.CurrentSyncCommittee()
require.NoError(t, err)
psc, err := preForkState.CurrentSyncCommittee()
require.NoError(t, err)
require.DeepSSZEqual(t, psc, csc)
nsc, err := mSt.NextSyncCommittee()
require.NoError(t, err)
psc, err = preForkState.NextSyncCommittee()
require.NoError(t, err)
require.DeepSSZEqual(t, psc, nsc)
header, err := mSt.LatestExecutionPayloadHeader()
require.NoError(t, err)
protoHeader, ok := header.Proto().(*enginev1.ExecutionPayloadHeaderElectra)
require.Equal(t, true, ok)
prevHeader, err := preForkState.LatestExecutionPayloadHeader()
require.NoError(t, err)
txRoot, err := prevHeader.TransactionsRoot()
require.NoError(t, err)
wdRoot, err := prevHeader.WithdrawalsRoot()
require.NoError(t, err)
wanted := &enginev1.ExecutionPayloadHeaderElectra{
ParentHash: prevHeader.ParentHash(),
FeeRecipient: prevHeader.FeeRecipient(),
StateRoot: prevHeader.StateRoot(),
ReceiptsRoot: prevHeader.ReceiptsRoot(),
LogsBloom: prevHeader.LogsBloom(),
PrevRandao: prevHeader.PrevRandao(),
BlockNumber: prevHeader.BlockNumber(),
GasLimit: prevHeader.GasLimit(),
GasUsed: prevHeader.GasUsed(),
Timestamp: prevHeader.Timestamp(),
ExtraData: prevHeader.ExtraData(),
BaseFeePerGas: prevHeader.BaseFeePerGas(),
BlockHash: prevHeader.BlockHash(),
TransactionsRoot: txRoot,
WithdrawalsRoot: wdRoot,
DepositReceiptsRoot: bytesutil.Bytes32(0),
WithdrawalRequestsRoot: bytesutil.Bytes32(0),
}
require.DeepEqual(t, wanted, protoHeader)
nwi, err := mSt.NextWithdrawalIndex()
require.NoError(t, err)
require.Equal(t, uint64(0), nwi)
lwvi, err := mSt.NextWithdrawalValidatorIndex()
require.NoError(t, err)
require.Equal(t, primitives.ValidatorIndex(0), lwvi)
summaries, err := mSt.HistoricalSummaries()
require.NoError(t, err)
require.Equal(t, 0, len(summaries))
startIndex, err := mSt.DepositReceiptsStartIndex()
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().UnsetDepositReceiptsStartIndex, startIndex)
balance, err := mSt.DepositBalanceToConsume()
require.NoError(t, err)
require.Equal(t, primitives.Gwei(0), balance)
tab, err := helpers.TotalActiveBalance(mSt)
require.NoError(t, err)
ebtc, err := mSt.ExitBalanceToConsume()
require.NoError(t, err)
require.Equal(t, helpers.ActivationExitChurnLimit(primitives.Gwei(tab)), ebtc)
eee, err := mSt.EarliestExitEpoch()
require.NoError(t, err)
require.Equal(t, primitives.Epoch(1), eee)
cbtc, err := mSt.ConsolidationBalanceToConsume()
require.NoError(t, err)
require.Equal(t, helpers.ConsolidationChurnLimit(primitives.Gwei(tab)), cbtc)
earliestConsolidationEpoch, err := mSt.EarliestConsolidationEpoch()
require.NoError(t, err)
require.Equal(t, helpers.ActivationExitEpoch(slots.ToEpoch(preForkState.Slot())), earliestConsolidationEpoch)
pendingBalanceDeposits, err := mSt.PendingBalanceDeposits()
require.NoError(t, err)
require.Equal(t, 2, len(pendingBalanceDeposits))
require.Equal(t, uint64(1000), pendingBalanceDeposits[1].Amount)
numPendingPartialWithdrawals, err := mSt.NumPendingPartialWithdrawals()
require.NoError(t, err)
require.Equal(t, uint64(0), numPendingPartialWithdrawals)
consolidations, err := mSt.PendingConsolidations()
require.NoError(t, err)
require.Equal(t, 0, len(consolidations))
}

View File

@@ -0,0 +1,105 @@
package electra
import (
"context"
"errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)
// SwitchToCompoundingValidator changes a validator with an eth1 withdrawal credential into a compounding validator and queues any excess balance as a pending balance deposit.
//
// Spec definition:
//
// def switch_to_compounding_validator(state: BeaconState, index: ValidatorIndex) -> None:
// validator = state.validators[index]
// if has_eth1_withdrawal_credential(validator):
// validator.withdrawal_credentials = COMPOUNDING_WITHDRAWAL_PREFIX + validator.withdrawal_credentials[1:]
// queue_excess_active_balance(state, index)
func SwitchToCompoundingValidator(ctx context.Context, s state.BeaconState, idx primitives.ValidatorIndex) error {
v, err := s.ValidatorAtIndex(idx)
if err != nil {
return err
}
if len(v.WithdrawalCredentials) == 0 {
return errors.New("validator has no withdrawal credentials")
}
if helpers.HasETH1WithdrawalCredential(v) {
v.WithdrawalCredentials[0] = params.BeaconConfig().CompoundingWithdrawalPrefixByte
if err := s.UpdateValidatorAtIndex(idx, v); err != nil {
return err
}
return queueExcessActiveBalance(ctx, s, idx)
}
return nil
}
// queueExcessActiveBalance caps the validator's balance at MIN_ACTIVATION_BALANCE and queues the excess as a pending balance deposit.
//
// Spec definition:
//
// def queue_excess_active_balance(state: BeaconState, index: ValidatorIndex) -> None:
// balance = state.balances[index]
// if balance > MIN_ACTIVATION_BALANCE:
// excess_balance = balance - MIN_ACTIVATION_BALANCE
// state.balances[index] = MIN_ACTIVATION_BALANCE
// state.pending_balance_deposits.append(
// PendingBalanceDeposit(index=index, amount=excess_balance)
// )
func queueExcessActiveBalance(ctx context.Context, s state.BeaconState, idx primitives.ValidatorIndex) error {
bal, err := s.BalanceAtIndex(idx)
if err != nil {
return err
}
if bal > params.BeaconConfig().MinActivationBalance {
excessBalance := bal - params.BeaconConfig().MinActivationBalance
if err := s.UpdateBalancesAtIndex(idx, params.BeaconConfig().MinActivationBalance); err != nil {
return err
}
return s.AppendPendingBalanceDeposit(idx, excessBalance)
}
return nil
}
// QueueEntireBalanceAndResetValidator queues the validator's entire balance as a pending balance deposit and resets the validator. This is used in the Electra fork upgrade logic.
//
// Spec definition:
//
// def queue_entire_balance_and_reset_validator(state: BeaconState, index: ValidatorIndex) -> None:
// balance = state.balances[index]
// state.balances[index] = 0
// validator = state.validators[index]
// validator.effective_balance = 0
// validator.activation_eligibility_epoch = FAR_FUTURE_EPOCH
// state.pending_balance_deposits.append(
// PendingBalanceDeposit(index=index, amount=balance)
// )
//
//nolint:dupword
func QueueEntireBalanceAndResetValidator(ctx context.Context, s state.BeaconState, idx primitives.ValidatorIndex) error {
bal, err := s.BalanceAtIndex(idx)
if err != nil {
return err
}
if err := s.UpdateBalancesAtIndex(idx, 0); err != nil {
return err
}
v, err := s.ValidatorAtIndex(idx)
if err != nil {
return err
}
v.EffectiveBalance = 0
v.ActivationEligibilityEpoch = params.BeaconConfig().FarFutureEpoch
if err := s.UpdateValidatorAtIndex(idx, v); err != nil {
return err
}
return s.AppendPendingBalanceDeposit(idx, bal)
}

View File

@@ -0,0 +1,90 @@
package electra_test
import (
"bytes"
"context"
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)
func TestSwitchToCompoundingValidator(t *testing.T) {
s, err := state_native.InitializeFromProtoElectra(&eth.BeaconStateElectra{
Validators: []*eth.Validator{
{
WithdrawalCredentials: []byte{}, // No withdrawal credentials
},
{
WithdrawalCredentials: []byte{0x01, 0xFF}, // Has withdrawal credentials
},
{
WithdrawalCredentials: []byte{0x01, 0xFF}, // Has withdrawal credentials
},
},
Balances: []uint64{
params.BeaconConfig().MinActivationBalance,
params.BeaconConfig().MinActivationBalance,
params.BeaconConfig().MinActivationBalance + 100_000, // Has excess balance
},
})
// Test that a validator with no withdrawal credentials cannot be switched to compounding.
require.NoError(t, err)
require.ErrorContains(t, "validator has no withdrawal credentials", electra.SwitchToCompoundingValidator(context.TODO(), s, 0))
// Test that a validator with withdrawal credentials can be switched to compounding.
require.NoError(t, electra.SwitchToCompoundingValidator(context.TODO(), s, 1))
v, err := s.ValidatorAtIndex(1)
require.NoError(t, err)
require.Equal(t, true, bytes.HasPrefix(v.WithdrawalCredentials, []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte}), "withdrawal credentials were not updated")
// Validator 1's balance is unchanged.
b, err := s.BalanceAtIndex(1)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance, b, "balance was changed")
pbd, err := s.PendingBalanceDeposits()
require.NoError(t, err)
require.Equal(t, 0, len(pbd), "pending balance deposits should be empty")
// Test that a validator with excess balance can be switched to compounding, excess balance is queued.
require.NoError(t, electra.SwitchToCompoundingValidator(context.TODO(), s, 2))
b, err = s.BalanceAtIndex(2)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance, b, "balance was not changed")
pbd, err = s.PendingBalanceDeposits()
require.NoError(t, err)
require.Equal(t, 1, len(pbd), "pending balance deposits should have one element")
require.Equal(t, uint64(100_000), pbd[0].Amount, "pending balance deposit amount is incorrect")
require.Equal(t, primitives.ValidatorIndex(2), pbd[0].Index, "pending balance deposit index is incorrect")
}
func TestQueueEntireBalanceAndResetValidator(t *testing.T) {
s, err := state_native.InitializeFromProtoElectra(&eth.BeaconStateElectra{
Validators: []*eth.Validator{
{
EffectiveBalance: params.BeaconConfig().MinActivationBalance + 100_000,
ActivationEligibilityEpoch: primitives.Epoch(100),
},
},
Balances: []uint64{
params.BeaconConfig().MinActivationBalance + 100_000,
},
})
require.NoError(t, err)
require.NoError(t, electra.QueueEntireBalanceAndResetValidator(context.TODO(), s, 0))
b, err := s.BalanceAtIndex(0)
require.NoError(t, err)
require.Equal(t, uint64(0), b, "balance was not changed")
v, err := s.ValidatorAtIndex(0)
require.NoError(t, err)
require.Equal(t, uint64(0), v.EffectiveBalance, "effective balance was not reset")
require.Equal(t, params.BeaconConfig().FarFutureEpoch, v.ActivationEligibilityEpoch, "activation eligibility epoch was not reset")
pbd, err := s.PendingBalanceDeposits()
require.NoError(t, err)
require.Equal(t, 1, len(pbd), "pending balance deposits should have one element")
require.Equal(t, params.BeaconConfig().MinActivationBalance+100_000, pbd[0].Amount, "pending balance deposit amount is incorrect")
require.Equal(t, primitives.ValidatorIndex(0), pbd[0].Index, "pending balance deposit index is incorrect")
}

View File

@@ -0,0 +1,80 @@
package electra
import (
"context"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
)
// ProcessExecutionLayerWithdrawRequests processes execution-layer-triggered withdrawal requests
// from the provided execution payload against the beacon state.
//
// Spec pseudocode definition:
//
// def process_execution_layer_withdrawal_request(
//
// state: BeaconState,
// execution_layer_withdrawal_request: ExecutionLayerWithdrawalRequest
//
// ) -> None:
// amount = execution_layer_withdrawal_request.amount
// is_full_exit_request = amount == FULL_EXIT_REQUEST_AMOUNT
//
// # If partial withdrawal queue is full, only full exits are processed
// if len(state.pending_partial_withdrawals) == PENDING_PARTIAL_WITHDRAWALS_LIMIT and not is_full_exit_request:
// return
//
// validator_pubkeys = [v.pubkey for v in state.validators]
// # Verify pubkey exists
// request_pubkey = execution_layer_withdrawal_request.validator_pubkey
// if request_pubkey not in validator_pubkeys:
// return
// index = ValidatorIndex(validator_pubkeys.index(request_pubkey))
// validator = state.validators[index]
//
// # Verify withdrawal credentials
// has_correct_credential = has_execution_withdrawal_credential(validator)
// is_correct_source_address = (
// validator.withdrawal_credentials[12:] == execution_layer_withdrawal_request.source_address
// )
// if not (has_correct_credential and is_correct_source_address):
// return
// # Verify the validator is active
// if not is_active_validator(validator, get_current_epoch(state)):
// return
// # Verify exit has not been initiated
// if validator.exit_epoch != FAR_FUTURE_EPOCH:
// return
// # Verify the validator has been active long enough
// if get_current_epoch(state) < validator.activation_epoch + SHARD_COMMITTEE_PERIOD:
// return
//
// pending_balance_to_withdraw = get_pending_balance_to_withdraw(state, index)
//
// if is_full_exit_request:
// # Only exit validator if it has no pending withdrawals in the queue
// if pending_balance_to_withdraw == 0:
// initiate_validator_exit(state, index)
// return
//
// has_sufficient_effective_balance = validator.effective_balance >= MIN_ACTIVATION_BALANCE
// has_excess_balance = state.balances[index] > MIN_ACTIVATION_BALANCE + pending_balance_to_withdraw
//
// # Only allow partial withdrawals with compounding withdrawal credentials
// if has_compounding_withdrawal_credential(validator) and has_sufficient_effective_balance and has_excess_balance:
// to_withdraw = min(
// state.balances[index] - MIN_ACTIVATION_BALANCE - pending_balance_to_withdraw,
// amount
// )
// exit_queue_epoch = compute_exit_epoch_and_update_churn(state, to_withdraw)
// withdrawable_epoch = Epoch(exit_queue_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY)
// state.pending_partial_withdrawals.append(PendingPartialWithdrawal(
// index=index,
// amount=to_withdraw,
// withdrawable_epoch=withdrawable_epoch,
// ))
func ProcessExecutionLayerWithdrawRequests(ctx context.Context, st state.BeaconState, wrs []*enginev1.ExecutionLayerWithdrawalRequest) (state.BeaconState, error) {
// TODO: replace with real implementation
return st, nil
}
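
While the handler above is a stub, the sketch below condenses the spec pseudocode's gating rules over plain structs. All names and constants are hypothetical (assumed mainnet values: MIN_ACTIVATION_BALANCE = 32 ETH, SHARD_COMMITTEE_PERIOD = 256 epochs, FULL_EXIT_REQUEST_AMOUNT = 0); the pubkey lookup, churn accounting, and withdrawable-epoch computation are deliberately abstracted into callbacks, so this is not the implementation that will replace the TODO.

package withdrawalsketch

import "bytes"

const (
	farFutureEpoch        = ^uint64(0)
	fullExitRequestAmount = uint64(0)
	minActivationBalance  = uint64(32_000_000_000) // gwei, assumed mainnet value
	shardCommitteePeriod  = uint64(256)            // epochs, assumed mainnet value
)

type miniValidator struct {
	withdrawalCredentials []byte // 32 bytes; last 20 bytes are the execution address
	effectiveBalance      uint64
	activationEpoch       uint64
	exitEpoch             uint64
	hasExecutionCreds     bool
	hasCompoundingCreds   bool
}

type withdrawalRequest struct {
	sourceAddress []byte // 20 bytes
	amount        uint64
}

// processWithdrawalRequest applies the gating rules to a request whose
// validator has already been resolved by pubkey. Exits and partial
// withdrawals are reported through callbacks so the sketch stays state-free.
func processWithdrawalRequest(
	v miniValidator,
	balance, pendingBalanceToWithdraw, currentEpoch uint64,
	partialQueueFull bool,
	req withdrawalRequest,
	initiateExit func(),
	queuePartial func(amount uint64),
) {
	isFullExit := req.amount == fullExitRequestAmount
	// If the partial withdrawal queue is full, only full exits are processed.
	if partialQueueFull && !isFullExit {
		return
	}
	// Verify execution withdrawal credentials and matching source address.
	if !v.hasExecutionCreds || len(v.withdrawalCredentials) != 32 ||
		!bytes.Equal(v.withdrawalCredentials[12:], req.sourceAddress) {
		return
	}
	// Validator must be active, not already exiting, and active long enough.
	isActive := v.activationEpoch <= currentEpoch && currentEpoch < v.exitEpoch
	if !isActive || v.exitEpoch != farFutureEpoch || currentEpoch < v.activationEpoch+shardCommitteePeriod {
		return
	}
	if isFullExit {
		// Only exit if nothing is pending in the partial withdrawal queue for this validator.
		if pendingBalanceToWithdraw == 0 {
			initiateExit()
		}
		return
	}
	// Partial withdrawals need compounding credentials, a full effective balance, and excess balance.
	hasSufficientEB := v.effectiveBalance >= minActivationBalance
	hasExcess := balance > minActivationBalance+pendingBalanceToWithdraw
	if v.hasCompoundingCreds && hasSufficientEB && hasExcess {
		toWithdraw := min(balance-minActivationBalance-pendingBalanceToWithdraw, req.amount)
		queuePartial(toWithdraw)
	}
}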

View File

@@ -470,11 +470,11 @@ func UnslashedAttestingIndices(ctx context.Context, state state.ReadOnlyBeaconSt
seen := make(map[uint64]bool)
for _, att := range atts {
committee, err := helpers.BeaconCommitteeFromState(ctx, state, att.Data.Slot, att.Data.CommitteeIndex)
committee, err := helpers.BeaconCommitteeFromState(ctx, state, att.GetData().Slot, att.GetData().CommitteeIndex)
if err != nil {
return nil, err
}
attestingIndices, err := attestation.AttestingIndices(att.AggregationBits, committee)
attestingIndices, err := attestation.AttestingIndices(att, committee)
if err != nil {
return nil, err
}

View File

@@ -58,7 +58,7 @@ func ProcessAttestations(
if err != nil {
return nil, nil, err
}
indices, err := attestation.AttestingIndices(a.AggregationBits, committee)
indices, err := attestation.AttestingIndices(a, committee)
if err != nil {
return nil, nil, err
}

View File

@@ -211,7 +211,7 @@ func TestProcessAttestations(t *testing.T) {
committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, att1.Data.Slot, att1.Data.CommitteeIndex)
require.NoError(t, err)
indices, err := attestation.AttestingIndices(att1.AggregationBits, committee)
indices, err := attestation.AttestingIndices(att1, committee)
require.NoError(t, err)
for _, i := range indices {
if !pVals[i].IsPrevEpochAttester {
@@ -220,7 +220,7 @@ func TestProcessAttestations(t *testing.T) {
}
committee, err = helpers.BeaconCommitteeFromState(context.Background(), beaconState, att2.Data.Slot, att2.Data.CommitteeIndex)
require.NoError(t, err)
indices, err = attestation.AttestingIndices(att2.AggregationBits, committee)
indices, err = attestation.AttestingIndices(att2, committee)
require.NoError(t, err)
for _, i := range indices {
assert.Equal(t, true, pVals[i].IsPrevEpochAttester, "Not a prev epoch attester")

View File

@@ -11,7 +11,6 @@ go_library(
deps = [
"//async/event:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
],
)

View File

@@ -3,7 +3,6 @@ package operation
import (
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
@@ -33,12 +32,15 @@ const (
// AttesterSlashingReceived is sent after an attester slashing is received from gossip or rpc
AttesterSlashingReceived = 8
// DataColumnSidecarReceived is sent after a data column sidecar is received from gossip or rpc.
DataColumnSidecarReceived = 9
)
// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
type UnAggregatedAttReceivedData struct {
// Attestation is the unaggregated attestation object.
Attestation interfaces.Attestation
Attestation ethpb.Att
}
// AggregatedAttReceivedData is the data sent with AggregatedAttReceived events.
@@ -76,5 +78,9 @@ type ProposerSlashingReceivedData struct {
// AttesterSlashingReceivedData is the data sent with AttesterSlashingReceived events.
type AttesterSlashingReceivedData struct {
AttesterSlashing interfaces.AttesterSlashing
AttesterSlashing ethpb.AttSlashing
}
type DataColumnSidecarReceivedData struct {
DataColumn *blocks.VerifiedRODataColumn
}

View File

@@ -34,6 +34,7 @@ go_library(
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
@@ -73,7 +74,6 @@ go_test(
"//beacon-chain/state/state-native:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//container/slice:go_default_library",
"//crypto/hash:go_default_library",

View File

@@ -7,7 +7,6 @@ import (
"time"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -22,7 +21,7 @@ var (
// ValidateNilAttestation checks if any composite field of input attestation is nil.
// Access to these nil fields will result in a runtime panic,
// so it is recommended to run these checks as a first line of defense.
func ValidateNilAttestation(attestation interfaces.Attestation) error {
func ValidateNilAttestation(attestation ethpb.Att) error {
if attestation == nil {
return errors.New("attestation can't be nil")
}
@@ -72,7 +71,7 @@ func IsAggregator(committeeCount uint64, slotSig []byte) (bool, error) {
// IsAggregated returns true if the attestation is an aggregated attestation,
// false otherwise.
func IsAggregated(attestation interfaces.Attestation) bool {
func IsAggregated(attestation ethpb.Att) bool {
return attestation.GetAggregationBits().Count() > 1
}
@@ -91,7 +90,7 @@ func IsAggregated(attestation interfaces.Attestation) bool {
// committees_since_epoch_start = committees_per_slot * slots_since_epoch_start
//
// return uint64((committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT)
func ComputeSubnetForAttestation(activeValCount uint64, att interfaces.Attestation) uint64 {
func ComputeSubnetForAttestation(activeValCount uint64, att ethpb.Att) uint64 {
return ComputeSubnetFromCommitteeAndSlot(activeValCount, att.GetData().CommitteeIndex, att.GetData().Slot)
}

View File

@@ -9,7 +9,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
@@ -239,7 +238,7 @@ func TestVerifyCheckpointEpoch_Ok(t *testing.T) {
func TestValidateNilAttestation(t *testing.T) {
tests := []struct {
name string
attestation interfaces.Attestation
attestation ethpb.Att
errString string
}{
{

View File

@@ -15,12 +15,13 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/container/slice"
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/math"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
@@ -58,6 +59,29 @@ func SlotCommitteeCount(activeValidatorCount uint64) uint64 {
return committeesPerSlot
}
// AttestationCommittees returns the beacon state committees that correspond to the attestation's committee indices.
func AttestationCommittees(ctx context.Context, st state.ReadOnlyBeaconState, att ethpb.Att) ([][]primitives.ValidatorIndex, error) {
var committees [][]primitives.ValidatorIndex
if att.Version() >= version.Electra {
committeeIndices := att.CommitteeBitsVal().BitIndices()
committees = make([][]primitives.ValidatorIndex, len(committeeIndices))
for i, ci := range committeeIndices {
committee, err := BeaconCommitteeFromState(ctx, st, att.GetData().Slot, primitives.CommitteeIndex(ci))
if err != nil {
return nil, err
}
committees[i] = committee
}
} else {
committee, err := BeaconCommitteeFromState(ctx, st, att.GetData().Slot, att.GetData().CommitteeIndex)
if err != nil {
return nil, err
}
committees = [][]primitives.ValidatorIndex{committee}
}
return committees, nil
}
// BeaconCommitteeFromState returns the crosslink committee of a given slot and committee index. This
// is a spec implementation where state is used as an argument. In case of state retrieval
// becomes expensive, consider using BeaconCommittee below.
@@ -143,105 +167,137 @@ func BeaconCommittee(
return ComputeCommittee(validatorIndices, seed, indexOffset, count)
}
// CommitteeAssignmentContainer represents a committee list, committee index, and to be attested slot for a given epoch.
type CommitteeAssignmentContainer struct {
// CommitteeAssignment represents a committee list, a committee index, and the slot to be attested for a given epoch.
type CommitteeAssignment struct {
Committee []primitives.ValidatorIndex
AttesterSlot primitives.Slot
CommitteeIndex primitives.CommitteeIndex
}
// CommitteeAssignments is a map of validator indices pointing to the appropriate committee
// assignment for the given epoch.
//
// 1. Determine the proposer validator index for each slot.
// 2. Compute all committees.
// 3. Determine the attesting slot for each committee.
// 4. Construct a map of validator indices pointing to the respective committees.
func CommitteeAssignments(
ctx context.Context,
state state.BeaconState,
epoch primitives.Epoch,
) (map[primitives.ValidatorIndex]*CommitteeAssignmentContainer, map[primitives.ValidatorIndex][]primitives.Slot, error) {
// verifyAssignmentEpoch verifies that the given epoch is valid for assignment based on the provided state.
// It checks that the epoch is not greater than the next epoch and that the epoch's start slot is greater
// than or equal to the minimum valid start slot derived from the state's current slot and SlotsPerHistoricalRoot.
func verifyAssignmentEpoch(epoch primitives.Epoch, state state.BeaconState) error {
nextEpoch := time.NextEpoch(state)
if epoch > nextEpoch {
return nil, nil, fmt.Errorf(
"epoch %d can't be greater than next epoch %d",
epoch,
nextEpoch,
)
return fmt.Errorf("epoch %d can't be greater than next epoch %d", epoch, nextEpoch)
}
// We determine the slots in which proposers are supposed to act.
// Some validators may need to propose multiple times per epoch, so
// we use a map of proposer idx -> []slot to keep track of this possibility.
startSlot, err := slots.EpochStart(epoch)
if err != nil {
return nil, nil, err
return err
}
minValidStartSlot := primitives.Slot(0)
if state.Slot() >= params.BeaconConfig().SlotsPerHistoricalRoot {
minValidStartSlot = state.Slot() - params.BeaconConfig().SlotsPerHistoricalRoot
if stateSlot := state.Slot(); stateSlot >= params.BeaconConfig().SlotsPerHistoricalRoot {
minValidStartSlot = stateSlot - params.BeaconConfig().SlotsPerHistoricalRoot
}
if startSlot < minValidStartSlot {
return nil, nil, fmt.Errorf("start slot %d is smaller than the minimum valid start slot %d", startSlot, minValidStartSlot)
return fmt.Errorf("start slot %d is smaller than the minimum valid start slot %d", startSlot, minValidStartSlot)
}
return nil
}
// ProposerAssignments calculates proposer assignments for each validator during the specified epoch.
// It verifies the validity of the epoch, then iterates through each slot in the epoch to determine the
// proposer for that slot and assigns them accordingly.
func ProposerAssignments(ctx context.Context, state state.BeaconState, epoch primitives.Epoch) (map[primitives.ValidatorIndex][]primitives.Slot, error) {
// Verify if the epoch is valid for assignment based on the provided state.
if err := verifyAssignmentEpoch(epoch, state); err != nil {
return nil, err
}
startSlot, err := slots.EpochStart(epoch)
if err != nil {
return nil, err
}
proposerIndexToSlots := make(map[primitives.ValidatorIndex][]primitives.Slot, params.BeaconConfig().SlotsPerEpoch)
proposerAssignments := make(map[primitives.ValidatorIndex][]primitives.Slot)
originalStateSlot := state.Slot()
for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ {
// Skip proposer assignment for genesis slot.
if slot == 0 {
continue
}
// Set the state's current slot.
if err := state.SetSlot(slot); err != nil {
return nil, nil, err
return nil, err
}
// Determine the proposer index for the current slot.
i, err := BeaconProposerIndex(ctx, state)
if err != nil {
return nil, nil, errors.Wrapf(err, "could not check proposer at slot %d", state.Slot())
return nil, errors.Wrapf(err, "could not check proposer at slot %d", state.Slot())
}
proposerIndexToSlots[i] = append(proposerIndexToSlots[i], slot)
// Append the slot to the proposer's assignments.
if _, ok := proposerAssignments[i]; !ok {
proposerAssignments[i] = make([]primitives.Slot, 0)
}
proposerAssignments[i] = append(proposerAssignments[i], slot)
}
// If previous proposer indices computation is outside if current proposal epoch range,
// we need to reset state slot back to start slot so that we can compute the correct committees.
currentProposalEpoch := epoch < nextEpoch
if !currentProposalEpoch {
if err := state.SetSlot(state.Slot() - params.BeaconConfig().SlotsPerEpoch); err != nil {
return nil, nil, err
}
// Reset state back to its original slot.
if err := state.SetSlot(originalStateSlot); err != nil {
return nil, err
}
activeValidatorIndices, err := ActiveValidatorIndices(ctx, state, epoch)
return proposerAssignments, nil
}
// CommitteeAssignments calculates committee assignments for each validator during the specified epoch.
// It retrieves the active validator count, determines the number of committees per slot, and computes
// assignments only for validators present in the provided validators slice.
func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch primitives.Epoch, validators []primitives.ValidatorIndex) (map[primitives.ValidatorIndex]*CommitteeAssignment, error) {
// Verify if the epoch is valid for assignment based on the provided state.
if err := verifyAssignmentEpoch(epoch, state); err != nil {
return nil, err
}
// Retrieve active validator count for the specified epoch.
activeValidatorCount, err := ActiveValidatorCount(ctx, state, epoch)
if err != nil {
return nil, nil, err
return nil, err
}
// Each slot in an epoch has a different set of committees. This value is derived from the
// active validator set, which does not change.
numCommitteesPerSlot := SlotCommitteeCount(uint64(len(activeValidatorIndices)))
validatorIndexToCommittee := make(map[primitives.ValidatorIndex]*CommitteeAssignmentContainer, len(activeValidatorIndices))
// Compute all committees for all slots.
for i := primitives.Slot(0); i < params.BeaconConfig().SlotsPerEpoch; i++ {
// Compute committees.
// Determine the number of committees per slot based on the number of active validator indices.
numCommitteesPerSlot := SlotCommitteeCount(activeValidatorCount)
startSlot, err := slots.EpochStart(epoch)
if err != nil {
return nil, err
}
assignments := make(map[primitives.ValidatorIndex]*CommitteeAssignment)
vals := make(map[primitives.ValidatorIndex]struct{})
for _, v := range validators {
vals[v] = struct{}{}
}
// Compute committee assignments for each slot in the epoch.
for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ {
// Compute committees for the current slot.
for j := uint64(0); j < numCommitteesPerSlot; j++ {
slot := startSlot + i
committee, err := BeaconCommitteeFromState(ctx, state, slot, primitives.CommitteeIndex(j) /*committee index*/)
committee, err := BeaconCommitteeFromState(ctx, state, slot, primitives.CommitteeIndex(j))
if err != nil {
return nil, nil, err
return nil, err
}
cac := &CommitteeAssignmentContainer{
Committee: committee,
CommitteeIndex: primitives.CommitteeIndex(j),
AttesterSlot: slot,
}
for _, vIndex := range committee {
validatorIndexToCommittee[vIndex] = cac
if _, ok := vals[vIndex]; !ok { // Skip if the validator is not in the provided validators slice.
continue
}
if _, ok := assignments[vIndex]; !ok {
assignments[vIndex] = &CommitteeAssignment{}
}
assignments[vIndex].Committee = committee
assignments[vIndex].AttesterSlot = slot
assignments[vIndex].CommitteeIndex = primitives.CommitteeIndex(j)
}
}
}
return validatorIndexToCommittee, proposerIndexToSlots, nil
return assignments, nil
}
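
A hypothetical caller of the split API might look like the sketch below. printDuties and tracked are placeholders introduced here for illustration, but the ProposerAssignments / CommitteeAssignments signatures match the ones introduced above: proposer duties come from one call, attester duties from the other, and the latter now only returns entries for the requested validator indices.

package dutysketch

import (
	"context"
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)

// printDuties looks up proposer and attester duties for a set of tracked
// validators in the given epoch and prints them.
func printDuties(ctx context.Context, st state.BeaconState, epoch primitives.Epoch, tracked []primitives.ValidatorIndex) error {
	proposerSlots, err := helpers.ProposerAssignments(ctx, st, epoch)
	if err != nil {
		return err
	}
	attesterDuties, err := helpers.CommitteeAssignments(ctx, st, epoch, tracked)
	if err != nil {
		return err
	}
	for _, idx := range tracked {
		if a, ok := attesterDuties[idx]; ok {
			fmt.Printf("validator %d attests at slot %d in committee %d\n", idx, a.AttesterSlot, a.CommitteeIndex)
		}
		for _, s := range proposerSlots[idx] {
			fmt.Printf("validator %d proposes at slot %d\n", idx, s)
		}
	}
	return nil
}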
// VerifyBitfieldLength verifies that a bitfield length matches the given committee size.
@@ -257,7 +313,7 @@ func VerifyBitfieldLength(bf bitfield.Bitfield, committeeSize uint64) error {
// VerifyAttestationBitfieldLengths verifies that an attestation's aggregation bitfield is
// of a valid length matching the size of the committee.
func VerifyAttestationBitfieldLengths(ctx context.Context, state state.ReadOnlyBeaconState, att interfaces.Attestation) error {
func VerifyAttestationBitfieldLengths(ctx context.Context, state state.ReadOnlyBeaconState, att ethpb.Att) error {
committee, err := BeaconCommitteeFromState(ctx, state, att.GetData().Slot, att.GetData().CommitteeIndex)
if err != nil {
return errors.Wrap(err, "could not retrieve beacon committees")
@@ -295,6 +351,21 @@ func ShuffledIndices(s state.ReadOnlyBeaconState, epoch primitives.Epoch) ([]pri
return UnshuffleList(indices, seed)
}
// CommitteeIndices returns the beacon committee indices corresponding to bits that are set in the argument bitfield.
//
// Spec pseudocode definition:
//
// def get_committee_indices(committee_bits: Bitvector) -> Sequence[CommitteeIndex]:
// return [CommitteeIndex(index) for index, bit in enumerate(committee_bits) if bit]
func CommitteeIndices(committeeBits bitfield.Bitfield) []primitives.CommitteeIndex {
indices := committeeBits.BitIndices()
committeeIndices := make([]primitives.CommitteeIndex, len(indices))
for i, ix := range indices {
committeeIndices[i] = primitives.CommitteeIndex(uint64(ix))
}
return committeeIndices
}
// UpdateCommitteeCache gets called at the beginning of every epoch to cache the committee shuffled indices
// list with committee index and epoch number. It caches the shuffled indices for the input epoch.
func UpdateCommitteeCache(ctx context.Context, state state.ReadOnlyBeaconState, e primitives.Epoch) error {

View File

@@ -104,7 +104,10 @@ func TestCommitteeAssignments_CannotRetrieveFutureEpoch(t *testing.T) {
Slot: 0, // Epoch 0.
})
require.NoError(t, err)
_, _, err = helpers.CommitteeAssignments(context.Background(), state, epoch+1)
_, err = helpers.CommitteeAssignments(context.Background(), state, epoch+1, nil)
assert.ErrorContains(t, "can't be greater than next epoch", err)
_, err = helpers.ProposerAssignments(context.Background(), state, epoch+1)
assert.ErrorContains(t, "can't be greater than next epoch", err)
}
@@ -128,10 +131,10 @@ func TestCommitteeAssignments_NoProposerForSlot0(t *testing.T) {
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
_, proposerIndexToSlots, err := helpers.CommitteeAssignments(context.Background(), state, 0)
require.NoError(t, err, "Failed to determine CommitteeAssignments")
for _, ss := range proposerIndexToSlots {
for _, s := range ss {
assignments, err := helpers.ProposerAssignments(context.Background(), state, 0)
require.NoError(t, err, "Failed to determine Assignments")
for _, slots := range assignments {
for _, s := range slots {
assert.NotEqual(t, uint64(0), s, "No proposer should be assigned to slot 0")
}
}
@@ -140,6 +143,7 @@ func TestCommitteeAssignments_NoProposerForSlot0(t *testing.T) {
func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
// Initialize test with 4*SlotsPerEpoch validators; each slot and each committee index gets 4 validators.
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
validatorIndices := make([]primitives.ValidatorIndex, len(validators))
for i := 0; i < len(validators); i++ {
// First 2 epochs only half validators are activated.
var activationEpoch primitives.Epoch
@@ -150,6 +154,7 @@ func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
ActivationEpoch: activationEpoch,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
validatorIndices[i] = primitives.ValidatorIndex(i)
}
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
@@ -201,14 +206,16 @@ func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
helpers.ClearCache()
validatorIndexToCommittee, proposerIndexToSlots, err := helpers.CommitteeAssignments(context.Background(), state, slots.ToEpoch(tt.slot))
require.NoError(t, err, "Failed to determine CommitteeAssignments")
cac := validatorIndexToCommittee[tt.index]
assignments, err := helpers.CommitteeAssignments(context.Background(), state, slots.ToEpoch(tt.slot), validatorIndices)
require.NoError(t, err, "Failed to determine Assignments")
cac := assignments[tt.index]
assert.Equal(t, tt.committeeIndex, cac.CommitteeIndex, "Unexpected committeeIndex for validator index %d", tt.index)
assert.Equal(t, tt.slot, cac.AttesterSlot, "Unexpected slot for validator index %d", tt.index)
if len(proposerIndexToSlots[tt.index]) > 0 && proposerIndexToSlots[tt.index][0] != tt.proposerSlot {
proposerAssignments, err := helpers.ProposerAssignments(context.Background(), state, slots.ToEpoch(tt.slot))
require.NoError(t, err)
if len(proposerAssignments[tt.index]) > 0 && proposerAssignments[tt.index][0] != tt.proposerSlot {
t.Errorf("wanted proposer slot %d, got proposer slot %d for validator index %d",
tt.proposerSlot, proposerIndexToSlots[tt.index][0], tt.index)
tt.proposerSlot, proposerAssignments[tt.index][0], tt.index)
}
assert.DeepEqual(t, tt.committee, cac.Committee, "Unexpected committee for validator index %d", tt.index)
})
@@ -238,13 +245,13 @@ func TestCommitteeAssignments_CannotRetrieveFuture(t *testing.T) {
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
_, proposerIndxs, err := helpers.CommitteeAssignments(context.Background(), state, time.CurrentEpoch(state))
assignments, err := helpers.ProposerAssignments(context.Background(), state, time.CurrentEpoch(state))
require.NoError(t, err)
require.NotEqual(t, 0, len(proposerIndxs), "wanted non-zero proposer index set")
require.NotEqual(t, 0, len(assignments), "wanted non-zero proposer index set")
_, proposerIndxs, err = helpers.CommitteeAssignments(context.Background(), state, time.CurrentEpoch(state)+1)
assignments, err = helpers.ProposerAssignments(context.Background(), state, time.CurrentEpoch(state)+1)
require.NoError(t, err)
require.NotEqual(t, 0, len(proposerIndxs), "wanted non-zero proposer index set")
require.NotEqual(t, 0, len(assignments), "wanted non-zero proposer index set")
}
func TestCommitteeAssignments_CannotRetrieveOlderThanSlotsPerHistoricalRoot(t *testing.T) {
@@ -264,7 +271,7 @@ func TestCommitteeAssignments_CannotRetrieveOlderThanSlotsPerHistoricalRoot(t *t
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
_, _, err = helpers.CommitteeAssignments(context.Background(), state, 0)
_, err = helpers.CommitteeAssignments(context.Background(), state, 0, nil)
require.ErrorContains(t, "start slot 0 is smaller than the minimum valid start slot 1", err)
}
@@ -286,12 +293,12 @@ func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
})
require.NoError(t, err)
epoch := primitives.Epoch(1)
_, proposerIndexToSlots, err := helpers.CommitteeAssignments(context.Background(), state, epoch)
require.NoError(t, err, "Failed to determine CommitteeAssignments")
assignments, err := helpers.ProposerAssignments(context.Background(), state, epoch)
require.NoError(t, err, "Failed to determine Assignments")
slotsWithProposers := make(map[primitives.Slot]bool)
for _, proposerSlots := range proposerIndexToSlots {
for _, slot := range proposerSlots {
for _, slots := range assignments {
for _, slot := range slots {
slotsWithProposers[slot] = true
}
}
@@ -699,3 +706,46 @@ func TestPrecomputeProposerIndices_Ok(t *testing.T) {
}
assert.DeepEqual(t, wantedProposerIndices, proposerIndices, "Did not precompute proposer indices correctly")
}
func TestCommitteeIndices(t *testing.T) {
bitfield := bitfield.NewBitvector4()
bitfield.SetBitAt(0, true)
bitfield.SetBitAt(1, true)
bitfield.SetBitAt(3, true)
indices := helpers.CommitteeIndices(bitfield)
assert.DeepEqual(t, []primitives.CommitteeIndex{0, 1, 3}, indices)
}
func TestAttestationCommittees(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().TargetCommitteeSize))
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
}
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Validators: validators,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
t.Run("pre-Electra", func(t *testing.T) {
att := &ethpb.Attestation{Data: &ethpb.AttestationData{CommitteeIndex: 0}}
committees, err := helpers.AttestationCommittees(context.Background(), state, att)
require.NoError(t, err)
require.Equal(t, 1, len(committees))
assert.Equal(t, params.BeaconConfig().TargetCommitteeSize, uint64(len(committees[0])))
})
t.Run("post-Electra", func(t *testing.T) {
bits := primitives.NewAttestationCommitteeBits()
bits.SetBitAt(0, true)
bits.SetBitAt(1, true)
att := &ethpb.AttestationElectra{CommitteeBits: bits, Data: &ethpb.AttestationData{}}
committees, err := helpers.AttestationCommittees(context.Background(), state, att)
require.NoError(t, err)
require.Equal(t, 2, len(committees))
assert.Equal(t, params.BeaconConfig().TargetCommitteeSize, uint64(len(committees[0])))
assert.Equal(t, params.BeaconConfig().TargetCommitteeSize, uint64(len(committees[1])))
})
}

View File

@@ -2,6 +2,7 @@ package helpers
import (
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)
// BalanceChurnLimit for the current active balance, in gwei.
@@ -18,12 +19,12 @@ import (
// get_total_active_balance(state) // CHURN_LIMIT_QUOTIENT
// )
// return churn - churn % EFFECTIVE_BALANCE_INCREMENT
func BalanceChurnLimit(activeBalanceGwei uint64) uint64 {
func BalanceChurnLimit(activeBalance primitives.Gwei) primitives.Gwei {
churn := max(
params.BeaconConfig().MinPerEpochChurnLimitElectra,
(activeBalanceGwei / params.BeaconConfig().ChurnLimitQuotient),
(uint64(activeBalance) / params.BeaconConfig().ChurnLimitQuotient),
)
return churn - churn%params.BeaconConfig().EffectiveBalanceIncrement
return primitives.Gwei(churn - churn%params.BeaconConfig().EffectiveBalanceIncrement)
}
// ActivationExitChurnLimit for the current active balance, in gwei.
@@ -36,8 +37,8 @@ func BalanceChurnLimit(activeBalanceGwei uint64) uint64 {
// Return the churn limit for the current epoch dedicated to activations and exits.
// """
// return min(MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT, get_balance_churn_limit(state))
func ActivationExitChurnLimit(activeBalanceGwei uint64) uint64 {
return min(params.BeaconConfig().MaxPerEpochActivationExitChurnLimit, BalanceChurnLimit(activeBalanceGwei))
func ActivationExitChurnLimit(activeBalance primitives.Gwei) primitives.Gwei {
return min(primitives.Gwei(params.BeaconConfig().MaxPerEpochActivationExitChurnLimit), BalanceChurnLimit(activeBalance))
}
// ConsolidationChurnLimit for the current active balance, in gwei.
@@ -47,6 +48,6 @@ func ActivationExitChurnLimit(activeBalanceGwei uint64) uint64 {
//
// def get_consolidation_churn_limit(state: BeaconState) -> Gwei:
// return get_balance_churn_limit(state) - get_activation_exit_churn_limit(state)
func ConsolidationChurnLimit(activeBalanceGwei uint64) uint64 {
return BalanceChurnLimit(activeBalanceGwei) - ActivationExitChurnLimit(activeBalanceGwei)
func ConsolidationChurnLimit(activeBalance primitives.Gwei) primitives.Gwei {
return BalanceChurnLimit(activeBalance) - ActivationExitChurnLimit(activeBalance)
}
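
As a quick illustration of the typed churn helpers above, here is a standalone sketch (not part of this change; the balance value is arbitrary) showing the rounding to EFFECTIVE_BALANCE_INCREMENT and the identity between the three limits:

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)

func main() {
	// Hypothetical total active balance: 1,000,000 ETH expressed in gwei.
	active := primitives.Gwei(1_000_000 * 1_000_000_000)

	balanceChurn := helpers.BalanceChurnLimit(active)             // rounded down to a multiple of EFFECTIVE_BALANCE_INCREMENT
	exitChurn := helpers.ActivationExitChurnLimit(active)         // capped by MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT
	consolidationChurn := helpers.ConsolidationChurnLimit(active) // whatever churn remains

	// Per the spec definitions above, the consolidation churn is the remainder
	// of the balance churn once the activation/exit churn is taken out.
	fmt.Println(balanceChurn == exitChurn+consolidationChurn) // true
}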

View File

@@ -5,29 +5,30 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
)
func TestBalanceChurnLimit(t *testing.T) {
tests := []struct {
name string
activeBalance uint64
expected uint64
activeBalance primitives.Gwei
expected primitives.Gwei
}{
{
name: "less than MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA",
activeBalance: 111,
expected: params.BeaconConfig().MinPerEpochChurnLimitElectra,
expected: primitives.Gwei(params.BeaconConfig().MinPerEpochChurnLimitElectra),
},
{
name: "modulo EFFECTIVE_BALANCE_INCREMENT",
activeBalance: 111 + params.BeaconConfig().MinPerEpochChurnLimitElectra*params.BeaconConfig().ChurnLimitQuotient,
expected: params.BeaconConfig().MinPerEpochChurnLimitElectra,
activeBalance: primitives.Gwei(111 + params.BeaconConfig().MinPerEpochChurnLimitElectra*params.BeaconConfig().ChurnLimitQuotient),
expected: primitives.Gwei(params.BeaconConfig().MinPerEpochChurnLimitElectra),
},
{
name: "more than MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA",
activeBalance: 2000 * params.BeaconConfig().EffectiveBalanceIncrement * params.BeaconConfig().ChurnLimitQuotient,
expected: 2000 * params.BeaconConfig().EffectiveBalanceIncrement,
activeBalance: primitives.Gwei(2000 * params.BeaconConfig().EffectiveBalanceIncrement * params.BeaconConfig().ChurnLimitQuotient),
expected: primitives.Gwei(2000 * params.BeaconConfig().EffectiveBalanceIncrement),
},
}
@@ -41,18 +42,18 @@ func TestBalanceChurnLimit(t *testing.T) {
func TestActivationExitChurnLimit(t *testing.T) {
tests := []struct {
name string
activeBalance uint64
expected uint64
activeBalance primitives.Gwei
expected primitives.Gwei
}{
{
name: "less than MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT",
activeBalance: 1,
expected: params.BeaconConfig().MinPerEpochChurnLimitElectra,
expected: primitives.Gwei(params.BeaconConfig().MinPerEpochChurnLimitElectra),
},
{
name: "more than MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT",
activeBalance: 2000 * params.BeaconConfig().EffectiveBalanceIncrement * params.BeaconConfig().ChurnLimitQuotient,
expected: params.BeaconConfig().MaxPerEpochActivationExitChurnLimit,
activeBalance: primitives.Gwei(2000 * params.BeaconConfig().EffectiveBalanceIncrement * params.BeaconConfig().ChurnLimitQuotient),
expected: primitives.Gwei(params.BeaconConfig().MaxPerEpochActivationExitChurnLimit),
},
}
@@ -66,6 +67,6 @@ func TestActivationExitChurnLimit(t *testing.T) {
// FuzzConsolidationChurnLimit exercises BalanceChurnLimit and ActivationExitChurnLimit
func FuzzConsolidationChurnLimit(f *testing.F) {
f.Fuzz(func(t *testing.T, activeBalance uint64) {
helpers.ConsolidationChurnLimit(activeBalance)
helpers.ConsolidationChurnLimit(primitives.Gwei(activeBalance))
})
}

View File

@@ -12,6 +12,7 @@ import (
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
@@ -520,11 +521,11 @@ func isETH1WithdrawalCredential(creds []byte) bool {
// Check if ``validator`` has an 0x02 prefixed "compounding" withdrawal credential.
// """
// return is_compounding_withdrawal_credential(validator.withdrawal_credentials)
func HasCompoundingWithdrawalCredential(v *ethpb.Validator) bool {
func HasCompoundingWithdrawalCredential(v interfaces.WithWithdrawalCredentials) bool {
if v == nil {
return false
}
return isCompoundingWithdrawalCredential(v.WithdrawalCredentials)
return isCompoundingWithdrawalCredential(v.GetWithdrawalCredentials())
}
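
A small standalone usage sketch for the widened helper above (illustrative only; the credential bytes are made up). It relies on the 0x02 prefix described in the spec comment, and on *ethpb.Validator satisfying the interface through its generated GetWithdrawalCredentials getter:

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

func main() {
	compounding := &ethpb.Validator{WithdrawalCredentials: append([]byte{0x02}, make([]byte, 31)...)}
	eth1 := &ethpb.Validator{WithdrawalCredentials: append([]byte{0x01}, make([]byte, 31)...)}

	// Per the spec comment above, only the 0x02-prefixed credentials are
	// treated as compounding.
	fmt.Println(helpers.HasCompoundingWithdrawalCredential(compounding)) // true
	fmt.Println(helpers.HasCompoundingWithdrawalCredential(eth1))        // false
}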
// isCompoundingWithdrawalCredential checks if the credentials are a compounding withdrawal credential.
@@ -673,3 +674,68 @@ func ValidatorMaxEffectiveBalance(val *ethpb.Validator) uint64 {
}
return params.BeaconConfig().MinActivationBalance
}
// QueueExcessActiveBalance queues the portion of a validator's balance above the minimum activation balance as a pending balance deposit.
//
// Spec definition:
//
// def queue_excess_active_balance(state: BeaconState, index: ValidatorIndex) -> None:
// balance = state.balances[index]
// if balance > MIN_ACTIVATION_BALANCE:
// excess_balance = balance - MIN_ACTIVATION_BALANCE
// state.balances[index] = MIN_ACTIVATION_BALANCE
// state.pending_balance_deposits.append(
// PendingBalanceDeposit(index=index, amount=excess_balance)
// )
func QueueExcessActiveBalance(s state.BeaconState, idx primitives.ValidatorIndex) error {
bal, err := s.BalanceAtIndex(idx)
if err != nil {
return err
}
if bal > params.BeaconConfig().MinActivationBalance {
excessBalance := bal - params.BeaconConfig().MinActivationBalance
if err := s.UpdateBalancesAtIndex(idx, params.BeaconConfig().MinActivationBalance); err != nil {
return err
}
return s.AppendPendingBalanceDeposit(idx, excessBalance)
}
return nil
}
// QueueEntireBalanceAndResetValidator queues the entire balance and resets the validator. This is used in electra fork logic.
//
// Spec definition:
//
// def queue_entire_balance_and_reset_validator(state: BeaconState, index: ValidatorIndex) -> None:
// balance = state.balances[index]
// validator = state.validators[index]
// state.balances[index] = 0
// validator.effective_balance = 0
// validator.activation_eligibility_epoch = FAR_FUTURE_EPOCH
// state.pending_balance_deposits.append(
// PendingBalanceDeposit(index=index, amount=balance)
// )
func QueueEntireBalanceAndResetValidator(s state.BeaconState, idx primitives.ValidatorIndex) error {
bal, err := s.BalanceAtIndex(idx)
if err != nil {
return err
}
if err := s.UpdateBalancesAtIndex(idx, 0); err != nil {
return err
}
v, err := s.ValidatorAtIndex(idx)
if err != nil {
return err
}
v.EffectiveBalance = 0
v.ActivationEligibilityEpoch = params.BeaconConfig().FarFutureEpoch
if err := s.UpdateValidatorAtIndex(idx, v); err != nil {
return err
}
return s.AppendPendingBalanceDeposit(idx, bal)
}

View File

@@ -18,6 +18,7 @@ import (
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
)
func TestIsActiveValidator_OK(t *testing.T) {
@@ -1119,3 +1120,40 @@ func TestValidatorMaxEffectiveBalance(t *testing.T) {
// Sanity check that MinActivationBalance equals (pre-electra) MaxEffectiveBalance
assert.Equal(t, params.BeaconConfig().MinActivationBalance, params.BeaconConfig().MaxEffectiveBalance)
}
func TestQueueExcessActiveBalance_Ok(t *testing.T) {
st, _ := util.DeterministicGenesisStateElectra(t, params.BeaconConfig().MaxValidatorsPerCommittee)
bals := st.Balances()
bals[0] = params.BeaconConfig().MinActivationBalance + 1000
require.NoError(t, st.SetBalances(bals))
err := helpers.QueueExcessActiveBalance(st, 0)
require.NoError(t, err)
pbd, err := st.PendingBalanceDeposits()
require.NoError(t, err)
require.Equal(t, uint64(1000), pbd[0].Amount)
bals = st.Balances()
require.Equal(t, params.BeaconConfig().MinActivationBalance, bals[0])
}
func TestQueueEntireBalanceAndResetValidator_Ok(t *testing.T) {
st, _ := util.DeterministicGenesisStateElectra(t, params.BeaconConfig().MaxValidatorsPerCommittee)
val, err := st.ValidatorAtIndex(0)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MaxEffectiveBalance, val.EffectiveBalance)
pbd, err := st.PendingBalanceDeposits()
require.NoError(t, err)
require.Equal(t, 0, len(pbd))
err = helpers.QueueEntireBalanceAndResetValidator(st, 0)
require.NoError(t, err)
pbd, err = st.PendingBalanceDeposits()
require.NoError(t, err)
require.Equal(t, 1, len(pbd))
val, err = st.ValidatorAtIndex(0)
require.NoError(t, err)
require.Equal(t, uint64(0), val.EffectiveBalance)
}

View File

@@ -0,0 +1,36 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["helpers.go"],
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas",
visibility = ["//visibility:public"],
deps = [
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_ethereum_c_kzg_4844//bindings/go:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["helpers_test.go"],
deps = [
":go_default_library",
"//beacon-chain/blockchain/kzg:go_default_library",
"//consensus-types/blocks:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_consensys_gnark_crypto//ecc/bls12-381/fr:go_default_library",
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
"@com_github_ethereum_c_kzg_4844//bindings/go:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

View File

@@ -0,0 +1,304 @@
package peerdas
import (
"encoding/binary"
"math"
cKzg4844 "github.com/ethereum/c-kzg-4844/bindings/go"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/holiman/uint256"
errors "github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
// Bytes per cell
const bytesPerCell = cKzg4844.FieldElementsPerCell * cKzg4844.BytesPerFieldElement
var (
// Custom errors
errCustodySubnetCountTooLarge = errors.New("custody subnet count larger than data column sidecar subnet count")
errIndexTooLarge = errors.New("column index is larger than the specified number of columns")
errMismatchLength = errors.New("mismatch in the length of the commitments and proofs")
// maxUint256 is the maximum value of a uint256.
maxUint256 = &uint256.Int{math.MaxUint64, math.MaxUint64, math.MaxUint64, math.MaxUint64}
)
// CustodyColumnSubnets computes the subnets the node should participate in for custody.
func CustodyColumnSubnets(nodeId enode.ID, custodySubnetCount uint64) ([]uint64, error) {
dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount
// Check if the custody subnet count is larger than the data column sidecar subnet count.
if custodySubnetCount > dataColumnSidecarSubnetCount {
return nil, errCustodySubnetCountTooLarge
}
one := uint256.NewInt(1)
subnetIds, subnetIdsMap := make([]uint64, 0, custodySubnetCount), make(map[uint64]bool, custodySubnetCount)
for currentId := new(uint256.Int).SetBytes(nodeId.Bytes()); uint64(len(subnetIds)) < custodySubnetCount; currentId.Add(currentId, one) {
// Convert to big endian bytes.
currentIdBytesBigEndian := currentId.Bytes32()
// Convert to little endian.
currentIdBytesLittleEndian := bytesutil.ReverseByteOrder(currentIdBytesBigEndian[:])
// Hash the result.
hashedCurrentId := hash.Hash(currentIdBytesLittleEndian)
// Get the subnet ID.
subnetId := binary.LittleEndian.Uint64(hashedCurrentId[:8]) % dataColumnSidecarSubnetCount
// Add the subnet to the slice.
exists := subnetIdsMap[subnetId]
if !exists {
subnetIds = append(subnetIds, subnetId)
subnetIdsMap[subnetId] = true
}
// Overflow prevention.
if currentId.Cmp(maxUint256) == 0 {
currentId = uint256.NewInt(0)
}
}
return subnetIds, nil
}
// CustodyColumns computes the columns the node should custody.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#helper-functions
func CustodyColumns(nodeId enode.ID, custodySubnetCount uint64) (map[uint64]bool, error) {
dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount
// Compute the custodied subnets.
subnetIds, err := CustodyColumnSubnets(nodeId, custodySubnetCount)
if err != nil {
return nil, errors.Wrap(err, "custody subnets")
}
columnsPerSubnet := cKzg4844.CellsPerExtBlob / dataColumnSidecarSubnetCount
// Knowing the subnet IDs and the number of columns per subnet, select all the columns the node should custody.
// Columns belonging to the same subnet are spaced by the data column sidecar subnet count.
columnIndices := make(map[uint64]bool, custodySubnetCount*columnsPerSubnet)
for i := uint64(0); i < columnsPerSubnet; i++ {
for _, subnetId := range subnetIds {
columnIndex := dataColumnSidecarSubnetCount*i + subnetId
columnIndices[columnIndex] = true
}
}
return columnIndices, nil
}
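
For context, a standalone usage sketch of the two custody helpers above (not part of the change; the zero node ID and the subnet count of 4 are arbitrary), using the import path from the BUILD file:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
)

func main() {
	var nodeID enode.ID // zero-valued ID, purely for illustration

	// Subnets are derived by hashing successive values of the node ID and
	// reducing the digest modulo DATA_COLUMN_SIDECAR_SUBNET_COUNT.
	subnets, err := peerdas.CustodyColumnSubnets(nodeID, 4)
	if err != nil {
		panic(err)
	}

	// Each custodied subnet contributes CellsPerExtBlob / DATA_COLUMN_SIDECAR_SUBNET_COUNT
	// column indices, strided by the subnet count.
	columns, err := peerdas.CustodyColumns(nodeID, 4)
	if err != nil {
		panic(err)
	}

	fmt.Println(len(subnets), len(columns)) // 4 subnets, and 4 * columns-per-subnet custodied columns
}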
// DataColumnSidecars computes the data column sidecars from the signed block and blobs.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#recover_matrix
func DataColumnSidecars(signedBlock interfaces.ReadOnlySignedBeaconBlock, blobs []cKzg4844.Blob) ([]*ethpb.DataColumnSidecar, error) {
blobsCount := len(blobs)
if blobsCount == 0 {
return nil, nil
}
// Get the signed block header.
signedBlockHeader, err := signedBlock.Header()
if err != nil {
return nil, errors.Wrap(err, "signed block header")
}
// Get the block body.
block := signedBlock.Block()
blockBody := block.Body()
// Get the blob KZG commitments.
blobKzgCommitments, err := blockBody.BlobKzgCommitments()
if err != nil {
return nil, errors.Wrap(err, "blob KZG commitments")
}
// Compute the KZG commitments inclusion proof.
kzgCommitmentsInclusionProof, err := blocks.MerkleProofKZGCommitments(blockBody)
if err != nil {
return nil, errors.Wrap(err, "merkle proof ZKG commitments")
}
// Compute cells and proofs.
cells := make([][cKzg4844.CellsPerExtBlob]cKzg4844.Cell, 0, blobsCount)
proofs := make([][cKzg4844.CellsPerExtBlob]cKzg4844.KZGProof, 0, blobsCount)
for i := range blobs {
blob := &blobs[i]
blobCells, blobProofs, err := cKzg4844.ComputeCellsAndKZGProofs(blob)
if err != nil {
return nil, errors.Wrap(err, "compute cells and KZG proofs")
}
cells = append(cells, blobCells)
proofs = append(proofs, blobProofs)
}
// Get the column sidecars.
sidecars := make([]*ethpb.DataColumnSidecar, 0, cKzg4844.CellsPerExtBlob)
for columnIndex := uint64(0); columnIndex < cKzg4844.CellsPerExtBlob; columnIndex++ {
column := make([]cKzg4844.Cell, 0, blobsCount)
kzgProofOfColumn := make([]cKzg4844.KZGProof, 0, blobsCount)
for rowIndex := 0; rowIndex < blobsCount; rowIndex++ {
cell := cells[rowIndex][columnIndex]
column = append(column, cell)
kzgProof := proofs[rowIndex][columnIndex]
kzgProofOfColumn = append(kzgProofOfColumn, kzgProof)
}
columnBytes := make([][]byte, 0, blobsCount)
for i := range column {
cell := column[i]
cellBytes := make([]byte, 0, bytesPerCell)
for _, fieldElement := range cell {
copiedElem := fieldElement
cellBytes = append(cellBytes, copiedElem[:]...)
}
columnBytes = append(columnBytes, cellBytes)
}
kzgProofOfColumnBytes := make([][]byte, 0, blobsCount)
for _, kzgProof := range kzgProofOfColumn {
copiedProof := kzgProof
kzgProofOfColumnBytes = append(kzgProofOfColumnBytes, copiedProof[:])
}
sidecar := &ethpb.DataColumnSidecar{
ColumnIndex: columnIndex,
DataColumn: columnBytes,
KzgCommitments: blobKzgCommitments,
KzgProof: kzgProofOfColumnBytes,
SignedBlockHeader: signedBlockHeader,
KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
}
sidecars = append(sidecars, sidecar)
}
return sidecars, nil
}
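
The result forms a blobs-by-columns cell matrix transposed into per-column sidecars. The helper below is purely illustrative (not part of this change) and is written as if it lived in this package, so it can reuse the unexported bytesPerCell constant; it spells out the expected shape of the output:

// checkSidecarShape asserts the shape of the DataColumnSidecars output for a
// block carrying blobCount blobs.
func checkSidecarShape(sidecars []*ethpb.DataColumnSidecar, blobCount int) error {
	if len(sidecars) != cKzg4844.CellsPerExtBlob {
		return errors.New("expected one sidecar per extended blob column")
	}
	for _, sidecar := range sidecars {
		if len(sidecar.DataColumn) != blobCount {
			return errors.New("each column must hold exactly one cell per blob")
		}
		for _, cell := range sidecar.DataColumn {
			if len(cell) != bytesPerCell {
				return errors.New("unexpected cell size")
			}
		}
	}
	return nil
}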
// DataColumnSidecarsForReconstruct is a TEMPORARY function until there is an official specification for it.
// It is scheduled for deletion.
func DataColumnSidecarsForReconstruct(
blobKzgCommitments [][]byte,
signedBlockHeader *ethpb.SignedBeaconBlockHeader,
kzgCommitmentsInclusionProof [][]byte,
blobs []cKzg4844.Blob,
) ([]*ethpb.DataColumnSidecar, error) {
blobsCount := len(blobs)
if blobsCount == 0 {
return nil, nil
}
// Compute cells and proofs.
cells := make([][cKzg4844.CellsPerExtBlob]cKzg4844.Cell, 0, blobsCount)
proofs := make([][cKzg4844.CellsPerExtBlob]cKzg4844.KZGProof, 0, blobsCount)
for i := range blobs {
blob := &blobs[i]
blobCells, blobProofs, err := cKzg4844.ComputeCellsAndKZGProofs(blob)
if err != nil {
return nil, errors.Wrap(err, "compute cells and KZG proofs")
}
cells = append(cells, blobCells)
proofs = append(proofs, blobProofs)
}
// Get the column sidecars.
sidecars := make([]*ethpb.DataColumnSidecar, 0, cKzg4844.CellsPerExtBlob)
for columnIndex := uint64(0); columnIndex < cKzg4844.CellsPerExtBlob; columnIndex++ {
column := make([]cKzg4844.Cell, 0, blobsCount)
kzgProofOfColumn := make([]cKzg4844.KZGProof, 0, blobsCount)
for rowIndex := 0; rowIndex < blobsCount; rowIndex++ {
cell := cells[rowIndex][columnIndex]
column = append(column, cell)
kzgProof := proofs[rowIndex][columnIndex]
kzgProofOfColumn = append(kzgProofOfColumn, kzgProof)
}
columnBytes := make([][]byte, 0, blobsCount)
for i := range column {
cell := column[i]
cellBytes := make([]byte, 0, bytesPerCell)
for _, fieldElement := range cell {
copiedElem := fieldElement
cellBytes = append(cellBytes, copiedElem[:]...)
}
columnBytes = append(columnBytes, cellBytes)
}
kzgProofOfColumnBytes := make([][]byte, 0, blobsCount)
for _, kzgProof := range kzgProofOfColumn {
copiedProof := kzgProof
kzgProofOfColumnBytes = append(kzgProofOfColumnBytes, copiedProof[:])
}
sidecar := &ethpb.DataColumnSidecar{
ColumnIndex: columnIndex,
DataColumn: columnBytes,
KzgCommitments: blobKzgCommitments,
KzgProof: kzgProofOfColumnBytes,
SignedBlockHeader: signedBlockHeader,
KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
}
sidecars = append(sidecars, sidecar)
}
return sidecars, nil
}
// VerifyDataColumnSidecarKZGProofs verifies the provided KZG Proofs for the particular
// data column.
func VerifyDataColumnSidecarKZGProofs(sc *ethpb.DataColumnSidecar) (bool, error) {
if sc.ColumnIndex >= params.BeaconConfig().NumberOfColumns {
return false, errIndexTooLarge
}
if len(sc.DataColumn) != len(sc.KzgCommitments) || len(sc.KzgCommitments) != len(sc.KzgProof) {
return false, errMismatchLength
}
blobsCount := len(sc.DataColumn)
rowIdx := make([]uint64, 0, blobsCount)
colIdx := make([]uint64, 0, blobsCount)
for i := 0; i < len(sc.DataColumn); i++ {
copiedI := uint64(i)
rowIdx = append(rowIdx, copiedI)
colI := sc.ColumnIndex
colIdx = append(colIdx, colI)
}
ckzgComms := make([]cKzg4844.Bytes48, 0, len(sc.KzgCommitments))
for _, com := range sc.KzgCommitments {
ckzgComms = append(ckzgComms, cKzg4844.Bytes48(com))
}
var cells []cKzg4844.Cell
for _, ce := range sc.DataColumn {
var newCell []cKzg4844.Bytes32
for i := 0; i < len(ce); i += 32 {
newCell = append(newCell, cKzg4844.Bytes32(ce[i:i+32]))
}
cells = append(cells, cKzg4844.Cell(newCell))
}
var proofs []cKzg4844.Bytes48
for _, p := range sc.KzgProof {
proofs = append(proofs, cKzg4844.Bytes48(p))
}
return cKzg4844.VerifyCellKZGProofBatch(ckzgComms, rowIdx, colIdx, cells, proofs)
}

View File

@@ -0,0 +1,91 @@
package peerdas_test
import (
"bytes"
"crypto/sha256"
"encoding/binary"
"fmt"
"testing"
"github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
GoKZG "github.com/crate-crypto/go-kzg-4844"
ckzg4844 "github.com/ethereum/c-kzg-4844/bindings/go"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
"github.com/sirupsen/logrus"
)
func deterministicRandomness(seed int64) [32]byte {
// Converts an int64 to a byte slice
buf := new(bytes.Buffer)
err := binary.Write(buf, binary.BigEndian, seed)
if err != nil {
logrus.WithError(err).Error("Failed to write int64 to bytes buffer")
return [32]byte{}
}
bytes := buf.Bytes()
return sha256.Sum256(bytes)
}
// Returns a serialized random field element in big-endian
func GetRandFieldElement(seed int64) [32]byte {
bytes := deterministicRandomness(seed)
var r fr.Element
r.SetBytes(bytes[:])
return GoKZG.SerializeScalar(r)
}
// Returns a random blob using the passed seed as entropy
func GetRandBlob(seed int64) ckzg4844.Blob {
var blob ckzg4844.Blob
bytesPerBlob := GoKZG.ScalarsPerBlob * GoKZG.SerializedScalarSize
for i := 0; i < bytesPerBlob; i += GoKZG.SerializedScalarSize {
fieldElementBytes := GetRandFieldElement(seed + int64(i))
copy(blob[i:i+GoKZG.SerializedScalarSize], fieldElementBytes[:])
}
return blob
}
func GenerateCommitmentAndProof(blob ckzg4844.Blob) (ckzg4844.KZGCommitment, ckzg4844.KZGProof, error) {
commitment, err := ckzg4844.BlobToKZGCommitment(&blob)
if err != nil {
return ckzg4844.KZGCommitment{}, ckzg4844.KZGProof{}, err
}
proof, err := ckzg4844.ComputeBlobKZGProof(&blob, ckzg4844.Bytes48(commitment))
if err != nil {
return ckzg4844.KZGCommitment{}, ckzg4844.KZGProof{}, err
}
return commitment, proof, err
}
func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
dbBlock := util.NewBeaconBlockDeneb()
require.NoError(t, kzg.Start())
comms := [][]byte{}
blobs := []ckzg4844.Blob{}
for i := int64(0); i < 6; i++ {
blob := GetRandBlob(i)
commitment, _, err := GenerateCommitmentAndProof(blob)
require.NoError(t, err)
comms = append(comms, commitment[:])
blobs = append(blobs, blob)
}
dbBlock.Block.Body.BlobKzgCommitments = comms
sBlock, err := blocks.NewSignedBeaconBlock(dbBlock)
require.NoError(t, err)
sCars, err := peerdas.DataColumnSidecars(sBlock, blobs)
require.NoError(t, err)
for i, sidecar := range sCars {
verified, err := peerdas.VerifyDataColumnSidecarKZGProofs(sidecar)
require.NoError(t, err)
require.Equal(t, true, verified, fmt.Sprintf("sidecar %d failed", i))
}
}

View File

@@ -90,6 +90,15 @@ func CanUpgradeToDeneb(slot primitives.Slot) bool {
return epochStart && denebEpoch
}
// CanUpgradeToElectra returns true if the input `slot` can upgrade to Electra.
// Spec code:
// If state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == ELECTRA_FORK_EPOCH
func CanUpgradeToElectra(slot primitives.Slot) bool {
epochStart := slots.IsEpochStart(slot)
electraEpoch := slots.ToEpoch(slot) == params.BeaconConfig().ElectraForkEpoch
return epochStart && electraEpoch
}
// CanProcessEpoch checks the eligibility to process epoch.
// The epoch can be processed at the end of the last slot of every epoch.
//

View File

@@ -333,3 +333,38 @@ func TestCanUpgradeToDeneb(t *testing.T) {
})
}
}
func TestCanUpgradeToElectra(t *testing.T) {
params.SetupTestConfigCleanup(t)
bc := params.BeaconConfig()
bc.ElectraForkEpoch = 5
params.OverrideBeaconConfig(bc)
tests := []struct {
name string
slot primitives.Slot
want bool
}{
{
name: "not epoch start",
slot: 1,
want: false,
},
{
name: "not electra epoch",
slot: params.BeaconConfig().SlotsPerEpoch,
want: false,
},
{
name: "electra epoch",
slot: primitives.Slot(params.BeaconConfig().ElectraForkEpoch) * params.BeaconConfig().SlotsPerEpoch,
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := time.CanUpgradeToElectra(tt.slot); got != tt.want {
t.Errorf("CanUpgradeToElectra() = %v, want %v", got, tt.want)
}
})
}
}

View File

@@ -19,6 +19,7 @@ go_library(
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/capella:go_default_library",
"//beacon-chain/core/deneb:go_default_library",
"//beacon-chain/core/electra:go_default_library",
"//beacon-chain/core/epoch:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/execution:go_default_library",

View File

@@ -13,6 +13,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/capella"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/deneb"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
e "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/epoch"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/execution"
@@ -255,14 +256,18 @@ func ProcessSlots(ctx context.Context, state state.BeaconState, slot primitives.
tracing.AnnotateError(span, err)
return nil, errors.Wrap(err, "could not process epoch with optimizations")
}
} else if state.Version() >= version.Altair {
} else if state.Version() <= version.Deneb {
state, err = altair.ProcessEpoch(ctx, state)
if err != nil {
tracing.AnnotateError(span, err)
return nil, errors.Wrap(err, "could not process epoch")
return nil, errors.Wrap(err, fmt.Sprintf("could not process %s epoch", version.String(state.Version())))
}
} else {
return nil, errors.New("beacon state should have a version")
state, err = electra.ProcessEpoch(ctx, state)
if err != nil {
tracing.AnnotateError(span, err)
return nil, errors.Wrap(err, fmt.Sprintf("could not process %s epoch", version.String(state.Version())))
}
}
}
if err := state.SetSlot(state.Slot() + 1); err != nil {
@@ -320,6 +325,14 @@ func UpgradeState(ctx context.Context, state state.BeaconState) (state.BeaconSta
return nil, err
}
}
if time.CanUpgradeToElectra(state.Slot()) {
state, err = electra.UpgradeToElectra(state)
if err != nil {
tracing.AnnotateError(span, err)
return nil, err
}
}
return state, nil
}

View File

@@ -8,6 +8,7 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
b "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition/interop"
v "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/validators"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
@@ -223,19 +224,29 @@ func ProcessBlockNoVerifyAnySig(
//
// Spec pseudocode definition:
//
// def process_operations(state: BeaconState, body: ReadOnlyBeaconBlockBody) -> None:
// # Verify that outstanding deposits are processed up to the maximum number of deposits
// assert len(body.deposits) == min(MAX_DEPOSITS, state.eth1_data.deposit_count - state.eth1_deposit_index)
// def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
// # [Modified in Electra:EIP6110]
// # Disable former deposit mechanism once all prior deposits are processed
// eth1_deposit_index_limit = min(state.eth1_data.deposit_count, state.deposit_receipts_start_index)
// if state.eth1_deposit_index < eth1_deposit_index_limit:
// assert len(body.deposits) == min(MAX_DEPOSITS, eth1_deposit_index_limit - state.eth1_deposit_index)
// else:
// assert len(body.deposits) == 0
//
// def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None:
// for operation in operations:
// fn(state, operation)
// def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None:
// for operation in operations:
// fn(state, operation)
//
// for_ops(body.proposer_slashings, process_proposer_slashing)
// for_ops(body.attester_slashings, process_attester_slashing)
// for_ops(body.attestations, process_attestation)
// for_ops(body.deposits, process_deposit)
// for_ops(body.voluntary_exits, process_voluntary_exit)
// for_ops(body.proposer_slashings, process_proposer_slashing)
// for_ops(body.attester_slashings, process_attester_slashing)
// for_ops(body.attestations, process_attestation) # [Modified in Electra:EIP7549]
// for_ops(body.deposits, process_deposit) # [Modified in Electra:EIP7251]
// for_ops(body.voluntary_exits, process_voluntary_exit) # [Modified in Electra:EIP7251]
// for_ops(body.bls_to_execution_changes, process_bls_to_execution_change)
// # [New in Electra:EIP7002:EIP7251]
// for_ops(body.execution_payload.withdrawal_requests, process_execution_layer_withdrawal_request)
// for_ops(body.execution_payload.deposit_receipts, process_deposit_receipt) # [New in Electra:EIP6110]
// for_ops(body.consolidations, process_consolidation) # [New in Electra:EIP7251]
func ProcessOperationsNoVerifyAttsSigs(
ctx context.Context,
state state.BeaconState,
@@ -262,6 +273,11 @@ func ProcessOperationsNoVerifyAttsSigs(
if err != nil {
return nil, err
}
case version.Electra:
state, err = electraOperations(ctx, state, beaconBlock)
if err != nil {
return nil, err
}
default:
return nil, errors.New("block does not have correct version")
}
@@ -378,6 +394,72 @@ func VerifyBlobCommitmentCount(blk interfaces.ReadOnlyBeaconBlock) error {
return nil
}
// electraOperations
//
// Spec definition:
//
// def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
// # [Modified in Electra:EIP6110]
// # Disable former deposit mechanism once all prior deposits are processed
// eth1_deposit_index_limit = min(state.eth1_data.deposit_count, state.deposit_receipts_start_index)
// if state.eth1_deposit_index < eth1_deposit_index_limit:
// assert len(body.deposits) == min(MAX_DEPOSITS, eth1_deposit_index_limit - state.eth1_deposit_index)
// else:
// assert len(body.deposits) == 0
//
// def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None:
// for operation in operations:
// fn(state, operation)
//
// for_ops(body.proposer_slashings, process_proposer_slashing)
// for_ops(body.attester_slashings, process_attester_slashing)
// for_ops(body.attestations, process_attestation) # [Modified in Electra:EIP7549]
// for_ops(body.deposits, process_deposit) # [Modified in Electra:EIP7251]
// for_ops(body.voluntary_exits, process_voluntary_exit) # [Modified in Electra:EIP7251]
// for_ops(body.bls_to_execution_changes, process_bls_to_execution_change)
// # [New in Electra:EIP7002:EIP7251]
// for_ops(body.execution_payload.withdrawal_requests, process_execution_layer_withdrawal_request)
// for_ops(body.execution_payload.deposit_receipts, process_deposit_receipt) # [New in Electra:EIP6110]
// for_ops(body.consolidations, process_consolidation) # [New in Electra:EIP7251]
func electraOperations(
ctx context.Context,
st state.BeaconState,
block interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
// 6110 validations are in VerifyOperationLengths
// Electra extends the altair operations.
st, err := altairOperations(ctx, st, block)
if err != nil {
return nil, err
}
b := block.Body()
bod, ok := b.(interfaces.ROBlockBodyElectra)
if !ok {
return nil, errors.New("could not cast block body to electra block body")
}
e, err := bod.Execution()
if err != nil {
return nil, errors.Wrap(err, "could not get execution data from block")
}
exe, ok := e.(interfaces.ExecutionDataElectra)
if !ok {
return nil, errors.New("could not cast execution data to electra execution data")
}
st, err = electra.ProcessExecutionLayerWithdrawRequests(ctx, st, exe.WithdrawalRequests())
if err != nil {
return nil, errors.Wrap(err, "could not process execution layer withdrawal requests")
}
st, err = electra.ProcessDepositReceipts(ctx, st, exe.DepositReceipts())
if err != nil {
return nil, errors.Wrap(err, "could not process deposit receipts")
}
if err := electra.ProcessConsolidations(ctx, st, bod.Consolidations()); err != nil {
return nil, errors.Wrap(err, "could not process consolidations")
}
return st, nil
}
// This calls altair block operations.
func altairOperations(
ctx context.Context,

View File

@@ -311,7 +311,7 @@ func createFullBlockWithOperations(t *testing.T) (state.BeaconState,
committee, err := helpers.BeaconCommitteeFromState(context.Background(), beaconState, blockAtt.Data.Slot, blockAtt.Data.CommitteeIndex)
assert.NoError(t, err)
attestingIndices, err := attestation.AttestingIndices(blockAtt.AggregationBits, committee)
attestingIndices, err := attestation.AttestingIndices(blockAtt, committee)
require.NoError(t, err)
assert.NoError(t, err)
hashTreeRoot, err = signing.ComputeSigningRoot(blockAtt.Data, domain)
@@ -651,6 +651,20 @@ func TestProcessSlots_ThroughDenebEpoch(t *testing.T) {
require.Equal(t, params.BeaconConfig().SlotsPerEpoch*10, st.Slot())
}
func TestProcessSlots_ThroughElectraEpoch(t *testing.T) {
transition.SkipSlotCache.Disable()
params.SetupTestConfigCleanup(t)
conf := params.BeaconConfig()
conf.ElectraForkEpoch = 5
params.OverrideBeaconConfig(conf)
st, _ := util.DeterministicGenesisStateDeneb(t, params.BeaconConfig().MaxValidatorsPerCommittee)
st, err := transition.ProcessSlots(context.Background(), st, params.BeaconConfig().SlotsPerEpoch*10)
require.NoError(t, err)
require.Equal(t, version.Electra, st.Version())
require.Equal(t, params.BeaconConfig().SlotsPerEpoch*10, st.Slot())
}
func TestProcessSlotsUsingNextSlotCache(t *testing.T) {
s, _ := util.DeterministicGenesisState(t, 1)
r := []byte{'a'}

View File

@@ -2,7 +2,10 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["validator.go"],
srcs = [
"slashing.go",
"validator.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/validators",
visibility = [
"//beacon-chain:__subpackages__",
@@ -15,7 +18,9 @@ go_library(
"//beacon-chain/state:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//math:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
@@ -24,9 +29,12 @@ go_library(
go_test(
name = "go_default_test",
size = "small",
srcs = ["validator_test.go"],
embed = [":go_default_library"],
srcs = [
"slashing_test.go",
"validator_test.go",
],
deps = [
":go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
@@ -36,5 +44,6 @@ go_test(
"//runtime/version:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//time/slots:go_default_library",
],
)

View File

@@ -0,0 +1,33 @@
package validators
import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
)
// SlashingParamsPerVersion returns the slashing parameters for the given state version.
func SlashingParamsPerVersion(v int) (slashingQuotient, proposerRewardQuotient, whistleblowerRewardQuotient uint64, err error) {
cfg := params.BeaconConfig()
switch v {
case version.Phase0:
slashingQuotient = cfg.MinSlashingPenaltyQuotient
proposerRewardQuotient = cfg.ProposerRewardQuotient
whistleblowerRewardQuotient = cfg.WhistleBlowerRewardQuotient
case version.Altair:
slashingQuotient = cfg.MinSlashingPenaltyQuotientAltair
proposerRewardQuotient = cfg.ProposerRewardQuotient
whistleblowerRewardQuotient = cfg.WhistleBlowerRewardQuotient
case version.Bellatrix, version.Capella, version.Deneb:
slashingQuotient = cfg.MinSlashingPenaltyQuotientBellatrix
proposerRewardQuotient = cfg.ProposerRewardQuotient
whistleblowerRewardQuotient = cfg.WhistleBlowerRewardQuotient
case version.Electra:
slashingQuotient = cfg.MinSlashingPenaltyQuotientElectra
proposerRewardQuotient = cfg.ProposerRewardQuotient
whistleblowerRewardQuotient = cfg.WhistleBlowerRewardQuotientElectra
default:
err = errors.New("unknown state version")
}
return
}
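
A standalone sketch (not part of the change; the effective balance is hypothetical) of how the per-version quotients feed the slashing arithmetic, mirroring the divisions used by SlashValidator later in this changeset:

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/validators"
	"github.com/prysmaticlabs/prysm/v5/runtime/version"
)

func main() {
	slashingQuotient, proposerRewardQuotient, whistleblowerRewardQuotient, err := validators.SlashingParamsPerVersion(version.Electra)
	if err != nil {
		panic(err)
	}

	// Hypothetical effective balance of 32 ETH, in gwei.
	effectiveBalance := uint64(32_000_000_000)

	penalty := effectiveBalance / slashingQuotient                        // removed from the slashed validator
	whistleblowerReward := effectiveBalance / whistleblowerRewardQuotient // total reward for reporting the slashing
	proposerReward := whistleblowerReward / proposerRewardQuotient        // share credited to the block proposer

	fmt.Println(penalty, proposerReward, whistleblowerReward-proposerReward)
}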

View File

@@ -0,0 +1,18 @@
package validators_test
import (
"testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/validators"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
)
func TestSlashingParamsPerVersion_NoErrors(t *testing.T) {
for _, v := range version.All() {
_, _, _, err := validators.SlashingParamsPerVersion(v)
if err != nil {
// If this test is failing, you need to add a case for the version in SlashingParamsPerVersion.
t.Errorf("Error occurred for version %d: %v", v, err)
}
}
}

View File

@@ -13,7 +13,9 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/math"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
@@ -43,34 +45,26 @@ func MaxExitEpochAndChurn(s state.BeaconState) (maxExitEpoch primitives.Epoch, c
// InitiateValidatorExit takes in validator index and updates
// validator with correct voluntary exit parameters.
// Note: As of Electra, the exitQueueEpoch and churn parameters are unused.
//
// Spec pseudocode definition:
//
// def initiate_validator_exit(state: BeaconState, index: ValidatorIndex) -> None:
// """
// Initiate the exit of the validator with index ``index``.
// """
// # Return if validator already initiated exit
// validator = state.validators[index]
// if validator.exit_epoch != FAR_FUTURE_EPOCH:
// return
// """
// Initiate the exit of the validator with index ``index``.
// """
// # Return if validator already initiated exit
// validator = state.validators[index]
// if validator.exit_epoch != FAR_FUTURE_EPOCH:
// return
//
// # Compute exit queue epoch
// exit_epochs = [v.exit_epoch for v in state.validators if v.exit_epoch != FAR_FUTURE_EPOCH]
// exit_queue_epoch = max(exit_epochs + [compute_activation_exit_epoch(get_current_epoch(state))])
// exit_queue_churn = len([v for v in state.validators if v.exit_epoch == exit_queue_epoch])
// if exit_queue_churn >= get_validator_churn_limit(state):
// exit_queue_epoch += Epoch(1)
// # Compute exit queue epoch [Modified in Electra:EIP7251]
// exit_queue_epoch = compute_exit_epoch_and_update_churn(state, validator.effective_balance)
//
// # Set validator exit epoch and withdrawable epoch
// validator.exit_epoch = exit_queue_epoch
// validator.withdrawable_epoch = Epoch(validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY)
// # Set validator exit epoch and withdrawable epoch
// validator.exit_epoch = exit_queue_epoch
// validator.withdrawable_epoch = Epoch(validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY)
func InitiateValidatorExit(ctx context.Context, s state.BeaconState, idx primitives.ValidatorIndex, exitQueueEpoch primitives.Epoch, churn uint64) (state.BeaconState, primitives.Epoch, error) {
exitableEpoch := helpers.ActivationExitEpoch(time.CurrentEpoch(s))
if exitableEpoch > exitQueueEpoch {
exitQueueEpoch = exitableEpoch
churn = 0
}
validator, err := s.ValidatorAtIndex(idx)
if err != nil {
return nil, 0, err
@@ -78,14 +72,38 @@ func InitiateValidatorExit(ctx context.Context, s state.BeaconState, idx primiti
if validator.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
return s, validator.ExitEpoch, ErrValidatorAlreadyExited
}
activeValidatorCount, err := helpers.ActiveValidatorCount(ctx, s, time.CurrentEpoch(s))
if err != nil {
return nil, 0, errors.Wrap(err, "could not get active validator count")
}
currentChurn := helpers.ValidatorExitChurnLimit(activeValidatorCount)
if churn >= currentChurn {
exitQueueEpoch, err = exitQueueEpoch.SafeAdd(1)
// Compute exit queue epoch.
if s.Version() < version.Electra {
// Relevant spec code from deneb:
//
// exit_epochs = [v.exit_epoch for v in state.validators if v.exit_epoch != FAR_FUTURE_EPOCH]
// exit_queue_epoch = max(exit_epochs + [compute_activation_exit_epoch(get_current_epoch(state))])
// exit_queue_churn = len([v for v in state.validators if v.exit_epoch == exit_queue_epoch])
// if exit_queue_churn >= get_validator_churn_limit(state):
// exit_queue_epoch += Epoch(1)
exitableEpoch := helpers.ActivationExitEpoch(time.CurrentEpoch(s))
if exitableEpoch > exitQueueEpoch {
exitQueueEpoch = exitableEpoch
churn = 0
}
activeValidatorCount, err := helpers.ActiveValidatorCount(ctx, s, time.CurrentEpoch(s))
if err != nil {
return nil, 0, errors.Wrap(err, "could not get active validator count")
}
currentChurn := helpers.ValidatorExitChurnLimit(activeValidatorCount)
if churn >= currentChurn {
exitQueueEpoch, err = exitQueueEpoch.SafeAdd(1)
if err != nil {
return nil, 0, err
}
}
} else {
// [Modified in Electra:EIP7251]
// exit_queue_epoch = compute_exit_epoch_and_update_churn(state, validator.effective_balance)
var err error
exitQueueEpoch, err = s.ExitEpochAndUpdateChurn(primitives.Gwei(validator.EffectiveBalance))
if err != nil {
return nil, 0, err
}
@@ -102,7 +120,8 @@ func InitiateValidatorExit(ctx context.Context, s state.BeaconState, idx primiti
}
// SlashValidator slashes the malicious validator's balance and awards
// the whistleblower's balance.
// the whistleblower's balance. Note: This implementation does not handle an
// optional whistleblower index. The whistleblower index is always the proposer index.
//
// Spec pseudocode definition:
//
@@ -118,22 +137,22 @@ func InitiateValidatorExit(ctx context.Context, s state.BeaconState, idx primiti
// validator.slashed = True
// validator.withdrawable_epoch = max(validator.withdrawable_epoch, Epoch(epoch + EPOCHS_PER_SLASHINGS_VECTOR))
// state.slashings[epoch % EPOCHS_PER_SLASHINGS_VECTOR] += validator.effective_balance
// decrease_balance(state, slashed_index, validator.effective_balance // MIN_SLASHING_PENALTY_QUOTIENT)
// slashing_penalty = validator.effective_balance // MIN_SLASHING_PENALTY_QUOTIENT_EIP7251 # [Modified in EIP7251]
// decrease_balance(state, slashed_index, slashing_penalty)
//
// # Apply proposer and whistleblower rewards
// proposer_index = get_beacon_proposer_index(state)
// if whistleblower_index is None:
// whistleblower_index = proposer_index
// whistleblower_reward = Gwei(validator.effective_balance // WHISTLEBLOWER_REWARD_QUOTIENT)
// proposer_reward = Gwei(whistleblower_reward // PROPOSER_REWARD_QUOTIENT)
// whistleblower_reward = Gwei(
// validator.effective_balance // WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA) # [Modified in EIP7251]
// proposer_reward = Gwei(whistleblower_reward * PROPOSER_WEIGHT // WEIGHT_DENOMINATOR)
// increase_balance(state, proposer_index, proposer_reward)
// increase_balance(state, whistleblower_index, Gwei(whistleblower_reward - proposer_reward))
func SlashValidator(
ctx context.Context,
s state.BeaconState,
slashedIdx primitives.ValidatorIndex,
penaltyQuotient uint64,
proposerRewardQuotient uint64) (state.BeaconState, error) {
slashedIdx primitives.ValidatorIndex) (state.BeaconState, error) {
maxExitEpoch, churn := MaxExitEpochAndChurn(s)
s, _, err := InitiateValidatorExit(ctx, s, slashedIdx, maxExitEpoch, churn)
if err != nil && !errors.Is(err, ErrValidatorAlreadyExited) {
@@ -161,7 +180,17 @@ func SlashValidator(
); err != nil {
return nil, err
}
if err := helpers.DecreaseBalance(s, slashedIdx, validator.EffectiveBalance/penaltyQuotient); err != nil {
slashingQuotient, proposerRewardQuotient, whistleblowerRewardQuotient, err := SlashingParamsPerVersion(s.Version())
if err != nil {
return nil, errors.Wrap(err, "could not get slashing parameters per version")
}
slashingPenalty, err := math.Div64(validator.EffectiveBalance, slashingQuotient)
if err != nil {
return nil, errors.Wrap(err, "failed to compute slashing slashingPenalty")
}
if err := helpers.DecreaseBalance(s, slashedIdx, slashingPenalty); err != nil {
return nil, err
}
@@ -170,14 +199,18 @@ func SlashValidator(
return nil, errors.Wrap(err, "could not get proposer idx")
}
whistleBlowerIdx := proposerIdx
whistleblowerReward := validator.EffectiveBalance / params.BeaconConfig().WhistleBlowerRewardQuotient
proposerReward := whistleblowerReward / proposerRewardQuotient
err = helpers.IncreaseBalance(s, proposerIdx, proposerReward)
whistleblowerReward, err := math.Div64(validator.EffectiveBalance, whistleblowerRewardQuotient)
if err != nil {
return nil, errors.Wrap(err, "failed to compute whistleblowerReward")
}
proposerReward, err := math.Div64(whistleblowerReward, proposerRewardQuotient)
if err != nil {
return nil, errors.Wrap(err, "failed to compute proposer reward")
}
if err := helpers.IncreaseBalance(s, proposerIdx, proposerReward); err != nil {
return nil, err
}
err = helpers.IncreaseBalance(s, whistleBlowerIdx, whistleblowerReward-proposerReward)
if err != nil {
if err := helpers.IncreaseBalance(s, whistleBlowerIdx, whistleblowerReward-proposerReward); err != nil {
return nil, err
}
return s, nil

View File

@@ -1,4 +1,4 @@
package validators
package validators_test
import (
"context"
@@ -6,6 +6,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/validators"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -13,6 +14,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/testing/assert"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
func TestHasVoted_OK(t *testing.T) {
@@ -48,8 +50,8 @@ func TestInitiateValidatorExit_AlreadyExited(t *testing.T) {
}}
state, err := state_native.InitializeFromProtoPhase0(base)
require.NoError(t, err)
newState, epoch, err := InitiateValidatorExit(context.Background(), state, 0, 199, 1)
require.ErrorIs(t, err, ErrValidatorAlreadyExited)
newState, epoch, err := validators.InitiateValidatorExit(context.Background(), state, 0, 199, 1)
require.ErrorIs(t, err, validators.ErrValidatorAlreadyExited)
require.Equal(t, exitEpoch, epoch)
v, err := newState.ValidatorAtIndex(0)
require.NoError(t, err)
@@ -67,7 +69,7 @@ func TestInitiateValidatorExit_ProperExit(t *testing.T) {
}}
state, err := state_native.InitializeFromProtoPhase0(base)
require.NoError(t, err)
newState, epoch, err := InitiateValidatorExit(context.Background(), state, idx, exitedEpoch+2, 1)
newState, epoch, err := validators.InitiateValidatorExit(context.Background(), state, idx, exitedEpoch+2, 1)
require.NoError(t, err)
require.Equal(t, exitedEpoch+2, epoch)
v, err := newState.ValidatorAtIndex(idx)
@@ -87,7 +89,7 @@ func TestInitiateValidatorExit_ChurnOverflow(t *testing.T) {
}}
state, err := state_native.InitializeFromProtoPhase0(base)
require.NoError(t, err)
newState, epoch, err := InitiateValidatorExit(context.Background(), state, idx, exitedEpoch+2, 4)
newState, epoch, err := validators.InitiateValidatorExit(context.Background(), state, idx, exitedEpoch+2, 4)
require.NoError(t, err)
require.Equal(t, exitedEpoch+3, epoch)
@@ -109,10 +111,58 @@ func TestInitiateValidatorExit_WithdrawalOverflows(t *testing.T) {
}}
state, err := state_native.InitializeFromProtoPhase0(base)
require.NoError(t, err)
_, _, err = InitiateValidatorExit(context.Background(), state, 1, params.BeaconConfig().FarFutureEpoch-1, 1)
_, _, err = validators.InitiateValidatorExit(context.Background(), state, 1, params.BeaconConfig().FarFutureEpoch-1, 1)
require.ErrorContains(t, "addition overflows", err)
}
func TestInitiateValidatorExit_ProperExit_Electra(t *testing.T) {
exitedEpoch := primitives.Epoch(100)
idx := primitives.ValidatorIndex(3)
base := &ethpb.BeaconStateElectra{
Slot: slots.UnsafeEpochStart(exitedEpoch + 1),
Validators: []*ethpb.Validator{
{
ExitEpoch: exitedEpoch,
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
},
{
ExitEpoch: exitedEpoch + 1,
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
},
{
ExitEpoch: exitedEpoch + 2,
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
},
{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
},
},
}
state, err := state_native.InitializeFromProtoElectra(base)
require.NoError(t, err)
// Pre-check: Exit balance to consume should be zero.
ebtc, err := state.ExitBalanceToConsume()
require.NoError(t, err)
require.Equal(t, primitives.Gwei(0), ebtc)
newState, epoch, err := validators.InitiateValidatorExit(context.Background(), state, idx, 0, 0) // exitQueueEpoch and churn are not used in electra
require.NoError(t, err)
// Expect that the exit epoch is the next available epoch with max seed lookahead.
want := helpers.ActivationExitEpoch(exitedEpoch + 1)
require.Equal(t, want, epoch)
v, err := newState.ValidatorAtIndex(idx)
require.NoError(t, err)
assert.Equal(t, want, v.ExitEpoch, "Exit epoch was not the highest")
// Check that the exit balance to consume has been updated on the state.
ebtc, err = state.ExitBalanceToConsume()
require.NoError(t, err)
require.NotEqual(t, primitives.Gwei(0), ebtc, "Exit balance to consume was not updated")
}
func TestSlashValidator_OK(t *testing.T) {
validatorCount := 100
registry := make([]*ethpb.Validator, 0, validatorCount)
@@ -141,8 +191,7 @@ func TestSlashValidator_OK(t *testing.T) {
require.NoError(t, err, "Could not get proposer")
proposerBal, err := state.BalanceAtIndex(proposer)
require.NoError(t, err)
cfg := params.BeaconConfig()
slashedState, err := SlashValidator(context.Background(), state, slashedIdx, cfg.MinSlashingPenaltyQuotient, cfg.ProposerRewardQuotient)
slashedState, err := validators.SlashValidator(context.Background(), state, slashedIdx)
require.NoError(t, err, "Could not slash validator")
require.Equal(t, true, slashedState.Version() == version.Phase0)
@@ -167,6 +216,59 @@ func TestSlashValidator_OK(t *testing.T) {
assert.Equal(t, maxBalance-(v.EffectiveBalance/params.BeaconConfig().MinSlashingPenaltyQuotient), bal, "Did not get expected balance for slashed validator")
}
func TestSlashValidator_Electra(t *testing.T) {
validatorCount := 100
registry := make([]*ethpb.Validator, 0, validatorCount)
balances := make([]uint64, 0, validatorCount)
for i := 0; i < validatorCount; i++ {
registry = append(registry, &ethpb.Validator{
ActivationEpoch: 0,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
})
balances = append(balances, params.BeaconConfig().MaxEffectiveBalance)
}
base := &ethpb.BeaconStateElectra{
Validators: registry,
Slashings: make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector),
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
Balances: balances,
}
state, err := state_native.InitializeFromProtoElectra(base)
require.NoError(t, err)
slashedIdx := primitives.ValidatorIndex(3)
proposer, err := helpers.BeaconProposerIndex(context.Background(), state)
require.NoError(t, err, "Could not get proposer")
proposerBal, err := state.BalanceAtIndex(proposer)
require.NoError(t, err)
slashedState, err := validators.SlashValidator(context.Background(), state, slashedIdx)
require.NoError(t, err, "Could not slash validator")
require.Equal(t, true, slashedState.Version() == version.Electra)
v, err := state.ValidatorAtIndex(slashedIdx)
require.NoError(t, err)
assert.Equal(t, true, v.Slashed, "Validator not slashed despite supposed to being slashed")
assert.Equal(t, time.CurrentEpoch(state)+params.BeaconConfig().EpochsPerSlashingsVector, v.WithdrawableEpoch, "Withdrawable epoch not the expected value")
maxBalance := params.BeaconConfig().MaxEffectiveBalance
slashedBalance := state.Slashings()[state.Slot().Mod(uint64(params.BeaconConfig().EpochsPerSlashingsVector))]
assert.Equal(t, maxBalance, slashedBalance, "Slashed balance isn't the expected amount")
whistleblowerReward := slashedBalance / params.BeaconConfig().WhistleBlowerRewardQuotientElectra
bal, err := state.BalanceAtIndex(proposer)
require.NoError(t, err)
// The proposer is the whistleblower.
assert.Equal(t, proposerBal+whistleblowerReward, bal, "Did not get expected balance for proposer")
bal, err = state.BalanceAtIndex(slashedIdx)
require.NoError(t, err)
v, err = state.ValidatorAtIndex(slashedIdx)
require.NoError(t, err)
assert.Equal(t, maxBalance-(v.EffectiveBalance/params.BeaconConfig().MinSlashingPenaltyQuotientElectra), bal, "Did not get expected balance for slashed validator")
}
func TestActivatedValidatorIndices(t *testing.T) {
tests := []struct {
state *ethpb.BeaconState
@@ -219,7 +321,7 @@ func TestActivatedValidatorIndices(t *testing.T) {
for _, tt := range tests {
s, err := state_native.InitializeFromProtoPhase0(tt.state)
require.NoError(t, err)
activatedIndices := ActivatedValidatorIndices(time.CurrentEpoch(s), tt.state.Validators)
activatedIndices := validators.ActivatedValidatorIndices(time.CurrentEpoch(s), tt.state.Validators)
assert.DeepEqual(t, tt.wanted, activatedIndices)
}
}
@@ -273,7 +375,7 @@ func TestSlashedValidatorIndices(t *testing.T) {
for _, tt := range tests {
s, err := state_native.InitializeFromProtoPhase0(tt.state)
require.NoError(t, err)
slashedIndices := SlashedValidatorIndices(time.CurrentEpoch(s), tt.state.Validators)
slashedIndices := validators.SlashedValidatorIndices(time.CurrentEpoch(s), tt.state.Validators)
assert.DeepEqual(t, tt.wanted, slashedIndices)
}
}
@@ -335,7 +437,7 @@ func TestExitedValidatorIndices(t *testing.T) {
require.NoError(t, err)
activeCount, err := helpers.ActiveValidatorCount(context.Background(), s, time.PrevEpoch(s))
require.NoError(t, err)
exitedIndices, err := ExitedValidatorIndices(0, tt.state.Validators, activeCount)
exitedIndices, err := validators.ExitedValidatorIndices(0, tt.state.Validators, activeCount)
require.NoError(t, err)
assert.DeepEqual(t, tt.wanted, exitedIndices)
}
@@ -410,7 +512,7 @@ func TestValidatorMaxExitEpochAndChurn(t *testing.T) {
for _, tt := range tests {
s, err := state_native.InitializeFromProtoPhase0(tt.state)
require.NoError(t, err)
epoch, churn := MaxExitEpochAndChurn(s)
epoch, churn := validators.MaxExitEpochAndChurn(s)
require.Equal(t, tt.wantedEpoch, epoch)
require.Equal(t, tt.wantedChurn, churn)
}

View File

@@ -4,6 +4,7 @@ go_library(
name = "go_default_library",
srcs = [
"availability.go",
"availability_columns.go",
"cache.go",
"iface.go",
"mock.go",
@@ -20,6 +21,7 @@ go_library(
"//runtime/logging:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],

View File

@@ -0,0 +1,151 @@
package das
import (
"context"
"fmt"
"github.com/ethereum/go-ethereum/p2p/enode"
errors "github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
log "github.com/sirupsen/logrus"
)
// LazilyPersistentStoreColumn is an implementation of AvailabilityStore to be used when batch syncing data columns.
// This implementation will hold any data columns passed to PersistColumns until IsDataAvailable is called for their
// block, at which time they will undergo full verification and be saved to the disk.
type LazilyPersistentStoreColumn struct {
store *filesystem.BlobStorage
cache *cache
verifier ColumnBatchVerifier
nodeID enode.ID
}
type ColumnBatchVerifier interface {
VerifiedRODataColumns(ctx context.Context, blk blocks.ROBlock, sc []blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error)
}
func NewLazilyPersistentStoreColumn(store *filesystem.BlobStorage, verifier ColumnBatchVerifier, id enode.ID) *LazilyPersistentStoreColumn {
return &LazilyPersistentStoreColumn{
store: store,
cache: newCache(),
verifier: verifier,
nodeID: id,
}
}
// Persist is a no-op for this column-only store; callers should use PersistColumns instead.
// TODO: Very ugly - change the interface to allow for both columns and blobs.
func (s *LazilyPersistentStoreColumn) Persist(current primitives.Slot, sc ...blocks.ROBlob) error {
return nil
}
// PersistColumns adds columns to the working column cache. Columns stored in this cache will be persisted
// for at least as long as the node is running. Once IsDataAvailable succeeds, all columns referenced
// by the given block are guaranteed to be persisted for the remainder of the retention period.
func (s *LazilyPersistentStoreColumn) PersistColumns(current primitives.Slot, sc ...blocks.RODataColumn) error {
if len(sc) == 0 {
return nil
}
if len(sc) > 1 {
first := sc[0].BlockRoot()
for i := 1; i < len(sc); i++ {
if first != sc[i].BlockRoot() {
return errMixedRoots
}
}
}
if !params.WithinDAPeriod(slots.ToEpoch(sc[0].Slot()), slots.ToEpoch(current)) {
return nil
}
key := keyFromColumn(sc[0])
entry := s.cache.ensure(key)
for i := range sc {
if err := entry.stashColumns(&sc[i]); err != nil {
return err
}
}
return nil
}
// IsDataAvailable returns nil if all the commitments in the given block are persisted to the db and have been verified.
// DataColumnSidecars already in the db are assumed to have been previously verified against the block.
func (s *LazilyPersistentStoreColumn) IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
blockCommitments, err := fullCommitmentsToCheck(b, current)
if err != nil {
return errors.Wrapf(err, "could check data availability for block %#x", b.Root())
}
// Return early for blocks that are pre-Deneb or which do not have any commitments.
if blockCommitments.count() == 0 {
return nil
}
key := keyFromBlock(b)
entry := s.cache.ensure(key)
defer s.cache.delete(key)
root := b.Root()
sumz, err := s.store.WaitForSummarizer(ctx)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", b.Root())).
WithError(err).
Debug("Failed to receive BlobStorageSummarizer within IsDataAvailable")
} else {
entry.setDiskSummary(sumz.Summary(root))
}
// Verify we have all the expected sidecars, and fail fast if any are missing or inconsistent.
// We don't try to salvage problematic batches because this indicates a misbehaving peer and we'd rather
// ignore their response and decrease their peer score.
sidecars, err := entry.filterColumns(root, blockCommitments)
if err != nil {
return errors.Wrap(err, "incomplete BlobSidecar batch")
}
// Do thorough verifications of each DataColumnSidecar for the block.
// Same as above, we don't save DataColumnSidecars if there are any problems with the batch.
vscs, err := s.verifier.VerifiedRODataColumns(ctx, b, sidecars)
if err != nil {
var me verification.VerificationMultiError
ok := errors.As(err, &me)
if ok {
fails := me.Failures()
lf := make(log.Fields, len(fails))
for i := range fails {
lf[fmt.Sprintf("fail_%d", i)] = fails[i].Error()
}
log.WithFields(lf).
Debug("invalid ColumnSidecars received")
}
return errors.Wrapf(err, "invalid ColumnSidecars received for block %#x", root)
}
// Ensure that each column sidecar is written to disk.
for i := range vscs {
if err := s.store.SaveDataColumn(vscs[i]); err != nil {
return errors.Wrapf(err, "failed to save ColumnSidecar index %d for block %#x", vscs[i].ColumnIndex, root)
}
}
// All ColumnSidecars are persisted - da check succeeds.
return nil
}
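A minimal usage sketch of this store, assuming a verifier, a node ID and a set of column sidecars obtained elsewhere; the helper name checkColumnsAvailable and all of its parameters are illustrative, not part of this diff:
// checkColumnsAvailable is a hypothetical helper showing the intended call order.
// It assumes the usual Prysm imports (context, enode, blocks, primitives, das, filesystem).
func checkColumnsAvailable(
	ctx context.Context,
	current primitives.Slot,
	storage *filesystem.BlobStorage,
	verifier das.ColumnBatchVerifier,
	nodeID enode.ID,
	blk blocks.ROBlock,
	columns []blocks.RODataColumn,
) error {
	store := das.NewLazilyPersistentStoreColumn(storage, verifier, nodeID)
	// Stash every column sidecar received for the block; they must all share one
	// block root and fall within the DA retention period, or nothing is cached.
	if err := store.PersistColumns(current, columns...); err != nil {
		return err
	}
	// Once the block is ready to be imported, verify and persist the cached columns.
	return store.IsDataAvailable(ctx, current, blk)
}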
func fullCommitmentsToCheck(b blocks.ROBlock, current primitives.Slot) (safeCommitmentsArray, error) {
var ar safeCommitmentsArray
if b.Version() < version.Deneb {
return ar, nil
}
// We are only required to check within MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS
if !params.WithinDAPeriod(slots.ToEpoch(b.Block().Slot()), slots.ToEpoch(current)) {
return ar, nil
}
kc, err := b.Block().Body().BlobKzgCommitments()
if err != nil {
return ar, err
}
// Share the block's commitment list across every column index; filterColumns compares each cached
// column's KzgCommitments against this list. (A copy into the nil inner slices would copy nothing
// and leave the array empty, which would short-circuit the availability check.)
for i := range ar {
ar[i] = kc
}
return ar, nil
}
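Because every data column sidecar carries the block's full commitment list, the loop above fills each column slot with the same slice. An illustration of the resulting shape, where c0 and c1 stand for a Deneb-or-later block's blob KZG commitments within the DA window:
//   ar[0]   == [][]byte{c0, c1}
//   ar[1]   == [][]byte{c0, c1}
//   ...
//   ar[fieldparams.NumberOfColumns-1] == [][]byte{c0, c1}
//
// Consequently ar.count() == fieldparams.NumberOfColumns, and IsDataAvailable expects a
// matching DataColumnSidecar (or an on-disk copy) for every column index.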

View File

@@ -2,6 +2,7 @@ package das
import (
"bytes"
"reflect"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
@@ -38,6 +39,10 @@ func keyFromSidecar(sc blocks.ROBlob) cacheKey {
return cacheKey{slot: sc.Slot(), root: sc.BlockRoot()}
}
func keyFromColumn(sc blocks.RODataColumn) cacheKey {
return cacheKey{slot: sc.Slot(), root: sc.BlockRoot()}
}
// keyFromBlock is a convenience method for constructing a cacheKey from a ROBlock value.
func keyFromBlock(b blocks.ROBlock) cacheKey {
return cacheKey{slot: b.Block().Slot(), root: b.Root()}
@@ -61,6 +66,7 @@ func (c *cache) delete(key cacheKey) {
// cacheEntry holds a fixed-length cache of BlobSidecars and DataColumnSidecars.
type cacheEntry struct {
scs [fieldparams.MaxBlobsPerBlock]*blocks.ROBlob
colScs [fieldparams.NumberOfColumns]*blocks.RODataColumn
diskSummary filesystem.BlobStorageSummary
}
@@ -82,6 +88,17 @@ func (e *cacheEntry) stash(sc *blocks.ROBlob) error {
return nil
}
func (e *cacheEntry) stashColumns(sc *blocks.RODataColumn) error {
if sc.ColumnIndex >= fieldparams.NumberOfColumns {
return errors.Wrapf(errIndexOutOfBounds, "index=%d", sc.ColumnIndex)
}
if e.colScs[sc.ColumnIndex] != nil {
return errors.Wrapf(ErrDuplicateSidecar, "root=%#x, index=%d, commitment=%#x", sc.BlockRoot(), sc.ColumnIndex, sc.KzgCommitments)
}
e.colScs[sc.ColumnIndex] = sc
return nil
}
// filter evicts sidecars that are not committed to by the block and returns custom
// errors if the cache is missing any of the commitments, or if the commitments in
// the cache do not match those found in the block. If err is nil, then all expected
@@ -117,6 +134,35 @@ func (e *cacheEntry) filter(root [32]byte, kc safeCommitmentArray) ([]blocks.ROB
return scs, nil
}
// filterColumns is the data column analogue of filter: it skips columns already on disk, returns an
// error if the cached columns are missing or inconsistent with the block's commitments, and otherwise
// returns the cached columns that still need verification and persistence.
func (e *cacheEntry) filterColumns(root [32]byte, kc safeCommitmentsArray) ([]blocks.RODataColumn, error) {
if e.diskSummary.AllAvailable(kc.count()) {
return nil, nil
}
scs := make([]blocks.RODataColumn, 0, kc.count())
for i := uint64(0); i < fieldparams.NumberOfColumns; i++ {
// We already have this column on disk, so we don't need to write or validate it.
if e.diskSummary.HasIndex(i) {
continue
}
if kc[i] == nil {
if e.colScs[i] != nil {
return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, no block commitment", root, i, e.scs[i].KzgCommitment)
}
continue
}
if e.colScs[i] == nil {
return nil, errors.Wrapf(errMissingSidecar, "root=%#x, index=%#x", root, i)
}
if !reflect.DeepEqual(kc[i], e.colScs[i].KzgCommitments) {
return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, block commitment=%#x", root, i, e.colScs[i].KzgCommitments, kc[i])
}
scs = append(scs, *e.colScs[i])
}
return scs, nil
}
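In summary, the per-index outcomes of filterColumns map onto the error values as follows (annotation matching the code above):
// Per-index outcome in filterColumns:
//
//   already on disk (diskSummary)               -> skipped; not re-verified or re-written
//   no block commitment, but a cached column    -> errCommitmentMismatch
//   block commitment, but no cached column      -> errMissingSidecar
//   cached commitments differ from the block's  -> errCommitmentMismatch
//   cached commitments match the block's        -> column returned for batch verification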
// safeCommitmentArray is a fixed size array of commitment byte slices. This is helpful for avoiding
// gratuitous bounds checks.
type safeCommitmentArray [fieldparams.MaxBlobsPerBlock][]byte
@@ -129,3 +175,14 @@ func (s safeCommitmentArray) count() int {
}
return fieldparams.MaxBlobsPerBlock
}
// safeCommitmentsArray is a fixed-size array holding, per column index, the block's KZG commitment
// list. Like safeCommitmentArray, it avoids gratuitous bounds checks.
type safeCommitmentsArray [fieldparams.NumberOfColumns][][]byte
func (s safeCommitmentsArray) count() int {
for i := range s {
if s[i] == nil {
return i
}
}
return fieldparams.NumberOfColumns
}
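count is the length of the leading run of non-nil entries, so it only reflects contiguous population from index 0; a small illustration with hypothetical values:
var s safeCommitmentsArray
s[0] = [][]byte{{0x01}}
s[1] = [][]byte{{0x01}}
_ = s.count() // == 2: s[2] is the first nil entry, so the scan stops there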

View File

@@ -13,6 +13,7 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem",
visibility = ["//visibility:public"],
deps = [
"//async/event:go_default_library",
"//beacon-chain/verification:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",

View File

@@ -12,6 +12,7 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/async/event"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
@@ -39,8 +40,15 @@ const (
directoryPermissions = 0700
)
// BlobStorageOption is a functional option for configuring a BlobStorage.
type BlobStorageOption func(*BlobStorage) error
type (
// BlobStorageOption is a functional option for configuring a BlobStorage.
BlobStorageOption func(*BlobStorage) error
// RootIndexPair couples a block root with a data column index; it is the payload
// sent on BlobStorage.DataColumnFeed whenever a data column sidecar is saved.
RootIndexPair struct {
Root [fieldparams.RootLength]byte
Index uint64
}
)
// WithBasePath is a required option that sets the base path of blob storage.
func WithBasePath(base string) BlobStorageOption {
@@ -70,7 +78,10 @@ func WithSaveFsync(fsync bool) BlobStorageOption {
// attempt to hold a file lock to guarantee exclusive control of the blob storage directory, so this should only be
// initialized once per beacon node.
func NewBlobStorage(opts ...BlobStorageOption) (*BlobStorage, error) {
b := &BlobStorage{}
b := &BlobStorage{
DataColumnFeed: new(event.Feed),
}
for _, o := range opts {
if err := o(b); err != nil {
return nil, errors.Wrap(err, "failed to create blob storage")
@@ -99,6 +110,7 @@ type BlobStorage struct {
fsync bool
fs afero.Fs
pruner *blobPruner
DataColumnFeed *event.Feed
}
// WarmCache runs the prune routine with an expiration of slot of 0, so nothing will be pruned, but the pruner's cache
@@ -221,6 +233,110 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
return nil
}
// SaveDataColumn saves a data column to our local filesystem.
func (bs *BlobStorage) SaveDataColumn(column blocks.VerifiedRODataColumn) error {
startTime := time.Now()
fname := namerForDataColumn(column)
sszPath := fname.path()
exists, err := afero.Exists(bs.fs, sszPath)
if err != nil {
return err
}
if exists {
log.Trace("Ignoring a duplicate data column sidecar save attempt")
return nil
}
if bs.pruner != nil {
hRoot, err := column.SignedBlockHeader.Header.HashTreeRoot()
if err != nil {
return err
}
if err := bs.pruner.notify(hRoot, column.SignedBlockHeader.Header.Slot, column.ColumnIndex); err != nil {
return errors.Wrapf(err, "problem maintaining pruning cache/metrics for sidecar with root=%#x", hRoot)
}
}
// Serialize the ethpb.DataColumnSidecar to binary data using SSZ.
sidecarData, err := column.MarshalSSZ()
if err != nil {
return errors.Wrap(err, "failed to serialize sidecar data")
} else if len(sidecarData) == 0 {
return errSidecarEmptySSZData
}
if err := bs.fs.MkdirAll(fname.dir(), directoryPermissions); err != nil {
return err
}
partPath := fname.partPath(fmt.Sprintf("%p", sidecarData))
partialMoved := false
// Ensure the partial file is deleted.
defer func() {
if partialMoved {
return
}
// Removal is expected to fail if the save succeeded, because the partial file has already been renamed.
err = bs.fs.Remove(partPath)
if err == nil {
log.WithFields(logrus.Fields{
"partPath": partPath,
}).Debugf("Removed partial file")
}
}()
// Create a partial file and write the serialized data to it.
partialFile, err := bs.fs.Create(partPath)
if err != nil {
return errors.Wrap(err, "failed to create partial file")
}
n, err := partialFile.Write(sidecarData)
if err != nil {
closeErr := partialFile.Close()
if closeErr != nil {
return closeErr
}
return errors.Wrap(err, "failed to write to partial file")
}
if bs.fsync {
if err := partialFile.Sync(); err != nil {
return err
}
}
if err := partialFile.Close(); err != nil {
return err
}
if n != len(sidecarData) {
return fmt.Errorf("failed to write the full bytes of sidecarData, wrote only %d of %d bytes", n, len(sidecarData))
}
if n == 0 {
return errEmptyBlobWritten
}
// Atomically rename the partial file to its final name.
err = bs.fs.Rename(partPath, sszPath)
if err != nil {
return errors.Wrap(err, "failed to rename partial file to final name")
}
partialMoved = true
// Notify DataColumnFeed subscribers that a new data column has been saved.
bs.DataColumnFeed.Send(RootIndexPair{
Root: column.BlockRoot(),
Index: column.ColumnIndex,
})
// TODO: Use new metrics for data columns
blobsWrittenCounter.Inc()
blobSaveLatency.Observe(float64(time.Since(startTime).Milliseconds()))
return nil
}
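A minimal subscriber sketch for DataColumnFeed; it assumes the Feed in async/event exposes the go-ethereum-style Subscribe API, and that context, fmt, logrus and the filesystem package are imported. The function name and channel size are illustrative only:
// watchSavedColumns is a hypothetical consumer of BlobStorage.DataColumnFeed.
func watchSavedColumns(ctx context.Context, bs *filesystem.BlobStorage) {
	ch := make(chan filesystem.RootIndexPair, 16)
	sub := bs.DataColumnFeed.Subscribe(ch)
	defer sub.Unsubscribe()
	for {
		select {
		case pair := <-ch:
			// A verified data column sidecar was just written to disk.
			logrus.WithFields(logrus.Fields{
				"root":  fmt.Sprintf("%#x", pair.Root),
				"index": pair.Index,
			}).Debug("Data column saved")
		case <-sub.Err():
			return
		case <-ctx.Done():
			return
		}
	}
}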
// Get retrieves a single BlobSidecar by its root and index.
// Since BlobStorage only writes blobs that have undergone full verification, the return
// value is always a VerifiedROBlob.
@@ -246,6 +362,20 @@ func (bs *BlobStorage) Get(root [32]byte, idx uint64) (blocks.VerifiedROBlob, er
return verification.BlobSidecarNoop(ro)
}
// GetColumn retrieves a single DataColumnSidecar by its root and index.
func (bs *BlobStorage) GetColumn(root [32]byte, idx uint64) (*ethpb.DataColumnSidecar, error) {
expected := blobNamer{root: root, index: idx}
encoded, err := afero.ReadFile(bs.fs, expected.path())
if err != nil {
return nil, err
}
s := &ethpb.DataColumnSidecar{}
if err := s.UnmarshalSSZ(encoded); err != nil {
return nil, err
}
return s, nil
}
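A short round-trip sketch pairing SaveDataColumn with GetColumn; verifiedColumn is an illustrative blocks.VerifiedRODataColumn that has already been saved:
// After SaveDataColumn(verifiedColumn) succeeds, the sidecar can be read back by root and index.
sidecar, err := bs.GetColumn(verifiedColumn.BlockRoot(), verifiedColumn.ColumnIndex)
if err != nil {
	return err
}
_ = sidecar // *ethpb.DataColumnSidecar, deserialized from the on-disk SSZ file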
// Remove removes all blobs for a given root.
func (bs *BlobStorage) Remove(root [32]byte) error {
rootDir := blobNamer{root: root}.dir()
@@ -289,6 +419,61 @@ func (bs *BlobStorage) Indices(root [32]byte) ([fieldparams.MaxBlobsPerBlock]boo
return mask, nil
}
// ColumnIndices retrieves the stored column indices for the given root from our filesystem.
func (bs *BlobStorage) ColumnIndices(root [32]byte) (map[uint64]bool, error) {
custody := make(map[uint64]bool, fieldparams.NumberOfColumns)
// Get all the files in the directory.
rootDir := blobNamer{root: root}.dir()
entries, err := afero.ReadDir(bs.fs, rootDir)
if err != nil {
// If the directory does not exist, we do not custody any columns.
if os.IsNotExist(err) {
return nil, nil
}
return nil, errors.Wrap(err, "read directory")
}
// Iterate over all the entries in the directory.
for _, entry := range entries {
// If the entry is a directory, skip it.
if entry.IsDir() {
continue
}
// If the entry does not have the correct extension, skip it.
name := entry.Name()
if !strings.HasSuffix(name, sszExt) {
continue
}
// The file should be in the `<index>.<extension>` format.
// Skip the file if it does not match the format.
parts := strings.Split(name, ".")
if len(parts) != 2 {
continue
}
// Get the column index from the file name.
columnIndexStr := parts[0]
columnIndex, err := strconv.ParseUint(columnIndexStr, 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "unexpected directory entry breaks listing, %s", parts[0])
}
// If the column index is out of bounds, return an error.
if columnIndex >= fieldparams.NumberOfColumns {
return nil, errors.Wrapf(errIndexOutOfBounds, "invalid index %d", columnIndex)
}
// Mark the column index as in custody.
custody[columnIndex] = true
}
return custody, nil
}
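For example, with files 3.ssz and 7.ssz present under a block's root directory, the returned map contains exactly those two indices. A hypothetical caller, assuming a logrus import and an illustrative blockRoot:
custodied, err := bs.ColumnIndices(blockRoot)
if err != nil {
	return err
}
for idx := range custodied {
	// idx is a column index for which an <index>.ssz file exists under the block's directory.
	logrus.WithField("column", idx).Debug("Data column on disk")
}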
// Clear deletes all files on the filesystem.
func (bs *BlobStorage) Clear() error {
dirs, err := listDir(bs.fs, ".")
@@ -321,6 +506,10 @@ func namerForSidecar(sc blocks.VerifiedROBlob) blobNamer {
return blobNamer{root: sc.BlockRoot(), index: sc.Index}
}
func namerForDataColumn(col blocks.VerifiedRODataColumn) blobNamer {
return blobNamer{root: col.BlockRoot(), index: col.ColumnIndex}
}
func (p blobNamer) dir() string {
return rootString(p.root)
}

View File

@@ -9,7 +9,7 @@ import (
)
// blobIndexMask is a bitmask representing the set of blob indices that are currently set.
type blobIndexMask [fieldparams.MaxBlobsPerBlock]bool
type blobIndexMask [fieldparams.NumberOfColumns]bool
// BlobStorageSummary represents cached information about the BlobSidecars on disk for each root the cache knows about.
type BlobStorageSummary struct {
@@ -68,9 +68,12 @@ func (s *blobStorageCache) Summary(root [32]byte) BlobStorageSummary {
}
func (s *blobStorageCache) ensure(key [32]byte, slot primitives.Slot, idx uint64) error {
if idx >= fieldparams.MaxBlobsPerBlock {
return errIndexOutOfBounds
}
// TODO: Separate blob index checks from data column index checks
/*
if idx >= fieldparams.MaxBlobsPerBlock {
return errIndexOutOfBounds
}
*/
s.mu.Lock()
defer s.mu.Unlock()
v := s.cache[key]

View File

@@ -9,6 +9,7 @@ import (
)
func TestSlotByRoot_Summary(t *testing.T) {
t.Skip("Use new test for data columns")
var noneSet, allSet, firstSet, lastSet, oneSet blobIndexMask
firstSet[0] = true
lastSet[len(lastSet)-1] = true

Some files were not shown because too many files have changed in this diff.