Compare commits

...

46 Commits

Author SHA1 Message Date
Kasey Kirkham
5dd6d8de49 rm deprecated blob type and db methods 2024-01-09 12:28:55 -06:00
Nishant Das
75bbeb61cc Add Detailed Multi Value Metrics (#13429)
* add it

* pingo

* gaz

* remove pingo

* fix for old forks
2024-01-09 05:16:03 +00:00
kasey
5cea6bebb8 minimize syscalls in pruning routine (#13425)
* minimize syscalls in pruning routine

* terence feedback

* Update beacon-chain/db/filesystem/pruner.go

Co-authored-by: Justin Traglia <95511699+jtraglia@users.noreply.github.com>

* pr feedback

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
Co-authored-by: Justin Traglia <95511699+jtraglia@users.noreply.github.com>
2024-01-08 22:31:16 +00:00
Potuz
28596d669b Use proposer index cache for blob verification (#13423)
* Use proposer index cache for blob verification

* add unit test

* Fix test

---------

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2024-01-07 03:24:07 +00:00
kasey
0e043d55b4 VerifiedROBlobs in initial-sync (#13351)
* Use VerifiedROBlobs in initial-sync

* Update beacon-chain/das/cache.go

Co-authored-by: Justin Traglia <95511699+jtraglia@users.noreply.github.com>

* Apply suggestions from code review

comment fixes

Co-authored-by: Justin Traglia <95511699+jtraglia@users.noreply.github.com>

* fix lint error from gh web ui

* deepsource fixes

* more deepsource

* fix init wiring

* mark blobless blocks verified in batch mode

* move sig check after parent checks

* validate block commitment length at start of da check

* remove vestigial locking

* rm more copy-locksta

* rm old comment

* fail the entire batch if any sidecar fails

* lint

* skip redundant checks, fix len check

* assume sig and proposer checks passed for block

* inherits most checks from processed block

* Assume block processing handles most checks

* lint

* cleanup unused call and gaz

* more detailed logging for e2e

* fix bad refactor breaking non-finalized init-sync

* self-review cleanup

* gaz

* Update beacon-chain/verification/blob.go

Co-authored-by: Justin Traglia <95511699+jtraglia@users.noreply.github.com>

* terence and justin feedback

---------

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
Co-authored-by: Justin Traglia <95511699+jtraglia@users.noreply.github.com>
2024-01-06 23:47:09 +00:00
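A minimal Go sketch of the "fail the entire batch if any sidecar fails" behavior noted in this commit; ROBlob, VerifiedROBlob, and verifyOne are illustrative stand-ins, not Prysm's actual types:

package main

import "fmt"

type ROBlob struct{ Index uint64 }
type VerifiedROBlob struct{ ROBlob }

// verifyOne stands in for the per-sidecar verification pipeline.
func verifyOne(sc ROBlob) (VerifiedROBlob, error) { return VerifiedROBlob{sc}, nil }

// verifyBatch returns verified sidecars only if every input passes;
// one bad sidecar invalidates the whole batch, as the commit describes.
func verifyBatch(scs []ROBlob) ([]VerifiedROBlob, error) {
    out := make([]VerifiedROBlob, 0, len(scs))
    for _, sc := range scs {
        v, err := verifyOne(sc)
        if err != nil {
            return nil, fmt.Errorf("sidecar %d: %w", sc.Index, err)
        }
        out = append(out, v)
    }
    return out, nil
}

func main() {
    vs, err := verifyBatch([]ROBlob{{Index: 0}, {Index: 1}})
    fmt.Println(len(vs), err)
}
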
Radosław Kapka
8d092a1113 Revert "REST VC: Subscribe to Beacon API events (#13354)" (#13428)
This reverts commit e68b2821c1.
2024-01-06 21:36:42 +00:00
kasey
073c4edc5f use ROForkchoice in blob verifier (#13426)
Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2024-01-06 19:39:03 +00:00
terence
d055db1c31 Unlock forkchoice store if attribute is empty (#13427)
* Unlock forkchoice store if attribute is empty

* Better version
2024-01-06 07:32:56 +00:00
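A minimal sketch of the locking pattern this fix implies, assuming the bug was an early-return path that skipped unlocking: evaluate the empty-attribute condition before taking the read lock, and let defer guarantee the unlock:

package main

import (
    "fmt"
    "sync"
)

type store struct{ mu sync.RWMutex }

// notifyIfNeeded decides whether work is needed before taking the
// forkchoice read lock, so no return path can leave the store locked.
func (s *store) notifyIfNeeded(attrsEmpty bool, notify func()) {
    if attrsEmpty {
        return // nothing to do; the lock is never taken
    }
    s.mu.RLock()
    defer s.mu.RUnlock()
    notify()
}

func main() {
    s := &store{}
    s.notifyIfNeeded(false, func() { fmt.Println("engine notified") })
}
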
Nishant Das
a974627258 Make Aggregating In Parallel The Permanent Default (#13407)
* make it the permanent default

* gaz
2024-01-06 07:29:06 +00:00
Potuz
67dccc5e43 Break out several helpers from postBlockProcess (#13419)
* Break out several helpers from `postBlockProcess`

In addition, fix a bug found by @terencechain: we should use a slot
context instead of the parent context in the second FCU call.

* Remove calls for tracked proposer

getPayloadAttribute already takes care of this.
Also correctly compute the time into the voting window.

* call with attributes only when incoming block is canonical

* check for empty payload instead of only nil

* add unit tests

* move log for non-canonical block

* return early if the incoming block does not change head

* Pass fcuArgs as arguments

* lint
2024-01-06 02:29:07 +00:00
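The slot-context fix mentioned above reappears later in this diff (see sendFCUWithAttributes); a condensed sketch of the pattern, with slotDeadline and the engine call stubbed:

package main

import (
    "context"
    "fmt"
    "time"
)

const slotDeadline = 4 * time.Second // placeholder value for illustration

func notifyEngine(ctx context.Context) error { return ctx.Err() }

// secondFCU must not inherit the parent request context, which may already
// be cancelled by the time the second call is made; it gets a fresh context
// bounded by the slot instead.
func secondFCU(parent context.Context) {
    _ = parent // the bug was passing this through
    slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
    defer cancel()
    if err := notifyEngine(slotCtx); err != nil {
        fmt.Println("fcu failed:", err)
    }
}

func main() { secondFCU(context.Background()) }
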
terence
ff06e08274 Prune dangling blob (#13424)
* Prune dangling blob

* Fix test

* Kasey's feedback

* Preston's feedback

* Use warning, fix test
2024-01-05 22:29:57 +00:00
james-prysm
d3d25e3ae5 proposer and attester slashing sse (#13414)
* wip

* adding in event notifiers for slashing events

* fixing tests
2024-01-05 15:27:50 +00:00
Nishant Das
929e9ddf4c enable it (#13421) 2024-01-05 05:23:22 +00:00
Nishant Das
7c0e79d432 Make New Engine Methods The Permanent Default (#13406)
* make them the default

* gaz

* fix tests
2024-01-05 04:38:04 +00:00
terence
3c1c0b3c00 Update blob pruning log (#13417) 2024-01-04 18:02:19 +00:00
james-prysm
d439e6da74 adding builder boost factor to get block v3 (#13409)
* adding builder boost factor to functions

* gaz

* fixing linting

* fixing unit tests

* gaz

* addressing review comments

* fixing tests

* addressing review feedback

* gaz

* changing log based on review
2024-01-04 17:25:18 +00:00
Radosław Kapka
e68b2821c1 REST VC: Subscribe to Beacon API events (#13354)
* Initial code for head event streaming

* handle events and error

* keepalive event

* tests

* generate new mock

* remove single case select

* cleanup

* explain eventByteLimit

* use 2 channels in test

* review

* more review

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2024-01-04 17:14:45 +00:00
Potuz
cfef8f4676 Don't hardcode 4 seconds in forkchoice (#13416) 2024-01-04 16:49:16 +00:00
terence
9709412511 Use Afero Walk for Pruning Blob (#13410)
* Use Afero walk

* Return err

* Use wrap

* More err to the end

* Fix loop
2024-01-04 16:41:00 +00:00
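afero's Walk visits files through the afero.Fs interface, which also makes the pruner testable against an in-memory filesystem. A sketch, assuming blob files end in .ssz and that the caller supplies the pruning predicate:

package main

import (
    "fmt"
    "os"
    "strings"

    "github.com/spf13/afero"
)

// pruneBlobs walks the blob directory via the afero abstraction, as the
// commit above does, rather than issuing per-directory syscalls directly.
// The ".ssz" filter and shouldPrune predicate are assumptions.
func pruneBlobs(fs afero.Fs, root string, shouldPrune func(path string) bool) error {
    return afero.Walk(fs, root, func(path string, info os.FileInfo, err error) error {
        if err != nil {
            return err
        }
        if info.IsDir() || !strings.HasSuffix(path, ".ssz") {
            return nil
        }
        if shouldPrune(path) {
            return fs.Remove(path)
        }
        return nil
    })
}

func main() {
    fs := afero.NewMemMapFs() // tests can swap in an in-memory FS
    _ = afero.WriteFile(fs, "/blobs/old.ssz", nil, 0o600)
    fmt.Println(pruneBlobs(fs, "/blobs", func(string) bool { return true }))
}
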
james-prysm
7781eb60f4 Add rpc trigger for blob sidecar event (#13411)
* adding missed rpc trigger for blob sidecar event

* fixing unit tests

* moving event feed to after receive blob call to prioritize db
2024-01-04 14:22:24 +00:00
Potuz
396b8bf970 Simplify fcu 4 (#13403)
* send two FCU when proposing

* compute voting window at runtime
2024-01-04 13:43:57 +00:00
Nishant Das
d5107942a1 update it (#13415) 2024-01-04 11:22:23 +00:00
terence
bd4a520013 Initialize blob storage without pruning (#13412) 2024-01-04 05:56:38 +00:00
Sammy Rosso
a0ff1351a0 Fix batch pruning errors (#13355)
* Add compareAndSwap

* Update lastPrunedEpoch before prune

* Fix and test

* Remove debug log

* Kasey' review

* Fix tests

* Address Kasey's comments

* Fix prune before slot

* Rename

* Fix bad test

---------

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2024-01-03 20:52:07 +00:00
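The compareAndSwap and "update lastPrunedEpoch before prune" ideas above can be sketched with sync/atomic; the pruner type here is illustrative, not Prysm's:

package main

import (
    "fmt"
    "sync/atomic"
)

type pruner struct{ lastPrunedEpoch atomic.Uint64 }

// tryPrune claims the epoch first, so that concurrent callers racing on the
// same epoch skip the work; only the CAS winner performs the prune.
func (p *pruner) tryPrune(epoch uint64) bool {
    last := p.lastPrunedEpoch.Load()
    if epoch <= last {
        return false // already pruned up to this epoch
    }
    if !p.lastPrunedEpoch.CompareAndSwap(last, epoch) {
        return false // another caller won the race
    }
    // ... perform pruning here ...
    return true
}

func main() {
    p := &pruner{}
    fmt.Println(p.tryPrune(5), p.tryPrune(5)) // true false
}
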
Nishant Das
7e6fd5fd8b Make Reorging Of Late Blocks The Permanent Default (#13405)
* make it the permanent default

* gaz

* fix merge conflicts
2024-01-03 14:46:58 +00:00
Nishant Das
d984210baa fix it (#13404) 2024-01-03 13:54:18 +00:00
Potuz
31c72672d7 Remove the getPayloadAttribute call from updateForkchoiceWithExecution (#13402)
* Remove the getPayloadAttribute call from updateForkchoiceWithExecution

* Move log
2024-01-03 12:43:40 +00:00
Potuz
8c1e180dd1 Simplify fcu 2 (#13400)
* change getPayloadAttribute signature

* Unify different FCU arguments
2024-01-02 22:45:55 +00:00
Manu NALEPA
886d76fe7c Refactor validator client help. (#13401)
* Define `cli.App` without mutation.

No functional change.

* `usage.go`:  Clean `appHelpTemplate`.

No functional change.
Modifications consist of adding prefix/suffix `-` to improve readability of
the template without adding new lines to the rendered template.

We now see some inconsistencies in the template:
- `if .App.Version` is around the `AUTHOR` section.
- `if .App.Copyright` is around both `COPYRIGHT` and `VERSION` sections.
- `if len .App.Authors` is around nothing.

* `usage.go`: Surround version and author correctly.

* `usage.go`: `AUTHOR` ==> `AUTHORS`

* `usage.go`: `GLOBAL` --> `global`.

* `--grpc-max-msg-size`: Remove double default.

* VC: Standardize help message.

- Flag help messages begin with a capital letter and end with a period.
- If a flag help message begins with a verb, it is conjugated.
- Experimental, danger, etc. mentions are placed between parentheses.

* VC help message: Wrap overly long lines.
2024-01-02 18:02:28 +00:00
Potuz
a602acf492 Remove getPayloadAttributes from FCU call (#13399) 2024-01-02 17:37:18 +00:00
terence
1b6547de6a Add Goerli Deneb Fork Epoch (#13390)
* Add deneb fork epoch

* Fix test
2024-01-02 15:31:57 +00:00
Nishant Das
88685bb3bd Fix Up Builder Evaluator (#13395)
* fix it up

* fix evaluator

* fix evaluator again

* fix it

* gaz
2024-01-02 10:40:26 +00:00
Nishant Das
2319b7d4bd increase params (#13398) 2024-01-02 10:19:59 +00:00
Manu NALEPA
82b2840d68 --validatorS-registration-batch-size (add s) (#13396) 2024-01-02 09:52:14 +00:00
Manu NALEPA
cf221d0f4c Validator client: Always use the --datadir value. (#13392)
Fix https://github.com/prysmaticlabs/prysm/issues/13391
2024-01-02 09:24:24 +00:00
Preston Van Loon
0956e3a657 Update libp2p/go-libp2p-asn-util to v0.4.1 (#13370)
* Update go-libp2p-asn-util to v0.4.1

* fix go mod

---------

Co-authored-by: nisdas <nishdas93@gmail.com>
2024-01-02 07:30:50 +00:00
terence
351ed1c511 Check kzg commitment count from builder (#13394) 2024-01-02 06:50:23 +00:00
Potuz
9809f5ac77 Simplify fcu 1 (#13387)
* Remove unsafe proposer indices cache

* Simplify FCU #1

This PR starts the process of gradually simplifying FCU.
It removes the responsibility of getting the state and block from this
function, and of informing whether head has changed. It is now only called
when the imported block has actually become head.

* Add a call to FCU in edge cases
2023-12-30 12:20:20 +00:00
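A sketch of the resulting contract: the caller resolves the head state and block up front and only notifies the engine when the imported block actually became head. The struct mirrors the fcuConfig introduced in the diffs below; everything else is illustrative:

package main

import "fmt"

type beaconState struct{}
type signedBlock struct{}

// fcuConfig mirrors the argument struct introduced in this changeset:
// the caller resolves head state, block, and root before calling FCU.
type fcuConfig struct {
    headState *beaconState
    headBlock *signedBlock
    headRoot  [32]byte
}

func isNewHead(root [32]byte) bool { return true } // stub

// maybeSendFCU notifies the engine only when the imported block became head.
func maybeSendFCU(args *fcuConfig) error {
    if !isNewHead(args.headRoot) {
        return nil
    }
    fmt.Printf("notifying engine for head %x\n", args.headRoot[:4])
    return nil
}

func main() {
    _ = maybeSendFCU(&fcuConfig{headRoot: [32]byte{0xaa}, headState: &beaconState{}, headBlock: &signedBlock{}})
}
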
Potuz
cff5e2b5fe Remove unsafe proposer indices cache (#13385) 2023-12-30 12:20:02 +00:00
terence
dd15f9e0cc Rewrite ProposeBlock endpoint (#13380)
* Init

* Tests

* Init

* Tests

* Radek's feedback

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* More Radek's feedback

* Potuz feedback

* Use inline copy

* Fix conflict

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2023-12-29 23:32:58 +00:00
terence
1c9ded4684 Remove blind field from block type (#13389)
* Init

* Init

* Fix tests
2023-12-29 21:28:19 +00:00
Potuz
d4cc6fcf4a update shuffling caches before calling FCU on epoch boundaries (#13383)
* update shuffling caches before calling FCU on epoch boundaries

* Terence's review
2023-12-28 15:19:09 +00:00
Radosław Kapka
49c16f1a71 Return SignedBeaconBlock from ReadOnlySignedBeaconBlock.Copy (#13386) 2023-12-28 08:55:45 +00:00
terence
e70b606e78 Replace validator count with validator indices in update fee recipient log (#13384)
* Add validator count to updated fee recipient address log

* Add validator count to updated fee recipient address log

* Replace
2023-12-27 16:46:15 +00:00
Potuz
0e8b37c317 Log value of local payload when proposing (#13381) 2023-12-27 14:43:32 +00:00
Potuz
e80db9554d Use advanced epoch cache when preparing proposals (#13377) 2023-12-27 12:42:51 +00:00
176 changed files with 4246 additions and 3740 deletions

View File

@@ -322,6 +322,22 @@ filegroup(
url = "https://github.com/eth-clients/eth2-networks/archive/7b4897888cebef23801540236f73123e21774954.tar.gz",
)
http_archive(
name = "goerli_testnet",
build_file_content = """
filegroup(
name = "configs",
srcs = [
"prater/config.yaml",
],
visibility = ["//visibility:public"],
)
""",
sha256 = "43fc0f55ddff7b511713e2de07aa22846a67432df997296fb4fc09cd8ed1dcdb",
strip_prefix = "goerli-6522ac6684693740cd4ddcc2a0662e03702aa4a1",
url = "https://github.com/eth-clients/goerli/archive/6522ac6684693740cd4ddcc2a0662e03702aa4a1.tar.gz",
)
http_archive(
name = "holesky_testnet",
build_file_content = """

View File

@@ -275,7 +275,24 @@ func TestClient_GetHeader(t *testing.T) {
require.Equal(t, len(kcgCommitments[i]) == 48, true)
}
})
t.Run("deneb, too many kzg commitments", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, expectedPath, r.URL.Path)
return &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(bytes.NewBufferString(testExampleHeaderResponseDenebTooManyBlobs)),
Request: r.Clone(ctx),
}, nil
}),
}
c := &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
_, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
require.ErrorContains(t, "could not extract proto message from header: too many blob commitments: 7", err)
})
t.Run("unsupported version", func(t *testing.T) {
hc := &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
@@ -412,7 +429,7 @@ func TestSubmitBlindedBlock(t *testing.T) {
require.ErrorContains(t, "not a bellatrix payload", err)
})
t.Run("not blinded", func(t *testing.T) {
sbb, err := blocks.NewSignedBeaconBlock(&eth.SignedBeaconBlockBellatrix{Block: &eth.BeaconBlockBellatrix{Body: &eth.BeaconBlockBodyBellatrix{}}})
sbb, err := blocks.NewSignedBeaconBlock(&eth.SignedBeaconBlockBellatrix{Block: &eth.BeaconBlockBellatrix{Body: &eth.BeaconBlockBodyBellatrix{ExecutionPayload: &v1.ExecutionPayload{}}}})
require.NoError(t, err)
_, _, err = (&Client{}).SubmitBlindedBlock(ctx, sbb)
require.ErrorIs(t, err, errNotBlinded)

View File

@@ -908,6 +908,9 @@ func (bb *BuilderBidDeneb) ToProto() (*eth.BuilderBidDeneb, error) {
if err != nil {
return nil, err
}
if len(bb.BlobKzgCommitments) > fieldparams.MaxBlobsPerBlock {
return nil, fmt.Errorf("too many blob commitments: %d", len(bb.BlobKzgCommitments))
}
kzgCommitments := make([][]byte, len(bb.BlobKzgCommitments))
for i, commit := range bb.BlobKzgCommitments {
if len(commit) != fieldparams.BLSPubkeyLength {
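The hunk above bounds-checks an untrusted, builder-supplied list before converting it. The same pattern in isolation, with the Deneb limit stubbed in place of fieldparams.MaxBlobsPerBlock:

package main

import "fmt"

const maxBlobsPerBlock = 6 // stand-in for fieldparams.MaxBlobsPerBlock

// commitmentsToProto validates the untrusted list length before allocating
// or copying, mirroring the guard added in the hunk above.
func commitmentsToProto(in [][]byte) ([][]byte, error) {
    if len(in) > maxBlobsPerBlock {
        return nil, fmt.Errorf("too many blob commitments: %d", len(in))
    }
    out := make([][]byte, len(in))
    for i, c := range in {
        out[i] = append([]byte(nil), c...) // defensive copy
    }
    return out, nil
}

func main() {
    _, err := commitmentsToProto(make([][]byte, 7))
    fmt.Println(err) // too many blob commitments: 7
}
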

View File

@@ -209,6 +209,39 @@ var testExampleHeaderResponseUnknownVersion = `{
}
}`
var testExampleHeaderResponseDenebTooManyBlobs = `{
"version": "deneb",
"data": {
"message": {
"header": {
"parent_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"fee_recipient": "0xabcf8e0d4e9587369b2301d0790347320302cc09",
"state_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"receipts_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"logs_bloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"prev_randao": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"block_number": "1",
"gas_limit": "1",
"gas_used": "1",
"timestamp": "1",
"extra_data": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"base_fee_per_gas": "452312848583266388373324160190187140051835877600158453279131187530910662656",
"block_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"transactions_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"withdrawals_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
"blob_gas_used": "1",
"excess_blob_gas": "2"
},
"blob_kzg_commitments": [
"","","","","","",""
],
"value": "652312848583266388373324160190187140051835877600158453279131187530910662656",
"pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
},
"signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
}
}`
func TestExecutionHeaderResponseUnmarshal(t *testing.T) {
hr := &ExecHeaderResponse{}
require.NoError(t, json.Unmarshal([]byte(testExampleHeaderResponse), hr))

View File

@@ -50,6 +50,7 @@ go_library(
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/das:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/filesystem:go_default_library",
"//beacon-chain/db/filters:go_default_library",
@@ -141,6 +142,7 @@ go_test(
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/das:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/filesystem:go_default_library",
"//beacon-chain/db/testing:go_default_library",

View File

@@ -7,6 +7,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
@@ -31,21 +32,18 @@ const blobCommitmentVersionKZG uint8 = 0x01
var defaultLatestValidHash = bytesutil.PadTo([]byte{0xff}, 32)
// notifyForkchoiceUpdateArg is the argument for the forkchoice update notification `notifyForkchoiceUpdate`.
type notifyForkchoiceUpdateArg struct {
headState state.BeaconState
headRoot [32]byte
headBlock interfaces.ReadOnlyBeaconBlock
}
// notifyForkchoiceUpdate signals the execution engine of fork choice updates. The execution engine should:
// 1. Re-organize the execution payload chain and corresponding state to make head_block_hash the head.
// 2. Apply finality to the execution state: it irreversibly persists the chain of all execution payloads and corresponding state, up to and including finalized_block_hash.
func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkchoiceUpdateArg) (*enginev1.PayloadIDBytes, error) {
func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*enginev1.PayloadIDBytes, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.notifyForkchoiceUpdate")
defer span.End()
headBlk := arg.headBlock
if arg.headBlock.IsNil() {
log.Error("Head block is nil")
return nil, nil
}
headBlk := arg.headBlock.Block()
if headBlk == nil || headBlk.IsNil() || headBlk.Body().IsNil() {
log.Error("Head block is nil")
return nil, nil
@@ -71,11 +69,10 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
SafeBlockHash: justifiedHash[:],
FinalizedBlockHash: finalizedHash[:],
}
nextSlot := s.CurrentSlot() + 1 // Cache payload ID for next slot proposer.
hasAttr, attr := s.getPayloadAttribute(ctx, arg.headState, nextSlot, arg.headRoot[:])
payloadID, lastValidHash, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, attr)
if arg.attributes == nil {
arg.attributes = payloadattribute.EmptyWithVersion(headBlk.Version())
}
payloadID, lastValidHash, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, arg.attributes)
if err != nil {
switch err {
case execution.ErrAcceptedSyncingPayloadStatus:
@@ -121,10 +118,11 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
log.WithError(err).Error("Could not get head state")
return nil, nil
}
pid, err := s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
headState: st,
headRoot: r,
headBlock: b.Block(),
pid, err := s.notifyForkchoiceUpdate(ctx, &fcuConfig{
headState: st,
headRoot: r,
headBlock: b,
attributes: arg.attributes,
})
if err != nil {
return nil, err // Returning err because it's recursive here.
@@ -153,6 +151,8 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
return nil, nil
}
// If the forkchoice update call has an attribute, update the payload ID cache.
hasAttr := arg.attributes != nil && !arg.attributes.IsEmpty()
nextSlot := s.CurrentSlot() + 1
if hasAttr && payloadID != nil {
var pId [8]byte
copy(pId[:], payloadID[:])
@@ -276,35 +276,50 @@ func (s *Service) pruneInvalidBlock(ctx context.Context, root, parentRoot, lvh [
// getPayloadAttributes returns the payload attributes for the given state and slot.
// The attribute is required to initiate a payload build process in the context of an `engine_forkchoiceUpdated` call.
func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState, slot primitives.Slot, headRoot []byte) (bool, payloadattribute.Attributer) {
func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState, slot primitives.Slot, headRoot []byte) payloadattribute.Attributer {
emptyAttri := payloadattribute.EmptyWithVersion(st.Version())
val, ok := s.trackedProposer(st, slot)
if !ok && !features.Get().PrepareAllPayloads {
return false, emptyAttri
// If it is an epoch boundary then process slots to get the right
// shuffling before checking if the proposer is tracked. Otherwise,
// perform this check first. This is cheap as the NSC has already been updated.
var val cache.TrackedValidator
var ok bool
e := slots.ToEpoch(slot)
stateEpoch := slots.ToEpoch(st.Slot())
if e == stateEpoch {
val, ok = s.trackedProposer(st, slot)
if !ok {
return emptyAttri
}
}
// Get previous randao.
st = st.Copy()
if slot > st.Slot() {
var err error
st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, headRoot, slot)
if err != nil {
log.WithError(err).Error("Could not process slots to get payload attribute")
return false, emptyAttri
return emptyAttri
}
}
if e > stateEpoch {
emptyAttri := payloadattribute.EmptyWithVersion(st.Version())
val, ok = s.trackedProposer(st, slot)
if !ok {
return emptyAttri
}
}
// Get previous randao.
prevRando, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
if err != nil {
log.WithError(err).Error("Could not get randao mix to get payload attribute")
return false, emptyAttri
return emptyAttri
}
// Get timestamp.
t, err := slots.ToTime(uint64(s.genesisTime.Unix()), slot)
if err != nil {
log.WithError(err).Error("Could not get timestamp to get payload attribute")
return false, emptyAttri
return emptyAttri
}
var attr payloadattribute.Attributer
@@ -313,7 +328,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
withdrawals, err := st.ExpectedWithdrawals()
if err != nil {
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
return false, emptyAttri
return emptyAttri
}
attr, err = payloadattribute.New(&enginev1.PayloadAttributesV3{
Timestamp: uint64(t.Unix()),
@@ -324,13 +339,13 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
})
if err != nil {
log.WithError(err).Error("Could not get payload attribute")
return false, emptyAttri
return emptyAttri
}
case version.Capella:
withdrawals, err := st.ExpectedWithdrawals()
if err != nil {
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
return false, emptyAttri
return emptyAttri
}
attr, err = payloadattribute.New(&enginev1.PayloadAttributesV2{
Timestamp: uint64(t.Unix()),
@@ -340,7 +355,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
})
if err != nil {
log.WithError(err).Error("Could not get payload attribute")
return false, emptyAttri
return emptyAttri
}
case version.Bellatrix:
attr, err = payloadattribute.New(&enginev1.PayloadAttributes{
@@ -350,14 +365,14 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
})
if err != nil {
log.WithError(err).Error("Could not get payload attribute")
return false, emptyAttri
return emptyAttri
}
default:
log.WithField("version", st.Version()).Error("Could not get payload attribute due to unknown state version")
return false, emptyAttri
return emptyAttri
}
return true, attr
return attr
}
// removeInvalidBlockAndState removes the invalid block, blob and its corresponding state from the cache and DB.
@@ -372,10 +387,7 @@ func (s *Service) removeInvalidBlockAndState(ctx context.Context, blkRoots [][32
// This is an irreparable condition: it would mean a justified or finalized block has become invalid.
return err
}
// No op if the sidecar does not exist.
if err := s.cfg.BeaconDB.DeleteBlobSidecars(ctx, root); err != nil {
return err
}
// TODO: delete blobs from storage
}
return nil
}
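Pulling the attribute handling from the hunks above into one place: nil attributes are defaulted to a version-matched empty value, and the payload ID cache is only updated when attributes are present. Attributer and the cache hook below are stand-ins, not Prysm's API:

package main

import "fmt"

type Attributer interface{ IsEmpty() bool }

type emptyAttr struct{}

func (emptyAttr) IsEmpty() bool { return true }

type realAttr struct{}

func (realAttr) IsEmpty() bool { return false }

// handleAttributes models the flow above: default nil attributes to an
// empty value (standing in for payloadattribute.EmptyWithVersion), then
// cache the payload ID only when attributes are non-empty.
func handleAttributes(attrs Attributer, payloadID *[8]byte, cache func([8]byte)) {
    if attrs == nil {
        attrs = emptyAttr{}
    }
    hasAttr := !attrs.IsEmpty()
    if hasAttr && payloadID != nil {
        cache(*payloadID) // cache payload ID for the next-slot proposer
    }
}

func main() {
    handleAttributes(realAttr{}, &[8]byte{1}, func(id [8]byte) { fmt.Println("cached", id) })
}
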

View File

@@ -56,11 +56,14 @@ func Test_NotifyForkchoiceUpdate_GetPayloadAttrErrorCanContinue(t *testing.T) {
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
b, err := consensusblocks.NewBeaconBlock(&ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{},
sb := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{},
},
},
})
}
b, err := consensusblocks.NewSignedBeaconBlock(sb)
require.NoError(t, err)
pid := &v1.PayloadIDBytes{1}
@@ -72,7 +75,7 @@ func Test_NotifyForkchoiceUpdate_GetPayloadAttrErrorCanContinue(t *testing.T) {
// Intentionally generate a bad state such that `hash_tree_root` fails during `process_slot`
s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{})
require.NoError(t, err)
arg := &notifyForkchoiceUpdateArg{
arg := &fcuConfig{
headState: s,
headRoot: [32]byte{},
headBlock: b,
@@ -113,7 +116,7 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
tests := []struct {
name string
blk interfaces.ReadOnlyBeaconBlock
blk interfaces.ReadOnlySignedBeaconBlock
headRoot [32]byte
finalizedRoot [32]byte
justifiedRoot [32]byte
@@ -122,24 +125,24 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
}{
{
name: "phase0 block",
blk: func() interfaces.ReadOnlyBeaconBlock {
b, err := consensusblocks.NewBeaconBlock(&ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}})
blk: func() interfaces.ReadOnlySignedBeaconBlock {
b, err := consensusblocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlock{Block: &ethpb.BeaconBlock{Body: &ethpb.BeaconBlockBody{}}})
require.NoError(t, err)
return b
}(),
},
{
name: "altair block",
blk: func() interfaces.ReadOnlyBeaconBlock {
b, err := consensusblocks.NewBeaconBlock(&ethpb.BeaconBlockAltair{Body: &ethpb.BeaconBlockBodyAltair{}})
blk: func() interfaces.ReadOnlySignedBeaconBlock {
b, err := consensusblocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockAltair{Block: &ethpb.BeaconBlockAltair{Body: &ethpb.BeaconBlockBodyAltair{}}})
require.NoError(t, err)
return b
}(),
},
{
name: "not execution block",
blk: func() interfaces.ReadOnlyBeaconBlock {
b, err := consensusblocks.NewBeaconBlock(&ethpb.BeaconBlockBellatrix{
blk: func() interfaces.ReadOnlySignedBeaconBlock {
b, err := consensusblocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockBellatrix{Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
ParentHash: make([]byte, fieldparams.RootLength),
@@ -152,19 +155,19 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
BlockHash: make([]byte, fieldparams.RootLength),
},
},
})
}})
require.NoError(t, err)
return b
}(),
},
{
name: "happy case: finalized root is altair block",
blk: func() interfaces.ReadOnlyBeaconBlock {
b, err := consensusblocks.NewBeaconBlock(&ethpb.BeaconBlockBellatrix{
blk: func() interfaces.ReadOnlySignedBeaconBlock {
b, err := consensusblocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockBellatrix{Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{},
},
})
}})
require.NoError(t, err)
return b
}(),
@@ -173,12 +176,12 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
},
{
name: "happy case: finalized root is bellatrix block",
blk: func() interfaces.ReadOnlyBeaconBlock {
b, err := consensusblocks.NewBeaconBlock(&ethpb.BeaconBlockBellatrix{
blk: func() interfaces.ReadOnlySignedBeaconBlock {
b, err := consensusblocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockBellatrix{Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{},
},
})
}})
require.NoError(t, err)
return b
}(),
@@ -187,12 +190,12 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
},
{
name: "forkchoice updated with optimistic block",
blk: func() interfaces.ReadOnlyBeaconBlock {
b, err := consensusblocks.NewBeaconBlock(&ethpb.BeaconBlockBellatrix{
blk: func() interfaces.ReadOnlySignedBeaconBlock {
b, err := consensusblocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockBellatrix{Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{},
},
})
}})
require.NoError(t, err)
return b
}(),
@@ -202,12 +205,12 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
},
{
name: "forkchoice updated with invalid block",
blk: func() interfaces.ReadOnlyBeaconBlock {
b, err := consensusblocks.NewBeaconBlock(&ethpb.BeaconBlockBellatrix{
blk: func() interfaces.ReadOnlySignedBeaconBlock {
b, err := consensusblocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockBellatrix{Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{},
},
})
}})
require.NoError(t, err)
return b
}(),
@@ -225,7 +228,7 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
st, _ := util.DeterministicGenesisState(t, 1)
require.NoError(t, beaconDB.SaveState(ctx, st, tt.finalizedRoot))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, tt.finalizedRoot))
arg := &notifyForkchoiceUpdateArg{
arg := &fcuConfig{
headState: st,
headRoot: tt.headRoot,
headBlock: tt.blk,
@@ -305,9 +308,9 @@ func Test_NotifyForkchoiceUpdate_NIlLVH(t *testing.T) {
require.NoError(t, beaconDB.SaveState(ctx, st, bra))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bra))
a := &notifyForkchoiceUpdateArg{
a := &fcuConfig{
headState: st,
headBlock: wbd.Block(),
headBlock: wbd,
headRoot: brd,
}
_, err = service.notifyForkchoiceUpdate(ctx, a)
@@ -442,9 +445,9 @@ func Test_NotifyForkchoiceUpdateRecursive_DoublyLinkedTree(t *testing.T) {
require.NoError(t, beaconDB.SaveState(ctx, st, bra))
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bra))
a := &notifyForkchoiceUpdateArg{
a := &fcuConfig{
headState: st,
headBlock: wbg.Block(),
headBlock: wbg,
headRoot: brg,
}
_, err = service.notifyForkchoiceUpdate(ctx, a)
@@ -780,23 +783,23 @@ func Test_GetPayloadAttribute(t *testing.T) {
ctx := tr.ctx
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
hasPayload, _ := service.getPayloadAttribute(ctx, st, 0, []byte{})
require.Equal(t, false, hasPayload)
attr := service.getPayloadAttribute(ctx, st, 0, []byte{})
require.Equal(t, true, attr.IsEmpty())
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, Index: 0})
// Cache hit, advance state, no fee recipient
slot := primitives.Slot(1)
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
hasPayload, attr := service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
require.Equal(t, true, hasPayload)
attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
require.Equal(t, false, attr.IsEmpty())
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
// Cache hit, advance state, has fee recipient
suggestedAddr := common.HexToAddress("123")
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(suggestedAddr), Index: 0})
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
hasPayload, attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
require.Equal(t, true, hasPayload)
attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
require.Equal(t, false, attr.IsEmpty())
require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient()))
}
@@ -810,8 +813,8 @@ func Test_GetPayloadAttribute_PrepareAllPayloads(t *testing.T) {
ctx := tr.ctx
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
hasPayload, attr := service.getPayloadAttribute(ctx, st, 0, []byte{})
require.Equal(t, true, hasPayload)
attr := service.getPayloadAttribute(ctx, st, 0, []byte{})
require.Equal(t, false, attr.IsEmpty())
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
}
@@ -820,15 +823,15 @@ func Test_GetPayloadAttributeV2(t *testing.T) {
ctx := tr.ctx
st, _ := util.DeterministicGenesisStateCapella(t, 1)
hasPayload, _ := service.getPayloadAttribute(ctx, st, 0, []byte{})
require.Equal(t, false, hasPayload)
attr := service.getPayloadAttribute(ctx, st, 0, []byte{})
require.Equal(t, true, attr.IsEmpty())
// Cache hit, advance state, no fee recipient
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, Index: 0})
slot := primitives.Slot(1)
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
hasPayload, attr := service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
require.Equal(t, true, hasPayload)
attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
require.Equal(t, false, attr.IsEmpty())
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
a, err := attr.Withdrawals()
require.NoError(t, err)
@@ -838,8 +841,8 @@ func Test_GetPayloadAttributeV2(t *testing.T) {
suggestedAddr := common.HexToAddress("123")
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(suggestedAddr), Index: 0})
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
hasPayload, attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
require.Equal(t, true, hasPayload)
attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
require.Equal(t, false, attr.IsEmpty())
require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient()))
a, err = attr.Withdrawals()
require.NoError(t, err)
@@ -851,15 +854,15 @@ func Test_GetPayloadAttributeDeneb(t *testing.T) {
ctx := tr.ctx
st, _ := util.DeterministicGenesisStateDeneb(t, 1)
hasPayload, _ := service.getPayloadAttribute(ctx, st, 0, []byte{})
require.Equal(t, false, hasPayload)
attr := service.getPayloadAttribute(ctx, st, 0, []byte{})
require.Equal(t, true, attr.IsEmpty())
// Cache hit, advance state, no fee recipient
slot := primitives.Slot(1)
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, Index: 0})
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
hasPayload, attr := service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
require.Equal(t, true, hasPayload)
attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
require.Equal(t, false, attr.IsEmpty())
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
a, err := attr.Withdrawals()
require.NoError(t, err)
@@ -869,8 +872,8 @@ func Test_GetPayloadAttributeDeneb(t *testing.T) {
suggestedAddr := common.HexToAddress("123")
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(suggestedAddr), Index: 0})
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
hasPayload, attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
require.Equal(t, true, hasPayload)
attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
require.Equal(t, false, attr.IsEmpty())
require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient()))
a, err = attr.Withdrawals()
require.NoError(t, err)
@@ -1093,3 +1096,35 @@ func TestKZGCommitmentToVersionedHashes(t *testing.T) {
require.Equal(t, vhs[0].String(), vh0)
require.Equal(t, vhs[1].String(), vh1)
}
func TestComputePayloadAttribute(t *testing.T) {
service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
ctx := tr.ctx
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, Index: 0})
// Cache hit, advance state, no fee recipient
slot := primitives.Slot(1)
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
cfg := &postBlockProcessConfig{
ctx: ctx,
blockRoot: [32]byte{'a'},
}
fcu := &fcuConfig{
headState: st,
proposingSlot: slot,
headRoot: [32]byte{},
}
require.NoError(t, service.computePayloadAttributes(cfg, fcu))
require.Equal(t, false, fcu.attributes.IsEmpty())
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(fcu.attributes.SuggestedFeeRecipient()).String())
// Cache hit, advance state, has fee recipient
suggestedAddr := common.HexToAddress("123")
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(suggestedAddr), Index: 0})
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
require.NoError(t, service.computePayloadAttributes(cfg, fcu))
require.Equal(t, false, fcu.attributes.IsEmpty())
require.Equal(t, suggestedAddr, common.BytesToAddress(fcu.attributes.SuggestedFeeRecipient()))
}

View File

@@ -8,9 +8,9 @@ import (
"github.com/pkg/errors"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
payloadattribute "github.com/prysmaticlabs/prysm/v4/consensus-types/payload-attribute"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/sirupsen/logrus"
@@ -44,50 +44,69 @@ func (s *Service) getStateAndBlock(ctx context.Context, r [32]byte) (state.Beaco
return headState, newHeadBlock, nil
}
type fcuConfig struct {
headState state.BeaconState
headBlock interfaces.ReadOnlySignedBeaconBlock
headRoot [32]byte
proposingSlot primitives.Slot
attributes payloadattribute.Attributer
}
// sendFCU handles the logic to notify the engine of a forkchoice update
// for the first time when processing an incoming block during regular sync. It
// always updates the shuffling caches and handles epoch transitions when the
// incoming block is late, preparing payload attributes in that case, while it
// only sends a message with empty attributes for early blocks.
func (s *Service) sendFCU(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
if !s.isNewHead(cfg.headRoot) {
return nil
}
if fcuArgs.attributes != nil && !fcuArgs.attributes.IsEmpty() && s.shouldOverrideFCU(cfg.headRoot, s.CurrentSlot()+1) {
return nil
}
return s.forkchoiceUpdateWithExecution(cfg.ctx, fcuArgs)
}
// sendFCUWithAttributes computes the payload attributes and sends an FCU message
// to the engine if needed
func (s *Service) sendFCUWithAttributes(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
defer cancel()
cfg.ctx = slotCtx
if err := s.computePayloadAttributes(cfg, fcuArgs); err != nil {
log.WithError(err).Error("could not compute payload attributes")
return
}
if fcuArgs.attributes.IsEmpty() {
return
}
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
if _, err := s.notifyForkchoiceUpdate(cfg.ctx, fcuArgs); err != nil {
log.WithError(err).Error("could not update forkchoice with payload attributes for proposal")
}
}
// forkchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It decides whether a new call to FCU should be made.
// it returns true if the new head is updated
func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, newHeadRoot [32]byte, proposingSlot primitives.Slot) (bool, error) {
func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuConfig) error {
_, span := trace.StartSpan(ctx, "beacon-chain.blockchain.forkchoiceUpdateWithExecution")
defer span.End()
// Note: Use the service context here to avoid the parent context being ended during a forkchoice update.
ctx = trace.NewContext(s.ctx, span)
isNewHead := s.isNewHead(newHeadRoot)
if !isNewHead {
return false, nil
}
headState, headBlock, err := s.getStateAndBlock(ctx, newHeadRoot)
_, err := s.notifyForkchoiceUpdate(ctx, args)
if err != nil {
log.WithError(err).Error("Could not get forkchoice update argument")
return false, nil
return errors.Wrap(err, "could not notify forkchoice update")
}
_, tracked := s.trackedProposer(headState, proposingSlot)
if (tracked || features.Get().PrepareAllPayloads) && !features.Get().DisableReorgLateBlocks {
if s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
return false, nil
}
}
_, err = s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
headState: headState,
headRoot: newHeadRoot,
headBlock: headBlock.Block(),
})
if err != nil {
return false, errors.Wrap(err, "could not notify forkchoice update")
}
if err := s.saveHead(ctx, newHeadRoot, headBlock, headState); err != nil {
if err := s.saveHead(ctx, args.headRoot, args.headBlock, args.headState); err != nil {
log.WithError(err).Error("could not save head")
}
// Only need to prune attestations from pool if the head has changed.
if err := s.pruneAttsFromPool(headBlock); err != nil {
if err := s.pruneAttsFromPool(args.headBlock); err != nil {
log.WithError(err).Error("could not prune attestations from pool")
}
return true, nil
return nil
}
// shouldOverrideFCU checks whether the incoming block is still subject to being

View File

@@ -58,33 +58,14 @@ func TestService_getHeadStateAndBlock(t *testing.T) {
}
func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
hook := logTest.NewGlobal()
ctx := context.Background()
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)
service.cfg.PayloadIDCache = cache.NewPayloadIDCache()
_, err = service.forkchoiceUpdateWithExecution(ctx, service.headRoot(), service.CurrentSlot()+1)
require.NoError(t, err)
hookErr := "could not notify forkchoice update"
invalidStateErr := "could not get state summary: could not find block in DB"
require.LogsDoNotContain(t, hook, invalidStateErr)
require.LogsDoNotContain(t, hook, hookErr)
gb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
require.NoError(t, service.saveInitSyncBlock(ctx, [32]byte{'a'}, gb))
_, err = service.forkchoiceUpdateWithExecution(ctx, [32]byte{'a'}, service.CurrentSlot()+1)
require.NoError(t, err)
require.LogsContain(t, hook, invalidStateErr)
service.cfg.TrackedValidatorsCache = cache.NewTrackedValidatorsCache()
hook.Reset()
service.head = &head{
root: [32]byte{'a'},
block: nil, /* should not panic if notify head uses correct head */
}
// Block in Cache
b := util.NewBeaconBlock()
b.Block.Slot = 2
wsb, err := blocks.NewSignedBeaconBlock(b)
@@ -99,12 +80,6 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
state: st,
}
service.cfg.PayloadIDCache.Set(2, [32]byte{2}, [8]byte{1})
_, err = service.forkchoiceUpdateWithExecution(ctx, r1, service.CurrentSlot())
require.NoError(t, err)
require.LogsDoNotContain(t, hook, invalidStateErr)
require.LogsDoNotContain(t, hook, hookErr)
// Block in DB
b = util.NewBeaconBlock()
b.Block.Slot = 3
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b)
@@ -117,19 +92,17 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
state: st,
}
service.cfg.PayloadIDCache.Set(2, [32]byte{2}, [8]byte{1})
_, err = service.forkchoiceUpdateWithExecution(ctx, r1, service.CurrentSlot()+1)
require.NoError(t, err)
require.LogsDoNotContain(t, hook, invalidStateErr)
require.LogsDoNotContain(t, hook, hookErr)
args := &fcuConfig{
headState: st,
headRoot: r1,
headBlock: wsb,
proposingSlot: service.CurrentSlot() + 1,
}
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, args))
payloadID, has := service.cfg.PayloadIDCache.PayloadID(2, [32]byte{2})
require.Equal(t, true, has)
require.Equal(t, primitives.PayloadID{1}, payloadID)
// Test zero headRoot returns immediately.
headRoot := service.headRoot()
_, err = service.forkchoiceUpdateWithExecution(ctx, [32]byte{}, service.CurrentSlot()+1)
require.NoError(t, err)
require.Equal(t, service.headRoot(), headRoot)
}
func TestService_forkchoiceUpdateWithExecution_SameHeadRootNewProposer(t *testing.T) {
@@ -173,9 +146,13 @@ func TestService_forkchoiceUpdateWithExecution_SameHeadRootNewProposer(t *testin
service.head.block = sb
service.head.state = st
service.cfg.PayloadIDCache.Set(service.CurrentSlot()+1, [32]byte{} /* root */, [8]byte{})
_, err = service.forkchoiceUpdateWithExecution(ctx, r, service.CurrentSlot()+1)
require.NoError(t, err)
args := &fcuConfig{
headState: st,
headBlock: sb,
headRoot: r,
proposingSlot: service.CurrentSlot() + 1,
}
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, args))
}
func TestShouldOverrideFCU(t *testing.T) {

View File

@@ -1,42 +1,32 @@
package kzg
import (
"fmt"
GoKZG "github.com/crate-crypto/go-kzg-4844"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)
// IsDataAvailable checks that:
// - all blobs in the block are available
// - the expected KZG commitments match the number of blobs in the block
// - the number of proofs matches the number of blobs
// - the proofs verify against the KZG commitments
func IsDataAvailable(commitments [][]byte, sidecars []*ethpb.DeprecatedBlobSidecar) error {
if len(commitments) != len(sidecars) {
return fmt.Errorf("could not check data availability, expected %d commitments, obtained %d",
len(commitments), len(sidecars))
}
if len(commitments) == 0 {
// Verify performs single or batch verification of commitments depending on the number of given BlobSidecars.
func Verify(sidecars ...blocks.ROBlob) error {
if len(sidecars) == 0 {
return nil
}
blobs := make([]GoKZG.Blob, len(commitments))
proofs := make([]GoKZG.KZGProof, len(commitments))
cmts := make([]GoKZG.KZGCommitment, len(commitments))
if len(sidecars) == 1 {
return kzgContext.VerifyBlobKZGProof(
bytesToBlob(sidecars[0].Blob),
bytesToCommitment(sidecars[0].KzgCommitment),
bytesToKZGProof(sidecars[0].KzgProof))
}
blobs := make([]GoKZG.Blob, len(sidecars))
cmts := make([]GoKZG.KZGCommitment, len(sidecars))
proofs := make([]GoKZG.KZGProof, len(sidecars))
for i, sidecar := range sidecars {
blobs[i] = bytesToBlob(sidecar.Blob)
cmts[i] = bytesToCommitment(sidecar.KzgCommitment)
proofs[i] = bytesToKZGProof(sidecar.KzgProof)
cmts[i] = bytesToCommitment(commitments[i])
}
return kzgContext.VerifyBlobKZGProofBatch(blobs, cmts, proofs)
}
// VerifyROBlobCommitment is a helper that massages the fields of an ROBlob into the types needed to call VerifyBlobKZGProof.
func VerifyROBlobCommitment(sc blocks.ROBlob) error {
return kzgContext.VerifyBlobKZGProof(bytesToBlob(sc.Blob), bytesToCommitment(sc.KzgCommitment), bytesToKZGProof(sc.KzgProof))
}
func bytesToBlob(blob []byte) (ret GoKZG.Blob) {
copy(ret[:], blob)
return
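A condensed model of Verify's dispatch above: zero sidecars are vacuously valid, one takes the single-proof path, several take the batch path. verifySingle and verifyBatch stand in for the go-kzg-4844 calls:

package main

import "fmt"

type roBlob struct{ Blob, KzgCommitment, KzgProof []byte }

func verifySingle(sc roBlob) error   { _ = sc; return nil }
func verifyBatch(scs []roBlob) error { _ = scs; return nil }

// verify mirrors the variadic dispatch in the code above.
func verify(sidecars ...roBlob) error {
    switch len(sidecars) {
    case 0:
        return nil // nothing to verify
    case 1:
        return verifySingle(sidecars[0])
    default:
        return verifyBatch(sidecars)
    }
}

func main() {
    fmt.Println(verify(roBlob{}, roBlob{})) // batch path: <nil>
}
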

View File

@@ -8,7 +8,7 @@ import (
"github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
GoKZG "github.com/crate-crypto/go-kzg-4844"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/sirupsen/logrus"
)
@@ -59,9 +59,8 @@ func GenerateCommitmentAndProof(blob GoKZG.Blob) (GoKZG.KZGCommitment, GoKZG.KZG
}
func TestIsDataAvailable(t *testing.T) {
sidecars := make([]*ethpb.DeprecatedBlobSidecar, 0)
commitments := make([][]byte, 0)
require.NoError(t, IsDataAvailable(commitments, sidecars))
sidecars := make([]blocks.ROBlob, 0)
require.NoError(t, Verify(sidecars...))
}
func TestBytesToAny(t *testing.T) {

View File

@@ -358,6 +358,7 @@ func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconSt
for name, val := range refMap {
stateTrieReferences.WithLabelValues(name).Set(float64(val))
}
postState.RecordStateMetrics()
return nil
}

View File

@@ -12,6 +12,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/das"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
@@ -28,7 +29,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/attestation"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -41,106 +41,67 @@ const depositDeadline = 20 * time.Second
// This defines size of the upper bound for initial sync block cache.
var initialSyncBlockCacheSize = uint64(2 * params.BeaconConfig().SlotsPerEpoch)
// postBlockProcessConfig is a structure that contains the data needed to
// process the beacon block after validating the state transition function
type postBlockProcessConfig struct {
ctx context.Context
signed interfaces.ReadOnlySignedBeaconBlock
blockRoot [32]byte
headRoot [32]byte
postState state.BeaconState
isValidPayload bool
}
// postBlockProcess is called when a gossip block is received. This function performs
// several duties, most importantly informing the engine if the head was updated,
// saving the new head information to the blockchain package, and
// handling attestations, slashings, and similar operations included in the block.
func (s *Service) postBlockProcess(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, postState state.BeaconState, isValidPayload bool) error {
ctx, span := trace.StartSpan(ctx, "blockChain.onBlock")
func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
ctx, span := trace.StartSpan(cfg.ctx, "blockChain.onBlock")
defer span.End()
if err := consensusblocks.BeaconBlockIsNil(signed); err != nil {
cfg.ctx = ctx
if err := consensusblocks.BeaconBlockIsNil(cfg.signed); err != nil {
return invalidBlock{error: err}
}
startTime := time.Now()
b := signed.Block()
fcuArgs := &fcuConfig{}
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, postState, blockRoot); err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", signed.Block().Slot())
defer s.handleSecondFCUCall(cfg, fcuArgs)
defer s.sendStateFeedOnBlock(cfg)
defer reportProcessingTime(startTime)
defer reportAttestationInclusion(cfg.signed.Block())
err := s.cfg.ForkChoiceStore.InsertNode(ctx, cfg.postState, cfg.blockRoot)
if err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", cfg.signed.Block().Slot())
}
if err := s.handleBlockAttestations(ctx, signed.Block(), postState); err != nil {
if err := s.handleBlockAttestations(ctx, cfg.signed.Block(), cfg.postState); err != nil {
return errors.Wrap(err, "could not handle block's attestations")
}
s.InsertSlashingsToForkChoiceStore(ctx, signed.Block().Body().AttesterSlashings())
if isValidPayload {
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, blockRoot); err != nil {
s.InsertSlashingsToForkChoiceStore(ctx, cfg.signed.Block().Body().AttesterSlashings())
if cfg.isValidPayload {
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, cfg.blockRoot); err != nil {
return errors.Wrap(err, "could not set optimistic block to valid")
}
}
start := time.Now()
headRoot, err := s.cfg.ForkChoiceStore.Head(ctx)
cfg.headRoot, err = s.cfg.ForkChoiceStore.Head(ctx)
if err != nil {
log.WithError(err).Warn("Could not update head")
}
if blockRoot != headRoot {
receivedWeight, err := s.cfg.ForkChoiceStore.Weight(blockRoot)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", blockRoot)).Warn("could not determine node weight")
}
headWeight, err := s.cfg.ForkChoiceStore.Weight(headRoot)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", headRoot)).Warn("could not determine node weight")
}
log.WithFields(logrus.Fields{
"receivedRoot": fmt.Sprintf("%#x", blockRoot),
"receivedWeight": receivedWeight,
"headRoot": fmt.Sprintf("%#x", headRoot),
"headWeight": headWeight,
}).Debug("Head block is not the received block")
}
newBlockHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
// Verify conditions for FCU, notify the engine, and save the new head.
// This function also prunes attestations; other similar operations happen in prunePostBlockOperationPools.
if _, err := s.forkchoiceUpdateWithExecution(ctx, headRoot, s.CurrentSlot()+1); err != nil {
return err
if cfg.headRoot != cfg.blockRoot {
s.logNonCanonicalBlockReceived(cfg.blockRoot, cfg.headRoot)
return nil
}
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(blockRoot)
if err != nil {
log.WithError(err).Debug("Could not check if block is optimistic")
optimistic = true
if err := s.getFCUArgs(cfg, fcuArgs); err != nil {
log.WithError(err).Error("Could not get forkchoice update argument")
return nil
}
// Send notification of the processed block to the state feed.
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
Slot: signed.Block().Slot(),
BlockRoot: blockRoot,
SignedBlock: signed,
Verified: true,
Optimistic: optimistic,
},
})
defer reportAttestationInclusion(b)
if headRoot == blockRoot {
// Updating next slot state cache can happen in the background
// except in the epoch boundary in which case we lock to handle
// the shuffling and proposer caches updates.
// We handle these caches only on canonical
// blocks, otherwise this will be handled by lateBlockTasks
slot := postState.Slot()
if slots.IsEpochEnd(slot) {
if err := transition.UpdateNextSlotCache(ctx, blockRoot[:], postState); err != nil {
return errors.Wrap(err, "could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, slot, postState, blockRoot[:]); err != nil {
return errors.Wrap(err, "could not handle epoch boundary")
}
} else {
go func() {
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
defer cancel()
if err := transition.UpdateNextSlotCache(slotCtx, blockRoot[:], postState); err != nil {
log.WithError(err).Error("could not update next slot state cache")
}
}()
}
if err := s.sendFCU(cfg, fcuArgs); err != nil {
return errors.Wrap(err, "could not send FCU to engine")
}
onBlockProcessingTime.Observe(float64(time.Since(startTime).Milliseconds()))
return nil
}
@@ -162,7 +123,7 @@ func getStateVersionAndPayload(st state.BeaconState) (int, interfaces.ExecutionD
return preStateVersion, preStateHeader, nil
}
func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlock) error {
func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlock, avs das.AvailabilityStore) error {
ctx, span := trace.StartSpan(ctx, "blockChain.onBlockBatch")
defer span.End()
@@ -265,8 +226,8 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
return err
}
}
if err := s.databaseDACheck(ctx, b); err != nil {
return errors.Wrap(err, "could not validate blob data availability")
if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), b); err != nil {
return errors.Wrapf(err, "could not validate blob data availability at slot %d", b.Block().Slot())
}
args := &forkchoicetypes.BlockAndCheckpoints{Block: b.Block(),
JustifiedCheckpoint: jCheckpoints[i],
@@ -322,10 +283,10 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
return errors.Wrap(err, "could not set optimistic block to valid")
}
}
arg := &notifyForkchoiceUpdateArg{
arg := &fcuConfig{
headState: preState,
headRoot: lastBR,
headBlock: lastB.Block(),
headBlock: lastB,
}
if _, err := s.notifyForkchoiceUpdate(ctx, arg); err != nil {
return err
@@ -333,37 +294,6 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
}
func commitmentsToCheck(b consensusblocks.ROBlock, current primitives.Slot) [][]byte {
if b.Version() < version.Deneb {
return nil
}
// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
if !params.WithinDAPeriod(slots.ToEpoch(b.Block().Slot()), slots.ToEpoch(current)) {
return nil
}
kzgCommitments, err := b.Block().Body().BlobKzgCommitments()
if err != nil {
return nil
}
return kzgCommitments
}
func (s *Service) databaseDACheck(ctx context.Context, b consensusblocks.ROBlock) error {
commitments := commitmentsToCheck(b, s.CurrentSlot())
if len(commitments) == 0 {
return nil
}
missing, err := missingIndices(s.blobStorage, b.Root(), commitments)
if err != nil {
return err
}
if len(missing) == 0 {
return nil
}
// TODO: don't worry that this error isn't informative, it will be superseded by a detailed sidecar cache error.
return errors.New("not all kzg commitments are available")
}
func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.BeaconState) error {
e := coreTime.CurrentEpoch(st)
if err := helpers.UpdateCommitteeCache(ctx, st, e); err != nil {
@@ -381,9 +311,6 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
if err := helpers.UpdateCommitteeCache(slotCtx, st, e+1); err != nil {
log.WithError(err).Warn("Could not update committee cache")
}
if err := helpers.UpdateUnsafeProposerIndicesInCache(slotCtx, st, e+1); err != nil {
log.WithError(err).Warn("Failed to cache next epoch proposers")
}
}()
// The latest block header is from the previous epoch
r, err := st.LatestBlockHeader().HashTreeRoot()
@@ -674,11 +601,6 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
log.WithError(err).Error("lateBlockTasks: could not update epoch boundary caches")
}
s.cfg.ForkChoiceStore.RUnlock()
_, tracked := s.trackedProposer(headState, s.CurrentSlot()+1)
// return early if we are not proposing next slot.
if !tracked && !features.Get().PrepareAllPayloads {
return
}
// return early if we already started building a block for the current
// head root
_, has := s.cfg.PayloadIDCache.PayloadID(s.CurrentSlot()+1, headRoot)
@@ -693,12 +615,19 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
return
}
s.headLock.RUnlock()
s.cfg.ForkChoiceStore.RLock()
_, err = s.notifyForkchoiceUpdate(ctx, &notifyForkchoiceUpdateArg{
fcuArgs := &fcuConfig{
headState: headState,
headRoot: headRoot,
headBlock: headBlock.Block(),
})
headBlock: headBlock,
}
fcuArgs.attributes = s.getPayloadAttribute(ctx, headState, s.CurrentSlot()+1, headRoot[:])
// return early if we are not proposing next slot
if fcuArgs.attributes.IsEmpty() {
return
}
s.cfg.ForkChoiceStore.RLock()
_, err = s.notifyForkchoiceUpdate(ctx, fcuArgs)
s.cfg.ForkChoiceStore.RUnlock()
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")

View File

@@ -3,9 +3,13 @@ package blockchain
import (
"context"
"fmt"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
@@ -15,8 +19,8 @@ import (
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
mathutil "github.com/prysmaticlabs/prysm/v4/math"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/time"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -25,6 +29,127 @@ func (s *Service) CurrentSlot() primitives.Slot {
return slots.CurrentSlot(uint64(s.genesisTime.Unix()))
}
// getFCUArgs returns the arguments for the forkchoice update call
func (s *Service) getFCUArgs(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
if err := s.getFCUArgsEarlyBlock(cfg, fcuArgs); err != nil {
return err
}
slot := cfg.signed.Block().Slot()
if slots.WithinVotingWindow(uint64(s.genesisTime.Unix()), slot) {
return nil
}
return s.computePayloadAttributes(cfg, fcuArgs)
}
func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
if cfg.blockRoot == cfg.headRoot {
fcuArgs.headState = cfg.postState
fcuArgs.headBlock = cfg.signed
fcuArgs.headRoot = cfg.headRoot
fcuArgs.proposingSlot = s.CurrentSlot() + 1
return nil
}
return s.fcuArgsNonCanonicalBlock(cfg, fcuArgs)
}
// logNonCanonicalBlockReceived prints a message informing that the received
// block is not the head of the chain. It requires that the caller holds a
// lock on Forkchoice.
func (s *Service) logNonCanonicalBlockReceived(blockRoot [32]byte, headRoot [32]byte) {
receivedWeight, err := s.cfg.ForkChoiceStore.Weight(blockRoot)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", blockRoot)).Warn("could not determine node weight")
}
headWeight, err := s.cfg.ForkChoiceStore.Weight(headRoot)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", headRoot)).Warn("could not determine node weight")
}
log.WithFields(logrus.Fields{
"receivedRoot": fmt.Sprintf("%#x", blockRoot),
"receivedWeight": receivedWeight,
"headRoot": fmt.Sprintf("%#x", headRoot),
"headWeight": headWeight,
}).Debug("Head block is not the received block")
}
// fcuArgsNonCanonicalBlock returns the arguments for the FCU call when the
// incoming block is non-canonical; in that case they are based on the current
// head root rather than the received block.
func (s *Service) fcuArgsNonCanonicalBlock(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
headState, headBlock, err := s.getStateAndBlock(cfg.ctx, cfg.headRoot)
if err != nil {
return err
}
fcuArgs.headState = headState
fcuArgs.headBlock = headBlock
fcuArgs.headRoot = cfg.headRoot
fcuArgs.proposingSlot = s.CurrentSlot() + 1
return nil
}
// sendStateFeedOnBlock sends an event that a new block has been synced
func (s *Service) sendStateFeedOnBlock(cfg *postBlockProcessConfig) {
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(cfg.blockRoot)
if err != nil {
log.WithError(err).Debug("Could not check if block is optimistic")
optimistic = true
}
// Send notification of the processed block to the state feed.
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
Slot: cfg.signed.Block().Slot(),
BlockRoot: cfg.blockRoot,
SignedBlock: cfg.signed,
Verified: true,
Optimistic: optimistic,
},
})
}
// updateCachesPostBlockProcessing updates the next slot cache and handles the
// epoch boundary in order to compute the right proposer indices after the
// state transition has been processed. This function is called on late blocks
// while still locked, before sending FCU to the engine.
func (s *Service) updateCachesPostBlockProcessing(cfg *postBlockProcessConfig) error {
slot := cfg.postState.Slot()
if err := transition.UpdateNextSlotCache(cfg.ctx, cfg.blockRoot[:], cfg.postState); err != nil {
return errors.Wrap(err, "could not update next slot state cache")
}
if !slots.IsEpochEnd(slot) {
return nil
}
return s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, cfg.blockRoot[:])
}
// handleSecondFCUCall handles a second call to FCU when syncing a new block.
// This is useful when proposing in the next slot, since it lets us defer the
// computation of the next slot's shuffling.
func (s *Service) handleSecondFCUCall(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
if (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) && cfg.headRoot == cfg.blockRoot {
go s.sendFCUWithAttributes(cfg, fcuArgs)
}
}
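Taken together, these helpers split forkchoice notification into an immediate call and an optional deferred one. The sketch below shows the intended call order; processAndNotify is a hypothetical wrapper, simplified from the real postBlockProcess, with error wrapping elided.
// Sketch only: a hypothetical caller wiring getFCUArgs, sendFCU and
// handleSecondFCUCall together in the order postBlockProcess uses them.
func (s *Service) processAndNotify(cfg *postBlockProcessConfig) error {
	fcuArgs := &fcuConfig{}
	// Early blocks get head-only arguments; blocks arriving after the
	// attestation voting window also compute payload attributes here.
	if err := s.getFCUArgs(cfg, fcuArgs); err != nil {
		return err
	}
	// First FCU call, possibly without attributes.
	if err := s.sendFCU(cfg, fcuArgs); err != nil {
		return err
	}
	// For canonical early blocks, a second FCU with attributes is sent in a
	// background routine once the next-slot caches have been warmed.
	s.handleSecondFCUCall(cfg, fcuArgs)
	return nil
}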
// reportProcessingTime reports the metric of how long it took to process the
// current block
func reportProcessingTime(startTime time.Time) {
onBlockProcessingTime.Observe(float64(time.Since(startTime).Milliseconds()))
}
// computePayloadAttributes modifies the passed FCU arguments to
// contain the right payload attributes for the tracked proposer. It gets
// called on blocks that arrive after the attestation voting window, or in a
// background routine after syncing early blocks.
func (s *Service) computePayloadAttributes(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
if cfg.blockRoot == cfg.headRoot {
if err := s.updateCachesPostBlockProcessing(cfg); err != nil {
return err
}
}
fcuArgs.attributes = s.getPayloadAttribute(cfg.ctx, fcuArgs.headState, fcuArgs.proposingSlot, cfg.headRoot[:])
return nil
}
// getBlockPreState returns the pre state of an incoming block. It uses the parent root of the block
// to retrieve the state in DB. It verifies the pre state's validity and the incoming block
// is in the correct time window.

View File

@@ -1,7 +1,6 @@
package blockchain
import (
"bytes"
"context"
"fmt"
"math/big"
@@ -17,6 +16,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/das"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
@@ -40,7 +40,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
prysmTime "github.com/prysmaticlabs/prysm/v4/time"
"github.com/prysmaticlabs/prysm/v4/time/slots"
logTest "github.com/sirupsen/logrus/hooks/test"
)
@@ -69,7 +68,7 @@ func TestStore_OnBlockBatch(t *testing.T) {
require.NoError(t, err)
blks = append(blks, rwsb)
}
err := service.onBlockBatch(ctx, blks)
err := service.onBlockBatch(ctx, blks, &das.MockAvailabilityStore{})
require.NoError(t, err)
jcp := service.CurrentJustifiedCheckpt()
jroot := bytesutil.ToBytes32(jcp.Root)
@@ -99,7 +98,7 @@ func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
require.NoError(t, service.saveInitSyncBlock(ctx, rwsb.Root(), wsb))
blks = append(blks, rwsb)
}
require.NoError(t, service.onBlockBatch(ctx, blks))
require.NoError(t, service.onBlockBatch(ctx, blks, &das.MockAvailabilityStore{}))
}
func TestCachedPreState_CanGetFromStateSummary(t *testing.T) {
@@ -567,7 +566,7 @@ func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, r, postState, true))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, r, [32]byte{}, postState, true}))
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
require.NoError(t, err)
@@ -615,7 +614,7 @@ func TestOnBlock_CanFinalize(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, r, postState, true))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, r, [32]byte{}, postState, true}))
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
require.NoError(t, err)
@@ -641,7 +640,7 @@ func TestOnBlock_CanFinalize(t *testing.T) {
func TestOnBlock_NilBlock(t *testing.T) {
service, tr := minimalTestService(t)
err := service.postBlockProcess(tr.ctx, nil, [32]byte{}, nil, true)
err := service.postBlockProcess(&postBlockProcessConfig{tr.ctx, nil, [32]byte{}, [32]byte{}, nil, true})
require.Equal(t, true, IsInvalidBlock(err))
}
@@ -689,7 +688,7 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, r, postState, false))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, r, [32]byte{}, postState, false}))
testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
require.NoError(t, err)
}
@@ -1111,7 +1110,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb1)
require.NoError(t, err)
lock.Lock()
require.NoError(t, service.postBlockProcess(ctx, wsb1, r1, postState, true))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb1, r1, [32]byte{}, postState, true}))
lock.Unlock()
wg.Done()
}()
@@ -1121,7 +1120,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb2)
require.NoError(t, err)
lock.Lock()
require.NoError(t, service.postBlockProcess(ctx, wsb2, r2, postState, true))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb2, r2, [32]byte{}, postState, true}))
lock.Unlock()
wg.Done()
}()
@@ -1131,7 +1130,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb3)
require.NoError(t, err)
lock.Lock()
require.NoError(t, service.postBlockProcess(ctx, wsb3, r3, postState, true))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb3, r3, [32]byte{}, postState, true}))
lock.Unlock()
wg.Done()
}()
@@ -1141,7 +1140,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb4)
require.NoError(t, err)
lock.Lock()
require.NoError(t, service.postBlockProcess(ctx, wsb4, r4, postState, true))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb4, r4, [32]byte{}, postState, true}))
lock.Unlock()
wg.Done()
}()
@@ -1216,7 +1215,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
}
for i := 6; i < 12; i++ {
@@ -1234,7 +1233,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, false)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
require.NoError(t, err)
}
@@ -1253,7 +1252,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, false)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
require.NoError(t, err)
}
// Check that we haven't justified the second epoch yet
@@ -1275,7 +1274,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
err = service.postBlockProcess(ctx, wsb, firstInvalidRoot, postState, false)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, firstInvalidRoot, [32]byte{}, postState, false})
require.NoError(t, err)
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(2), jc.Epoch)
@@ -1303,7 +1302,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, false)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
// Check that forkchoice's head is the last invalid block imported. The
// store's headroot is the previous head (since the invalid block did
@@ -1332,7 +1331,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, true)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, true})
require.NoError(t, err)
// Check the newly imported block is head, it justified the right
// checkpoint and the node is no longer optimistic
@@ -1394,7 +1393,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
}
for i := 6; i < 12; i++ {
@@ -1412,7 +1411,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, false)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
require.NoError(t, err)
}
@@ -1432,7 +1431,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, false)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
require.NoError(t, err)
}
// Check that we haven't justified the second epoch yet
@@ -1454,7 +1453,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
err = service.postBlockProcess(ctx, wsb, firstInvalidRoot, postState, false)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, firstInvalidRoot, [32]byte{}, postState, false})
require.NoError(t, err)
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(2), jc.Epoch)
@@ -1510,7 +1509,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, true)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, true})
require.NoError(t, err)
// Check the newly imported block is head, it justified the right
// checkpoint and the node is no longer optimistic
@@ -1574,7 +1573,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
}
for i := 6; i < 12; i++ {
@@ -1593,7 +1592,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, false)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
require.NoError(t, err)
}
@@ -1612,7 +1611,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
err = service.postBlockProcess(ctx, wsb, lastValidRoot, postState, false)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, lastValidRoot, [32]byte{}, postState, false})
require.NoError(t, err)
// save the post state and the payload Hash of this block since it will
// be the LVH
@@ -1639,7 +1638,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, invalidRoots[i-13], wsb, postState))
err = service.postBlockProcess(ctx, wsb, invalidRoots[i-13], postState, false)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, invalidRoots[i-13], [32]byte{}, postState, false})
require.NoError(t, err)
}
// Check that we have justified the second epoch
@@ -1704,7 +1703,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, true))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, true}))
// Check that the head is still INVALID and the node is still optimistic
require.Equal(t, invalidHeadRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
optimistic, err = service.IsOptimistic(ctx)
@@ -1727,7 +1726,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, true)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, true})
require.NoError(t, err)
st, err = service.cfg.StateGen.StateByRoot(ctx, root)
require.NoError(t, err)
@@ -1753,7 +1752,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, true)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, true})
require.NoError(t, err)
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
sjc = service.CurrentJustifiedCheckpt()
@@ -1809,7 +1808,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
}
for i := 6; i < 12; i++ {
@@ -1827,7 +1826,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
err = service.postBlockProcess(ctx, wsb, root, postState, false)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
require.NoError(t, err)
}
@@ -1846,7 +1845,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
err = service.postBlockProcess(ctx, wsb, lastValidRoot, postState, false)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, lastValidRoot, [32]byte{}, postState, false})
require.NoError(t, err)
// save the post state and the payload Hash of this block since it will
// be the LVH
@@ -1875,7 +1874,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
require.NoError(t, err)
@@ -1946,7 +1945,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
rwsb, err := consensusblocks.NewROBlock(wsb)
require.NoError(t, err)
// We use onBlockBatch here because the valid chain is missing in forkchoice
require.NoError(t, service.onBlockBatch(ctx, []consensusblocks.ROBlock{rwsb}))
require.NoError(t, service.onBlockBatch(ctx, []consensusblocks.ROBlock{rwsb}, &das.MockAvailabilityStore{}))
// Check that the head is now VALID and the node is not optimistic
require.Equal(t, genesisRoot, service.ensureRootNotZeros(service.cfg.ForkChoiceStore.CachedHeadRoot()))
headRoot, err = service.HeadRoot(ctx)
@@ -1990,7 +1989,7 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, root, postState, false))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
st, err = service.HeadState(ctx)
require.NoError(t, err)
@@ -2045,78 +2044,10 @@ func TestFillMissingBlockPayloadId_PrepareAllPayloads(t *testing.T) {
// Helper function to simulate the block being on time or delayed for proposer
// boost. It alters the genesisTime tracked by the store.
func driftGenesisTime(s *Service, slot, delay int64) {
offset := slot*int64(params.BeaconConfig().SecondsPerSlot) - delay
offset := slot*int64(params.BeaconConfig().SecondsPerSlot) + delay
s.SetGenesisTime(time.Unix(time.Now().Unix()-offset, 0))
}
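A worked example (not part of the change, assuming mainnet's 12-second slots) makes the sign flip concrete:
// driftGenesisTime(s, 1, 2): offset = 1*12 + 2 = 14, genesis = now - 14s,
//   so the wall clock sits 2s into slot 1, simulating a late block.
// driftGenesisTime(s, 1, 0): offset = 12, the clock is exactly at the
//   start of slot 1, simulating an on-time block.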
func Test_commitmentsToCheck(t *testing.T) {
windowSlots, err := slots.EpochEnd(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
require.NoError(t, err)
commits := [][]byte{
bytesutil.PadTo([]byte("a"), 48),
bytesutil.PadTo([]byte("b"), 48),
bytesutil.PadTo([]byte("c"), 48),
bytesutil.PadTo([]byte("d"), 48),
}
cases := []struct {
name string
commits [][]byte
block func(*testing.T) consensusblocks.ROBlock
slot primitives.Slot
}{
{
name: "pre deneb",
block: func(t *testing.T) consensusblocks.ROBlock {
bb := util.NewBeaconBlockBellatrix()
sb, err := consensusblocks.NewSignedBeaconBlock(bb)
require.NoError(t, err)
rb, err := consensusblocks.NewROBlock(sb)
require.NoError(t, err)
return rb
},
},
{
name: "commitments within da",
block: func(t *testing.T) consensusblocks.ROBlock {
d := util.NewBeaconBlockDeneb()
d.Block.Body.BlobKzgCommitments = commits
d.Block.Slot = 100
sb, err := consensusblocks.NewSignedBeaconBlock(d)
require.NoError(t, err)
rb, err := consensusblocks.NewROBlock(sb)
require.NoError(t, err)
return rb
},
commits: commits,
slot: 100,
},
{
name: "commitments outside da",
block: func(t *testing.T) consensusblocks.ROBlock {
d := util.NewBeaconBlockDeneb()
// block is from slot 0, "current slot" is window size +1 (so outside the window)
d.Block.Body.BlobKzgCommitments = commits
sb, err := consensusblocks.NewSignedBeaconBlock(d)
require.NoError(t, err)
rb, err := consensusblocks.NewROBlock(sb)
require.NoError(t, err)
return rb
},
slot: windowSlots + 1,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
b := c.block(t)
co := commitmentsToCheck(b, c.slot)
require.Equal(t, len(c.commits), len(co))
for i := 0; i < len(c.commits); i++ {
require.Equal(t, true, bytes.Equal(c.commits[i], co[i]))
}
})
}
}
func TestMissingIndices(t *testing.T) {
cases := []struct {
name string
@@ -2197,6 +2128,35 @@ func TestMissingIndices(t *testing.T) {
}
}
func Test_getFCUArgs(t *testing.T) {
s, tr := minimalTestService(t)
ctx := tr.ctx
st, keys := util.DeterministicGenesisState(t, 64)
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
cfg := &postBlockProcessConfig{
ctx: ctx,
signed: wsb,
blockRoot: [32]byte{'a'},
postState: st,
isValidPayload: true,
}
// error branch
fcuArgs := &fcuConfig{}
err = s.getFCUArgs(cfg, fcuArgs)
require.ErrorContains(t, "block does not exist", err)
// canonical branch
cfg.headRoot = cfg.blockRoot
fcuArgs = &fcuConfig{}
err = s.getFCUArgs(cfg, fcuArgs)
require.NoError(t, err)
require.Equal(t, cfg.blockRoot, fcuArgs.headRoot)
}
func fakeCommitments(n int) [][]byte {
f := make([][]byte, n)
for i := range f {

View File

@@ -9,7 +9,6 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
@@ -122,35 +121,41 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
defer s.cfg.ForkChoiceStore.Unlock()
// This function is only called at 10 seconds or 0 seconds into the slot
disparity := params.BeaconConfig().MaximumGossipClockDisparityDuration()
if !features.Get().DisableReorgLateBlocks {
disparity += reorgLateBlockCountAttestations
}
disparity += reorgLateBlockCountAttestations
s.processAttestations(ctx, disparity)
processAttsElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
start = time.Now()
// return early if we haven't changed head
newHeadRoot, err := s.cfg.ForkChoiceStore.Head(ctx)
if err != nil {
log.WithError(err).Error("Could not compute head from new attestations")
// Fallback to our current head root in the event of a failure.
s.headLock.RLock()
newHeadRoot = s.headRoot()
s.headLock.RUnlock()
return
}
if !s.isNewHead(newHeadRoot) {
return
}
log.WithField("newHeadRoot", fmt.Sprintf("%#x", newHeadRoot)).Debug("Head changed due to attestations")
headState, headBlock, err := s.getStateAndBlock(ctx, newHeadRoot)
if err != nil {
log.WithError(err).Error("could not get head block")
return
}
newAttHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
changed, err := s.forkchoiceUpdateWithExecution(s.ctx, newHeadRoot, proposingSlot)
if err != nil {
log.WithError(err).Error("could not update forkchoice")
fcuArgs := &fcuConfig{
headState: headState,
headRoot: newHeadRoot,
headBlock: headBlock,
proposingSlot: proposingSlot,
}
if changed {
s.headLock.RLock()
log.WithFields(logrus.Fields{
"oldHeadRoot": fmt.Sprintf("%#x", s.headRoot()),
"newHeadRoot": fmt.Sprintf("%#x", newHeadRoot),
}).Debug("Head changed due to attestations")
s.headLock.RUnlock()
fcuArgs.attributes = s.getPayloadAttribute(ctx, headState, proposingSlot, newHeadRoot[:])
if fcuArgs.attributes != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
return
}
if err := s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs); err != nil {
log.WithError(err).Error("could not update forkchoice")
}
}

View File

@@ -112,7 +112,7 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, tRoot, postState, false))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, tRoot, [32]byte{}, postState, false}))
copied, err = service.cfg.StateGen.StateByRoot(ctx, tRoot)
require.NoError(t, err)
require.Equal(t, 2, fcs.NodeCount())
@@ -168,7 +168,7 @@ func TestService_UpdateHead_NoAtts(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
require.NoError(t, service.postBlockProcess(ctx, wsb, tRoot, postState, false))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, tRoot, [32]byte{}, postState, false}))
require.Equal(t, 2, fcs.NodeCount())
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
require.Equal(t, tRoot, service.head.root)

View File

@@ -12,6 +12,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/das"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
@@ -33,8 +34,8 @@ var epochsSinceFinalitySaveHotStateDB = primitives.Epoch(100)
// BlockReceiver interface defines the methods of chain service for receiving and processing new blocks.
type BlockReceiver interface {
ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) error
ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock) error
ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityStore) error
ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityStore) error
HasBlock(ctx context.Context, root [32]byte) bool
RecentBlockSlot(root [32]byte) (primitives.Slot, error)
BlockBeingSynced([32]byte) bool
@@ -56,7 +57,7 @@ type SlashingReceiver interface {
// 1. Validate block, apply state transition and update checkpoints
// 2. Apply fork choice to the processed block
// 3. Save latest head info
func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) error {
func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityStore) error {
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlock")
defer span.End()
// Return early if the block has been synced
@@ -72,6 +73,10 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
if err != nil {
return err
}
rob, err := blocks.NewROBlockWithRoot(block, blockRoot)
if err != nil {
return err
}
preState, err := s.getBlockPreState(ctx, blockCopy.Block())
if err != nil {
@@ -106,10 +111,15 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
if err := eg.Wait(); err != nil {
return err
}
daStartTime := time.Now()
if err := s.isDataAvailable(ctx, blockRoot, blockCopy); err != nil {
return errors.Wrap(err, "could not validate blob data availability")
if avs != nil {
if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), rob); err != nil {
return errors.Wrap(err, "could not validate blob data availability (AvailabilityStore.IsDataAvailable)")
}
} else {
if err := s.isDataAvailable(ctx, blockRoot, blockCopy); err != nil {
return errors.Wrap(err, "could not validate blob data availability")
}
}
daWaitedTime := time.Since(daStartTime)
@@ -119,7 +129,14 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
if err := s.savePostStateInfo(ctx, blockRoot, blockCopy, postState); err != nil {
return errors.Wrap(err, "could not save post state info")
}
if err := s.postBlockProcess(ctx, blockCopy, blockRoot, postState, isValidPayload); err != nil {
args := &postBlockProcessConfig{
ctx: ctx,
signed: blockCopy,
blockRoot: blockRoot,
postState: postState,
isValidPayload: isValidPayload,
}
if err := s.postBlockProcess(args); err != nil {
err := errors.Wrap(err, "could not process block")
tracing.AnnotateError(span, err)
return err
@@ -196,7 +213,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
// ReceiveBlockBatch processes the whole block batch at once, assuming the block batch is linear, transitioning
// the state, performing batch verification of all collected signatures and then performing the appropriate
// actions for a block post-transition.
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock) error {
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityStore) error {
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlockBatch")
defer span.End()
@@ -204,7 +221,7 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock
defer s.cfg.ForkChoiceStore.Unlock()
// Apply state transition on the incoming newly received block batches, one by one.
if err := s.onBlockBatch(ctx, blocks); err != nil {
if err := s.onBlockBatch(ctx, blocks, avs); err != nil {
err := errors.Wrap(err, "could not process block in batch")
tracing.AnnotateError(span, err)
return err

View File

@@ -8,6 +8,7 @@ import (
blockchainTesting "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/das"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
@@ -146,7 +147,7 @@ func TestService_ReceiveBlock(t *testing.T) {
require.NoError(t, err)
wsb, err := blocks.NewSignedBeaconBlock(tt.args.block)
require.NoError(t, err)
err = s.ReceiveBlock(ctx, wsb, root)
err = s.ReceiveBlock(ctx, wsb, root, nil)
if tt.wantedErr != "" {
assert.ErrorContains(t, tt.wantedErr, err)
} else {
@@ -179,7 +180,7 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
go func() {
wsb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, s.ReceiveBlock(ctx, wsb, root))
require.NoError(t, s.ReceiveBlock(ctx, wsb, root, nil))
wg.Done()
}()
wg.Wait()
@@ -243,7 +244,7 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
require.NoError(t, err)
rwsb, err := blocks.NewROBlock(wsb)
require.NoError(t, err)
err = s.ReceiveBlockBatch(ctx, []blocks.ROBlock{rwsb})
err = s.ReceiveBlockBatch(ctx, []blocks.ROBlock{rwsb}, &das.MockAvailabilityStore{})
if tt.wantedErr != "" {
assert.ErrorContains(t, tt.wantedErr, err)
} else {

View File

@@ -17,6 +17,7 @@ go_library(
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/das:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/state:go_default_library",

View File

@@ -16,6 +16,7 @@ import (
opfeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/das"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
@@ -208,7 +209,7 @@ func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block interf
}
// ReceiveBlockBatch processes blocks in batches from initial-sync.
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBlock) error {
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBlock, _ das.AvailabilityStore) error {
if s.State == nil {
return ErrNilState
}
@@ -238,7 +239,7 @@ func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBl
}
// ReceiveBlock mocks ReceiveBlock method in chain service.
func (s *ChainService) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, _ [32]byte) error {
func (s *ChainService) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, _ [32]byte, _ das.AvailabilityStore) error {
if s.ReceiveBlockMockErr != nil {
return s.ReceiveBlockMockErr
}
@@ -320,7 +321,7 @@ func (s *ChainService) PreviousJustifiedCheckpt() *ethpb.Checkpoint {
}
// ReceiveAttestation mocks ReceiveAttestation method in chain service.
func (_ *ChainService) ReceiveAttestation(_ context.Context, _ *ethpb.Attestation) error {
func (*ChainService) ReceiveAttestation(_ context.Context, _ *ethpb.Attestation) error {
return nil
}
@@ -400,12 +401,12 @@ func (s *ChainService) RecentBlockSlot([32]byte) (primitives.Slot, error) {
}
// HeadGenesisValidatorsRoot mocks HeadGenesisValidatorsRoot method in chain service.
func (_ *ChainService) HeadGenesisValidatorsRoot() [32]byte {
func (*ChainService) HeadGenesisValidatorsRoot() [32]byte {
return [32]byte{}
}
// VerifyLmdFfgConsistency mocks VerifyLmdFfgConsistency and always returns nil.
func (_ *ChainService) VerifyLmdFfgConsistency(_ context.Context, a *ethpb.Attestation) error {
func (*ChainService) VerifyLmdFfgConsistency(_ context.Context, a *ethpb.Attestation) error {
if !bytes.Equal(a.Data.BeaconBlockRoot, a.Data.Target.Root) {
return errors.New("LMD and FFG miss matched")
}
@@ -413,7 +414,7 @@ func (_ *ChainService) VerifyLmdFfgConsistency(_ context.Context, a *ethpb.Attes
}
// ChainHeads mocks ChainHeads and always return nil.
func (_ *ChainService) ChainHeads() ([][32]byte, []primitives.Slot) {
func (*ChainService) ChainHeads() ([][32]byte, []primitives.Slot) {
return [][32]byte{
bytesutil.ToBytes32(bytesutil.PadTo([]byte("foo"), 32)),
bytesutil.ToBytes32(bytesutil.PadTo([]byte("bar"), 32)),
@@ -422,7 +423,7 @@ func (_ *ChainService) ChainHeads() ([][32]byte, []primitives.Slot) {
}
// HeadPublicKeyToValidatorIndex mocks HeadPublicKeyToValidatorIndex and always return 0 and true.
func (_ *ChainService) HeadPublicKeyToValidatorIndex(_ [fieldparams.BLSPubkeyLength]byte) (primitives.ValidatorIndex, bool) {
func (*ChainService) HeadPublicKeyToValidatorIndex(_ [fieldparams.BLSPubkeyLength]byte) (primitives.ValidatorIndex, bool) {
return 0, true
}
@@ -486,7 +487,7 @@ func (s *ChainService) UpdateHead(ctx context.Context, slot primitives.Slot) {
}
// ReceiveAttesterSlashing mocks the same method in the chain service.
func (s *ChainService) ReceiveAttesterSlashing(context.Context, *ethpb.AttesterSlashing) {}
func (*ChainService) ReceiveAttesterSlashing(context.Context, *ethpb.AttesterSlashing) {}
// IsFinalized mocks the same method in the chain service.
func (s *ChainService) IsFinalized(_ context.Context, blockRoot [32]byte) bool {
@@ -599,12 +600,12 @@ func (s *ChainService) ProposerBoost() [32]byte {
}
// FinalizedBlockHash mocks the same method in the chain service
func (s *ChainService) FinalizedBlockHash() [32]byte {
func (*ChainService) FinalizedBlockHash() [32]byte {
return [32]byte{}
}
// UnrealizedJustifiedPayloadBlockHash mocks the same method in the chain service
func (s *ChainService) UnrealizedJustifiedPayloadBlockHash() [32]byte {
func (*ChainService) UnrealizedJustifiedPayloadBlockHash() [32]byte {
return [32]byte{}
}

View File

@@ -4,6 +4,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
)
@@ -11,6 +12,9 @@ import (
// validators/prepare_proposer endpoint, of the proposer at the given slot.
// It only returns true if the tracked proposer is present and active.
func (s *Service) trackedProposer(st state.ReadOnlyBeaconState, slot primitives.Slot) (cache.TrackedValidator, bool) {
if features.Get().PrepareAllPayloads {
return cache.TrackedValidator{Active: true}, true
}
id, err := helpers.BeaconProposerIndexAtSlot(s.ctx, st, slot)
if err != nil {
return cache.TrackedValidator{}, false

View File

@@ -42,17 +42,15 @@ var (
// root would be for slot 32 if present.
type ProposerIndicesCache struct {
sync.Mutex
indices map[primitives.Epoch]map[[32]byte][fieldparams.SlotsPerEpoch]primitives.ValidatorIndex
unsafeIndices map[primitives.Epoch]map[[32]byte][fieldparams.SlotsPerEpoch]primitives.ValidatorIndex
rootMap map[forkchoicetypes.Checkpoint][32]byte // A map from checkpoint root to state root
indices map[primitives.Epoch]map[[32]byte][fieldparams.SlotsPerEpoch]primitives.ValidatorIndex
rootMap map[forkchoicetypes.Checkpoint][32]byte // A map from checkpoint root to state root
}
// NewProposerIndicesCache returns a newly created cache
func NewProposerIndicesCache() *ProposerIndicesCache {
return &ProposerIndicesCache{
indices: make(map[primitives.Epoch]map[[32]byte][fieldparams.SlotsPerEpoch]primitives.ValidatorIndex),
unsafeIndices: make(map[primitives.Epoch]map[[32]byte][fieldparams.SlotsPerEpoch]primitives.ValidatorIndex),
rootMap: make(map[forkchoicetypes.Checkpoint][32]byte),
indices: make(map[primitives.Epoch]map[[32]byte][fieldparams.SlotsPerEpoch]primitives.ValidatorIndex),
rootMap: make(map[forkchoicetypes.Checkpoint][32]byte),
}
}
@@ -74,18 +72,6 @@ func (p *ProposerIndicesCache) ProposerIndices(epoch primitives.Epoch, root [32]
return indices, exists
}
// UnsafeProposerIndices returns the proposer indices (unsafe) for the given root
func (p *ProposerIndicesCache) UnsafeProposerIndices(epoch primitives.Epoch, root [32]byte) ([fieldparams.SlotsPerEpoch]primitives.ValidatorIndex, bool) {
p.Lock()
defer p.Unlock()
inner, ok := p.unsafeIndices[epoch]
if !ok {
return [fieldparams.SlotsPerEpoch]primitives.ValidatorIndex{}, false
}
indices, exists := inner[root]
return indices, exists
}
// Prune resets the ProposerIndicesCache to its initial state
func (p *ProposerIndicesCache) Prune(epoch primitives.Epoch) {
p.Lock()
@@ -95,11 +81,6 @@ func (p *ProposerIndicesCache) Prune(epoch primitives.Epoch) {
delete(p.indices, key)
}
}
for key := range p.unsafeIndices {
if key < epoch {
delete(p.unsafeIndices, key)
}
}
for key := range p.rootMap {
if key.Epoch+1 < epoch {
delete(p.rootMap, key)
@@ -120,18 +101,6 @@ func (p *ProposerIndicesCache) Set(epoch primitives.Epoch, root [32]byte, indice
inner[root] = indices
}
// SetUnsafe sets the unsafe proposer indices for the given root as key
func (p *ProposerIndicesCache) SetUnsafe(epoch primitives.Epoch, root [32]byte, indices [fieldparams.SlotsPerEpoch]primitives.ValidatorIndex) {
p.Lock()
defer p.Unlock()
inner, ok := p.unsafeIndices[epoch]
if !ok {
inner = make(map[[32]byte][fieldparams.SlotsPerEpoch]primitives.ValidatorIndex)
p.unsafeIndices[epoch] = inner
}
inner[root] = indices
}
// SetCheckpoint updates the map from checkpoints to state roots
func (p *ProposerIndicesCache) SetCheckpoint(c forkchoicetypes.Checkpoint, root [32]byte) {
p.Lock()

View File

@@ -34,29 +34,6 @@ func TestProposerCache_Set(t *testing.T) {
require.Equal(t, emptyIndices, received)
}
func TestProposerCache_SetUnsafe(t *testing.T) {
cache := NewProposerIndicesCache()
bRoot := [32]byte{'A'}
indices, ok := cache.UnsafeProposerIndices(0, bRoot)
require.Equal(t, false, ok)
emptyIndices := [fieldparams.SlotsPerEpoch]primitives.ValidatorIndex{}
require.Equal(t, indices, emptyIndices, "Expected committee count not to exist in empty cache")
emptyIndices[0] = 1
cache.SetUnsafe(0, bRoot, emptyIndices)
received, ok := cache.UnsafeProposerIndices(0, bRoot)
require.Equal(t, true, ok)
require.Equal(t, received, emptyIndices)
newRoot := [32]byte{'B'}
copy(emptyIndices[3:], []primitives.ValidatorIndex{1, 2, 3, 4, 5, 6})
cache.SetUnsafe(0, newRoot, emptyIndices)
received, ok = cache.UnsafeProposerIndices(0, newRoot)
require.Equal(t, true, ok)
require.Equal(t, emptyIndices, received)
}
func TestProposerCache_CheckpointAndPrune(t *testing.T) {
cache := NewProposerIndicesCache()
indices := [fieldparams.SlotsPerEpoch]primitives.ValidatorIndex{}
@@ -65,7 +42,6 @@ func TestProposerCache_CheckpointAndPrune(t *testing.T) {
copy(indices[3:], []primitives.ValidatorIndex{1, 2, 3, 4, 5, 6})
for i := 1; i < 10; i++ {
cache.Set(primitives.Epoch(i), root, indices)
cache.SetUnsafe(primitives.Epoch(i), root, indices)
cache.SetCheckpoint(forkchoicetypes.Checkpoint{Epoch: primitives.Epoch(i - 1), Root: cpRoot}, root)
}
received, ok := cache.ProposerIndices(1, root)
@@ -80,18 +56,6 @@ func TestProposerCache_CheckpointAndPrune(t *testing.T) {
require.Equal(t, true, ok)
require.Equal(t, indices, received)
received, ok = cache.UnsafeProposerIndices(1, root)
require.Equal(t, true, ok)
require.Equal(t, indices, received)
received, ok = cache.UnsafeProposerIndices(4, root)
require.Equal(t, true, ok)
require.Equal(t, indices, received)
received, ok = cache.UnsafeProposerIndices(9, root)
require.Equal(t, true, ok)
require.Equal(t, indices, received)
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 0, Root: cpRoot})
require.Equal(t, true, ok)
require.Equal(t, indices, received)
@@ -123,18 +87,6 @@ func TestProposerCache_CheckpointAndPrune(t *testing.T) {
require.Equal(t, true, ok)
require.Equal(t, indices, received)
received, ok = cache.UnsafeProposerIndices(1, root)
require.Equal(t, false, ok)
require.Equal(t, emptyIndices, received)
received, ok = cache.UnsafeProposerIndices(4, root)
require.Equal(t, false, ok)
require.Equal(t, emptyIndices, received)
received, ok = cache.UnsafeProposerIndices(9, root)
require.Equal(t, true, ok)
require.Equal(t, indices, received)
received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 0, Root: cpRoot})
require.Equal(t, false, ok)
require.Equal(t, emptyIndices, received)

View File

@@ -26,6 +26,12 @@ const (
// BlobSidecarReceived is sent after a blob sidecar is received from gossip or rpc.
BlobSidecarReceived = 6
// ProposerSlashingReceived is sent after a proposer slashing is received from gossip or rpc
ProposerSlashingReceived = 7
// AttesterSlashingReceived is sent after an attester slashing is received from gossip or rpc
AttesterSlashingReceived = 8
)
// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
@@ -61,3 +67,13 @@ type BLSToExecutionChangeReceivedData struct {
type BlobSidecarReceivedData struct {
Blob *blocks.VerifiedROBlob
}
// ProposerSlashingReceivedData is the data sent with ProposerSlashingReceived events.
type ProposerSlashingReceivedData struct {
ProposerSlashing *ethpb.ProposerSlashing
}
// AttesterSlashingReceivedData is the data sent with AttesterSlashingReceived events.
type AttesterSlashingReceivedData struct {
AttesterSlashing *ethpb.AttesterSlashing
}
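As a usage illustration, sending one of the new events follows the same pattern as the existing feed sends. The sketch below is hypothetical wiring, not part of this change; it assumes the opfeed import alias used elsewhere in the tree and the operation feed's Notifier interface.
// Sketch: a hypothetical helper broadcasting a received attester slashing on
// the operation feed, mirroring the existing Send pattern for feed events.
func notifyAttesterSlashing(n opfeed.Notifier, slashing *ethpb.AttesterSlashing) {
	n.OperationFeed().Send(&feed.Event{
		Type: opfeed.AttesterSlashingReceived,
		Data: &opfeed.AttesterSlashingReceivedData{AttesterSlashing: slashing},
	})
}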

View File

@@ -64,6 +64,7 @@ go_test(
deps = [
"//beacon-chain/cache:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/fieldparams:go_default_library",

View File

@@ -391,49 +391,6 @@ func UpdateCachedCheckpointToStateRoot(state state.ReadOnlyBeaconState, cp *fork
return nil
}
// UpdateUnsafeProposerIndicesInCache updates proposer indices entry of the
// cache one epoch in advance.
// Input state is used to retrieve active validator indices.
// Input root is to use as key in the cache.
// Input epoch is the epoch to retrieve proposer indices for.
func UpdateUnsafeProposerIndicesInCache(ctx context.Context, state state.ReadOnlyBeaconState, epoch primitives.Epoch) error {
// The cache uses the state root at the end of (current epoch - 2) as key.
// (e.g. for epoch 2, the key is root at slot 31)
if epoch <= params.BeaconConfig().GenesisEpoch+2*params.BeaconConfig().MinSeedLookahead {
return nil
}
slot, err := slots.EpochEnd(epoch - 2)
if err != nil {
return err
}
root, err := state.StateRootAtIndex(uint64(slot % params.BeaconConfig().SlotsPerHistoricalRoot))
if err != nil {
return err
}
// Skip cache update if the key already exists
_, ok := proposerIndicesCache.UnsafeProposerIndices(epoch, [32]byte(root))
if ok {
return nil
}
indices, err := ActiveValidatorIndices(ctx, state, epoch)
if err != nil {
return err
}
proposerIndices, err := precomputeProposerIndices(state, indices, epoch)
if err != nil {
return err
}
if len(proposerIndices) != int(params.BeaconConfig().SlotsPerEpoch) {
return errors.New("invalid proposer length returned from state")
}
// This is here to deal with tests only
var indicesArray [fieldparams.SlotsPerEpoch]primitives.ValidatorIndex
copy(indicesArray[:], proposerIndices)
proposerIndicesCache.Prune(epoch - 2)
proposerIndicesCache.SetUnsafe(epoch, [32]byte(root), indicesArray)
return nil
}
// ClearCache clears the beacon committee cache and sync committee cache.
func ClearCache() {
committeeCache.Clear()

View File

@@ -9,6 +9,7 @@ import (
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
@@ -271,11 +272,22 @@ func BeaconProposerIndex(ctx context.Context, state state.ReadOnlyBeaconState) (
func cachedProposerIndexAtSlot(slot primitives.Slot, root [32]byte) (primitives.ValidatorIndex, error) {
proposerIndices, has := proposerIndicesCache.ProposerIndices(slots.ToEpoch(slot), root)
if !has {
cache.ProposerIndicesCacheMiss.Inc()
return 0, errProposerIndexMiss
}
if len(proposerIndices) != int(params.BeaconConfig().SlotsPerEpoch) {
cache.ProposerIndicesCacheMiss.Inc()
return 0, errProposerIndexMiss
}
return proposerIndices[slot%params.BeaconConfig().SlotsPerEpoch], nil
}
// ProposerIndexAtSlotFromCheckpoint returns the proposer index at the given
// slot from the cache at the given checkpoint
func ProposerIndexAtSlotFromCheckpoint(c *forkchoicetypes.Checkpoint, slot primitives.Slot) (primitives.ValidatorIndex, error) {
proposerIndices, has := proposerIndicesCache.IndicesFromCheckpoint(*c)
if !has {
return 0, errProposerIndexMiss
}
if len(proposerIndices) != int(params.BeaconConfig().SlotsPerEpoch) {
return 0, errProposerIndexMiss
}
return proposerIndices[slot%params.BeaconConfig().SlotsPerEpoch], nil
@@ -302,7 +314,7 @@ func BeaconProposerIndexAtSlot(ctx context.Context, state state.ReadOnlyBeaconSt
return pid, nil
}
if err := UpdateProposerIndicesInCache(ctx, state, e); err != nil {
return 0, errors.Wrap(err, "could not update committee cache")
return 0, errors.Wrap(err, "could not update proposer index cache")
}
pid, err = cachedProposerIndexAtSlot(slot, [32]byte(r))
if err == nil {

View File

@@ -7,6 +7,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
@@ -802,3 +803,18 @@ func TestLastActivatedValidatorIndex_OK(t *testing.T) {
require.NoError(t, err)
require.Equal(t, index, primitives.ValidatorIndex(3))
}
func TestProposerIndexFromCheckpoint(t *testing.T) {
e := primitives.Epoch(2)
r := [32]byte{'a'}
root := [32]byte{'b'}
ids := [32]primitives.ValidatorIndex{}
slot := primitives.Slot(69) // slot 5 in the epoch
ids[5] = primitives.ValidatorIndex(19)
proposerIndicesCache.Set(e, r, ids)
c := &forkchoicetypes.Checkpoint{Root: root, Epoch: e - 1}
proposerIndicesCache.SetCheckpoint(*c, r)
id, err := ProposerIndexAtSlotFromCheckpoint(c, slot)
require.NoError(t, err)
require.Equal(t, ids[5], id)
}

View File

@@ -0,0 +1,49 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"availability.go",
"cache.go",
"iface.go",
"mock.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/das",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/db/filesystem:go_default_library",
"//beacon-chain/verification:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",
"//runtime/logging:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"availability_test.go",
"cache_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/db/filesystem:go_default_library",
"//beacon-chain/verification:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

View File

@@ -0,0 +1,149 @@
package das
import (
"context"
"fmt"
errors "github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/verification"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/runtime/logging"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/time/slots"
log "github.com/sirupsen/logrus"
)
var (
errMixedRoots = errors.New("BlobSidecars must all be for the same block")
)
// LazilyPersistentStore is an implementation of AvailabilityStore to be used when batch syncing.
// It holds any blobs passed to Persist until IsDataAvailable is called for their
// block, at which time they undergo full verification and are saved to disk.
type LazilyPersistentStore struct {
store *filesystem.BlobStorage
cache *cache
verifier BlobBatchVerifier
}
var _ AvailabilityStore = &LazilyPersistentStore{}
// BlobBatchVerifier enables LazilyPersistentStore to manage the verification process
// going from ROBlob->VerifiedROBlob, while avoiding the decision of which individual verifications
// to run and in what order. Since LazilyPersistentStore always tries to verify and save blobs only when
// they are all available, the interface takes a slice of blobs, enabling the implementation to optimize
// batch verification.
type BlobBatchVerifier interface {
VerifiedROBlobs(ctx context.Context, blk blocks.ROBlock, sc []blocks.ROBlob) ([]blocks.VerifiedROBlob, error)
}
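To illustrate the contract (this adapter is not part of the change, and its name is invented for the example): anything with a matching VerifiedROBlobs method satisfies the interface, so cross-cutting checks can be layered in front of a real verifier. A minimal sketch:

// rootCheckedVerifier re-checks the same-root invariant before delegating the
// batch to a wrapped verifier. Illustrative only; Persist already enforces
// this invariant before sidecars ever reach the verifier.
type rootCheckedVerifier struct {
	inner BlobBatchVerifier
}

func (v rootCheckedVerifier) VerifiedROBlobs(ctx context.Context, blk blocks.ROBlock, scs []blocks.ROBlob) ([]blocks.VerifiedROBlob, error) {
	for i := range scs {
		if scs[i].BlockRoot() != blk.Root() {
			return nil, errMixedRoots
		}
	}
	return v.inner.VerifiedROBlobs(ctx, blk, scs)
}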
// NewLazilyPersistentStore creates a new LazilyPersistentStore. This constructor should always be used
// when creating a LazilyPersistentStore because it needs to initialize the cache under the hood.
func NewLazilyPersistentStore(store *filesystem.BlobStorage, verifier BlobBatchVerifier) *LazilyPersistentStore {
return &LazilyPersistentStore{
store: store,
cache: newCache(),
verifier: verifier,
}
}
// Persist adds blobs to the working blob cache. Blobs stored in this cache will be persisted
// for at least as long as the node is running. Once IsDataAvailable succeeds, all blobs referenced
// by the given block are guaranteed to be persisted for the remainder of the retention period.
func (s *LazilyPersistentStore) Persist(current primitives.Slot, sc ...blocks.ROBlob) error {
if len(sc) == 0 {
return nil
}
if len(sc) > 1 {
first := sc[0].BlockRoot()
for i := 1; i < len(sc); i++ {
if first != sc[i].BlockRoot() {
return errMixedRoots
}
}
}
if !params.WithinDAPeriod(slots.ToEpoch(sc[0].Slot()), slots.ToEpoch(current)) {
return nil
}
key := keyFromSidecar(sc[0])
entry := s.cache.ensure(key)
for i := range sc {
if err := entry.stash(&sc[i]); err != nil {
return err
}
}
return nil
}
// IsDataAvailable returns nil if all the commitments in the given block are persisted to the db and have been verified.
// BlobSidecars already in the db are assumed to have been previously verified against the block.
func (s *LazilyPersistentStore) IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
blockCommitments, err := commitmentsToCheck(b, current)
if err != nil {
return errors.Wrapf(err, "could check data availability for block %#x", b.Root())
}
// Return early for blocks that are pre-deneb or which do not have any commitments.
if blockCommitments.count() == 0 {
return nil
}
key := keyFromBlock(b)
entry := s.cache.ensure(key)
defer s.cache.delete(key)
root := b.Root()
// Verify we have all the expected sidecars, and fail fast if any are missing or inconsistent.
// We don't try to salvage problematic batches because this indicates a misbehaving peer and we'd rather
// ignore their response and decrease their peer score.
sidecars, err := entry.filter(root, blockCommitments)
if err != nil {
return errors.Wrap(err, "incomplete BlobSidecar batch")
}
// Do thorough verifications of each BlobSidecar for the block.
// Same as above, we don't save BlobSidecars if there are any problems with the batch.
vscs, err := s.verifier.VerifiedROBlobs(ctx, b, sidecars)
if err != nil {
var me verification.VerificationMultiError
ok := errors.As(err, &me)
if ok {
fails := me.Failures()
lf := make(log.Fields, len(fails))
for i := range fails {
lf[fmt.Sprintf("fail_%d", i)] = fails[i].Error()
}
log.WithFields(lf).WithFields(logging.BlockFieldsFromBlob(sidecars[0])).
Debug("invalid BlobSidecars received")
}
return errors.Wrapf(err, "invalid BlobSidecars received for block %#x", root)
}
// Ensure that each BlobSidecar is written to disk.
for i := range vscs {
if err := s.store.Save(vscs[i]); err != nil {
return errors.Wrapf(err, "failed to save BlobSidecar index %d for block %#x", vscs[i].Index, root)
}
}
// All BlobSidecars are persisted - da check succeeds.
return nil
}
func commitmentsToCheck(b blocks.ROBlock, current primitives.Slot) (safeCommitmentArray, error) {
var ar safeCommitmentArray
if b.Version() < version.Deneb {
return ar, nil
}
// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
if !params.WithinDAPeriod(slots.ToEpoch(b.Block().Slot()), slots.ToEpoch(current)) {
return ar, nil
}
kc, err := b.Block().Body().BlobKzgCommitments()
if err != nil {
return ar, err
}
if len(kc) > len(ar) {
return ar, errIndexOutOfBounds
}
copy(ar[:], kc)
return ar, nil
}
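Putting the pieces together, a hedged sketch of how a batch-syncing caller might drive the store; the function and its parameters are illustrative, not part of this change:

// checkDA stashes a peer's sidecars, then runs the DA check for their block.
// Verification and disk writes happen inside IsDataAvailable, and only once
// every commitment in the block is accounted for.
func checkDA(ctx context.Context, bv BlobBatchVerifier, bs *filesystem.BlobStorage,
	current primitives.Slot, blk blocks.ROBlock, scs []blocks.ROBlob) error {
	as := NewLazilyPersistentStore(bs, bv)
	if err := as.Persist(current, scs...); err != nil {
		return errors.Wrap(err, "could not stash BlobSidecars")
	}
	return as.IsDataAvailable(ctx, current, blk)
}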


@@ -0,0 +1,214 @@
package das
import (
"bytes"
"context"
"testing"
errors "github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/verification"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
"github.com/prysmaticlabs/prysm/v4/time/slots"
)
func Test_commitmentsToCheck(t *testing.T) {
windowSlots, err := slots.EpochEnd(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
require.NoError(t, err)
commits := [][]byte{
bytesutil.PadTo([]byte("a"), 48),
bytesutil.PadTo([]byte("b"), 48),
bytesutil.PadTo([]byte("c"), 48),
bytesutil.PadTo([]byte("d"), 48),
}
cases := []struct {
name string
commits [][]byte
block func(*testing.T) blocks.ROBlock
slot primitives.Slot
err error
}{
{
name: "pre deneb",
block: func(t *testing.T) blocks.ROBlock {
bb := util.NewBeaconBlockBellatrix()
sb, err := blocks.NewSignedBeaconBlock(bb)
require.NoError(t, err)
rb, err := blocks.NewROBlock(sb)
require.NoError(t, err)
return rb
},
},
{
name: "commitments within da",
block: func(t *testing.T) blocks.ROBlock {
d := util.NewBeaconBlockDeneb()
d.Block.Body.BlobKzgCommitments = commits
d.Block.Slot = 100
sb, err := blocks.NewSignedBeaconBlock(d)
require.NoError(t, err)
rb, err := blocks.NewROBlock(sb)
require.NoError(t, err)
return rb
},
commits: commits,
slot: 100,
},
{
name: "commitments outside da",
block: func(t *testing.T) blocks.ROBlock {
d := util.NewBeaconBlockDeneb()
// block is from slot 0, "current slot" is window size +1 (so outside the window)
d.Block.Body.BlobKzgCommitments = commits
sb, err := blocks.NewSignedBeaconBlock(d)
require.NoError(t, err)
rb, err := blocks.NewROBlock(sb)
require.NoError(t, err)
return rb
},
slot: windowSlots + 1,
},
{
name: "excessive commitments",
block: func(t *testing.T) blocks.ROBlock {
d := util.NewBeaconBlockDeneb()
d.Block.Slot = 100
// Slot 100 keeps the block within the DA window, so the commitment length check is reached
d.Block.Body.BlobKzgCommitments = commits
// Double the number of commitments, assert that this is over the limit
d.Block.Body.BlobKzgCommitments = append(commits, d.Block.Body.BlobKzgCommitments...)
sb, err := blocks.NewSignedBeaconBlock(d)
require.NoError(t, err)
rb, err := blocks.NewROBlock(sb)
require.NoError(t, err)
c, err := rb.Block().Body().BlobKzgCommitments()
require.NoError(t, err)
require.Equal(t, true, len(c) > fieldparams.MaxBlobsPerBlock)
return rb
},
slot: windowSlots + 1,
err: errIndexOutOfBounds,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
b := c.block(t)
co, err := commitmentsToCheck(b, c.slot)
if c.err != nil {
require.ErrorIs(t, err, c.err)
} else {
require.NoError(t, err)
}
require.Equal(t, len(c.commits), co.count())
for i := 0; i < len(c.commits); i++ {
require.Equal(t, true, bytes.Equal(c.commits[i], co[i]))
}
})
}
}
func daAlwaysSucceeds(_ [][]byte, _ []*ethpb.BlobSidecar) error {
return nil
}
type mockDA struct {
t *testing.T
scs []blocks.ROBlob
err error
}
func TestLazilyPersistent_Missing(t *testing.T) {
ctx := context.Background()
store := filesystem.NewEphemeralBlobStorage(t)
blk, scs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
mbv := &mockBlobBatchVerifier{t: t, scs: scs}
as := NewLazilyPersistentStore(store, mbv)
// Only one sidecar persisted; the DA check should fail on the missing indices
require.NoError(t, as.Persist(1, scs[2]))
err := as.IsDataAvailable(ctx, 1, blk)
require.ErrorIs(t, err, errMissingSidecar)
// All but one persisted; still expect an error for the missing index
require.NoError(t, as.Persist(1, scs[0]))
err = as.IsDataAvailable(ctx, 1, blk)
require.ErrorIs(t, err, errMissingSidecar)
// All persisted, return nil
require.NoError(t, as.Persist(1, scs...))
require.NoError(t, as.IsDataAvailable(ctx, 1, blk))
}
func TestLazilyPersistent_Mismatch(t *testing.T) {
ctx := context.Background()
store := filesystem.NewEphemeralBlobStorage(t)
blk, scs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
mbv := &mockBlobBatchVerifier{t: t, err: errors.New("kzg check should not run")}
scs[0].KzgCommitment = bytesutil.PadTo([]byte("nope"), 48)
as := NewLazilyPersistentStore(store, mbv)
// The single persisted sidecar has a corrupted commitment, so the DA check must fail before KZG verification runs
require.NoError(t, as.Persist(1, scs[0]))
err := as.IsDataAvailable(ctx, 1, blk)
require.NotNil(t, err)
require.ErrorIs(t, err, errCommitmentMismatch)
}
func TestLazyPersistOnceCommitted(t *testing.T) {
_, scs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 6)
as := NewLazilyPersistentStore(filesystem.NewEphemeralBlobStorage(t), &mockBlobBatchVerifier{})
// stashes as expected
require.NoError(t, as.Persist(1, scs...))
// rejects duplicates
require.ErrorIs(t, as.Persist(1, scs...), ErrDuplicateSidecar)
// rejects out-of-bounds indices
scs[0].Index = 6
require.ErrorIs(t, as.Persist(1, scs[0]), errIndexOutOfBounds)
_, more := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 4)
// ignores sidecars before the retention period
slotOOB, err := slots.EpochStart(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
require.NoError(t, err)
require.NoError(t, as.Persist(32+slotOOB, more[0]))
// doesn't ignore new sidecars with a different block root
require.NoError(t, as.Persist(1, more...))
}
type mockBlobBatchVerifier struct {
t *testing.T
scs []blocks.ROBlob
err error
verified map[[32]byte]primitives.Slot
}
var _ BlobBatchVerifier = &mockBlobBatchVerifier{}
func (m *mockBlobBatchVerifier) VerifiedROBlobs(_ context.Context, _ blocks.ROBlock, scs []blocks.ROBlob) ([]blocks.VerifiedROBlob, error) {
require.Equal(m.t, len(scs), len(m.scs))
for i := range m.scs {
require.Equal(m.t, m.scs[i], scs[i])
}
vscs := verification.FakeVerifySliceForTest(m.t, scs)
return vscs, m.err
}
func (m *mockBlobBatchVerifier) MarkVerified(root [32]byte, slot primitives.Slot) {
if m.verified == nil {
m.verified = make(map[[32]byte]primitives.Slot)
}
m.verified[root] = slot
}

beacon-chain/das/cache.go

@@ -0,0 +1,117 @@
package das
import (
"bytes"
"github.com/pkg/errors"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
)
var (
ErrDuplicateSidecar = errors.New("duplicate sidecar stashed in AvailabilityStore")
errIndexOutOfBounds = errors.New("sidecar.index > MAX_BLOBS_PER_BLOCK")
errCommitmentMismatch = errors.New("KzgCommitment of sidecar in cache did not match block commitment")
errMissingSidecar = errors.New("no sidecar in cache for block commitment")
)
// cacheKey includes the slot so that we can easily iterate through the cache and compare
// slots for eviction purposes. Whether the input is the block or the sidecar, we always have
// the root+slot when interacting with the cache, so it isn't an inconvenience to use both.
type cacheKey struct {
slot primitives.Slot
root [32]byte
}
type cache struct {
entries map[cacheKey]*cacheEntry
}
func newCache() *cache {
return &cache{entries: make(map[cacheKey]*cacheEntry)}
}
// keyFromSidecar is a convenience method for constructing a cacheKey from a BlobSidecar value.
func keyFromSidecar(sc blocks.ROBlob) cacheKey {
return cacheKey{slot: sc.Slot(), root: sc.BlockRoot()}
}
// keyFromBlock is a convenience method for constructing a cacheKey from a ROBlock value.
func keyFromBlock(b blocks.ROBlock) cacheKey {
return cacheKey{slot: b.Block().Slot(), root: b.Root()}
}
// ensure returns the entry for the given key, creating it if it isn't already present.
func (c *cache) ensure(key cacheKey) *cacheEntry {
e, ok := c.entries[key]
if !ok {
e = &cacheEntry{}
c.entries[key] = e
}
return e
}
// delete removes the cache entry from the cache.
func (c *cache) delete(key cacheKey) {
delete(c.entries, key)
}
// cacheEntry holds a fixed-length cache of BlobSidecars.
type cacheEntry struct {
scs [fieldparams.MaxBlobsPerBlock]*blocks.ROBlob
}
// stash adds an item to the in-memory cache of BlobSidecars.
// Only the first BlobSidecar of a given Index will be kept in the cache.
// stash will return an error if the given blob is already in the cache, or if the Index is out of bounds.
func (e *cacheEntry) stash(sc *blocks.ROBlob) error {
if sc.Index >= fieldparams.MaxBlobsPerBlock {
return errors.Wrapf(errIndexOutOfBounds, "index=%d", sc.Index)
}
if e.scs[sc.Index] != nil {
return errors.Wrapf(ErrDuplicateSidecar, "root=%#x, index=%d, commitment=%#x", sc.BlockRoot(), sc.Index, sc.KzgCommitment)
}
e.scs[sc.Index] = sc
return nil
}
// filter evicts sidecars that are not committed to by the block and returns custom
// errors if the cache is missing any of the commitments, or if the commitments in
// the cache do not match those found in the block. If err is nil, then all expected
// commitments were found in the cache and the sidecar slice return value can be used
// to perform a DA check against the cached sidecars.
func (e *cacheEntry) filter(root [32]byte, kc safeCommitmentArray) ([]blocks.ROBlob, error) {
scs := make([]blocks.ROBlob, kc.count())
for i := uint64(0); i < fieldparams.MaxBlobsPerBlock; i++ {
if kc[i] == nil {
if e.scs[i] != nil {
return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, no block commitment", root, i, e.scs[i].KzgCommitment)
}
continue
}
if e.scs[i] == nil {
return nil, errors.Wrapf(errMissingSidecar, "root=%#x, index=%#x", root, i)
}
if !bytes.Equal(kc[i], e.scs[i].KzgCommitment) {
return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, block commitment=%#x", root, i, e.scs[i].KzgCommitment, kc[i])
}
scs[i] = *e.scs[i]
}
return scs, nil
}
// safeCommitmentArray is a fixed-size array of commitment byte slices. This is helpful for avoiding
// gratuitous bounds checks.
type safeCommitmentArray [fieldparams.MaxBlobsPerBlock][]byte
func (s safeCommitmentArray) count() int {
for i := range s {
if s[i] == nil {
return i
}
}
return fieldparams.MaxBlobsPerBlock
}
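A sketch of the entry lifecycle these helpers add up to (in-package and illustrative; the function name is invented):

// exampleEntryLifecycle stashes one sidecar and then filters the entry against
// a block's commitments, mirroring the Persist -> IsDataAvailable flow.
func exampleEntryLifecycle(c *cache, sc blocks.ROBlob, kc safeCommitmentArray) ([]blocks.ROBlob, error) {
	key := keyFromSidecar(sc)
	entry := c.ensure(key)
	if err := entry.stash(&sc); err != nil {
		return nil, err // ErrDuplicateSidecar or errIndexOutOfBounds
	}
	defer c.delete(key)
	// filter fails with errMissingSidecar or errCommitmentMismatch unless the
	// cached sidecars line up exactly with the block's commitments.
	return entry.filter(sc.BlockRoot(), kc)
}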


@@ -0,0 +1,25 @@
package das
import (
"testing"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
func TestCacheEnsureDelete(t *testing.T) {
c := newCache()
require.Equal(t, 0, len(c.entries))
root := bytesutil.ToBytes32([]byte("root"))
slot := primitives.Slot(1234)
k := cacheKey{root: root, slot: slot}
entry := c.ensure(k)
require.Equal(t, 1, len(c.entries))
require.Equal(t, c.entries[k], entry)
c.delete(k)
require.Equal(t, 0, len(c.entries))
var nilEntry *cacheEntry
require.Equal(t, nilEntry, c.entries[k])
}

beacon-chain/das/iface.go

@@ -0,0 +1,19 @@
package das
import (
"context"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
)
// AvailabilityStore describes a component that can verify and save sidecars for a given block, and confirm previously
// verified and saved sidecars.
// Persist guarantees that the sidecar will be available to perform a DA check
// for the life of the beacon node process.
// IsDataAvailable guarantees that all blobs committed to in the block have been
// durably persisted before returning a non-error value.
type AvailabilityStore interface {
IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
Persist(current primitives.Slot, sc ...blocks.ROBlob) error
}

beacon-chain/das/mock.go

@@ -0,0 +1,32 @@
package das
import (
"context"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
)
// MockAvailabilityStore is an implementation of AvailabilityStore that can be used by other packages in tests.
type MockAvailabilityStore struct {
VerifyAvailabilityCallback func(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
PersistBlobsCallback func(current primitives.Slot, sc ...blocks.ROBlob) error
}
var _ AvailabilityStore = &MockAvailabilityStore{}
// IsDataAvailable satisfies the corresponding method of the AvailabilityStore interface in a way that is useful for tests.
func (m *MockAvailabilityStore) IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
if m.VerifyAvailabilityCallback != nil {
return m.VerifyAvailabilityCallback(ctx, current, b)
}
return nil
}
// Persist satisfies the corresponding method of the AvailabilityStore interface in a way that is useful for tests.
func (m *MockAvailabilityStore) Persist(current primitives.Slot, sc ...blocks.ROBlob) error {
if m.PersistBlobsCallback != nil {
return m.PersistBlobsCallback(current, sc...)
}
return nil
}
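A hedged sketch of how another package's test might lean on the mock; the package, test name, and injected failure are illustrative:

package sync_test

import (
	"context"
	"testing"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/das"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v4/testing/require"
)

func TestImportBlocksOnDAFailure(t *testing.T) {
	sentinel := errors.New("da check failed")
	as := &das.MockAvailabilityStore{
		VerifyAvailabilityCallback: func(_ context.Context, _ primitives.Slot, _ blocks.ROBlock) error {
			return sentinel
		},
	}
	// Code under test would receive `as` wherever it expects an AvailabilityStore.
	err := as.IsDataAvailable(context.Background(), 1, blocks.ROBlock{})
	require.ErrorIs(t, err, sentinel)
}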


@@ -6,6 +6,7 @@ go_library(
"blob.go",
"ephemeral.go",
"metrics.go",
"pruner.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem",
visibility = ["//visibility:public"],
@@ -19,7 +20,6 @@ go_library(
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/logging:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
@@ -30,7 +30,10 @@ go_library(
go_test(
name = "go_default_test",
srcs = ["blob_test.go"],
srcs = [
"blob_test.go",
"pruner_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/verification:go_default_library",
@@ -40,7 +43,6 @@ go_test(
"//proto/prysm/v1alpha1:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_spf13_afero//:go_default_library",


@@ -1,27 +1,21 @@
package filesystem
import (
"encoding/binary"
"fmt"
"io"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/verification"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/io/file"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/logging"
"github.com/prysmaticlabs/prysm/v4/time/slots"
log "github.com/sirupsen/logrus"
"github.com/spf13/afero"
)
@@ -34,18 +28,21 @@ const (
sszExt = "ssz"
partExt = "part"
firstPruneEpoch = 0
bufferEpochs = 2
directoryPermissions = 0700
)
// BlobStorageOption is a functional option for configuring a BlobStorage.
type BlobStorageOption func(*BlobStorage)
type BlobStorageOption func(*BlobStorage) error
// WithBlobRetentionEpochs is an option that changes the number of epochs blobs will be persisted.
func WithBlobRetentionEpochs(e primitives.Epoch) BlobStorageOption {
return func(b *BlobStorage) {
b.retentionEpochs = e
return func(b *BlobStorage) error {
pruner, err := newBlobPruner(b.fs, e)
if err != nil {
return err
}
b.pruner = pruner
return nil
}
}
@@ -59,21 +56,36 @@ func NewBlobStorage(base string, opts ...BlobStorageOption) (*BlobStorage, error
}
fs := afero.NewBasePathFs(afero.NewOsFs(), base)
b := &BlobStorage{
fs: fs,
retentionEpochs: params.BeaconConfig().MinEpochsForBlobsSidecarsRequest,
lastPrunedEpoch: firstPruneEpoch,
fs: fs,
}
for _, o := range opts {
o(b)
if err := o(b); err != nil {
return nil, fmt.Errorf("failed to create blob storage at %s: %w", base, err)
}
}
if b.pruner == nil {
log.Warn("Initializing blob filesystem storage with pruning disabled")
}
return b, nil
}
// BlobStorage is the concrete implementation of the filesystem backend for saving and retrieving BlobSidecars.
type BlobStorage struct {
fs afero.Fs
retentionEpochs primitives.Epoch
lastPrunedEpoch primitives.Epoch
fs afero.Fs
pruner *blobPruner
}
// WarmCache runs the prune routine with an expiration slot of 0, so nothing is pruned, but the pruner's cache
// is populated at node startup, avoiding a costly cold prune (~4s in syscalls) during syncing.
func (bs *BlobStorage) WarmCache() {
if bs.pruner == nil {
return
}
go func() {
if err := bs.pruner.prune(0); err != nil {
log.WithError(err).Error("Error encountered while warming up blob pruner cache.")
}
}()
}
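For reviewers, a sketch of the intended startup wiring; the surrounding setup function is illustrative and not part of this diff:

// setupBlobStorage builds storage with a retention policy (which installs the
// pruner) and warms the pruner cache in the background, so the first real
// prune during sync avoids the ~4s cold scan.
func setupBlobStorage(dataDir string) (*BlobStorage, error) {
	bs, err := NewBlobStorage(dataDir, WithBlobRetentionEpochs(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest))
	if err != nil {
		return nil, err
	}
	bs.WarmCache() // non-blocking; prune(0) deletes nothing, only fills the slot cache
	return bs, nil
}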
// Save saves blobs given a list of sidecars.
@@ -89,13 +101,8 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
log.WithFields(logging.BlobFields(sidecar.ROBlob)).Debug("ignoring a duplicate blob sidecar Save attempt")
return nil
}
if bs.shouldPrune(sidecar.Slot()) {
go func() {
err := bs.pruneOlderThan(sidecar.Slot())
if err != nil {
log.WithError(err).Errorf("failed to prune blobs from slot %d", sidecar.Slot())
}
}()
if bs.pruner != nil {
bs.pruner.notify(sidecar.BlockRoot(), sidecar.Slot())
}
// Serialize the ethpb.BlobSidecar to binary data using SSZ.
@@ -108,8 +115,12 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
}
partPath := fname.partPath()
partialMoved := false
// Ensure the partial file is deleted.
defer func() {
if partialMoved {
return
}
// Remove is expected to fail when the save succeeded, since the part file was renamed.
err = bs.fs.Remove(partPath)
if err == nil {
@@ -143,7 +154,8 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
if err != nil {
return errors.Wrap(err, "failed to rename partial file to final name")
}
blobsTotalGauge.Inc()
partialMoved = true
blobsWrittenCounter.Inc()
blobSaveLatency.Observe(time.Since(startTime).Seconds())
return nil
}
@@ -220,7 +232,7 @@ func namerForSidecar(sc blocks.VerifiedROBlob) blobNamer {
}
func (p blobNamer) dir() string {
return fmt.Sprintf("%#x", p.root)
return rootString(p.root)
}
func (p blobNamer) fname(ext string) string {
@@ -235,117 +247,6 @@ func (p blobNamer) path() string {
return p.fname(sszExt)
}
// Prune prunes blobs in the base directory based on the retention epoch.
// It deletes blobs older than currentEpoch - (retentionEpochs+bufferEpochs).
// This is so that we keep a slight buffer and blobs are deleted after n+2 epochs.
func (bs *BlobStorage) Prune(currentSlot primitives.Slot) error {
t := time.Now()
retentionSlots, err := slots.EpochStart(bs.retentionEpochs + bufferEpochs)
if err != nil {
return err
}
if currentSlot < retentionSlots {
return nil // Overflow would occur
}
log.Debug("Pruning old blobs")
folders, err := afero.ReadDir(bs.fs, ".")
if err != nil {
return err
}
var totalPruned int
for _, folder := range folders {
if folder.IsDir() {
num, err := bs.processFolder(folder, currentSlot, retentionSlots)
if err != nil {
return err
}
blobsPrunedCounter.Add(float64(num))
blobsTotalGauge.Add(-float64(num))
totalPruned += num
}
}
pruneTime := time.Since(t)
log.WithFields(log.Fields{
"lastPrunedEpoch": slots.ToEpoch(currentSlot - retentionSlots),
"pruneTime": pruneTime,
"numberBlobsPruned": totalPruned,
}).Debug("Pruned old blobs")
return nil
}
// processFolder will delete the folder of blobs if the blob slot is outside the
// retention period. We determine the slot by looking at the first blob in the folder.
func (bs *BlobStorage) processFolder(folder os.FileInfo, currentSlot, retentionSlots primitives.Slot) (int, error) {
f, err := bs.fs.Open(filepath.Join(folder.Name(), "0."+sszExt))
if err != nil {
return 0, err
}
defer func() {
if err := f.Close(); err != nil {
log.WithError(err).Errorf("Could not close blob file")
}
}()
slot, err := slotFromBlob(f)
if err != nil {
return 0, err
}
var num int
if slot < (currentSlot - retentionSlots) {
num, err = bs.countFiles(folder.Name())
if err != nil {
return 0, err
}
if err = bs.fs.RemoveAll(folder.Name()); err != nil {
return 0, errors.Wrapf(err, "failed to delete blob %s", f.Name())
}
}
return num, nil
}
// slotFromBlob reads the ssz data of a file at the specified offset (8 + 131072 + 48 + 48 = 131176 bytes),
// which is calculated based on the size of the BlobSidecar struct and is based on the size of the fields
// preceding the slot information within SignedBeaconBlockHeader.
func slotFromBlob(at io.ReaderAt) (primitives.Slot, error) {
b := make([]byte, 8)
_, err := at.ReadAt(b, 131176)
if err != nil {
return 0, err
}
rawSlot := binary.LittleEndian.Uint64(b)
return primitives.Slot(rawSlot), nil
}
// Delete removes the directory matching the provided block root and all the blobs it contains.
func (bs *BlobStorage) Delete(root [32]byte) error {
if err := bs.fs.RemoveAll(hexutil.Encode(root[:])); err != nil {
return fmt.Errorf("failed to delete blobs for root %#x: %w", root, err)
}
return nil
}
// shouldPrune checks whether pruning should be triggered based on the given slot.
func (bs *BlobStorage) shouldPrune(slot primitives.Slot) bool {
if slots.SinceEpochStarts(slot) < params.BeaconConfig().SlotsPerEpoch/2 {
return false
}
if slots.ToEpoch(slot) == bs.lastPrunedEpoch {
return false
}
return true
}
// pruneOlderThan prunes blobs in the base directory based on the retention epoch and current slot.
func (bs *BlobStorage) pruneOlderThan(slot primitives.Slot) error {
err := bs.Prune(slot)
if err != nil {
return err
}
// Update lastPrunedEpoch to the current epoch.
bs.lastPrunedEpoch = slots.ToEpoch(slot)
return nil
func rootString(root [32]byte) string {
return fmt.Sprintf("%#x", root)
}


@@ -7,7 +7,6 @@ import (
"testing"
"time"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
ssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/verification"
@@ -74,48 +73,6 @@ func TestBlobStorage_SaveBlobData(t *testing.T) {
require.NoError(t, err)
require.DeepSSZEqual(t, expected, actual)
})
t.Run("check pruning", func(t *testing.T) {
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
require.NoError(t, err)
// Slot in first half of epoch therefore should not prune
require.Equal(t, false, bs.shouldPrune(testSidecars[0].Slot()))
err = bs.Save(testSidecars[0])
require.NoError(t, err)
actual, err := bs.Get(testSidecars[0].BlockRoot(), testSidecars[0].Index)
require.NoError(t, err)
require.DeepSSZEqual(t, testSidecars[0], actual)
err = pollUntil(t, fs, 1)
require.NoError(t, err)
_, sidecars = util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 33, fieldparams.MaxBlobsPerBlock)
testSidecars1, err := verification.BlobSidecarSliceNoop(sidecars)
require.NoError(t, err)
// Slot in first half of epoch therefore should not prune
require.Equal(t, false, bs.shouldPrune(testSidecars1[0].Slot()))
err = bs.Save(testSidecars1[0])
require.NoError(t, err)
// Check previous saved sidecar was not pruned
actual, err = bs.Get(testSidecars[0].BlockRoot(), testSidecars[0].Index)
require.NoError(t, err)
require.DeepSSZEqual(t, testSidecars[0], actual)
// Check latest sidecar exists
actual, err = bs.Get(testSidecars1[0].BlockRoot(), testSidecars1[0].Index)
require.NoError(t, err)
require.DeepSSZEqual(t, testSidecars1[0], actual)
err = pollUntil(t, fs, 2) // Check correct number of files
require.NoError(t, err)
_, sidecars = util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 131187, fieldparams.MaxBlobsPerBlock)
testSidecars, err = verification.BlobSidecarSliceNoop(sidecars)
// Slot in second half of epoch therefore should prune
require.Equal(t, true, bs.shouldPrune(testSidecars[0].Slot()))
require.NoError(t, err)
err = bs.Save(testSidecars[0])
require.NoError(t, err)
err = pollUntil(t, fs, 1)
require.NoError(t, err)
})
}
// pollUntil polls a condition function until it returns true or a timeout is reached.
@@ -143,26 +100,6 @@ func pollUntil(t *testing.T, fs afero.Fs, expected int) error {
return nil
}
func TestShouldPrune(t *testing.T) {
bs := NewEphemeralBlobStorage(t)
bs.lastPrunedEpoch = 5
// Slot is before the midpoint of the epoch
slot1 := primitives.Slot(100)
p1 := bs.shouldPrune(slot1)
require.Equal(t, false, p1)
// Slot is after the midpoint of the epoch, but same epoch as last pruning
slot2 := primitives.Slot(178)
p2 := bs.shouldPrune(slot2)
require.Equal(t, false, p2)
// Slot is after the midpoint of the epoch
slot3 := primitives.Slot(8018)
p3 := bs.shouldPrune(slot3)
require.Equal(t, true, p3)
}
func TestBlobIndicesBounds(t *testing.T) {
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
require.NoError(t, err)
@@ -208,7 +145,22 @@ func TestBlobStoragePrune(t *testing.T) {
require.NoError(t, bs.Save(sidecar))
}
require.NoError(t, bs.Prune(currentSlot))
require.NoError(t, bs.pruner.prune(currentSlot-bs.pruner.windowSize))
remainingFolders, err := afero.ReadDir(fs, ".")
require.NoError(t, err)
require.Equal(t, 0, len(remainingFolders))
})
t.Run("Prune dangling blob", func(t *testing.T) {
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 299, fieldparams.MaxBlobsPerBlock)
testSidecars, err := verification.BlobSidecarSliceNoop(sidecars)
require.NoError(t, err)
for _, sidecar := range testSidecars[4:] {
require.NoError(t, bs.Save(sidecar))
}
require.NoError(t, bs.pruner.prune(currentSlot-bs.pruner.windowSize))
remainingFolders, err := afero.ReadDir(fs, ".")
require.NoError(t, err)
@@ -216,7 +168,7 @@ func TestBlobStoragePrune(t *testing.T) {
})
t.Run("PruneMany", func(t *testing.T) {
blockQty := 10
slot := primitives.Slot(0)
slot := primitives.Slot(1)
for j := 0; j <= blockQty; j++ {
root := bytesutil.ToBytes32(bytesutil.ToBytes(uint64(slot), 32))
@@ -228,7 +180,7 @@ func TestBlobStoragePrune(t *testing.T) {
slot += 10000
}
require.NoError(t, bs.Prune(currentSlot))
require.NoError(t, bs.pruner.prune(currentSlot-bs.pruner.windowSize))
remainingFolders, err := afero.ReadDir(fs, ".")
require.NoError(t, err)
@@ -257,41 +209,11 @@ func BenchmarkPruning(b *testing.B) {
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := bs.Prune(currentSlot)
err := bs.pruner.prune(currentSlot)
require.NoError(b, err)
}
}
func TestBlobStorageDelete(t *testing.T) {
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
require.NoError(t, err)
rawRoot := "0xcf9bb70c98f58092c9d6459227c9765f984d240be9690e85179bc5a6f60366ad"
blockRoot, err := hexutil.Decode(rawRoot)
require.NoError(t, err)
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, fieldparams.MaxBlobsPerBlock)
testSidecars, err := verification.BlobSidecarSliceNoop(sidecars)
require.NoError(t, err)
for _, sidecar := range testSidecars {
require.NoError(t, bs.Save(sidecar))
}
exists, err := afero.DirExists(fs, hexutil.Encode(blockRoot))
require.NoError(t, err)
require.Equal(t, true, exists)
// Delete the directory corresponding to the block root
require.NoError(t, bs.Delete(bytesutil.ToBytes32(blockRoot)))
// Ensure that the directory no longer exists after deletion
exists, err = afero.DirExists(fs, hexutil.Encode(blockRoot))
require.NoError(t, err)
require.Equal(t, false, exists)
// Deleting a non-existent root does not return an error.
require.NoError(t, bs.Delete(bytesutil.ToBytes32([]byte{0x1})))
}
func TestNewBlobStorage(t *testing.T) {
_, err := NewBlobStorage(path.Join(t.TempDir(), "good"))
require.NoError(t, err)


@@ -10,16 +10,24 @@ import (
// NewEphemeralBlobStorage should only be used for tests.
// The instance of BlobStorage returned is backed by an in-memory virtual filesystem,
// improving test performance and simplifying cleanup.
func NewEphemeralBlobStorage(_ testing.TB) *BlobStorage {
return &BlobStorage{fs: afero.NewMemMapFs()}
func NewEphemeralBlobStorage(t testing.TB) *BlobStorage {
fs := afero.NewMemMapFs()
pruner, err := newBlobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
if err != nil {
t.Fatal("test setup issue", err)
}
return &BlobStorage{fs: fs, pruner: pruner}
}
// NewEphemeralBlobStorageWithFs can be used by tests that want access to the virtual filesystem
// in order to interact with it outside the parameters of the BlobStorage api.
func NewEphemeralBlobStorageWithFs(_ testing.TB) (afero.Fs, *BlobStorage, error) {
func NewEphemeralBlobStorageWithFs(t testing.TB) (afero.Fs, *BlobStorage, error) {
fs := afero.NewMemMapFs()
retentionEpoch := params.BeaconConfig().MinEpochsForBlobsSidecarsRequest
return fs, &BlobStorage{fs: fs, retentionEpochs: retentionEpoch}, nil
pruner, err := newBlobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
if err != nil {
t.Fatal("test setup issue", err)
}
return fs, &BlobStorage{fs: fs, pruner: pruner}, nil
}
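A sketch of typical usage, as it would appear in a _test.go file in this package (the round-trip test itself is illustrative; util, verification, and require come from the package's existing test imports):

func TestSaveRoundTrip(t *testing.T) {
	bs := NewEphemeralBlobStorage(t)
	_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 1)
	scs, err := verification.BlobSidecarSliceNoop(sidecars)
	require.NoError(t, err)
	require.NoError(t, bs.Save(scs[0]))
	got, err := bs.Get(scs[0].BlockRoot(), scs[0].Index)
	require.NoError(t, err)
	require.DeepSSZEqual(t, scs[0], got)
}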
type BlobMocker struct {


@@ -1,70 +1,28 @@
package filesystem
import (
"fmt"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/spf13/afero"
)
var (
blobBuckets = []float64{0.00003, 0.00005, 0.00007, 0.00009, 0.00011, 0.00013, 0.00015}
blobSaveLatency = promauto.NewHistogram(prometheus.HistogramOpts{
Name: "blob_storage_save_latency",
Help: "Latency of blob storage save operations in seconds",
Help: "Latency of BlobSidecar storage save operations in seconds",
Buckets: blobBuckets,
})
blobFetchLatency = promauto.NewHistogram(prometheus.HistogramOpts{
Name: "blob_storage_get_latency",
Help: "Latency of blob storage get operations in seconds",
Help: "Latency of BlobSidecar storage get operations in seconds",
Buckets: blobBuckets,
})
blobsPrunedCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "blob_pruned_blobs_total",
Help: "Total number of pruned blobs.",
Name: "blob_pruned",
Help: "Number of BlobSidecar files pruned.",
})
blobsTotalGauge = promauto.NewGauge(prometheus.GaugeOpts{
Name: "blobs_on_disk_total",
Help: "Total number of blobs in filesystem.",
blobsWrittenCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "blobs_written",
Help: "Number of BlobSidecar files written.",
})
)
func (bs *BlobStorage) Initialize(lastFinalizedSlot primitives.Slot) error {
if err := bs.Prune(lastFinalizedSlot); err != nil {
return fmt.Errorf("failed to prune from finalized slot %d: %w", lastFinalizedSlot, err)
}
if err := bs.collectTotalBlobMetric(); err != nil {
return fmt.Errorf("failed to initialize blob metrics: %w", err)
}
return nil
}
// CollectTotalBlobMetric set the number of blobs currently present in the filesystem
// to the blobsTotalGauge metric.
func (bs *BlobStorage) collectTotalBlobMetric() error {
totalBlobs := 0
folders, err := afero.ReadDir(bs.fs, ".")
if err != nil {
return err
}
for _, folder := range folders {
num, err := bs.countFiles(folder.Name())
if err != nil {
return err
}
totalBlobs = totalBlobs + num
}
blobsTotalGauge.Set(float64(totalBlobs))
return nil
}
// countFiles returns the length of blob files for a given directory.
func (bs *BlobStorage) countFiles(folderName string) (int, error) {
files, err := afero.ReadDir(bs.fs, folderName)
if err != nil {
return 0, err
}
return len(files), nil
}


@@ -0,0 +1,274 @@
package filesystem
import (
"encoding/binary"
"io"
"path"
"path/filepath"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/pkg/errors"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/time/slots"
log "github.com/sirupsen/logrus"
"github.com/spf13/afero"
)
const retentionBuffer primitives.Epoch = 2
var (
errPruningFailures = errors.New("blobs could not be pruned for some roots")
)
type blobPruner struct {
sync.Mutex
prunedBefore atomic.Uint64
windowSize primitives.Slot
slotMap *slotForRoot
fs afero.Fs
}
func newBlobPruner(fs afero.Fs, retain primitives.Epoch) (*blobPruner, error) {
r, err := slots.EpochStart(retain + retentionBuffer)
if err != nil {
return nil, errors.Wrap(err, "could not set retentionSlots")
}
return &blobPruner{fs: fs, windowSize: r, slotMap: newSlotForRoot()}, nil
}
// notify updates the pruner's cache of root->slot mappings and, when the epoch-aligned pruning
// window advances, triggers an asynchronous prune of blobs older than the window.
func (p *blobPruner) notify(root [32]byte, latest primitives.Slot) {
p.slotMap.ensure(rootString(root), latest)
pruned := uint64(windowMin(latest, p.windowSize))
if p.prunedBefore.Swap(pruned) == pruned {
return
}
go func() {
if err := p.prune(primitives.Slot(pruned)); err != nil {
log.WithError(err).Errorf("Failed to prune blobs from slot %d", latest)
}
}()
}
func windowMin(latest primitives.Slot, offset primitives.Slot) primitives.Slot {
// Safely compute the first slot in the epoch for the latest slot
latest = latest - latest%params.BeaconConfig().SlotsPerEpoch
if latest < offset {
return 0
}
return latest - offset
}
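Worked example: with 32 slots per epoch, windowMin(8200, 4096) first floors 8200 to its epoch start 8192 and returns 8192 - 4096 = 4096; any latest slot whose epoch start is below the offset returns 0 instead of underflowing the unsigned subtraction.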
// prune removes blob directories for slots older than pruneBefore. Callers derive pruneBefore
// from the retention window (the configured retention epochs plus a retentionBuffer of 2 epochs),
// so blobs are kept slightly past the minimum retention period.
func (p *blobPruner) prune(pruneBefore primitives.Slot) error {
p.Lock()
defer p.Unlock()
start := time.Now()
totalPruned, totalErr := 0, 0
// Customize logging/metrics behavior for the initial cache warmup when slot=0.
// We'll never see a prune request for slot 0, unless this is the initial call to warm up the cache.
if pruneBefore == 0 {
defer func() {
log.WithField("duration", time.Since(start).String()).Debug("Warmed up pruner cache")
}()
} else {
defer func() {
log.WithFields(log.Fields{
"upToEpoch": slots.ToEpoch(pruneBefore),
"duration": time.Since(start).String(),
"filesRemoved": totalPruned,
}).Debug("Pruned old blobs")
blobsPrunedCounter.Add(float64(totalPruned))
}()
}
entries, err := listDir(p.fs, ".")
if err != nil {
return errors.Wrap(err, "unable to list root blobs directory")
}
dirs := filter(entries, filterRoot)
for _, dir := range dirs {
pruned, err := p.tryPruneDir(dir, pruneBefore)
if err != nil {
totalErr += 1
log.WithError(err).WithField("directory", dir).Error("Unable to prune directory")
}
totalPruned += pruned
}
if totalErr > 0 {
return errors.Wrapf(errPruningFailures, "pruning failed for %d root directories", totalErr)
}
return nil
}
func shouldRetain(slot, pruneBefore primitives.Slot) bool {
return slot >= pruneBefore
}
func (p *blobPruner) tryPruneDir(dir string, pruneBefore primitives.Slot) (int, error) {
root := rootFromDir(dir)
slot, slotCached := p.slotMap.slot(root)
// Return early if the slot is cached and doesn't need pruning.
if slotCached && shouldRetain(slot, pruneBefore) {
return 0, nil
}
// entries will include things that aren't ssz files, like dangling .part files. We need these to
// completely clean up the directory.
entries, err := listDir(p.fs, dir)
if err != nil {
return 0, errors.Wrapf(err, "failed to list blobs in directory %s", dir)
}
// scFiles filters the dir listing down to the ssz encoded BlobSidecar files. This allows us to peek
// at the first one in the list to figure out the slot.
scFiles := filter(entries, filterSsz)
if len(scFiles) == 0 {
log.WithField("dir", dir).Warn("Pruner ignoring directory with no blob files")
return 0, nil
}
if !slotCached {
slot, err = slotFromFile(path.Join(dir, scFiles[0]), p.fs)
if err != nil {
return 0, errors.Wrapf(err, "slot could not be read from blob file %s", scFiles[0])
}
p.slotMap.ensure(root, slot)
if shouldRetain(slot, pruneBefore) {
return 0, nil
}
}
removed := 0
for _, fname := range entries {
fullName := path.Join(dir, fname)
if err := p.fs.Remove(fullName); err != nil {
return removed, errors.Wrapf(err, "unable to remove %s", fullName)
}
// Don't count other files that happen to be in the dir, like dangling .part files.
if filterSsz(fname) {
removed += 1
}
// Log a warning whenever we clean up a .part file
if filterPart(fullName) {
log.WithField("file", fullName).Warn("Deleting abandoned blob .part file")
}
}
if err := p.fs.Remove(dir); err != nil {
return removed, errors.Wrapf(err, "unable to remove blob directory %s", dir)
}
p.slotMap.evict(rootFromDir(dir))
return len(scFiles), nil
}
func rootFromDir(dir string) string {
return filepath.Base(dir) // end of the path should be the blob directory, named by hex encoding of root
}
// Read slot from marshaled BlobSidecar data in the given file. See slotFromBlob for details.
func slotFromFile(file string, fs afero.Fs) (primitives.Slot, error) {
f, err := fs.Open(file)
if err != nil {
return 0, err
}
defer func() {
if err := f.Close(); err != nil {
log.WithError(err).Errorf("Could not close blob file")
}
}()
return slotFromBlob(f)
}
// slotFromBlob reads the slot from marshaled BlobSidecar data at a fixed offset
// (8 + 131072 + 48 + 48 = 131176 bytes), the combined size of the fields preceding the
// SignedBeaconBlockHeader, whose slot is the first encoded field.
func slotFromBlob(at io.ReaderAt) (primitives.Slot, error) {
b := make([]byte, 8)
_, err := at.ReadAt(b, 131176)
if err != nil {
return 0, err
}
rawSlot := binary.LittleEndian.Uint64(b)
return primitives.Slot(rawSlot), nil
}
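The offset can be read straight off the SSZ layout of the Deneb BlobSidecar; a sketch of the derivation (these constant names are invented for illustration):

const (
	sidecarIndexSize = 8      // BlobSidecar.index, a uint64
	blobSize         = 131072 // BlobSidecar.blob, a fixed-length byte vector
	commitmentSize   = 48     // BlobSidecar.kzg_commitment
	proofSize        = 48     // BlobSidecar.kzg_proof
	// signed_block_header comes next, and slot is the first field of the
	// header, so its little-endian uint64 begins right at this offset.
	slotOffset = sidecarIndexSize + blobSize + commitmentSize + proofSize // 131176
)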
func listDir(fs afero.Fs, dir string) ([]string, error) {
top, err := fs.Open(dir)
if err != nil {
return nil, errors.Wrap(err, "failed to open directory descriptor")
}
defer func() {
if err := top.Close(); err != nil {
log.WithError(err).Errorf("Could not close file %s", dir)
}
}()
// re the -1 param: "If n <= 0, Readdirnames returns all the names from the directory in a single slice"
dirs, err := top.Readdirnames(-1)
if err != nil {
return nil, errors.Wrap(err, "failed to read directory listing")
}
return dirs, nil
}
func filter(entries []string, filt func(string) bool) []string {
filtered := make([]string, 0, len(entries))
for i := range entries {
if filt(entries[i]) {
filtered = append(filtered, entries[i])
}
}
return filtered
}
func filterRoot(s string) bool {
return strings.HasPrefix(s, "0x")
}
var dotSszExt = "." + sszExt
var dotPartExt = "." + partExt
func filterSsz(s string) bool {
return filepath.Ext(s) == dotSszExt
}
func filterPart(s string) bool {
return filepath.Ext(s) == dotPartExt
}
func newSlotForRoot() *slotForRoot {
return &slotForRoot{
cache: make(map[string]primitives.Slot, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest*fieldparams.SlotsPerEpoch),
}
}
type slotForRoot struct {
sync.RWMutex
cache map[string]primitives.Slot
}
func (s *slotForRoot) ensure(key string, slot primitives.Slot) {
s.Lock()
defer s.Unlock()
s.cache[key] = slot
}
func (s *slotForRoot) slot(key string) (primitives.Slot, bool) {
s.RLock()
defer s.RUnlock()
slot, ok := s.cache[key]
return slot, ok
}
func (s *slotForRoot) evict(key string) {
s.Lock()
defer s.Unlock()
delete(s.cache, key)
}
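End to end, the pruner lifecycle looks roughly like the following in-package sketch (the wrapper function is illustrative):

// examplePrunerLifecycle: Save calls notify on every write; notify kicks off a
// background prune whenever the epoch-aligned window advances. A synchronous
// prune of everything older than the window is also possible, as shown here.
func examplePrunerLifecycle(fs afero.Fs, root [32]byte, latest primitives.Slot) error {
	p, err := newBlobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
	if err != nil {
		return err
	}
	p.notify(root, latest) // may spawn an async prune; cheap if the window hasn't moved
	return p.prune(windowMin(latest, p.windowSize))
}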


@@ -0,0 +1,327 @@
package filesystem
import (
"bytes"
"fmt"
"math"
"os"
"path"
"sort"
"testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/verification"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
"github.com/spf13/afero"
)
func TestTryPruneDir_CachedNotExpired(t *testing.T) {
fs := afero.NewMemMapFs()
pr, err := newBlobPruner(fs, 0)
require.NoError(t, err)
slot := pr.windowSize
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, fieldparams.MaxBlobsPerBlock)
sc, err := verification.BlobSidecarNoop(sidecars[0])
require.NoError(t, err)
root := fmt.Sprintf("%#x", sc.BlockRoot())
// This slot is right on the edge of what would need to be pruned, so by adding it to the cache and
// skipping any other test setup, we can be certain the hot cache path never touches the filesystem.
pr.slotMap.ensure(root, sc.Slot())
pruned, err := pr.tryPruneDir(root, pr.windowSize)
require.NoError(t, err)
require.Equal(t, 0, pruned)
}
func TestTryPruneDir_CachedExpired(t *testing.T) {
t.Run("empty directory", func(t *testing.T) {
fs := afero.NewMemMapFs()
pr, err := newBlobPruner(fs, 0)
require.NoError(t, err)
var slot primitives.Slot = 0
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 1)
sc, err := verification.BlobSidecarNoop(sidecars[0])
require.NoError(t, err)
root := fmt.Sprintf("%#x", sc.BlockRoot())
require.NoError(t, fs.Mkdir(root, directoryPermissions)) // make empty directory
pr.slotMap.ensure(root, sc.Slot())
pruned, err := pr.tryPruneDir(root, slot+1)
require.NoError(t, err)
require.Equal(t, 0, pruned)
})
t.Run("blobs to delete", func(t *testing.T) {
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
require.NoError(t, err)
var slot primitives.Slot = 0
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 2)
scs, err := verification.BlobSidecarSliceNoop(sidecars)
require.NoError(t, err)
require.NoError(t, bs.Save(scs[0]))
require.NoError(t, bs.Save(scs[1]))
// check that the root->slot is cached
root := fmt.Sprintf("%#x", scs[0].BlockRoot())
cs, cok := bs.pruner.slotMap.slot(root)
require.Equal(t, true, cok)
require.Equal(t, slot, cs)
// ensure that we see the saved files in the filesystem
files, err := listDir(fs, root)
require.NoError(t, err)
require.Equal(t, 2, len(files))
pruned, err := bs.pruner.tryPruneDir(root, slot+1)
require.NoError(t, err)
require.Equal(t, 2, pruned)
files, err = listDir(fs, root)
require.ErrorIs(t, err, os.ErrNotExist)
require.Equal(t, 0, len(files))
})
}
func TestTryPruneDir_SlotFromFile(t *testing.T) {
t.Run("expired blobs deleted", func(t *testing.T) {
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
require.NoError(t, err)
var slot primitives.Slot = 0
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 2)
scs, err := verification.BlobSidecarSliceNoop(sidecars)
require.NoError(t, err)
require.NoError(t, bs.Save(scs[0]))
require.NoError(t, bs.Save(scs[1]))
// check that the root->slot is cached
root := fmt.Sprintf("%#x", scs[0].BlockRoot())
cs, ok := bs.pruner.slotMap.slot(root)
require.Equal(t, true, ok)
require.Equal(t, slot, cs)
// evict it from the cache so that we trigger the file read path
bs.pruner.slotMap.evict(root)
_, ok = bs.pruner.slotMap.slot(root)
require.Equal(t, false, ok)
// ensure that we see the saved files in the filesystem
files, err := listDir(fs, root)
require.NoError(t, err)
require.Equal(t, 2, len(files))
pruned, err := bs.pruner.tryPruneDir(root, slot+1)
require.NoError(t, err)
require.Equal(t, 2, pruned)
files, err = listDir(fs, root)
require.ErrorIs(t, err, os.ErrNotExist)
require.Equal(t, 0, len(files))
})
t.Run("not expired, intact", func(t *testing.T) {
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
require.NoError(t, err)
// Set slot equal to the window size, so it should be retained.
var slot primitives.Slot = bs.pruner.windowSize
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 2)
scs, err := verification.BlobSidecarSliceNoop(sidecars)
require.NoError(t, err)
require.NoError(t, bs.Save(scs[0]))
require.NoError(t, bs.Save(scs[1]))
// Evict slot mapping from the cache so that we trigger the file read path.
root := fmt.Sprintf("%#x", scs[0].BlockRoot())
bs.pruner.slotMap.evict(root)
_, ok := bs.pruner.slotMap.slot(root)
require.Equal(t, false, ok)
// Ensure that we see the saved files in the filesystem.
files, err := listDir(fs, root)
require.NoError(t, err)
require.Equal(t, 2, len(files))
// This should use the slotFromFile code (simulating restart).
// Setting pruneBefore == slot, so that the slot will be outside the window (at the boundary).
pruned, err := bs.pruner.tryPruneDir(root, slot)
require.NoError(t, err)
require.Equal(t, 0, pruned)
// Ensure files are still present.
files, err = listDir(fs, root)
require.NoError(t, err)
require.Equal(t, 2, len(files))
})
}
func TestSlotFromBlob(t *testing.T) {
cases := []struct {
slot primitives.Slot
}{
{slot: 0},
{slot: 2},
{slot: 1123581321},
{slot: math.MaxUint64},
}
for _, c := range cases {
t.Run(fmt.Sprintf("slot %d", c.slot), func(t *testing.T) {
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, c.slot, 1)
sc := sidecars[0]
enc, err := sc.MarshalSSZ()
require.NoError(t, err)
slot, err := slotFromBlob(bytes.NewReader(enc))
require.NoError(t, err)
require.Equal(t, c.slot, slot)
})
}
}
func TestSlotFromFile(t *testing.T) {
cases := []struct {
slot primitives.Slot
}{
{slot: 0},
{slot: 2},
{slot: 1123581321},
{slot: math.MaxUint64},
}
for _, c := range cases {
t.Run(fmt.Sprintf("slot %d", c.slot), func(t *testing.T) {
fs, bs, err := NewEphemeralBlobStorageWithFs(t)
require.NoError(t, err)
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, c.slot, 1)
sc, err := verification.BlobSidecarNoop(sidecars[0])
require.NoError(t, err)
require.NoError(t, bs.Save(sc))
fname := namerForSidecar(sc)
sszPath := fname.path()
slot, err := slotFromFile(sszPath, fs)
require.NoError(t, err)
require.Equal(t, c.slot, slot)
})
}
}
type dirFiles struct {
name string
isDir bool
children []dirFiles
}
func (df dirFiles) reify(t *testing.T, fs afero.Fs, base string) {
fullPath := path.Join(base, df.name)
if df.isDir {
if df.name != "" {
require.NoError(t, fs.Mkdir(fullPath, directoryPermissions))
}
for _, c := range df.children {
c.reify(t, fs, fullPath)
}
} else {
fp, err := fs.Create(fullPath)
require.NoError(t, err)
_, err = fp.WriteString("derp")
require.NoError(t, err)
}
}
func (df dirFiles) childNames() []string {
cn := make([]string, len(df.children))
for i := range df.children {
cn[i] = df.children[i].name
}
return cn
}
func TestListDir(t *testing.T) {
fs := afero.NewMemMapFs()
// parent directory
fsLayout := dirFiles{isDir: true}
// break out each subdir for easier assertions
notABlob := dirFiles{name: "notABlob", isDir: true}
childlessBlob := dirFiles{name: "0x0987654321", isDir: true}
blobWithSsz := dirFiles{name: "0x1123581321", isDir: true,
children: []dirFiles{{name: "1.ssz"}, {name: "2.ssz"}},
}
blobWithSszAndTmp := dirFiles{name: "0x1234567890", isDir: true,
children: []dirFiles{{name: "5.ssz"}, {name: "0.part"}}}
fsLayout.children = append(fsLayout.children, notABlob)
fsLayout.children = append(fsLayout.children, childlessBlob)
fsLayout.children = append(fsLayout.children, blobWithSsz)
fsLayout.children = append(fsLayout.children, blobWithSszAndTmp)
topChildren := make([]string, len(fsLayout.children))
for i := range fsLayout.children {
topChildren[i] = fsLayout.children[i].name
}
fsLayout.reify(t, fs, "")
cases := []struct {
name string
dirPath string
expected []string
filter func(string) bool
err error
}{
{
name: "non-existent",
dirPath: "derp",
expected: []string{},
err: os.ErrNotExist,
},
{
name: "empty",
dirPath: childlessBlob.name,
expected: []string{},
},
{
name: "top",
dirPath: ".",
expected: topChildren,
},
{
name: "custom filter: only notABlob",
dirPath: ".",
expected: []string{notABlob.name},
filter: func(s string) bool {
	return s == notABlob.name
},
},
{
name: "root filter",
dirPath: ".",
expected: []string{childlessBlob.name, blobWithSsz.name, blobWithSszAndTmp.name},
filter: filterRoot,
},
{
name: "ssz filter",
dirPath: blobWithSsz.name,
expected: blobWithSsz.childNames(),
filter: filterSsz,
},
{
name: "ssz mixed filter",
dirPath: blobWithSszAndTmp.name,
expected: []string{"5.ssz"},
filter: filterSsz,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
result, err := listDir(fs, c.dirPath)
if c.filter != nil {
result = filter(result, c.filter)
}
if c.err != nil {
require.ErrorIs(t, err, c.err)
require.Equal(t, 0, len(result))
} else {
require.NoError(t, err)
sort.Strings(c.expected)
sort.Strings(result)
require.DeepEqual(t, c.expected, result)
}
})
}
}


@@ -55,9 +55,6 @@ type ReadOnlyDatabase interface {
FeeRecipientByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (common.Address, error)
RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
// Blob operations.
BlobSidecarsByRoot(ctx context.Context, beaconBlockRoot [32]byte, indices ...uint64) ([]*ethpb.DeprecatedBlobSidecar, error)
BlobSidecarsBySlot(ctx context.Context, slot primitives.Slot, indices ...uint64) ([]*ethpb.DeprecatedBlobSidecar, error)
// origin checkpoint sync support
OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
BackfillBlockRoot(ctx context.Context) ([32]byte, error)
@@ -93,9 +90,6 @@ type NoHeadAccessDatabase interface {
SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []primitives.ValidatorIndex, addrs []common.Address) error
SaveRegistrationsByValidatorIDs(ctx context.Context, ids []primitives.ValidatorIndex, regs []*ethpb.ValidatorRegistrationV1) error
// Blob operations.
DeleteBlobSidecars(ctx context.Context, beaconBlockRoot [32]byte) error
CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error
}


@@ -5,7 +5,6 @@ go_library(
srcs = [
"archived_point.go",
"backup.go",
"blob.go",
"blocks.go",
"checkpoint.go",
"deposit_contract.go",
@@ -39,7 +38,6 @@ go_library(
"//beacon-chain/state/genesis:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
@@ -76,7 +74,6 @@ go_test(
srcs = [
"archived_point_test.go",
"backup_test.go",
"blob_test.go",
"blocks_test.go",
"checkpoint_test.go",
"deposit_contract_test.go",
@@ -114,7 +111,6 @@ go_test(
"//proto/prysm/v1alpha1:go_default_library",
"//proto/testing:go_default_library",
"//testing/assert:go_default_library",
"//testing/assertions:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",


@@ -1,320 +0,0 @@
package kv
import (
"bytes"
"context"
"sort"
"github.com/pkg/errors"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace"
)
var (
errBlobSlotMismatch = errors.New("sidecar slot mismatch")
errBlobParentMismatch = errors.New("sidecar parent root mismatch")
errBlobRootMismatch = errors.New("sidecar root mismatch")
errBlobProposerMismatch = errors.New("sidecar proposer index mismatch")
errBlobSidecarLimit = errors.New("sidecar exceeds maximum number of blobs")
errEmptySidecar = errors.New("nil or empty blob sidecars")
errNewerBlobExists = errors.New("Will not overwrite newer blobs in db")
)
// A blob rotating key is represented as bytes(slot_to_rotating_buffer(blob.slot)) ++ bytes(blob.slot) ++ blob.block_root
type blobRotatingKey []byte
// BufferPrefix returns the first 8 bytes of the rotating key.
// This represents bytes(slot_to_rotating_buffer(blob.slot)) in the rotating key.
func (rk blobRotatingKey) BufferPrefix() []byte {
return rk[0:8]
}
// Slot returns the slot encoded in the key.
func (rk blobRotatingKey) Slot() types.Slot {
slotBytes := rk[8:16]
return bytesutil.BytesToSlotBigEndian(slotBytes)
}
// BlockRoot returns the block root information from the key.
func (rk blobRotatingKey) BlockRoot() []byte {
return rk[16:]
}
// SaveBlobSidecar saves the blobs for a given epoch in the sidecar bucket. When we receive a blob:
//
// 1. Convert slot using a modulo operator to [0, maxSlots] where maxSlots = MAX_EPOCHS_TO_PERSIST_BLOBS*SLOTS_PER_EPOCH
//
// 2. Compute key for blob as bytes(slot_to_rotating_buffer(blob.slot)) ++ bytes(blob.slot) ++ blob.block_root
//
// 3. Begin the save algorithm: If the incoming blob has a slot bigger than the saved slot at the spot
// in the rotating keys buffer, we overwrite all elements for that slot. Otherwise, we merge the blob with an existing one.
// Trying to replace a newer blob with an older one is an error.
func (s *Store) SaveBlobSidecar(ctx context.Context, scs []*ethpb.DeprecatedBlobSidecar) error {
if len(scs) == 0 {
return errEmptySidecar
}
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBlobSidecar")
defer span.End()
first := scs[0]
newKey := s.blobSidecarKey(first)
prefix := newKey.BufferPrefix()
var prune []blobRotatingKey
return s.db.Update(func(tx *bolt.Tx) error {
var existing []byte
sc := &ethpb.DeprecatedBlobSidecars{}
bkt := tx.Bucket(blobsBucket)
c := bkt.Cursor()
for k, v := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, v = c.Next() {
key := blobRotatingKey(k)
ks := key.Slot()
if ks < first.Slot {
// Mark older blobs at the same position of the ring buffer for deletion.
prune = append(prune, key)
continue
}
if ks > first.Slot {
// We shouldn't be overwriting newer blobs with older blobs. Something is wrong.
return errNewerBlobExists
}
// The slot isn't older or newer, so it must be equal.
// If the roots match, then we want to merge the new sidecars with the existing data.
if bytes.Equal(first.BlockRoot, key.BlockRoot()) {
existing = v
if err := decode(ctx, v, sc); err != nil {
return err
}
}
// If the slot is equal but the roots don't match, leave the existing key alone and allow the sidecar
// to be written to the new key with the same prefix. In this case sc will be empty, so it will just
// contain the incoming sidecars when we write it.
}
sc.Sidecars = append(sc.Sidecars, scs...)
sortSidecars(sc.Sidecars)
var err error
sc.Sidecars, err = validUniqueSidecars(sc.Sidecars)
if err != nil {
return err
}
encoded, err := encode(ctx, sc)
if err != nil {
return err
}
// don't write if the merged result is the same as before
if len(existing) == len(encoded) && bytes.Equal(existing, encoded) {
return nil
}
// Only prune if we're actually going through with the update.
for _, k := range prune {
if err := bkt.Delete(k); err != nil {
// note: attempting to delete a key that does not exist should not return an error.
log.WithError(err).Warnf("Could not delete blob key %#x.", k)
}
}
return bkt.Put(newKey, encoded)
})
}
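Concretely, for a given ring position the save path resolves to a small decision table: an existing key with a smaller slot is queued for pruning, a larger slot aborts with errNewerBlobExists, and an equal slot either merges with the existing sidecars (same block root) or coexists under a second key with the same prefix (different roots, as with equivocating blocks).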
// validUniqueSidecars ensures that all sidecars have the same slot, parent root, block root, and proposer index, and
// there are no more than MAX_BLOBS_PER_BLOCK sidecars.
func validUniqueSidecars(scs []*ethpb.DeprecatedBlobSidecar) ([]*ethpb.DeprecatedBlobSidecar, error) {
if len(scs) == 0 {
return nil, errEmptySidecar
}
// If there's only 1 sidecar, we've got nothing to compare.
if len(scs) == 1 {
return scs, nil
}
prev := scs[0]
didx := 1
for i := 1; i < len(scs); i++ {
sc := scs[i]
if sc.Slot != prev.Slot {
return nil, errors.Wrapf(errBlobSlotMismatch, "%d != %d", sc.Slot, prev.Slot)
}
if !bytes.Equal(sc.BlockParentRoot, prev.BlockParentRoot) {
return nil, errors.Wrapf(errBlobParentMismatch, "%x != %x", sc.BlockParentRoot, prev.BlockParentRoot)
}
if !bytes.Equal(sc.BlockRoot, prev.BlockRoot) {
return nil, errors.Wrapf(errBlobRootMismatch, "%x != %x", sc.BlockRoot, prev.BlockRoot)
}
if sc.ProposerIndex != prev.ProposerIndex {
return nil, errors.Wrapf(errBlobProposerMismatch, "%d != %d", sc.ProposerIndex, prev.ProposerIndex)
}
// skip duplicate
if sc.Index == prev.Index {
continue
}
if didx != i {
scs[didx] = scs[i]
}
prev = scs[i]
didx += 1
}
if didx > fieldparams.MaxBlobsPerBlock {
return nil, errors.Wrapf(errBlobSidecarLimit, "%d > %d", didx, fieldparams.MaxBlobsPerBlock)
}
return scs[0:didx], nil
}
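The compaction above is the standard two-pointer, in-place dedup over a sorted slice (hence the sortSidecars call before it in SaveBlobSidecar). A minimal, purely illustrative sketch of the same pattern on plain ints:
package main
import "fmt"
// dedupSorted compacts a sorted slice in place; xs[d-1] always holds the
// last element kept, mirroring the prev/didx bookkeeping above.
func dedupSorted(xs []int) []int {
if len(xs) < 2 {
return xs
}
d := 1
for i := 1; i < len(xs); i++ {
if xs[i] == xs[d-1] {
continue // skip duplicate
}
xs[d] = xs[i]
d++
}
return xs[:d]
}
func main() {
fmt.Println(dedupSorted([]int{1, 2, 2, 3, 3})) // [1 2 3]
}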
// sortSidecars sorts the sidecars by their index.
func sortSidecars(scs []*ethpb.DeprecatedBlobSidecar) {
sort.Slice(scs, func(i, j int) bool {
return scs[i].Index < scs[j].Index
})
}
// BlobSidecarsByRoot retrieves the blobs for the given beacon block root.
// If the `indices` argument is omitted, all blobs for the root will be returned.
// Otherwise, the result will be filtered to only include the specified indices.
// An error will result if an invalid index is specified.
// The bucket size is bounded by 131072 entries. That's the maximum number of blobs a node will keep before rotating them out.
func (s *Store) BlobSidecarsByRoot(ctx context.Context, root [32]byte, indices ...uint64) ([]*ethpb.DeprecatedBlobSidecar, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.BlobSidecarsByRoot")
defer span.End()
var enc []byte
if err := s.db.View(func(tx *bolt.Tx) error {
c := tx.Bucket(blobsBucket).Cursor()
// Bucket size is bounded and bolt cursors are fast. Moreover, a thin caching layer can be added.
for k, v := c.First(); k != nil; k, v = c.Next() {
if bytes.HasSuffix(k, root[:]) {
enc = v
break
}
}
return nil
}); err != nil {
return nil, err
}
if enc == nil {
return nil, ErrNotFound
}
sc := &ethpb.DeprecatedBlobSidecars{}
if err := decode(ctx, enc, sc); err != nil {
return nil, err
}
return filterForIndices(sc, indices...)
}
func filterForIndices(sc *ethpb.DeprecatedBlobSidecars, indices ...uint64) ([]*ethpb.DeprecatedBlobSidecar, error) {
if len(indices) == 0 {
return sc.Sidecars, nil
}
// This loop assumes that the BlobSidecars value stores the complete set of blobs for a block
// in ascending order from e.g. 0..3, without gaps. This allows us to assume the indices argument
// maps 1:1 with indices in the BlobSidecars storage object.
maxIdx := uint64(len(sc.Sidecars)) - 1
sidecars := make([]*ethpb.DeprecatedBlobSidecar, len(indices))
for i, idx := range indices {
if idx > maxIdx {
return nil, errors.Wrapf(ErrNotFound, "BlobSidecars missing index: index %d", idx)
}
sidecars[i] = sc.Sidecars[idx]
}
return sidecars, nil
}
// BlobSidecarsBySlot retrieves BlobSidecars for the given slot.
// If the `indices` argument is omitted, all blobs for the slot will be returned.
// Otherwise, the result will be filtered to only include the specified indices.
// An error will result if an invalid index is specified.
// The bucket size is bounded by 131072 entries. That's the maximum number of blobs a node will keep before rotating them out.
func (s *Store) BlobSidecarsBySlot(ctx context.Context, slot types.Slot, indices ...uint64) ([]*ethpb.DeprecatedBlobSidecar, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.BlobSidecarsBySlot")
defer span.End()
var enc []byte
sk := s.slotKey(slot)
if err := s.db.View(func(tx *bolt.Tx) error {
c := tx.Bucket(blobsBucket).Cursor()
// Bucket size is bounded and bolt cursors are fast. Moreover, a thin caching layer can be added.
for k, v := c.Seek(sk); bytes.HasPrefix(k, sk); k, v = c.Next() {
slotInKey := bytesutil.BytesToSlotBigEndian(k[8:16])
if slotInKey == slot {
enc = v
break
}
}
return nil
}); err != nil {
return nil, err
}
if enc == nil {
return nil, ErrNotFound
}
sc := &ethpb.DeprecatedBlobSidecars{}
if err := decode(ctx, enc, sc); err != nil {
return nil, err
}
return filterForIndices(sc, indices...)
}
// DeleteBlobSidecars deletes all blob sidecars keyed by the given beacon block root.
func (s *Store) DeleteBlobSidecars(ctx context.Context, beaconBlockRoot [32]byte) error {
_, span := trace.StartSpan(ctx, "BeaconDB.DeleteBlobSidecar")
defer span.End()
return s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blobsBucket)
c := bkt.Cursor()
for k, _ := c.First(); k != nil; k, _ = c.Next() {
if bytes.HasSuffix(k, beaconBlockRoot[:]) {
if err := bkt.Delete(k); err != nil {
return err
}
}
}
return nil
})
}
// We define a blob sidecar key as: bytes(slot_to_rotating_buffer(blob.slot)) ++ bytes(blob.slot) ++ blob.block_root
// where slot_to_rotating_buffer(slot) = slot % MAX_SLOTS_TO_PERSIST_BLOBS.
func (s *Store) blobSidecarKey(blob *ethpb.DeprecatedBlobSidecar) blobRotatingKey {
key := s.slotKey(blob.Slot)
key = append(key, bytesutil.SlotToBytesBigEndian(blob.Slot)...)
key = append(key, blob.BlockRoot...)
return key
}
func (s *Store) slotKey(slot types.Slot) []byte {
return bytesutil.SlotToBytesBigEndian(slot.ModSlot(s.blobRetentionSlots()))
}
func (s *Store) blobRetentionSlots() types.Slot {
return types.Slot(s.blobRetentionEpochs.Mul(uint64(params.BeaconConfig().SlotsPerEpoch)))
}
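With mainnet parameters (MinEpochsForBlobsSidecarsRequest = 4096 epochs, SlotsPerEpoch = 32), the retention window works out to 4096 × 32 = 131072 slots, which is exactly the bucket-size bound cited in the read-path comments above.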
var errBlobRetentionEpochMismatch = errors.New("epochs for blobs request value in DB does not match runtime config")
func (s *Store) checkEpochsForBlobSidecarsRequestBucket(db *bolt.DB) error {
uRetentionEpochs := uint64(s.blobRetentionEpochs)
if err := db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket(chainMetadataBucket)
v := b.Get(blobRetentionEpochsKey)
if v == nil {
if err := b.Put(blobRetentionEpochsKey, bytesutil.Uint64ToBytesBigEndian(uRetentionEpochs)); err != nil {
return err
}
return nil
}
e := bytesutil.BytesToUint64BigEndian(v)
if e != uRetentionEpochs {
return errors.Wrapf(errBlobRetentionEpochMismatch, "db=%d, config=%d", e, uRetentionEpochs)
}
return nil
}); err != nil {
return err
}
return nil
}
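In other words, the first call persists the configured retention value to the metadata bucket, and every subsequent startup compares against it, so a node cannot silently reopen an existing blob ring buffer with a different retention window.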

View File

@@ -1,532 +0,0 @@
package kv
import (
"context"
"crypto/rand"
"fmt"
"testing"
"github.com/pkg/errors"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assertions"
"github.com/prysmaticlabs/prysm/v4/testing/require"
bolt "go.etcd.io/bbolt"
)
func equalBlobSlices(expect []*ethpb.DeprecatedBlobSidecar, got []*ethpb.DeprecatedBlobSidecar) error {
if len(expect) != len(got) {
return fmt.Errorf("mismatched lengths, expect=%d, got=%d", len(expect), len(got))
}
for i := 0; i < len(expect); i++ {
es := expect[i]
gs := got[i]
var e string
assertions.DeepEqual(assertions.SprintfAssertionLoggerFn(&e), es, gs)
if e != "" {
return errors.New(e)
}
}
return nil
}
func TestStore_BlobSidecars(t *testing.T) {
ctx := context.Background()
t.Run("empty", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, 0)
require.ErrorContains(t, "nil or empty blob sidecars", db.SaveBlobSidecar(ctx, scs))
})
t.Run("empty by root", func(t *testing.T) {
db := setupDB(t)
got, err := db.BlobSidecarsByRoot(ctx, [32]byte{})
require.ErrorIs(t, ErrNotFound, err)
require.Equal(t, 0, len(got))
})
t.Run("empty by slot", func(t *testing.T) {
db := setupDB(t)
got, err := db.BlobSidecarsBySlot(ctx, 1)
require.ErrorIs(t, ErrNotFound, err)
require.Equal(t, 0, len(got))
})
t.Run("save and retrieve by root (one)", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, 1)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, 1, len(scs))
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
})
t.Run("save and retrieve by root (max), per batch", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
})
t.Run("save and retrieve by root, max and individually", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
for _, sc := range scs {
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{sc}))
}
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
})
t.Run("save and retrieve valid subset by root", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
// we'll request indices 0 and 3, so make a slice with those indices for comparison
expect := make([]*ethpb.DeprecatedBlobSidecar, 2)
expect[0] = scs[0]
expect[1] = scs[3]
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot), 0, 3)
require.NoError(t, err)
require.NoError(t, equalBlobSlices(expect, got))
require.Equal(t, uint64(0), got[0].Index)
require.Equal(t, uint64(3), got[1].Index)
})
t.Run("error for invalid index when retrieving by root", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot), uint64(len(scs)))
require.ErrorIs(t, err, ErrNotFound)
require.Equal(t, 0, len(got))
})
t.Run("save and retrieve by slot (one)", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, 1)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, 1, len(scs))
got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot)
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
})
t.Run("save and retrieve by slot (max)", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot)
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
})
t.Run("save and retrieve by slot, max and individually", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
for _, sc := range scs {
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{sc}))
}
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot)
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
})
t.Run("save and retrieve valid subset by slot", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
// we'll request indices 0 and 3, so make a slice with those indices for comparison
expect := make([]*ethpb.DeprecatedBlobSidecar, 2)
expect[0] = scs[0]
expect[1] = scs[3]
got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot, 0, 3)
require.NoError(t, err)
require.NoError(t, equalBlobSlices(expect, got))
require.Equal(t, uint64(0), got[0].Index)
require.Equal(t, uint64(3), got[1].Index)
})
t.Run("error for invalid index when retrieving by slot", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot, uint64(len(scs)))
require.ErrorIs(t, err, ErrNotFound)
require.Equal(t, 0, len(got))
})
t.Run("delete works", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
require.NoError(t, db.DeleteBlobSidecars(ctx, bytesutil.ToBytes32(scs[0].BlockRoot)))
got, err = db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
require.ErrorIs(t, ErrNotFound, err)
require.Equal(t, 0, len(got))
})
t.Run("saving blob different times", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
for i := 0; i < fieldparams.MaxBlobsPerBlock; i++ {
scs[i].Slot = primitives.Slot(i)
scs[i].BlockRoot = bytesutil.PadTo([]byte{byte(i)}, 32)
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{scs[i]}))
br := bytesutil.ToBytes32(scs[i].BlockRoot)
saved, err := db.BlobSidecarsByRoot(ctx, br)
require.NoError(t, err)
require.NoError(t, equalBlobSlices([]*ethpb.DeprecatedBlobSidecar{scs[i]}, saved))
}
})
t.Run("saving a new blob for rotation (batch)", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
oldBlockRoot := scs[0].BlockRoot
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(oldBlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
newScs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
newRetentionSlot := primitives.Slot(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest.Mul(uint64(params.BeaconConfig().SlotsPerEpoch)))
for _, sc := range newScs {
sc.Slot = sc.Slot + newRetentionSlot
}
require.NoError(t, db.SaveBlobSidecar(ctx, newScs))
_, err = db.BlobSidecarsBySlot(ctx, 100)
require.ErrorIs(t, ErrNotFound, err)
got, err = db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(newScs[0].BlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(newScs, got))
})
t.Run("save multiple blobs after new rotation (individually)", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
for _, sc := range scs {
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{sc}))
}
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
scs = generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
newRetentionSlot := primitives.Slot(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest.Mul(uint64(params.BeaconConfig().SlotsPerEpoch)))
for _, sc := range scs {
sc.Slot = sc.Slot + newRetentionSlot
}
for _, sc := range scs {
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{sc}))
}
_, err = db.BlobSidecarsBySlot(ctx, 100)
require.ErrorIs(t, ErrNotFound, err)
got, err = db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
})
t.Run("save multiple blobs after new rotation (batch then individually)", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
require.Equal(t, fieldparams.MaxBlobsPerBlock, len(scs))
oldBlockRoot := scs[0].BlockRoot
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(oldBlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
scs = generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
newRetentionSlot := primitives.Slot(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest.Mul(uint64(params.BeaconConfig().SlotsPerEpoch)))
for _, sc := range scs {
sc.Slot = sc.Slot + newRetentionSlot
}
for _, sc := range scs {
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{sc}))
}
_, err = db.BlobSidecarsBySlot(ctx, 100)
require.ErrorIs(t, ErrNotFound, err)
got, err = db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
})
t.Run("save multiple blobs after new rotation (individually then batch)", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
for _, sc := range scs {
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{sc}))
}
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
scs = generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
newRetentionSlot := primitives.Slot(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest.Mul(uint64(params.BeaconConfig().SlotsPerEpoch)))
for _, sc := range scs {
sc.Slot = sc.Slot + newRetentionSlot
}
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
_, err = db.BlobSidecarsBySlot(ctx, 100)
require.ErrorIs(t, ErrNotFound, err)
got, err = db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
})
t.Run("save equivocating blobs", func(t *testing.T) {
db := setupDB(t)
scs := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock/2)
eScs := generateEquivocatingBlobSidecars(t, fieldparams.MaxBlobsPerBlock/2)
for i, sc := range scs {
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{sc}))
require.NoError(t, db.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{eScs[i]}))
}
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(scs, got))
got, err = db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(eScs[0].BlockRoot))
require.NoError(t, err)
require.NoError(t, equalBlobSlices(eScs, got))
})
}
func generateBlobSidecars(t *testing.T, n uint64) []*ethpb.DeprecatedBlobSidecar {
blobSidecars := make([]*ethpb.DeprecatedBlobSidecar, n)
for i := uint64(0); i < n; i++ {
blobSidecars[i] = generateBlobSidecar(t, i)
}
return blobSidecars
}
func generateBlobSidecar(t *testing.T, index uint64) *ethpb.DeprecatedBlobSidecar {
blob := make([]byte, 131072)
_, err := rand.Read(blob)
require.NoError(t, err)
kzgCommitment := make([]byte, 48)
_, err = rand.Read(kzgCommitment)
require.NoError(t, err)
kzgProof := make([]byte, 48)
_, err = rand.Read(kzgProof)
require.NoError(t, err)
return &ethpb.DeprecatedBlobSidecar{
BlockRoot: bytesutil.PadTo([]byte{'a'}, 32),
Index: index,
Slot: 100,
BlockParentRoot: bytesutil.PadTo([]byte{'b'}, 32),
ProposerIndex: 101,
Blob: blob,
KzgCommitment: kzgCommitment,
KzgProof: kzgProof,
}
}
func generateEquivocatingBlobSidecars(t *testing.T, n uint64) []*ethpb.DeprecatedBlobSidecar {
blobSidecars := make([]*ethpb.DeprecatedBlobSidecar, n)
for i := uint64(0); i < n; i++ {
blobSidecars[i] = generateEquivocatingBlobSidecar(t, i)
}
return blobSidecars
}
func generateEquivocatingBlobSidecar(t *testing.T, index uint64) *ethpb.DeprecatedBlobSidecar {
blob := make([]byte, 131072)
_, err := rand.Read(blob)
require.NoError(t, err)
kzgCommitment := make([]byte, 48)
_, err = rand.Read(kzgCommitment)
require.NoError(t, err)
kzgProof := make([]byte, 48)
_, err = rand.Read(kzgProof)
require.NoError(t, err)
return &ethpb.DeprecatedBlobSidecar{
BlockRoot: bytesutil.PadTo([]byte{'c'}, 32),
Index: index,
Slot: 100,
BlockParentRoot: bytesutil.PadTo([]byte{'b'}, 32),
ProposerIndex: 102,
Blob: blob,
KzgCommitment: kzgCommitment,
KzgProof: kzgProof,
}
}
func Test_validUniqueSidecars_validation(t *testing.T) {
tests := []struct {
name string
scs []*ethpb.DeprecatedBlobSidecar
err error
}{
{name: "empty", scs: []*ethpb.DeprecatedBlobSidecar{}, err: errEmptySidecar},
{name: "too many sidecars", scs: generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock+1), err: errBlobSidecarLimit},
{name: "invalid slot", scs: []*ethpb.DeprecatedBlobSidecar{{Slot: 1}, {Slot: 2}}, err: errBlobSlotMismatch},
{name: "invalid proposer index", scs: []*ethpb.DeprecatedBlobSidecar{{ProposerIndex: 1}, {ProposerIndex: 2}}, err: errBlobProposerMismatch},
{name: "invalid root", scs: []*ethpb.DeprecatedBlobSidecar{{BlockRoot: []byte{1}}, {BlockRoot: []byte{2}}}, err: errBlobRootMismatch},
{name: "invalid parent root", scs: []*ethpb.DeprecatedBlobSidecar{{BlockParentRoot: []byte{1}}, {BlockParentRoot: []byte{2}}}, err: errBlobParentMismatch},
{name: "happy path", scs: []*ethpb.DeprecatedBlobSidecar{{Index: 0}, {Index: 1}}},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
_, err := validUniqueSidecars(tt.scs)
if tt.err != nil {
require.ErrorIs(t, err, tt.err)
} else {
require.NoError(t, err)
}
})
}
}
func Test_validUniqueSidecars_dedup(t *testing.T) {
cases := []struct {
name string
scs []*ethpb.DeprecatedBlobSidecar
expected []*ethpb.DeprecatedBlobSidecar
err error
}{
{
name: "duplicate sidecar",
scs: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 1}},
expected: []*ethpb.DeprecatedBlobSidecar{{Index: 1}},
},
{
name: "single sidecar",
scs: []*ethpb.DeprecatedBlobSidecar{{Index: 1}},
expected: []*ethpb.DeprecatedBlobSidecar{{Index: 1}},
},
{
name: "multiple duplicates",
scs: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 2}, {Index: 2}, {Index: 3}, {Index: 3}},
expected: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}},
},
{
name: "ok number after de-dupe, > 6 before",
scs: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 2}, {Index: 2}, {Index: 2}, {Index: 2}, {Index: 3}, {Index: 3}},
expected: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}},
},
{
name: "max unique, no dupes",
scs: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}, {Index: 4}, {Index: 5}, {Index: 6}},
expected: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}, {Index: 4}, {Index: 5}, {Index: 6}},
},
{
name: "too many unique",
scs: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 2}, {Index: 3}, {Index: 4}, {Index: 5}, {Index: 6}, {Index: 7}},
err: errBlobSidecarLimit,
},
{
name: "too many unique with dupes",
scs: []*ethpb.DeprecatedBlobSidecar{{Index: 1}, {Index: 1}, {Index: 1}, {Index: 2}, {Index: 3}, {Index: 4}, {Index: 5}, {Index: 6}, {Index: 7}},
err: errBlobSidecarLimit,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
u, err := validUniqueSidecars(c.scs)
if c.err != nil {
require.ErrorIs(t, err, c.err)
} else {
require.NoError(t, err)
}
require.Equal(t, len(c.expected), len(u))
})
}
}
func TestStore_sortSidecars(t *testing.T) {
scs := []*ethpb.DeprecatedBlobSidecar{
{Index: 6},
{Index: 4},
{Index: 2},
{Index: 1},
{Index: 3},
{Index: 5},
{},
}
sortSidecars(scs)
for i := 0; i < len(scs)-1; i++ {
require.Equal(t, uint64(i), scs[i].Index)
}
}
func BenchmarkStore_BlobSidecarsByRoot(b *testing.B) {
s := setupDB(b)
ctx := context.Background()
require.NoError(b, s.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{
{BlockRoot: bytesutil.PadTo([]byte{'a'}, 32), Slot: 0},
}))
err := s.db.Update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(blobsBucket)
for i := 1; i < 131071; i++ {
r := make([]byte, 32)
_, err := rand.Read(r)
require.NoError(b, err)
scs := []*ethpb.DeprecatedBlobSidecar{
{BlockRoot: r, Slot: primitives.Slot(i)},
}
k := s.blobSidecarKey(scs[0])
encodedBlobSidecar, err := encode(ctx, &ethpb.DeprecatedBlobSidecars{Sidecars: scs})
require.NoError(b, err)
require.NoError(b, bkt.Put(k, encodedBlobSidecar))
}
return nil
})
require.NoError(b, err)
require.NoError(b, s.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{
{BlockRoot: bytesutil.PadTo([]byte{'b'}, 32), Slot: 131071},
}))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := s.BlobSidecarsByRoot(ctx, [32]byte{'b'})
require.NoError(b, err)
}
}
func Test_checkEpochsForBlobSidecarsRequestBucket(t *testing.T) {
s := setupDB(t)
require.NoError(t, s.checkEpochsForBlobSidecarsRequestBucket(s.db)) // First write
require.NoError(t, s.checkEpochsForBlobSidecarsRequestBucket(s.db)) // First check
s.blobRetentionEpochs += 1
require.ErrorIs(t, s.checkEpochsForBlobSidecarsRequestBucket(s.db), errBlobRetentionEpochMismatch)
}
func TestBlobRotatingKey(t *testing.T) {
s := setupDB(t)
k := s.blobSidecarKey(&ethpb.DeprecatedBlobSidecar{
Slot: 1,
BlockRoot: []byte{2},
})
require.Equal(t, types.Slot(1), k.Slot())
require.DeepEqual(t, []byte{2}, k.BlockRoot())
require.DeepEqual(t, s.slotKey(types.Slot(1)), k.BufferPrefix())
}

View File

@@ -18,7 +18,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/io/file"
bolt "go.etcd.io/bbolt"
)
@@ -91,7 +90,6 @@ type Store struct {
validatorEntryCache *ristretto.Cache
stateSummaryCache *stateSummaryCache
ctx context.Context
blobRetentionEpochs primitives.Epoch
}
// StoreDatafilePath is the canonical construction of a full
@@ -138,13 +136,6 @@ var Buckets = [][]byte{
// KVStoreOption is a functional option that modifies a kv.Store.
type KVStoreOption func(*Store)
// WithBlobRetentionEpochs sets the variable configuring the blob retention window.
func WithBlobRetentionEpochs(e primitives.Epoch) KVStoreOption {
return func(s *Store) {
s.blobRetentionEpochs = e
}
}
// NewKVStore initializes a new boltDB key-value store at the directory
// path specified, creates the kv-buckets based on the schema, and stores
// an open connection db object as a property of the Store struct.
@@ -217,14 +208,6 @@ func NewKVStore(ctx context.Context, dirPath string, opts ...KVStoreOption) (*St
return nil, err
}
if err := kv.checkEpochsForBlobSidecarsRequestBucket(boltDB); err != nil {
return nil, errors.Wrap(err, "failed to check epochs for blob sidecars request bucket")
}
// set a default so that tests don't break
if kv.blobRetentionEpochs == 0 {
kv.blobRetentionEpochs = params.BeaconConfig().MinEpochsForBlobsSidecarsRequest
}
return kv, nil
}

View File

@@ -6,7 +6,6 @@ import (
"testing"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/require"
@@ -16,8 +15,7 @@ import (
// setupDB instantiates and returns a Store instance.
func setupDB(t testing.TB) *Store {
opt := WithBlobRetentionEpochs(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
db, err := NewKVStore(context.Background(), t.TempDir(), opt)
db, err := NewKVStore(context.Background(), t.TempDir())
require.NoError(t, err, "Failed to instantiate DB")
t.Cleanup(func() {
require.NoError(t, db.Close(), "Failed to close database")

View File

@@ -105,7 +105,6 @@ go_test(
"//beacon-chain/execution/types:go_default_library",
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",

View File

@@ -15,7 +15,6 @@ import (
"github.com/holiman/uint256"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/types"
"github.com/prysmaticlabs/prysm/v4/config/features"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
@@ -489,6 +488,10 @@ func (s *Service) GetPayloadBodiesByHash(ctx context.Context, executionBlockHash
defer span.End()
result := make([]*pb.ExecutionPayloadBodyV1, 0)
// Exit early if there are no execution hashes.
if len(executionBlockHashes) == 0 {
return result, nil
}
err := s.rpcClient.CallContext(ctx, &result, GetPayloadBodiesByHashV1, executionBlockHashes)
for i, item := range result {
@@ -621,31 +624,15 @@ func (s *Service) ReconstructFullBellatrixBlockBatch(
}
func (s *Service) retrievePayloadFromExecutionHash(ctx context.Context, executionBlockHash common.Hash, header interfaces.ExecutionData, version int) (interfaces.ExecutionData, error) {
if features.Get().EnableOptionalEngineMethods {
pBodies, err := s.GetPayloadBodiesByHash(ctx, []common.Hash{executionBlockHash})
if err != nil {
return nil, fmt.Errorf("could not get payload body by hash %#x: %v", executionBlockHash, err)
}
if len(pBodies) != 1 {
return nil, errors.Errorf("could not retrieve the correct number of payload bodies: wanted 1 but got %d", len(pBodies))
}
bdy := pBodies[0]
return fullPayloadFromPayloadBody(header, bdy, version)
}
executionBlock, err := s.ExecutionBlockByHash(ctx, executionBlockHash, true /* with txs */)
pBodies, err := s.GetPayloadBodiesByHash(ctx, []common.Hash{executionBlockHash})
if err != nil {
return nil, fmt.Errorf("could not fetch execution block with txs by hash %#x: %v", executionBlockHash, err)
return nil, fmt.Errorf("could not get payload body by hash %#x: %v", executionBlockHash, err)
}
if executionBlock == nil {
return nil, fmt.Errorf("received nil execution block for request by hash %#x", executionBlockHash)
if len(pBodies) != 1 {
return nil, errors.Errorf("could not retrieve the correct number of payload bodies: wanted 1 but got %d", len(pBodies))
}
if bytes.Equal(executionBlock.Hash.Bytes(), []byte{}) {
return nil, ErrEmptyBlockHash
}
executionBlock.Version = version
return fullPayloadFromExecutionBlock(version, header, executionBlock)
bdy := pBodies[0]
return fullPayloadFromPayloadBody(header, bdy, version)
}
func (s *Service) retrievePayloadsFromExecutionHashes(
@@ -654,19 +641,12 @@ func (s *Service) retrievePayloadsFromExecutionHashes(
validExecPayloads []int,
blindedBlocks []interfaces.ReadOnlySignedBeaconBlock) ([]interfaces.SignedBeaconBlock, error) {
fullBlocks := make([]interfaces.SignedBeaconBlock, len(blindedBlocks))
var execBlocks []*pb.ExecutionBlock
var payloadBodies []*pb.ExecutionPayloadBodyV1
var err error
if features.Get().EnableOptionalEngineMethods {
payloadBodies, err = s.GetPayloadBodiesByHash(ctx, executionHashes)
if err != nil {
return nil, fmt.Errorf("could not fetch payload bodies by hash %#x: %v", executionHashes, err)
}
} else {
execBlocks, err = s.ExecutionBlocksByHashes(ctx, executionHashes, true /* with txs*/)
if err != nil {
return nil, fmt.Errorf("could not fetch execution blocks with txs by hash %#x: %v", executionHashes, err)
}
payloadBodies, err = s.GetPayloadBodiesByHash(ctx, executionHashes)
if err != nil {
return nil, fmt.Errorf("could not fetch payload bodies by hash %#x: %v", executionHashes, err)
}
// For each valid payload, we reconstruct the full block from it with the
@@ -674,32 +654,17 @@ func (s *Service) retrievePayloadsFromExecutionHashes(
for sliceIdx, realIdx := range validExecPayloads {
var payload interfaces.ExecutionData
bblock := blindedBlocks[realIdx]
if features.Get().EnableOptionalEngineMethods {
b := payloadBodies[sliceIdx]
if b == nil {
return nil, fmt.Errorf("received nil payload body for request by hash %#x", executionHashes[sliceIdx])
}
header, err := bblock.Block().Body().Execution()
if err != nil {
return nil, err
}
payload, err = fullPayloadFromPayloadBody(header, b, bblock.Version())
if err != nil {
return nil, err
}
} else {
b := execBlocks[sliceIdx]
if b == nil {
return nil, fmt.Errorf("received nil execution block for request by hash %#x", executionHashes[sliceIdx])
}
header, err := bblock.Block().Body().Execution()
if err != nil {
return nil, err
}
payload, err = fullPayloadFromExecutionBlock(bblock.Version(), header, b)
if err != nil {
return nil, err
}
b := payloadBodies[sliceIdx]
if b == nil {
return nil, fmt.Errorf("received nil payload body for request by hash %#x", executionHashes[sliceIdx])
}
header, err := bblock.Block().Body().Execution()
if err != nil {
return nil, err
}
payload, err = fullPayloadFromPayloadBody(header, b, bblock.Version())
if err != nil {
return nil, err
}
fullBlock, err := blocks.BuildSignedBeaconBlockFromExecutionPayload(bblock, payload.Proto())
if err != nil {

View File

@@ -20,7 +20,6 @@ import (
"github.com/holiman/uint256"
"github.com/pkg/errors"
mocks "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing"
"github.com/prysmaticlabs/prysm/v4/config/features"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
@@ -758,26 +757,7 @@ func TestReconstructFullBellatrixBlock(t *testing.T) {
encodedBinaryTxs[0], err = txs[0].MarshalBinary()
require.NoError(t, err)
payload.Transactions = encodedBinaryTxs
jsonPayload["transactions"] = txs
num := big.NewInt(1)
encodedNum := hexutil.EncodeBig(num)
jsonPayload["hash"] = hexutil.Encode(payload.BlockHash)
jsonPayload["parentHash"] = common.BytesToHash([]byte("parent"))
jsonPayload["sha3Uncles"] = common.BytesToHash([]byte("uncles"))
jsonPayload["miner"] = common.BytesToAddress([]byte("miner"))
jsonPayload["stateRoot"] = common.BytesToHash([]byte("state"))
jsonPayload["transactionsRoot"] = common.BytesToHash([]byte("txs"))
jsonPayload["receiptsRoot"] = common.BytesToHash([]byte("receipts"))
jsonPayload["logsBloom"] = gethtypes.BytesToBloom([]byte("bloom"))
jsonPayload["gasLimit"] = hexutil.EncodeUint64(1)
jsonPayload["gasUsed"] = hexutil.EncodeUint64(2)
jsonPayload["timestamp"] = hexutil.EncodeUint64(3)
jsonPayload["number"] = encodedNum
jsonPayload["extraData"] = common.BytesToHash([]byte("extra"))
jsonPayload["totalDifficulty"] = "0x123456"
jsonPayload["difficulty"] = encodedNum
jsonPayload["size"] = encodedNum
jsonPayload["baseFeePerGas"] = encodedNum
jsonPayload["transactions"] = []hexutil.Bytes{encodedBinaryTxs[0]}
wrappedPayload, err := blocks.WrappedExecutionPayload(payload)
require.NoError(t, err)
@@ -792,7 +772,7 @@ func TestReconstructFullBellatrixBlock(t *testing.T) {
respJSON := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": jsonPayload,
"result": []map[string]interface{}{jsonPayload},
}
require.NoError(t, json.NewEncoder(w).Encode(respJSON))
}))
@@ -869,26 +849,7 @@ func TestReconstructFullBellatrixBlockBatch(t *testing.T) {
encodedBinaryTxs[0], err = txs[0].MarshalBinary()
require.NoError(t, err)
payload.Transactions = encodedBinaryTxs
jsonPayload["transactions"] = txs
num := big.NewInt(1)
encodedNum := hexutil.EncodeBig(num)
jsonPayload["hash"] = hexutil.Encode(payload.BlockHash)
jsonPayload["parentHash"] = common.BytesToHash([]byte("parent"))
jsonPayload["sha3Uncles"] = common.BytesToHash([]byte("uncles"))
jsonPayload["miner"] = common.BytesToAddress([]byte("miner"))
jsonPayload["stateRoot"] = common.BytesToHash([]byte("state"))
jsonPayload["transactionsRoot"] = common.BytesToHash([]byte("txs"))
jsonPayload["receiptsRoot"] = common.BytesToHash([]byte("receipts"))
jsonPayload["logsBloom"] = gethtypes.BytesToBloom([]byte("bloom"))
jsonPayload["gasLimit"] = hexutil.EncodeUint64(1)
jsonPayload["gasUsed"] = hexutil.EncodeUint64(2)
jsonPayload["timestamp"] = hexutil.EncodeUint64(3)
jsonPayload["number"] = encodedNum
jsonPayload["extraData"] = common.BytesToHash([]byte("extra"))
jsonPayload["totalDifficulty"] = "0x123456"
jsonPayload["difficulty"] = encodedNum
jsonPayload["size"] = encodedNum
jsonPayload["baseFeePerGas"] = encodedNum
jsonPayload["transactions"] = []hexutil.Bytes{encodedBinaryTxs[0]}
wrappedPayload, err := blocks.WrappedExecutionPayload(payload)
require.NoError(t, err)
@@ -912,20 +873,12 @@ func TestReconstructFullBellatrixBlockBatch(t *testing.T) {
require.NoError(t, r.Body.Close())
}()
respJSON := []map[string]interface{}{
{
"jsonrpc": "2.0",
"id": 1,
"result": jsonPayload,
},
{
"jsonrpc": "2.0",
"id": 2,
"result": jsonPayload,
},
respJSON := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": []map[string]interface{}{jsonPayload, jsonPayload},
}
require.NoError(t, json.NewEncoder(w).Encode(respJSON))
require.NoError(t, json.NewEncoder(w).Encode(respJSON))
}))
defer srv.Close()
@@ -1288,6 +1241,10 @@ func fixtures() map[string]interface{} {
BlockHash: foo[:],
Transactions: [][]byte{foo[:]},
}
executionPayloadBodyFixture := &pb.ExecutionPayloadBodyV1{
Transactions: [][]byte{foo[:]},
Withdrawals: []*pb.Withdrawal{},
}
executionPayloadFixtureCapella := &pb.ExecutionPayloadCapella{
ParentHash: foo[:],
FeeRecipient: bar,
@@ -1459,6 +1416,7 @@ func fixtures() map[string]interface{} {
}
return map[string]interface{}{
"ExecutionBlock": executionBlock,
"ExecutionPayloadBody": executionPayloadBodyFixture,
"ExecutionPayload": executionPayloadFixture,
"ExecutionPayloadCapella": executionPayloadFixtureCapella,
"ExecutionPayloadDeneb": executionPayloadFixtureDeneb,
@@ -2007,10 +1965,6 @@ func newPayloadV3Setup(t *testing.T, status *pb.PayloadStatus, payload *pb.Execu
}
func TestCapella_PayloadBodiesByHash(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
EnableOptionalEngineMethods: true,
})
defer resetFn()
t.Run("empty response works", func(t *testing.T) {
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
@@ -2067,7 +2021,9 @@ func TestCapella_PayloadBodiesByHash(t *testing.T) {
service := &Service{}
service.rpcClient = rpcClient
results, err := service.GetPayloadBodiesByHash(ctx, []common.Hash{})
bRoot := [32]byte{}
copy(bRoot[:], "hash")
results, err := service.GetPayloadBodiesByHash(ctx, []common.Hash{bRoot})
require.NoError(t, err)
require.Equal(t, 1, len(results))
@@ -2113,7 +2069,9 @@ func TestCapella_PayloadBodiesByHash(t *testing.T) {
service := &Service{}
service.rpcClient = rpcClient
results, err := service.GetPayloadBodiesByHash(ctx, []common.Hash{})
bRoot := [32]byte{}
copy(bRoot[:], "hash")
results, err := service.GetPayloadBodiesByHash(ctx, []common.Hash{bRoot, bRoot, bRoot})
require.NoError(t, err)
require.Equal(t, 3, len(results))
@@ -2154,7 +2112,9 @@ func TestCapella_PayloadBodiesByHash(t *testing.T) {
service := &Service{}
service.rpcClient = rpcClient
results, err := service.GetPayloadBodiesByHash(ctx, []common.Hash{})
bRoot := [32]byte{}
copy(bRoot[:], "hash")
results, err := service.GetPayloadBodiesByHash(ctx, []common.Hash{bRoot})
require.NoError(t, err)
require.Equal(t, 1, len(results))
@@ -2204,7 +2164,9 @@ func TestCapella_PayloadBodiesByHash(t *testing.T) {
service := &Service{}
service.rpcClient = rpcClient
results, err := service.GetPayloadBodiesByHash(ctx, []common.Hash{})
bRoot := [32]byte{}
copy(bRoot[:], "hash")
results, err := service.GetPayloadBodiesByHash(ctx, []common.Hash{bRoot})
require.NoError(t, err)
require.Equal(t, 2, len(results))
@@ -2247,7 +2209,9 @@ func TestCapella_PayloadBodiesByHash(t *testing.T) {
service := &Service{}
service.rpcClient = rpcClient
results, err := service.GetPayloadBodiesByHash(ctx, []common.Hash{})
bRoot := [32]byte{}
copy(bRoot[:], "hash")
results, err := service.GetPayloadBodiesByHash(ctx, []common.Hash{bRoot, bRoot, bRoot})
require.NoError(t, err)
require.Equal(t, 3, len(results))
@@ -2258,10 +2222,6 @@ func TestCapella_PayloadBodiesByHash(t *testing.T) {
}
func TestCapella_PayloadBodiesByRange(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
EnableOptionalEngineMethods: true,
})
defer resetFn()
t.Run("empty response works", func(t *testing.T) {
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
@@ -2509,10 +2469,6 @@ func TestCapella_PayloadBodiesByRange(t *testing.T) {
}
func Test_ExchangeCapabilities(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
EnableOptionalEngineMethods: true,
})
defer resetFn()
t.Run("empty response works", func(t *testing.T) {
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")

View File

@@ -28,7 +28,6 @@ go_library(
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/forkchoice:go_default_library",

View File

@@ -11,10 +11,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/time/slots"
)
// orphanLateBlockFirstThreshold is the number of seconds after which we
// consider a block to be late, and thus a candidate to being reorged.
const orphanLateBlockFirstThreshold = 4
// ProcessAttestationsThreshold is the number of seconds after which we
// process attestations for the current slot
const ProcessAttestationsThreshold = 10
@@ -137,7 +133,8 @@ func (n *Node) setNodeAndParentValidated(ctx context.Context) error {
// slot will have secs = 3 below.
func (n *Node) arrivedEarly(genesisTime uint64) (bool, error) {
secs, err := slots.SecondsSinceSlotStart(n.slot, genesisTime, n.timestamp)
return secs < orphanLateBlockFirstThreshold, err
votingWindow := params.BeaconConfig().SecondsPerSlot / params.BeaconConfig().IntervalsPerSlot
return secs < votingWindow, err
}
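With mainnet values (SecondsPerSlot = 12, IntervalsPerSlot = 3) the computed voting window is 12 / 3 = 4 seconds, the same cutoff as the removed orphanLateBlockFirstThreshold constant, so behavior is unchanged while the threshold now follows the runtime config.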
// arrivedAfterOrphanCheck returns whether this block was inserted after the

View File

@@ -277,6 +277,7 @@ func TestNode_TimeStampsChecks(t *testing.T) {
require.NoError(t, err)
require.Equal(t, false, late)
orphanLateBlockFirstThreshold := params.BeaconConfig().SecondsPerSlot / params.BeaconConfig().IntervalsPerSlot
// late block
driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+1)
root = [32]byte{'b'}

View File

@@ -3,7 +3,6 @@ package doublylinkedtree
import (
"time"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/time/slots"
)
@@ -98,9 +97,6 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
// This function needs to be called only when proposing a block and all
// attestation processing has already happened.
func (f *ForkChoice) GetProposerHead() [32]byte {
if features.Get().DisableReorgLateBlocks {
return f.CachedHeadRoot()
}
head := f.store.headNode
if head == nil {
return [32]byte{}

View File

@@ -28,6 +28,7 @@ func TestForkChoice_ShouldOverrideFCU(t *testing.T) {
}
f.ProcessAttestation(ctx, attesters, root, 0)
orphanLateBlockFirstThreshold := params.BeaconConfig().SecondsPerSlot / params.BeaconConfig().IntervalsPerSlot
driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+1)
st, root, err = prepareForkchoiceState(ctx, 2, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 0, 0)
require.NoError(t, err)
@@ -125,6 +126,7 @@ func TestForkChoice_GetProposerHead(t *testing.T) {
headRoot, err := f.Head(ctx)
require.NoError(t, err)
require.Equal(t, root, headRoot)
orphanLateBlockFirstThreshold := params.BeaconConfig().SecondsPerSlot / params.BeaconConfig().IntervalsPerSlot
f.store.headNode.timestamp -= params.BeaconConfig().SecondsPerSlot - orphanLateBlockFirstThreshold
t.Run("head is weak", func(t *testing.T) {
require.Equal(t, parentRoot, f.GetProposerHead())

View File

@@ -116,6 +116,7 @@ type BeaconNode struct {
initialSyncComplete chan struct{}
BlobStorage *filesystem.BlobStorage
blobRetentionEpochs primitives.Epoch
verifyInitWaiter *verification.InitializerWaiter
}
// New creates a new node instance, sets up configuration options, and registers
@@ -207,6 +208,7 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
if err := beacon.startDB(cliCtx, depositAddress); err != nil {
return nil, err
}
beacon.BlobStorage.WarmCache()
log.Debugln("Starting Slashing DB")
if err := beacon.startSlasherDB(cliCtx); err != nil {
@@ -228,13 +230,8 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
return nil, err
}
if beacon.finalizedStateAtStartUp != nil {
if err := beacon.BlobStorage.Initialize(beacon.finalizedStateAtStartUp.Slot()); err != nil {
return nil, fmt.Errorf("failed to initialize blob storage: %w", err)
}
} else {
log.Warn("No finalized beacon state at startup, cannot prune blobs")
}
beacon.verifyInitWaiter = verification.NewInitializerWaiter(
beacon.clockWaiter, forkchoice.NewROForkChoice(beacon.forkChoicer), beacon.stateGen)
log.Debugln("Registering P2P Service")
if err := beacon.registerP2P(cliCtx); err != nil {
@@ -401,7 +398,7 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
log.WithField("database-path", dbPath).Info("Checking DB")
d, err := kv.NewKVStore(b.ctx, dbPath, kv.WithBlobRetentionEpochs(b.blobRetentionEpochs))
d, err := kv.NewKVStore(b.ctx, dbPath)
if err != nil {
return err
}
@@ -424,7 +421,7 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
return errors.Wrap(err, "could not clear database")
}
d, err = kv.NewKVStore(b.ctx, dbPath, kv.WithBlobRetentionEpochs(b.blobRetentionEpochs))
d, err = kv.NewKVStore(b.ctx, dbPath)
if err != nil {
return errors.Wrap(err, "could not create new database")
}
@@ -746,7 +743,7 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}) erro
regularsync.WithInitialSyncComplete(initialSyncComplete),
regularsync.WithStateNotifier(b),
regularsync.WithBlobStorage(b.BlobStorage),
regularsync.WithVerifierWaiter(verification.NewInitializerWaiter(b.clockWaiter, b.forkChoicer, b.stateGen)),
regularsync.WithVerifierWaiter(b.verifyInitWaiter),
)
return b.services.RegisterService(rs)
}
@@ -757,6 +754,9 @@ func (b *BeaconNode) registerInitialSyncService(complete chan struct{}) error {
return err
}
opts := []initialsync.Option{
initialsync.WithVerifierWaiter(b.verifyInitWaiter),
}
is := initialsync.NewService(b.ctx, &initialsync.Config{
DB: b.db,
Chain: chainService,
@@ -766,7 +766,7 @@ func (b *BeaconNode) registerInitialSyncService(complete chan struct{}) error {
ClockWaiter: b.clockWaiter,
InitialSyncComplete: complete,
BlobStorage: b.BlobStorage,
})
}, opts...)
return b.services.RegisterService(is)
}

View File

@@ -47,7 +47,6 @@ go_test(
deps = [
"//async:go_default_library",
"//beacon-chain/operations/attestations/kv:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//crypto/bls:go_default_library",

View File

@@ -14,7 +14,6 @@ go_library(
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//beacon-chain/core/helpers:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/hash:go_default_library",
@@ -39,7 +38,6 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//crypto/bls:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",

View File

@@ -7,7 +7,6 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
attaggregation "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/attestation/aggregation/attestations"
@@ -46,30 +45,7 @@ func (c *AttCaches) aggregateUnaggregatedAtts(ctx context.Context, unaggregatedA
// Track the unaggregated attestations that aren't able to aggregate.
leftOverUnaggregatedAtt := make(map[[32]byte]bool)
if features.Get().AggregateParallel {
leftOverUnaggregatedAtt = c.aggregateParallel(attsByDataRoot, leftOverUnaggregatedAtt)
} else {
for _, atts := range attsByDataRoot {
aggregated, err := attaggregation.AggregateDisjointOneBitAtts(atts)
if err != nil {
return errors.Wrap(err, "could not aggregate unaggregated attestations")
}
if aggregated == nil {
return errors.New("could not aggregate unaggregated attestations")
}
if helpers.IsAggregated(aggregated) {
if err := c.SaveAggregatedAttestations([]*ethpb.Attestation{aggregated}); err != nil {
return err
}
} else {
h, err := hashFn(aggregated)
if err != nil {
return err
}
leftOverUnaggregatedAtt[h] = true
}
}
}
leftOverUnaggregatedAtt = c.aggregateParallel(attsByDataRoot, leftOverUnaggregatedAtt)
// Remove the unaggregated attestations from the pool that were successfully aggregated.
for _, att := range unaggregatedAtts {

View File

@@ -9,7 +9,6 @@ import (
"github.com/pkg/errors"
fssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
@@ -18,11 +17,6 @@ import (
)
func TestKV_Aggregated_AggregateUnaggregatedAttestations(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
AggregateParallel: true,
})
defer resetFn()
cache := NewAttCaches()
priv, err := bls.RandKey()
require.NoError(t, err)

View File

@@ -7,7 +7,6 @@ import (
"testing"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v4/config/features"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
attaggregation "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/attestation/aggregation/attestations"
@@ -18,11 +17,6 @@ import (
)
func TestBatchAttestations_Multiple(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
AggregateParallel: true,
})
defer resetFn()
s, err := NewService(context.Background(), &Config{Pool: NewPool()})
require.NoError(t, err)

View File

@@ -23,6 +23,7 @@ go_library(
"//consensus-types/primitives:go_default_library",
"//consensus-types/wrapper:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/metadata:go_default_library",
"@com_github_pkg_errors//:go_default_library",

View File

@@ -6,6 +6,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/wrapper"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1/metadata"
)
@@ -45,17 +46,17 @@ func InitializeDataMaps() {
},
bytesutil.ToBytes4(params.BeaconConfig().BellatrixForkVersion): func() (interfaces.ReadOnlySignedBeaconBlock, error) {
return blocks.NewSignedBeaconBlock(
&ethpb.SignedBeaconBlockBellatrix{Block: &ethpb.BeaconBlockBellatrix{Body: &ethpb.BeaconBlockBodyBellatrix{}}},
&ethpb.SignedBeaconBlockBellatrix{Block: &ethpb.BeaconBlockBellatrix{Body: &ethpb.BeaconBlockBodyBellatrix{ExecutionPayload: &enginev1.ExecutionPayload{}}}},
)
},
bytesutil.ToBytes4(params.BeaconConfig().CapellaForkVersion): func() (interfaces.ReadOnlySignedBeaconBlock, error) {
return blocks.NewSignedBeaconBlock(
&ethpb.SignedBeaconBlockCapella{Block: &ethpb.BeaconBlockCapella{Body: &ethpb.BeaconBlockBodyCapella{}}},
&ethpb.SignedBeaconBlockCapella{Block: &ethpb.BeaconBlockCapella{Body: &ethpb.BeaconBlockBodyCapella{ExecutionPayload: &enginev1.ExecutionPayloadCapella{}}}},
)
},
bytesutil.ToBytes4(params.BeaconConfig().DenebForkVersion): func() (interfaces.ReadOnlySignedBeaconBlock, error) {
return blocks.NewSignedBeaconBlock(
&ethpb.SignedBeaconBlockDeneb{Block: &ethpb.BeaconBlockDeneb{Body: &ethpb.BeaconBlockBodyDeneb{}}},
&ethpb.SignedBeaconBlockDeneb{Block: &ethpb.BeaconBlockDeneb{Body: &ethpb.BeaconBlockBodyDeneb{ExecutionPayload: &enginev1.ExecutionPayloadDeneb{}}}},
)
},
}

View File

@@ -501,6 +501,13 @@ func (s *Server) SubmitAttesterSlashing(w http.ResponseWriter, r *http.Request)
httputil.HandleError(w, "Could not insert attester slashing into pool: "+err.Error(), http.StatusInternalServerError)
return
}
// notify events
s.OperationNotifier.OperationFeed().Send(&feed.Event{
Type: operation.AttesterSlashingReceived,
Data: &operation.AttesterSlashingReceivedData{
AttesterSlashing: slashing,
},
})
if !features.Get().DisableBroadcastSlashings {
if err = s.Broadcaster.Broadcast(ctx, slashing); err != nil {
httputil.HandleError(w, "Could not broadcast slashing object: "+err.Error(), http.StatusInternalServerError)
@@ -569,6 +576,15 @@ func (s *Server) SubmitProposerSlashing(w http.ResponseWriter, r *http.Request)
httputil.HandleError(w, "Could not insert proposer slashing into pool: "+err.Error(), http.StatusInternalServerError)
return
}
// notify events
s.OperationNotifier.OperationFeed().Send(&feed.Event{
Type: operation.ProposerSlashingReceived,
Data: &operation.ProposerSlashingReceivedData{
ProposerSlashing: slashing,
},
})
if !features.Get().DisableBroadcastSlashings {
if err = s.Broadcaster.Broadcast(ctx, slashing); err != nil {
httputil.HandleError(w, "Could not broadcast slashing object: "+err.Error(), http.StatusInternalServerError)

View File

@@ -1205,10 +1205,12 @@ func TestSubmitAttesterSlashing_Ok(t *testing.T) {
}
broadcaster := &p2pMock.MockBroadcaster{}
chainmock := &blockchainmock.ChainService{State: bs}
s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
SlashingsPool: &slashingsmock.PoolMock{},
Broadcaster: broadcaster,
ChainInfoFetcher: chainmock,
SlashingsPool: &slashingsmock.PoolMock{},
Broadcaster: broadcaster,
OperationNotifier: chainmock.OperationNotifier(),
}
toSubmit := shared.AttesterSlashingsFromConsensus([]*ethpbv1alpha1.AttesterSlashing{slashing})
@@ -1295,10 +1297,12 @@ func TestSubmitAttesterSlashing_AcrossFork(t *testing.T) {
}
broadcaster := &p2pMock.MockBroadcaster{}
chainmock := &blockchainmock.ChainService{State: bs}
s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
SlashingsPool: &slashingsmock.PoolMock{},
Broadcaster: broadcaster,
ChainInfoFetcher: chainmock,
SlashingsPool: &slashingsmock.PoolMock{},
Broadcaster: broadcaster,
OperationNotifier: chainmock.OperationNotifier(),
}
toSubmit := shared.AttesterSlashingsFromConsensus([]*ethpbv1alpha1.AttesterSlashing{slashing})
@@ -1404,10 +1408,12 @@ func TestSubmitProposerSlashing_Ok(t *testing.T) {
}
broadcaster := &p2pMock.MockBroadcaster{}
chainmock := &blockchainmock.ChainService{State: bs}
s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
SlashingsPool: &slashingsmock.PoolMock{},
Broadcaster: broadcaster,
ChainInfoFetcher: chainmock,
SlashingsPool: &slashingsmock.PoolMock{},
Broadcaster: broadcaster,
OperationNotifier: chainmock.OperationNotifier(),
}
toSubmit := shared.ProposerSlashingsFromConsensus([]*ethpbv1alpha1.ProposerSlashing{slashing})
@@ -1486,10 +1492,12 @@ func TestSubmitProposerSlashing_AcrossFork(t *testing.T) {
}
broadcaster := &p2pMock.MockBroadcaster{}
chainmock := &blockchainmock.ChainService{State: bs}
s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
SlashingsPool: &slashingsmock.PoolMock{},
Broadcaster: broadcaster,
ChainInfoFetcher: chainmock,
SlashingsPool: &slashingsmock.PoolMock{},
Broadcaster: broadcaster,
OperationNotifier: chainmock.OperationNotifier(),
}
toSubmit := shared.ProposerSlashingsFromConsensus([]*ethpbv1alpha1.ProposerSlashing{slashing})

View File

@@ -38,6 +38,7 @@ go_test(
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/fieldparams:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",

View File

@@ -44,6 +44,10 @@ const (
PayloadAttributesTopic = "payload_attributes"
// BlobSidecarTopic represents a new blob sidecar event topic
BlobSidecarTopic = "blob_sidecar"
// ProposerSlashingTopic represents a new proposer slashing event topic
ProposerSlashingTopic = "proposer_slashing"
// AttesterSlashingTopic represents a new attester slashing event topic
AttesterSlashingTopic = "attester_slashing"
)
const topicDataMismatch = "Event data type %T does not correspond to event topic %s"
@@ -61,6 +65,8 @@ var casesHandled = map[string]bool{
BLSToExecutionChangeTopic: true,
PayloadAttributesTopic: true,
BlobSidecarTopic: true,
ProposerSlashingTopic: true,
AttesterSlashingTopic: true,
}
// StreamEvents provides an endpoint to subscribe to the beacon node Server-Sent-Events stream.
@@ -190,6 +196,26 @@ func handleBlockOperationEvents(w http.ResponseWriter, flusher http.Flusher, req
KzgCommitment: hexutil.Encode(blobData.Blob.KzgCommitment),
}
send(w, flusher, BlobSidecarTopic, blobEvent)
case operation.AttesterSlashingReceived:
if _, ok := requestedTopics[AttesterSlashingTopic]; !ok {
return
}
attesterSlashingData, ok := event.Data.(*operation.AttesterSlashingReceivedData)
if !ok {
write(w, flusher, topicDataMismatch, event.Data, AttesterSlashingTopic)
return
}
send(w, flusher, AttesterSlashingTopic, shared.AttesterSlashingFromConsensus(attesterSlashingData.AttesterSlashing))
case operation.ProposerSlashingReceived:
if _, ok := requestedTopics[ProposerSlashingTopic]; !ok {
return
}
proposerSlashingData, ok := event.Data.(*operation.ProposerSlashingReceivedData)
if !ok {
write(w, flusher, topicDataMismatch, event.Data, ProposerSlashingTopic)
return
}
send(w, flusher, ProposerSlashingTopic, shared.ProposerSlashingFromConsensus(proposerSlashingData.ProposerSlashing))
}
}
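With the two slashing topics wired into the handler, a client can consume them over the standard /eth/v1/events SSE endpoint. A minimal consumer sketch, assuming a beacon node gateway on localhost:3500 (address and port are illustrative, not part of this change):

package main

import (
	"bufio"
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Subscribe only to the two topics added above.
	resp, err := http.Get("http://localhost:3500/eth/v1/events?topics=attester_slashing&topics=proposer_slashing")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	sc := bufio.NewScanner(resp.Body)
	for sc.Scan() {
		// Prints "event: ..." and "data: {...}" lines as they arrive,
		// matching the fixtures in the test output below.
		fmt.Println(sc.Text())
	}
}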

View File

@@ -15,6 +15,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
@@ -41,7 +42,15 @@ func TestStreamEvents_OperationsEvents(t *testing.T) {
OperationNotifier: &mockChain.MockOperationNotifier{},
}
topics := []string{AttestationTopic, VoluntaryExitTopic, SyncCommitteeContributionTopic, BLSToExecutionChangeTopic, BlobSidecarTopic}
topics := []string{
AttestationTopic,
VoluntaryExitTopic,
SyncCommitteeContributionTopic,
BLSToExecutionChangeTopic,
BlobSidecarTopic,
AttesterSlashingTopic,
ProposerSlashingTopic,
}
for i, topic := range topics {
topics[i] = "topics=" + topic
}
@@ -124,6 +133,65 @@ func TestStreamEvents_OperationsEvents(t *testing.T) {
Blob: &vblob,
},
})
s.OperationNotifier.OperationFeed().Send(&feed.Event{
Type: operation.AttesterSlashingReceived,
Data: &operation.AttesterSlashingReceivedData{
AttesterSlashing: &eth.AttesterSlashing{
Attestation_1: &eth.IndexedAttestation{
AttestingIndices: []uint64{0, 1},
Data: &eth.AttestationData{
BeaconBlockRoot: make([]byte, fieldparams.RootLength),
Source: &eth.Checkpoint{
Root: make([]byte, fieldparams.RootLength),
},
Target: &eth.Checkpoint{
Root: make([]byte, fieldparams.RootLength),
},
},
Signature: make([]byte, fieldparams.BLSSignatureLength),
},
Attestation_2: &eth.IndexedAttestation{
AttestingIndices: []uint64{0, 1},
Data: &eth.AttestationData{
BeaconBlockRoot: make([]byte, fieldparams.RootLength),
Source: &eth.Checkpoint{
Root: make([]byte, fieldparams.RootLength),
},
Target: &eth.Checkpoint{
Root: make([]byte, fieldparams.RootLength),
},
},
Signature: make([]byte, fieldparams.BLSSignatureLength),
},
},
},
})
s.OperationNotifier.OperationFeed().Send(&feed.Event{
Type: operation.ProposerSlashingReceived,
Data: &operation.ProposerSlashingReceivedData{
ProposerSlashing: &eth.ProposerSlashing{
Header_1: &eth.SignedBeaconBlockHeader{
Header: &eth.BeaconBlockHeader{
ParentRoot: make([]byte, fieldparams.RootLength),
StateRoot: make([]byte, fieldparams.RootLength),
BodyRoot: make([]byte, fieldparams.RootLength),
},
Signature: make([]byte, fieldparams.BLSSignatureLength),
},
Header_2: &eth.SignedBeaconBlockHeader{
Header: &eth.BeaconBlockHeader{
ParentRoot: make([]byte, fieldparams.RootLength),
StateRoot: make([]byte, fieldparams.RootLength),
BodyRoot: make([]byte, fieldparams.RootLength),
},
Signature: make([]byte, fieldparams.BLSSignatureLength),
},
},
},
})
time.Sleep(1 * time.Second)
request.Context().Done()
@@ -325,6 +393,12 @@ data: {"message":{"validator_index":"0","from_bls_pubkey":"0x0000000000000000000
event: blob_sidecar
data: {"block_root":"0xc78009fdf07fc56a11f122370658a353aaa542ed63e44c4bc15ff4cd105ab33c","index":"0","slot":"0","kzg_commitment":"0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","versioned_hash":"0x01b0761f87b081d5cf10757ccc89f12be355c70e2e29df288b65b30710dcbcd1"}
event: attester_slashing
data: {"attestation_1":{"attesting_indices":["0","1"],"data":{"slot":"0","index":"0","beacon_block_root":"0x0000000000000000000000000000000000000000000000000000000000000000","source":{"epoch":"0","root":"0x0000000000000000000000000000000000000000000000000000000000000000"},"target":{"epoch":"0","root":"0x0000000000000000000000000000000000000000000000000000000000000000"}},"signature":"0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"},"attestation_2":{"attesting_indices":["0","1"],"data":{"slot":"0","index":"0","beacon_block_root":"0x0000000000000000000000000000000000000000000000000000000000000000","source":{"epoch":"0","root":"0x0000000000000000000000000000000000000000000000000000000000000000"},"target":{"epoch":"0","root":"0x0000000000000000000000000000000000000000000000000000000000000000"}},"signature":"0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"}}
event: proposer_slashing
data: {"signed_header_1":{"message":{"slot":"0","proposer_index":"0","parent_root":"0x0000000000000000000000000000000000000000000000000000000000000000","state_root":"0x0000000000000000000000000000000000000000000000000000000000000000","body_root":"0x0000000000000000000000000000000000000000000000000000000000000000"},"signature":"0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"},"signed_header_2":{"message":{"slot":"0","proposer_index":"0","parent_root":"0x0000000000000000000000000000000000000000000000000000000000000000","state_root":"0x0000000000000000000000000000000000000000000000000000000000000000","body_root":"0x0000000000000000000000000000000000000000000000000000000000000000"},"signature":"0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"}}
`
const stateResult = `event: head

View File

@@ -40,7 +40,10 @@ go_library(
go_test(
name = "go_default_test",
srcs = ["errors_test.go"],
srcs = [
"errors_test.go",
"request_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/rpc/lookup:go_default_library",

View File

@@ -769,32 +769,36 @@ func ProposerSlashingsToConsensus(src []*ProposerSlashing) ([]*eth.ProposerSlash
func ProposerSlashingsFromConsensus(src []*eth.ProposerSlashing) []*ProposerSlashing {
proposerSlashings := make([]*ProposerSlashing, len(src))
for i, s := range src {
proposerSlashings[i] = &ProposerSlashing{
SignedHeader1: &SignedBeaconBlockHeader{
Message: &BeaconBlockHeader{
Slot: fmt.Sprintf("%d", s.Header_1.Header.Slot),
ProposerIndex: fmt.Sprintf("%d", s.Header_1.Header.ProposerIndex),
ParentRoot: hexutil.Encode(s.Header_1.Header.ParentRoot),
StateRoot: hexutil.Encode(s.Header_1.Header.StateRoot),
BodyRoot: hexutil.Encode(s.Header_1.Header.BodyRoot),
},
Signature: hexutil.Encode(s.Header_1.Signature),
},
SignedHeader2: &SignedBeaconBlockHeader{
Message: &BeaconBlockHeader{
Slot: fmt.Sprintf("%d", s.Header_2.Header.Slot),
ProposerIndex: fmt.Sprintf("%d", s.Header_2.Header.ProposerIndex),
ParentRoot: hexutil.Encode(s.Header_2.Header.ParentRoot),
StateRoot: hexutil.Encode(s.Header_2.Header.StateRoot),
BodyRoot: hexutil.Encode(s.Header_2.Header.BodyRoot),
},
Signature: hexutil.Encode(s.Header_2.Signature),
},
}
proposerSlashings[i] = ProposerSlashingFromConsensus(s)
}
return proposerSlashings
}
func ProposerSlashingFromConsensus(src *eth.ProposerSlashing) *ProposerSlashing {
return &ProposerSlashing{
SignedHeader1: &SignedBeaconBlockHeader{
Message: &BeaconBlockHeader{
Slot: fmt.Sprintf("%d", src.Header_1.Header.Slot),
ProposerIndex: fmt.Sprintf("%d", src.Header_1.Header.ProposerIndex),
ParentRoot: hexutil.Encode(src.Header_1.Header.ParentRoot),
StateRoot: hexutil.Encode(src.Header_1.Header.StateRoot),
BodyRoot: hexutil.Encode(src.Header_1.Header.BodyRoot),
},
Signature: hexutil.Encode(src.Header_1.Signature),
},
SignedHeader2: &SignedBeaconBlockHeader{
Message: &BeaconBlockHeader{
Slot: fmt.Sprintf("%d", src.Header_2.Header.Slot),
ProposerIndex: fmt.Sprintf("%d", src.Header_2.Header.ProposerIndex),
ParentRoot: hexutil.Encode(src.Header_2.Header.ParentRoot),
StateRoot: hexutil.Encode(src.Header_2.Header.StateRoot),
BodyRoot: hexutil.Encode(src.Header_2.Header.BodyRoot),
},
Signature: hexutil.Encode(src.Header_2.Signature),
},
}
}
func AttesterSlashingsToConsensus(src []*AttesterSlashing) ([]*eth.AttesterSlashing, error) {
if src == nil {
return nil, errNilValue
@@ -875,54 +879,58 @@ func AttesterSlashingsToConsensus(src []*AttesterSlashing) ([]*eth.AttesterSlash
func AttesterSlashingsFromConsensus(src []*eth.AttesterSlashing) []*AttesterSlashing {
attesterSlashings := make([]*AttesterSlashing, len(src))
for i, s := range src {
a1AttestingIndices := make([]string, len(s.Attestation_1.AttestingIndices))
for j, ix := range s.Attestation_1.AttestingIndices {
a1AttestingIndices[j] = fmt.Sprintf("%d", ix)
}
a2AttestingIndices := make([]string, len(s.Attestation_2.AttestingIndices))
for j, ix := range s.Attestation_2.AttestingIndices {
a2AttestingIndices[j] = fmt.Sprintf("%d", ix)
}
attesterSlashings[i] = &AttesterSlashing{
Attestation1: &IndexedAttestation{
AttestingIndices: a1AttestingIndices,
Data: &AttestationData{
Slot: fmt.Sprintf("%d", s.Attestation_1.Data.Slot),
CommitteeIndex: fmt.Sprintf("%d", s.Attestation_1.Data.CommitteeIndex),
BeaconBlockRoot: hexutil.Encode(s.Attestation_1.Data.BeaconBlockRoot),
Source: &Checkpoint{
Epoch: fmt.Sprintf("%d", s.Attestation_1.Data.Source.Epoch),
Root: hexutil.Encode(s.Attestation_1.Data.Source.Root),
},
Target: &Checkpoint{
Epoch: fmt.Sprintf("%d", s.Attestation_1.Data.Target.Epoch),
Root: hexutil.Encode(s.Attestation_1.Data.Target.Root),
},
},
Signature: hexutil.Encode(s.Attestation_1.Signature),
},
Attestation2: &IndexedAttestation{
AttestingIndices: a2AttestingIndices,
Data: &AttestationData{
Slot: fmt.Sprintf("%d", s.Attestation_2.Data.Slot),
CommitteeIndex: fmt.Sprintf("%d", s.Attestation_2.Data.CommitteeIndex),
BeaconBlockRoot: hexutil.Encode(s.Attestation_2.Data.BeaconBlockRoot),
Source: &Checkpoint{
Epoch: fmt.Sprintf("%d", s.Attestation_2.Data.Source.Epoch),
Root: hexutil.Encode(s.Attestation_2.Data.Source.Root),
},
Target: &Checkpoint{
Epoch: fmt.Sprintf("%d", s.Attestation_2.Data.Target.Epoch),
Root: hexutil.Encode(s.Attestation_2.Data.Target.Root),
},
},
Signature: hexutil.Encode(s.Attestation_2.Signature),
},
}
attesterSlashings[i] = AttesterSlashingFromConsensus(s)
}
return attesterSlashings
}
func AttesterSlashingFromConsensus(src *eth.AttesterSlashing) *AttesterSlashing {
a1AttestingIndices := make([]string, len(src.Attestation_1.AttestingIndices))
for j, ix := range src.Attestation_1.AttestingIndices {
a1AttestingIndices[j] = fmt.Sprintf("%d", ix)
}
a2AttestingIndices := make([]string, len(src.Attestation_2.AttestingIndices))
for j, ix := range src.Attestation_2.AttestingIndices {
a2AttestingIndices[j] = fmt.Sprintf("%d", ix)
}
return &AttesterSlashing{
Attestation1: &IndexedAttestation{
AttestingIndices: a1AttestingIndices,
Data: &AttestationData{
Slot: fmt.Sprintf("%d", src.Attestation_1.Data.Slot),
CommitteeIndex: fmt.Sprintf("%d", src.Attestation_1.Data.CommitteeIndex),
BeaconBlockRoot: hexutil.Encode(src.Attestation_1.Data.BeaconBlockRoot),
Source: &Checkpoint{
Epoch: fmt.Sprintf("%d", src.Attestation_1.Data.Source.Epoch),
Root: hexutil.Encode(src.Attestation_1.Data.Source.Root),
},
Target: &Checkpoint{
Epoch: fmt.Sprintf("%d", src.Attestation_1.Data.Target.Epoch),
Root: hexutil.Encode(src.Attestation_1.Data.Target.Root),
},
},
Signature: hexutil.Encode(src.Attestation_1.Signature),
},
Attestation2: &IndexedAttestation{
AttestingIndices: a2AttestingIndices,
Data: &AttestationData{
Slot: fmt.Sprintf("%d", src.Attestation_2.Data.Slot),
CommitteeIndex: fmt.Sprintf("%d", src.Attestation_2.Data.CommitteeIndex),
BeaconBlockRoot: hexutil.Encode(src.Attestation_2.Data.BeaconBlockRoot),
Source: &Checkpoint{
Epoch: fmt.Sprintf("%d", src.Attestation_2.Data.Source.Epoch),
Root: hexutil.Encode(src.Attestation_2.Data.Source.Root),
},
Target: &Checkpoint{
Epoch: fmt.Sprintf("%d", src.Attestation_2.Data.Target.Epoch),
Root: hexutil.Encode(src.Attestation_2.Data.Target.Root),
},
},
Signature: hexutil.Encode(src.Attestation_2.Signature),
},
}
}
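// The single-item helpers above let the new SSE handler reuse the exact JSON
// shapes already served by the pool endpoints, e.g.
// send(w, flusher, AttesterSlashingTopic, shared.AttesterSlashingFromConsensus(slashing)).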
func AttsToConsensus(src []*Attestation) ([]*eth.Attestation, error) {
if src == nil {
return nil, errNilValue

View File

@@ -6,6 +6,7 @@ import (
"fmt"
"net/http"
"strconv"
"strings"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/gorilla/mux"
@@ -15,15 +16,15 @@ import (
)
func UintFromQuery(w http.ResponseWriter, r *http.Request, name string, required bool) (string, uint64, bool) {
raw := r.URL.Query().Get(name)
if raw == "" && !required {
trimmed := strings.ReplaceAll(r.URL.Query().Get(name), " ", "")
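// Note: ReplaceAll strips every space, interior ones included, so a raw value
// of " 1 0 0 " parses as 100; the new request_test.go below exercises the
// empty, valid, and overflow cases.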
if trimmed == "" && !required {
return "", 0, true
}
v, valid := ValidateUint(w, name, raw)
v, valid := ValidateUint(w, name, trimmed)
if !valid {
return "", 0, false
}
return raw, v, true
return trimmed, v, true
}
func UintFromRoute(w http.ResponseWriter, r *http.Request, name string) (string, uint64, bool) {

View File

@@ -0,0 +1,96 @@
package shared
import (
"math"
"net/http/httptest"
"testing"
)
func TestUintFromQuery_BuilderBoostFactor(t *testing.T) {
type args struct {
raw string
}
tests := []struct {
name string
args args
wantRaw string
wantValue uint64
wantOK bool
}{
{
name: "builder boost factor of 0 returns 0",
args: args{
raw: "0",
},
wantRaw: "0",
wantValue: 0,
wantOK: true,
},
{
name: "builder boost factor of the default, 100 returns 100",
args: args{raw: "100"},
wantRaw: "100",
wantValue: 100,
wantOK: true,
},
{
name: "builder boost factor max uint64 returns max uint64",
args: args{raw: "18446744073709551615"},
wantRaw: "18446744073709551615",
wantValue: math.MaxUint64,
wantOK: true,
},
{
name: "builder boost factor as a percentage returns error",
args: args{raw: "0.30"},
wantRaw: "",
wantValue: 0,
wantOK: false,
},
{
name: "builder boost factor negative int returns error",
args: args{raw: "-100"},
wantRaw: "",
wantValue: 0,
wantOK: false,
},
{
name: "builder boost factor max uint64 +1 returns error",
args: args{raw: "18446744073709551616"},
wantRaw: "",
wantValue: 0,
wantOK: false,
},
{
name: "builder boost factor of invalid string returns error",
args: args{raw: "asdf"},
wantRaw: "",
wantValue: 0,
wantOK: false,
},
{
name: "builder boost factor of number bigger than uint64 string returns error",
args: args{raw: "9871398721983721908372190837219837129803721983719283798217390821739081273918273918273918273981273982139812739821"},
wantRaw: "",
wantValue: 0,
wantOK: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
query := "builder_boost_factor"
bbreq := httptest.NewRequest("GET", "/eth/v3/validator/blocks/{slot}?builder_boost_factor="+tt.args.raw, nil)
w := httptest.NewRecorder()
got, got1, got2 := UintFromQuery(w, bbreq, query, false)
if got != tt.wantRaw {
t.Errorf("UintFromQuery() got = %v, wantRaw %v", got, tt.wantRaw)
}
if got1 != tt.wantValue {
t.Errorf("UintFromQuery() got1 = %v, wantValue %v", got1, tt.wantValue)
}
if got2 != tt.wantOK {
t.Errorf("UintFromQuery() got2 = %v, wantOK %v", got2, tt.wantOK)
}
})
}
}

View File

@@ -46,6 +46,7 @@ go_library(
"@io_opencensus_go//trace:go_default_library",
"@org_golang_google_grpc//codes:go_default_library",
"@org_golang_google_grpc//status:go_default_library",
"@org_golang_google_protobuf//types/known/wrapperspb:go_default_library",
],
)

View File

@@ -19,6 +19,7 @@ import (
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"go.opencensus.io/trace"
"google.golang.org/protobuf/types/known/wrapperspb"
)
type blockType uint8
@@ -155,6 +156,15 @@ func (s *Server) ProduceBlockV3(w http.ResponseWriter, r *http.Request) {
rawGraffiti := r.URL.Query().Get("graffiti")
rawSkipRandaoVerification := r.URL.Query().Get("skip_randao_verification")
var bbFactor *wrapperspb.UInt64Value // nil means fall back to the default factor
rawBbFactor, bbValue, ok := shared.UintFromQuery(w, r, "builder_boost_factor", false)
if !ok {
return
}
if rawBbFactor != "" {
bbFactor = &wrapperspb.UInt64Value{Value: bbValue}
}
slot, valid := shared.ValidateUint(w, "slot", rawSlot)
if !valid {
return
@@ -182,10 +192,11 @@ func (s *Server) ProduceBlockV3(w http.ResponseWriter, r *http.Request) {
}
s.produceBlockV3(ctx, w, r, &eth.BlockRequest{
Slot: primitives.Slot(slot),
RandaoReveal: randaoReveal,
Graffiti: graffiti,
SkipMevBoost: false,
Slot: primitives.Slot(slot),
RandaoReveal: randaoReveal,
Graffiti: graffiti,
SkipMevBoost: false,
BuilderBoostFactor: bbFactor,
}, any)
}
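For reference, a caller can steer the builder-vs-local choice per request: builder_boost_factor=0 forces the local payload, the max uint64 value effectively forces the builder bid, and omitting the parameter keeps the default factor of 100. A minimal request sketch, assuming a gateway on localhost:3500; the slot and the zeroed RANDAO reveal are placeholders (a real validator client supplies a valid BLS signature):

package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
	"strings"
)

func main() {
	reveal := "0x" + strings.Repeat("00", 96) // placeholder 96-byte reveal
	url := "http://localhost:3500/eth/v3/validator/blocks/123?randao_reveal=" + reveal +
		"&builder_boost_factor=0" // 0 forces the local payload
	resp, err := http.Get(url)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, len(body), "bytes")
}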

View File

@@ -201,7 +201,6 @@ go_test(
"status_mainnet_test.go",
"status_test.go",
"sync_committee_test.go",
"unblinder_test.go",
"validator_test.go",
],
embed = [":go_default_library"],

View File

@@ -2,7 +2,6 @@ package validator
import (
"context"
"encoding/hex"
"fmt"
"strings"
"sync"
@@ -17,6 +16,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
blockfeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/block"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/kv"
@@ -38,9 +38,8 @@ import (
var eth1DataNotification bool
const (
// CouldNotDecodeBlock means that a signed beacon block couldn't be created from the block present in the request.
CouldNotDecodeBlock = "Could not decode block"
eth1dataTimeout = 2 * time.Second
eth1dataTimeout = 2 * time.Second
defaultBuilderBoostFactor = uint64(100)
)
// GetBeaconBlock is called by a proposer during its assigned slot to request a block to sign
@@ -110,7 +109,12 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
}
sBlk.SetProposerIndex(idx)
if err = vs.BuildBlockParallel(ctx, sBlk, head, req.SkipMevBoost); err != nil {
builderBoostFactor := defaultBuilderBoostFactor
if req.BuilderBoostFactor != nil {
builderBoostFactor = req.BuilderBoostFactor.Value
}
if err = vs.BuildBlockParallel(ctx, sBlk, head, req.SkipMevBoost, builderBoostFactor); err != nil {
return nil, errors.Wrap(err, "could not build block in parallel")
}
@@ -130,7 +134,7 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
return vs.constructGenericBeaconBlock(sBlk, bundleCache.get(req.Slot))
}
func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.SignedBeaconBlock, head state.BeaconState, skipMevBoost bool) error {
func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.SignedBeaconBlock, head state.BeaconState, skipMevBoost bool, builderBoostFactor uint64) error {
// Build consensus fields in background
var wg sync.WaitGroup
wg.Add(1)
@@ -188,7 +192,7 @@ func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.Signed
}
}
if err := setExecutionData(ctx, sBlk, localPayload, builderPayload, builderKzgCommitments); err != nil {
if err := setExecutionData(ctx, sBlk, localPayload, builderPayload, builderKzgCommitments, builderBoostFactor); err != nil {
return status.Errorf(codes.Internal, "Could not set execution data: %v", err)
}
@@ -197,86 +201,137 @@ func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.Signed
return nil
}
// ProposeBeaconBlock is called by a proposer during its assigned slot to create a block in an attempt
// to get it processed by the beacon node as the canonical head.
// ProposeBeaconBlock handles the proposal of beacon blocks.
func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSignedBeaconBlock) (*ethpb.ProposeResponse, error) {
ctx, span := trace.StartSpan(ctx, "ProposerServer.ProposeBeaconBlock")
defer span.End()
blk, err := blocks.NewSignedBeaconBlock(req.Block)
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, "%s: %v", CouldNotDecodeBlock, err)
if req == nil {
return nil, status.Errorf(codes.InvalidArgument, "empty request")
}
unblinder, err := newUnblinder(blk, vs.BlockBuilder)
block, err := blocks.NewSignedBeaconBlock(req.Block)
if err != nil {
return nil, errors.Wrap(err, "could not create unblinder")
}
blinded := unblinder.b.IsBlinded() //
var scs []*ethpb.BlobSidecar
blk, scs, err = unblinder.unblindBuilderBlock(ctx)
if err != nil {
return nil, errors.Wrap(err, "could not unblind builder block")
return nil, status.Errorf(codes.InvalidArgument, "%s: %v", "decode block failed", err)
}
// Broadcast the new block to the network.
blkPb, err := blk.Proto()
if err != nil {
return nil, errors.Wrap(err, "could not get protobuf block")
var sidecars []*ethpb.BlobSidecar
if block.IsBlinded() {
block, sidecars, err = vs.handleBlindedBlock(ctx, block)
} else {
sidecars, err = vs.handleUnblindedBlock(block, req)
}
if err := vs.P2P.Broadcast(ctx, blkPb); err != nil {
return nil, fmt.Errorf("could not broadcast block: %v", err)
if err != nil {
return nil, status.Errorf(codes.Internal, "%s: %v", "handle block failed", err)
}
root, err := blk.Block().HashTreeRoot()
root, err := block.Block().HashTreeRoot()
if err != nil {
return nil, fmt.Errorf("could not tree hash block: %v", err)
return nil, status.Errorf(codes.Internal, "Could not hash tree root: %v", err)
}
log.WithFields(logrus.Fields{
"blockRoot": hex.EncodeToString(root[:]),
}).Debug("Broadcasting block")
if blk.Version() >= version.Deneb {
if !blinded {
dbBlockContents := req.GetDeneb()
if dbBlockContents == nil {
return nil, errors.New("signed beacon block contents is empty")
}
scs, err = buildBlobSidecars(blk, dbBlockContents.Blobs, dbBlockContents.KzgProofs)
if err != nil {
return nil, fmt.Errorf("could not build blob sidecars: %v", err)
}
}
for i, sc := range scs {
if err := vs.P2P.BroadcastBlob(ctx, uint64(i), sc); err != nil {
log.WithError(err).Error("Could not broadcast blob")
}
readOnlySc, err := blocks.NewROBlobWithRoot(sc, root)
if err != nil {
return nil, fmt.Errorf("could not create ROBlob: %v", err)
}
verifiedSc := blocks.NewVerifiedROBlob(readOnlySc)
if err := vs.BlobReceiver.ReceiveBlob(ctx, verifiedSc); err != nil {
log.WithError(err).Error("Could not receive blob")
}
var wg sync.WaitGroup
errChan := make(chan error, 1)
wg.Add(1)
go func() {
defer wg.Done()
if err := vs.broadcastReceiveBlock(ctx, block, root); err != nil {
errChan <- errors.Wrap(err, "broadcast/receive block failed")
return
}
errChan <- nil
}()
if err := vs.broadcastAndReceiveBlobs(ctx, sidecars, root); err != nil {
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive blobs: %v", err)
}
if err := vs.BlockReceiver.ReceiveBlock(ctx, blk, root); err != nil {
return nil, fmt.Errorf("could not process beacon block: %v", err)
wg.Wait()
if err := <-errChan; err != nil {
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive block: %v", err)
}
log.WithField("slot", blk.Block().Slot()).Debugf(
"Block proposal received via RPC")
return &ethpb.ProposeResponse{BlockRoot: root[:]}, nil
}
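// Design note: the block is broadcast and received on its own goroutine while
// the blob sidecars are broadcast on the calling goroutine, so the two paths
// run concurrently; wg.Wait plus the buffered errChan then joins the block
// path before the response is returned.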
// handleBlindedBlock processes blinded beacon blocks.
func (vs *Server) handleBlindedBlock(ctx context.Context, block interfaces.SignedBeaconBlock) (interfaces.SignedBeaconBlock, []*ethpb.BlobSidecar, error) {
if block.Version() < version.Bellatrix {
return nil, nil, errors.New("pre-Bellatrix blinded block")
}
if vs.BlockBuilder == nil || !vs.BlockBuilder.Configured() {
return nil, nil, errors.New("unconfigured block builder")
}
copiedBlock, err := block.Copy()
if err != nil {
return nil, nil, err
}
payload, bundle, err := vs.BlockBuilder.SubmitBlindedBlock(ctx, block)
if err != nil {
return nil, nil, errors.Wrap(err, "submit blinded block failed")
}
if err := copiedBlock.Unblind(payload); err != nil {
return nil, nil, errors.Wrap(err, "unblind failed")
}
sidecars, err := unblindBlobsSidecars(copiedBlock, bundle)
if err != nil {
return nil, nil, errors.Wrap(err, "unblind sidecars failed")
}
return copiedBlock, sidecars, nil
}
// handleUnblindedBlock processes unblinded beacon blocks.
func (vs *Server) handleUnblindedBlock(block interfaces.SignedBeaconBlock, req *ethpb.GenericSignedBeaconBlock) ([]*ethpb.BlobSidecar, error) {
dbBlockContents := req.GetDeneb()
if dbBlockContents == nil {
return nil, nil
}
return buildBlobSidecars(block, dbBlockContents.Blobs, dbBlockContents.KzgProofs)
}
// broadcastReceiveBlock broadcasts a block and handles its reception.
func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, root [32]byte) error {
protoBlock, err := block.Proto()
if err != nil {
return errors.Wrap(err, "protobuf conversion failed")
}
if err := vs.P2P.Broadcast(ctx, protoBlock); err != nil {
return errors.Wrap(err, "broadcast failed")
}
vs.BlockNotifier.BlockFeed().Send(&feed.Event{
Type: blockfeed.ReceivedBlock,
Data: &blockfeed.ReceivedBlockData{SignedBlock: blk},
Data: &blockfeed.ReceivedBlockData{SignedBlock: block},
})
return vs.BlockReceiver.ReceiveBlock(ctx, block, root, nil)
}
return &ethpb.ProposeResponse{
BlockRoot: root[:],
}, nil
// broadcastAndReceiveBlobs handles the broadcasting and reception of blob sidecars.
func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethpb.BlobSidecar, root [32]byte) error {
for i, sc := range sidecars {
if err := vs.P2P.BroadcastBlob(ctx, uint64(i), sc); err != nil {
return errors.Wrap(err, "broadcast blob failed")
}
readOnlySc, err := blocks.NewROBlobWithRoot(sc, root)
if err != nil {
return errors.Wrap(err, "ROBlob creation failed")
}
verifiedBlob := blocks.NewVerifiedROBlob(readOnlySc)
if err := vs.BlobReceiver.ReceiveBlob(ctx, verifiedBlob); err != nil {
return errors.Wrap(err, "receive blob failed")
}
vs.OperationNotifier.OperationFeed().Send(&feed.Event{
Type: operation.BlobSidecarReceived,
Data: &operation.BlobSidecarReceivedData{Blob: &verifiedBlob},
})
}
return nil
}
// PrepareBeaconProposer caches and updates the fee recipient for the given proposer.
@@ -308,7 +363,7 @@ func (vs *Server) PrepareBeaconProposer(
}
if len(validatorIndices) != 0 {
log.WithFields(logrus.Fields{
"validatorIndices": validatorIndices,
"validatorCount": len(validatorIndices),
}).Info("Updated fee recipient addresses for validator indices")
}
return &emptypb.Empty{}, nil

View File

@@ -41,7 +41,7 @@ var emptyTransactionsRoot = [32]byte{127, 254, 36, 30, 166, 1, 135, 253, 176, 24
const blockBuilderTimeout = 1 * time.Second
// Sets the execution data for the block. Execution data can come from the local EL client or a remote builder, depending on validator registration and circuit breaker conditions.
func setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, localPayload, builderPayload interfaces.ExecutionData, builderKzgCommitments [][]byte) error {
func setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, localPayload, builderPayload interfaces.ExecutionData, builderKzgCommitments [][]byte, builderBoostFactor uint64) error {
_, span := trace.StartSpan(ctx, "ProposerServer.setExecutionData")
defer span.End()
@@ -80,16 +80,22 @@ func setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, loc
}
// Use builder payload if the following is true:
// builder_bid_value * 100 > local_block_value * (local-block-value-boost + 100)
// builder_bid_value * builderBoostFactor(default 100) > local_block_value * (local-block-value-boost + 100)
boost := params.BeaconConfig().LocalBlockValueBoost
higherValueBuilder := builderValueGwei*100 > localValueGwei*(100+boost)
higherValueBuilder := builderValueGwei*builderBoostFactor > localValueGwei*(100+boost)
if boost > 0 && builderBoostFactor != defaultBuilderBoostFactor {
log.WithFields(logrus.Fields{
"localGweiValue": localValueGwei,
"localBoostPercentage": boost,
"builderGweiValue": builderValueGwei,
"builderBoostFactor": builderBoostFactor,
}).Warn("Proposer: both local boost and builder boost are using non default values")
}
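// Worked example of the comparison above (illustrative numbers): with
// localValueGwei = 100 and a 10% local boost, the local side scores
// 100 * (100+10) = 11,000. A 105 Gwei builder bid at the default factor
// scores 105 * 100 = 10,500, so the local payload wins; at a factor of 120
// it scores 105 * 120 = 12,600 and the builder bid wins. A factor of 0
// always selects the local payload, while math.MaxUint64 effectively always
// selects the builder bid.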
// If we can't get the builder value, just use local block.
if higherValueBuilder && withdrawalsMatched { // Builder value is higher and withdrawals match.
blk.SetBlinded(true)
if err := setBuilderExecution(blk, builderPayload, builderKzgCommitments); err != nil {
log.WithError(err).Warn("Proposer: failed to set builder payload")
blk.SetBlinded(false)
return setLocalExecution(blk, localPayload)
} else {
return nil
@@ -100,20 +106,20 @@ func setExecutionData(ctx context.Context, blk interfaces.SignedBeaconBlock, loc
"localGweiValue": localValueGwei,
"localBoostPercentage": boost,
"builderGweiValue": builderValueGwei,
"builderBoostFactor": builderBoostFactor,
}).Warn("Proposer: using local execution payload because higher value")
}
span.AddAttributes(
trace.BoolAttribute("higherValueBuilder", higherValueBuilder),
trace.Int64Attribute("localGweiValue", int64(localValueGwei)), // lint:ignore uintcast -- This is OK for tracing.
trace.Int64Attribute("localBoostPercentage", int64(boost)), // lint:ignore uintcast -- This is OK for tracing.
trace.Int64Attribute("builderGweiValue", int64(builderValueGwei)), // lint:ignore uintcast -- This is OK for tracing.
trace.Int64Attribute("localGweiValue", int64(localValueGwei)), // lint:ignore uintcast -- This is OK for tracing.
trace.Int64Attribute("localBoostPercentage", int64(boost)), // lint:ignore uintcast -- This is OK for tracing.
trace.Int64Attribute("builderGweiValue", int64(builderValueGwei)), // lint:ignore uintcast -- This is OK for tracing.
trace.Int64Attribute("builderBoostFactor", int64(builderBoostFactor)), // lint:ignore uintcast -- This is OK for tracing.
)
return setLocalExecution(blk, localPayload)
default: // Bellatrix case.
blk.SetBlinded(true)
if err := setBuilderExecution(blk, builderPayload, builderKzgCommitments); err != nil {
log.WithError(err).Warn("Proposer: failed to set builder payload")
blk.SetBlinded(false)
return setLocalExecution(blk, localPayload)
} else {
return nil
@@ -315,9 +321,6 @@ func setExecution(blk interfaces.SignedBeaconBlock, execution interfaces.Executi
return errors.New("execution is nil")
}
// Set the blinded status of the block
blk.SetBlinded(isBlinded)
// Set the execution data for the block
errMessage := "failed to set local execution"
if isBlinded {

View File

@@ -2,6 +2,7 @@ package validator
import (
"context"
"math"
"math/big"
"testing"
"time"
@@ -96,7 +97,7 @@ func TestServer_setExecutionData(t *testing.T) {
builderPayload, builderKzgCommitments, err := vs.getBuilderPayloadAndBlobs(ctx, b.Slot(), b.ProposerIndex())
require.NoError(t, err)
require.DeepEqual(t, [][]uint8(nil), builderKzgCommitments)
require.NoError(t, setExecutionData(context.Background(), blk, localPayload, builderPayload, builderKzgCommitments))
require.NoError(t, setExecutionData(context.Background(), blk, localPayload, builderPayload, builderKzgCommitments, defaultBuilderBoostFactor))
e, err := blk.Block().Body().Execution()
require.NoError(t, err)
require.Equal(t, uint64(1), e.BlockNumber()) // Local block
@@ -156,7 +157,7 @@ func TestServer_setExecutionData(t *testing.T) {
builderPayload, builderKzgCommitments, err := vs.getBuilderPayloadAndBlobs(ctx, b.Slot(), b.ProposerIndex())
require.NoError(t, err)
require.DeepEqual(t, [][]uint8(nil), builderKzgCommitments)
require.NoError(t, setExecutionData(context.Background(), blk, localPayload, builderPayload, builderKzgCommitments))
require.NoError(t, setExecutionData(context.Background(), blk, localPayload, builderPayload, builderKzgCommitments, defaultBuilderBoostFactor))
e, err := blk.Block().Body().Execution()
require.NoError(t, err)
require.Equal(t, uint64(1), e.BlockNumber()) // Local block because incorrect withdrawals
@@ -219,11 +220,137 @@ func TestServer_setExecutionData(t *testing.T) {
builderPayload, builderKzgCommitments, err := vs.getBuilderPayloadAndBlobs(ctx, b.Slot(), b.ProposerIndex())
require.NoError(t, err)
require.DeepEqual(t, [][]uint8(nil), builderKzgCommitments)
require.NoError(t, setExecutionData(context.Background(), blk, localPayload, builderPayload, builderKzgCommitments))
require.NoError(t, setExecutionData(context.Background(), blk, localPayload, builderPayload, builderKzgCommitments, defaultBuilderBoostFactor))
e, err := blk.Block().Body().Execution()
require.NoError(t, err)
require.Equal(t, uint64(2), e.BlockNumber()) // Builder block
})
t.Run("Max builder boost factor should return builder", func(t *testing.T) {
blk, err := blocks.NewSignedBeaconBlock(util.NewBlindedBeaconBlockCapella())
require.NoError(t, err)
require.NoError(t, vs.BeaconDB.SaveRegistrationsByValidatorIDs(ctx, []primitives.ValidatorIndex{blk.Block().ProposerIndex()},
[]*ethpb.ValidatorRegistrationV1{{FeeRecipient: make([]byte, fieldparams.FeeRecipientLength), Timestamp: uint64(time.Now().Unix()), Pubkey: make([]byte, fieldparams.BLSPubkeyLength)}}))
ti, err := slots.ToTime(uint64(time.Now().Unix()), 0)
require.NoError(t, err)
sk, err := bls.RandKey()
require.NoError(t, err)
wr, err := ssz.WithdrawalSliceRoot(withdrawals, fieldparams.MaxWithdrawalsPerPayload)
require.NoError(t, err)
builderValue := bytesutil.ReverseByteOrder(big.NewInt(1e9).Bytes())
bid := &ethpb.BuilderBidCapella{
Header: &v1.ExecutionPayloadHeaderCapella{
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
TransactionsRoot: bytesutil.PadTo([]byte{1}, fieldparams.RootLength),
ParentHash: params.BeaconConfig().ZeroHash[:],
Timestamp: uint64(ti.Unix()),
BlockNumber: 2,
WithdrawalsRoot: wr[:],
},
Pubkey: sk.PublicKey().Marshal(),
Value: bytesutil.PadTo(builderValue, 32),
}
d := params.BeaconConfig().DomainApplicationBuilder
domain, err := signing.ComputeDomain(d, nil, nil)
require.NoError(t, err)
sr, err := signing.ComputeSigningRoot(bid, domain)
require.NoError(t, err)
sBid := &ethpb.SignedBuilderBidCapella{
Message: bid,
Signature: sk.Sign(sr[:]).Marshal(),
}
vs.BlockBuilder = &builderTest.MockBuilderService{
BidCapella: sBid,
HasConfigured: true,
Cfg: &builderTest.Config{BeaconDB: beaconDB},
}
wb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockCapella())
require.NoError(t, err)
chain := &blockchainTest.ChainService{ForkChoiceStore: doublylinkedtree.New(), Genesis: time.Now(), Block: wb}
vs.ForkFetcher = chain
vs.ForkchoiceFetcher.SetForkChoiceGenesisTime(uint64(time.Now().Unix()))
vs.TimeFetcher = chain
vs.HeadFetcher = chain
b := blk.Block()
localPayload, _, err := vs.getLocalPayload(ctx, b, capellaTransitionState)
require.NoError(t, err)
builderPayload, builderKzgCommitments, err := vs.getBuilderPayloadAndBlobs(ctx, b.Slot(), b.ProposerIndex())
require.NoError(t, err)
require.DeepEqual(t, [][]uint8(nil), builderKzgCommitments)
require.NoError(t, setExecutionData(context.Background(), blk, localPayload, builderPayload, builderKzgCommitments, math.MaxUint64))
e, err := blk.Block().Body().Execution()
require.NoError(t, err)
require.Equal(t, uint64(2), e.BlockNumber()) // Builder block
})
t.Run("Builder builder has higher value but forced to local payload with builder boost factor", func(t *testing.T) {
blk, err := blocks.NewSignedBeaconBlock(util.NewBlindedBeaconBlockCapella())
require.NoError(t, err)
require.NoError(t, vs.BeaconDB.SaveRegistrationsByValidatorIDs(ctx, []primitives.ValidatorIndex{blk.Block().ProposerIndex()},
[]*ethpb.ValidatorRegistrationV1{{FeeRecipient: make([]byte, fieldparams.FeeRecipientLength), Timestamp: uint64(time.Now().Unix()), Pubkey: make([]byte, fieldparams.BLSPubkeyLength)}}))
ti, err := slots.ToTime(uint64(time.Now().Unix()), 0)
require.NoError(t, err)
sk, err := bls.RandKey()
require.NoError(t, err)
wr, err := ssz.WithdrawalSliceRoot(withdrawals, fieldparams.MaxWithdrawalsPerPayload)
require.NoError(t, err)
builderValue := bytesutil.ReverseByteOrder(big.NewInt(1e9).Bytes())
bid := &ethpb.BuilderBidCapella{
Header: &v1.ExecutionPayloadHeaderCapella{
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
TransactionsRoot: bytesutil.PadTo([]byte{1}, fieldparams.RootLength),
ParentHash: params.BeaconConfig().ZeroHash[:],
Timestamp: uint64(ti.Unix()),
BlockNumber: 2,
WithdrawalsRoot: wr[:],
},
Pubkey: sk.PublicKey().Marshal(),
Value: bytesutil.PadTo(builderValue, 32),
}
d := params.BeaconConfig().DomainApplicationBuilder
domain, err := signing.ComputeDomain(d, nil, nil)
require.NoError(t, err)
sr, err := signing.ComputeSigningRoot(bid, domain)
require.NoError(t, err)
sBid := &ethpb.SignedBuilderBidCapella{
Message: bid,
Signature: sk.Sign(sr[:]).Marshal(),
}
vs.BlockBuilder = &builderTest.MockBuilderService{
BidCapella: sBid,
HasConfigured: true,
Cfg: &builderTest.Config{BeaconDB: beaconDB},
}
wb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockCapella())
require.NoError(t, err)
chain := &blockchainTest.ChainService{ForkChoiceStore: doublylinkedtree.New(), Genesis: time.Now(), Block: wb}
vs.ForkFetcher = chain
vs.ForkchoiceFetcher.SetForkChoiceGenesisTime(uint64(time.Now().Unix()))
vs.TimeFetcher = chain
vs.HeadFetcher = chain
b := blk.Block()
localPayload, _, err := vs.getLocalPayload(ctx, b, capellaTransitionState)
require.NoError(t, err)
builderPayload, builderKzgCommitments, err := vs.getBuilderPayloadAndBlobs(ctx, b.Slot(), b.ProposerIndex())
require.NoError(t, err)
require.DeepEqual(t, [][]uint8(nil), builderKzgCommitments)
require.NoError(t, setExecutionData(context.Background(), blk, localPayload, builderPayload, builderKzgCommitments, 0))
e, err := blk.Block().Body().Execution()
require.NoError(t, err)
require.Equal(t, uint64(1), e.BlockNumber()) // Local block
})
t.Run("Builder configured. Local block has higher value", func(t *testing.T) {
blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockCapella())
require.NoError(t, err)
@@ -234,14 +361,14 @@ func TestServer_setExecutionData(t *testing.T) {
builderPayload, builderKzgCommitments, err := vs.getBuilderPayloadAndBlobs(ctx, b.Slot(), b.ProposerIndex())
require.NoError(t, err)
require.DeepEqual(t, [][]uint8(nil), builderKzgCommitments)
require.NoError(t, setExecutionData(context.Background(), blk, localPayload, builderPayload, builderKzgCommitments))
require.NoError(t, setExecutionData(context.Background(), blk, localPayload, builderPayload, builderKzgCommitments, defaultBuilderBoostFactor))
e, err := blk.Block().Body().Execution()
require.NoError(t, err)
require.Equal(t, uint64(3), e.BlockNumber()) // Local block
require.LogsContain(t, hook, "builderGweiValue=1 localBoostPercentage=0 localGweiValue=2")
})
t.Run("Builder configured. Local block and boost has higher value", func(t *testing.T) {
t.Run("Builder configured. Local block and local boost has higher value", func(t *testing.T) {
cfg := params.BeaconConfig().Copy()
cfg.LocalBlockValueBoost = 1 // Boost 1%.
params.OverrideBeaconConfig(cfg)
@@ -255,7 +382,7 @@ func TestServer_setExecutionData(t *testing.T) {
builderPayload, builderKzgCommitments, err := vs.getBuilderPayloadAndBlobs(ctx, b.Slot(), b.ProposerIndex())
require.NoError(t, err)
require.DeepEqual(t, [][]uint8(nil), builderKzgCommitments)
require.NoError(t, setExecutionData(context.Background(), blk, localPayload, builderPayload, builderKzgCommitments))
require.NoError(t, setExecutionData(context.Background(), blk, localPayload, builderPayload, builderKzgCommitments, defaultBuilderBoostFactor))
e, err := blk.Block().Body().Execution()
require.NoError(t, err)
require.Equal(t, uint64(3), e.BlockNumber()) // Local block
@@ -277,7 +404,7 @@ func TestServer_setExecutionData(t *testing.T) {
builderPayload, builderKzgCommitments, err := vs.getBuilderPayloadAndBlobs(ctx, b.Slot(), b.ProposerIndex())
require.ErrorIs(t, consensus_types.ErrNilObjectWrapped, err) // Builder returns a fault; use local block
require.DeepEqual(t, [][]uint8(nil), builderKzgCommitments)
require.NoError(t, setExecutionData(context.Background(), blk, localPayload, builderPayload, builderKzgCommitments))
require.NoError(t, setExecutionData(context.Background(), blk, localPayload, builderPayload, builderKzgCommitments, defaultBuilderBoostFactor))
e, err := blk.Block().Body().Execution()
require.NoError(t, err)
require.Equal(t, uint64(4), e.BlockNumber()) // Local block
@@ -385,7 +512,7 @@ func TestServer_setExecutionData(t *testing.T) {
localPayload, _, err := vs.getLocalPayload(ctx, blk.Block(), denebTransitionState)
require.NoError(t, err)
require.NoError(t, setExecutionData(context.Background(), blk, localPayload, builderPayload, builderKzgCommitments))
require.NoError(t, setExecutionData(context.Background(), blk, localPayload, builderPayload, builderKzgCommitments, defaultBuilderBoostFactor))
got, err := blk.Block().Body().BlobKzgCommitments()
require.NoError(t, err)

View File

@@ -57,7 +57,7 @@ func (c *blobsBundleCache) prune(minSlot primitives.Slot) {
}
// buildBlobSidecars given a block, builds the blob sidecars for the block.
func buildBlobSidecars(blk interfaces.SignedBeaconBlock, blobs [][]byte, kzgproofs [][]byte) ([]*ethpb.BlobSidecar, error) {
func buildBlobSidecars(blk interfaces.SignedBeaconBlock, blobs [][]byte, kzgProofs [][]byte) ([]*ethpb.BlobSidecar, error) {
if blk.Version() < version.Deneb {
return nil, nil // No blobs before deneb.
}
@@ -66,7 +66,7 @@ func buildBlobSidecars(blk interfaces.SignedBeaconBlock, blobs [][]byte, kzgproo
return nil, err
}
cLen := len(denebBlk.Block.Body.BlobKzgCommitments)
if cLen != len(blobs) || cLen != len(kzgproofs) {
if cLen != len(blobs) || cLen != len(kzgProofs) {
return nil, errors.New("blob KZG commitments don't match number of blobs or KZG proofs")
}
blobSidecars := make([]*ethpb.BlobSidecar, cLen)
@@ -84,7 +84,7 @@ func buildBlobSidecars(blk interfaces.SignedBeaconBlock, blobs [][]byte, kzgproo
Index: uint64(i),
Blob: blobs[i],
KzgCommitment: denebBlk.Block.Body.BlobKzgCommitments[i],
KzgProof: kzgproofs[i],
KzgProof: kzgProofs[i],
SignedBlockHeader: header,
CommitmentInclusionProof: proof,
}

View File

@@ -173,6 +173,10 @@ func (vs *Server) getLocalPayload(ctx context.Context, blk interfaces.ReadOnlyBe
}
bundleCache.add(slot, bundle)
warnIfFeeRecipientDiffers(payload, val.FeeRecipient)
localValueGwei, err := payload.ValueInGwei()
if err == nil {
log.WithField("value", localValueGwei).Debug("received execution payload from local engine")
}
return payload, overrideBuilder, nil
}

View File

@@ -628,9 +628,10 @@ func injectSlashings(t *testing.T, st state.BeaconState, keys []bls.SecretKey, s
func TestProposer_ProposeBlock_OK(t *testing.T) {
tests := []struct {
name string
block func([32]byte) *ethpb.GenericSignedBeaconBlock
err string
name string
block func([32]byte) *ethpb.GenericSignedBeaconBlock
err string
useBuilder bool
}{
{
name: "phase0",
@@ -677,6 +678,24 @@ func TestProposer_ProposeBlock_OK(t *testing.T) {
blk := &ethpb.GenericSignedBeaconBlock_BlindedCapella{BlindedCapella: blockToPropose}
return &ethpb.GenericSignedBeaconBlock{Block: blk}
},
useBuilder: true,
},
{
name: "blind capella no builder",
block: func(parent [32]byte) *ethpb.GenericSignedBeaconBlock {
blockToPropose := util.NewBlindedBeaconBlockCapella()
blockToPropose.Block.Slot = 5
blockToPropose.Block.ParentRoot = parent[:]
txRoot, err := ssz.TransactionsRoot([][]byte{})
require.NoError(t, err)
withdrawalsRoot, err := ssz.WithdrawalSliceRoot([]*enginev1.Withdrawal{}, fieldparams.MaxWithdrawalsPerPayload)
require.NoError(t, err)
blockToPropose.Block.Body.ExecutionPayloadHeader.TransactionsRoot = txRoot[:]
blockToPropose.Block.Body.ExecutionPayloadHeader.WithdrawalsRoot = withdrawalsRoot[:]
blk := &ethpb.GenericSignedBeaconBlock_BlindedCapella{BlindedCapella: blockToPropose}
return &ethpb.GenericSignedBeaconBlock{Block: blk}
},
err: "unconfigured block builder",
},
{
name: "bellatrix",
@@ -698,6 +717,69 @@ func TestProposer_ProposeBlock_OK(t *testing.T) {
return &ethpb.GenericSignedBeaconBlock{Block: blk}
},
},
{
name: "deneb block some blobs",
block: func(parent [32]byte) *ethpb.GenericSignedBeaconBlock {
blockToPropose := util.NewBeaconBlockContentsDeneb()
blockToPropose.Block.Block.Slot = 5
blockToPropose.Block.Block.ParentRoot = parent[:]
blockToPropose.Blobs = [][]byte{{0x01}, {0x02}, {0x03}}
blockToPropose.KzgProofs = [][]byte{{0x01}, {0x02}, {0x03}}
blockToPropose.Block.Block.Body.BlobKzgCommitments = [][]byte{bytesutil.PadTo([]byte("kc"), 48), bytesutil.PadTo([]byte("kc1"), 48), bytesutil.PadTo([]byte("kc2"), 48)}
blk := &ethpb.GenericSignedBeaconBlock_Deneb{Deneb: blockToPropose}
return &ethpb.GenericSignedBeaconBlock{Block: blk}
},
},
{
name: "deneb block some blobs (kzg and blob count missmatch)",
block: func(parent [32]byte) *ethpb.GenericSignedBeaconBlock {
blockToPropose := util.NewBeaconBlockContentsDeneb()
blockToPropose.Block.Block.Slot = 5
blockToPropose.Block.Block.ParentRoot = parent[:]
blockToPropose.Blobs = [][]byte{{0x01}, {0x02}, {0x03}}
blockToPropose.KzgProofs = [][]byte{{0x01}, {0x02}, {0x03}}
blk := &ethpb.GenericSignedBeaconBlock_Deneb{Deneb: blockToPropose}
return &ethpb.GenericSignedBeaconBlock{Block: blk}
},
err: "blob KZG commitments don't match number of blobs or KZG proofs",
},
{
name: "blind deneb block some blobs",
block: func(parent [32]byte) *ethpb.GenericSignedBeaconBlock {
blockToPropose := util.NewBlindedBeaconBlockDeneb()
blockToPropose.Message.Slot = 5
blockToPropose.Message.ParentRoot = parent[:]
txRoot, err := ssz.TransactionsRoot([][]byte{})
require.NoError(t, err)
withdrawalsRoot, err := ssz.WithdrawalSliceRoot([]*enginev1.Withdrawal{}, fieldparams.MaxWithdrawalsPerPayload)
require.NoError(t, err)
blockToPropose.Message.Body.ExecutionPayloadHeader.TransactionsRoot = txRoot[:]
blockToPropose.Message.Body.ExecutionPayloadHeader.WithdrawalsRoot = withdrawalsRoot[:]
blockToPropose.Message.Body.BlobKzgCommitments = [][]byte{bytesutil.PadTo([]byte{0x01}, 48)}
blk := &ethpb.GenericSignedBeaconBlock_BlindedDeneb{BlindedDeneb: blockToPropose}
return &ethpb.GenericSignedBeaconBlock{Block: blk}
},
useBuilder: true,
},
{
name: "blind deneb block some blobs (commitment value does not match blob)",
block: func(parent [32]byte) *ethpb.GenericSignedBeaconBlock {
blockToPropose := util.NewBlindedBeaconBlockDeneb()
blockToPropose.Message.Slot = 5
blockToPropose.Message.ParentRoot = parent[:]
txRoot, err := ssz.TransactionsRoot([][]byte{})
require.NoError(t, err)
withdrawalsRoot, err := ssz.WithdrawalSliceRoot([]*enginev1.Withdrawal{}, fieldparams.MaxWithdrawalsPerPayload)
require.NoError(t, err)
blockToPropose.Message.Body.ExecutionPayloadHeader.TransactionsRoot = txRoot[:]
blockToPropose.Message.Body.ExecutionPayloadHeader.WithdrawalsRoot = withdrawalsRoot[:]
blockToPropose.Message.Body.BlobKzgCommitments = [][]byte{bytesutil.PadTo([]byte("kc"), 48)}
blk := &ethpb.GenericSignedBeaconBlock_BlindedDeneb{BlindedDeneb: blockToPropose}
return &ethpb.GenericSignedBeaconBlock{Block: blk}
},
useBuilder: true,
err: "unblind sidecars failed: commitment value doesn't match block",
},
}
for _, tt := range tests {
@@ -715,8 +797,11 @@ func TestProposer_ProposeBlock_OK(t *testing.T) {
BlockReceiver: c,
BlockNotifier: c.BlockNotifier(),
P2P: mockp2p.NewTestP2P(t),
BlockBuilder: &builderTest.MockBuilderService{HasConfigured: true, PayloadCapella: emptyPayloadCapella(), PayloadDeneb: emptyPayloadDeneb(), BlobBundle: &enginev1.BlobsBundle{KzgCommitments: [][]byte{{0x01}}, Proofs: [][]byte{{0x02}}, Blobs: [][]byte{{0x03}}}},
BeaconDB: db,
BlockBuilder: &builderTest.MockBuilderService{HasConfigured: tt.useBuilder, PayloadCapella: emptyPayloadCapella(), PayloadDeneb: emptyPayloadDeneb(),
BlobBundle: &enginev1.BlobsBundle{KzgCommitments: [][]byte{bytesutil.PadTo([]byte{0x01}, 48)}, Proofs: [][]byte{{0x02}}, Blobs: [][]byte{{0x03}}}},
BeaconDB: db,
BlobReceiver: c,
OperationNotifier: c.OperationNotifier(),
}
blockToPropose := tt.block(bsRoot)
res, err := proposerServer.ProposeBeaconBlock(context.Background(), blockToPropose)

View File

@@ -2,125 +2,18 @@ package validator
import (
"bytes"
"context"
"fmt"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/builder"
consensus_types "github.com/prysmaticlabs/prysm/v4/consensus-types"
consensusblocks "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/sirupsen/logrus"
"google.golang.org/protobuf/proto"
)
type unblinder struct {
b interfaces.SignedBeaconBlock
builder builder.BlockBuilder
}
func newUnblinder(b interfaces.SignedBeaconBlock, builder builder.BlockBuilder) (*unblinder, error) {
if err := consensusblocks.BeaconBlockIsNil(b); err != nil {
return nil, err
}
if builder == nil {
return nil, errors.New("nil builder provided")
}
return &unblinder{
b: b,
builder: builder,
}, nil
}
func (u *unblinder) unblindBuilderBlock(ctx context.Context) (interfaces.SignedBeaconBlock, []*ethpb.BlobSidecar, error) {
if !u.b.IsBlinded() || u.b.Version() < version.Bellatrix {
return u.b, nil, nil
}
if u.b.IsBlinded() && !u.builder.Configured() {
return nil, nil, errors.New("builder not configured")
}
psb, err := u.blindedProtoBlock()
if err != nil {
return nil, nil, errors.Wrap(err, "could not get blinded proto block")
}
sb, err := consensusblocks.NewSignedBeaconBlock(psb)
if err != nil {
return nil, nil, errors.Wrap(err, "could not create signed block")
}
if err = copyBlockData(u.b, sb); err != nil {
return nil, nil, errors.Wrap(err, "could not copy block data")
}
h, err := u.b.Block().Body().Execution()
if err != nil {
return nil, nil, errors.Wrap(err, "could not get execution")
}
if err = sb.SetExecution(h); err != nil {
return nil, nil, errors.Wrap(err, "could not set execution")
}
payload, blobsBundle, err := u.builder.SubmitBlindedBlock(ctx, sb)
if err != nil {
return nil, nil, errors.Wrap(err, "could not submit blinded block")
}
headerRoot, err := h.HashTreeRoot()
if err != nil {
return nil, nil, errors.Wrap(err, "could not get header root")
}
payloadRoot, err := payload.HashTreeRoot()
if err != nil {
return nil, nil, errors.Wrap(err, "could not get payload root")
}
if headerRoot != payloadRoot {
return nil, nil, fmt.Errorf("header and payload root do not match, consider disconnect from relay to avoid further issues, "+
"%#x != %#x", headerRoot, payloadRoot)
}
bb, err := u.protoBlock()
if err != nil {
return nil, nil, errors.Wrap(err, "could not get proto block")
}
wb, err := consensusblocks.NewSignedBeaconBlock(bb)
if err != nil {
return nil, nil, errors.Wrap(err, "could not create signed block")
}
if err = copyBlockData(sb, wb); err != nil {
return nil, nil, errors.Wrap(err, "could not copy block data")
}
if err = wb.SetExecution(payload); err != nil {
return nil, nil, errors.Wrap(err, "could not set execution")
}
txs, err := payload.Transactions()
if err != nil {
return nil, nil, errors.Wrap(err, "could not get transactions from payload")
}
if wb.Version() >= version.Deneb && blobsBundle != nil {
log.WithField("blobCount", len(blobsBundle.Blobs))
}
log.WithFields(logrus.Fields{
"blockHash": fmt.Sprintf("%#x", h.BlockHash()),
"feeRecipient": fmt.Sprintf("%#x", h.FeeRecipient()),
"gasUsed": h.GasUsed(),
"slot": u.b.Block().Slot(),
"txs": len(txs),
}).Info("Retrieved full payload from builder")
sidecars, err := unblindBlobsSidecars(u.b, blobsBundle)
if err != nil {
return nil, nil, errors.Wrap(err, "could not unblind blobs sidecars")
}
return wb, sidecars, nil
}
func unblindBlobsSidecars(block interfaces.SignedBeaconBlock, bundle *enginev1.BlobsBundle) ([]*ethpb.BlobSidecar, error) {
if bundle == nil {
if block.Version() < version.Deneb || bundle == nil {
return nil, nil
}
header, err := block.Header()
@@ -168,98 +61,3 @@ func unblindBlobsSidecars(block interfaces.SignedBeaconBlock, bundle *enginev1.B
}
return sidecars, nil
}
func copyBlockData(src interfaces.SignedBeaconBlock, dst interfaces.SignedBeaconBlock) error {
agg, err := src.Block().Body().SyncAggregate()
if err != nil {
return errors.Wrap(err, "could not get sync aggregate")
}
parentRoot := src.Block().ParentRoot()
stateRoot := src.Block().StateRoot()
randaoReveal := src.Block().Body().RandaoReveal()
graffiti := src.Block().Body().Graffiti()
sig := src.Signature()
blsToExecChanges, err := src.Block().Body().BLSToExecutionChanges()
if err != nil && !errors.Is(err, consensus_types.ErrUnsupportedField) {
return errors.Wrap(err, "could not get bls to execution changes")
}
kzgCommitments, err := src.Block().Body().BlobKzgCommitments()
if err != nil && !errors.Is(err, consensus_types.ErrUnsupportedField) {
return errors.Wrap(err, "could not get blob kzg commitments")
}
dst.SetSlot(src.Block().Slot())
dst.SetProposerIndex(src.Block().ProposerIndex())
dst.SetParentRoot(parentRoot[:])
dst.SetStateRoot(stateRoot[:])
dst.SetRandaoReveal(randaoReveal[:])
dst.SetEth1Data(src.Block().Body().Eth1Data())
dst.SetGraffiti(graffiti[:])
dst.SetProposerSlashings(src.Block().Body().ProposerSlashings())
dst.SetAttesterSlashings(src.Block().Body().AttesterSlashings())
dst.SetAttestations(src.Block().Body().Attestations())
dst.SetDeposits(src.Block().Body().Deposits())
dst.SetVoluntaryExits(src.Block().Body().VoluntaryExits())
if err = dst.SetSyncAggregate(agg); err != nil {
return errors.Wrap(err, "could not set sync aggregate")
}
dst.SetSignature(sig[:])
if err = dst.SetBLSToExecutionChanges(blsToExecChanges); err != nil && !errors.Is(err, consensus_types.ErrUnsupportedField) {
return errors.Wrap(err, "could not set bls to execution changes")
}
if err = dst.SetBlobKzgCommitments(kzgCommitments); err != nil && !errors.Is(err, consensus_types.ErrUnsupportedField) {
return errors.Wrap(err, "could not set bls to execution changes")
}
return nil
}
func (u *unblinder) blindedProtoBlock() (proto.Message, error) {
switch u.b.Version() {
case version.Bellatrix:
return &ethpb.SignedBlindedBeaconBlockBellatrix{
Block: &ethpb.BlindedBeaconBlockBellatrix{
Body: &ethpb.BlindedBeaconBlockBodyBellatrix{},
},
}, nil
case version.Capella:
return &ethpb.SignedBlindedBeaconBlockCapella{
Block: &ethpb.BlindedBeaconBlockCapella{
Body: &ethpb.BlindedBeaconBlockBodyCapella{},
},
}, nil
case version.Deneb:
return &ethpb.SignedBlindedBeaconBlockDeneb{
Message: &ethpb.BlindedBeaconBlockDeneb{
Body: &ethpb.BlindedBeaconBlockBodyDeneb{},
},
}, nil
default:
return nil, fmt.Errorf("invalid version %s", version.String(u.b.Version()))
}
}
func (u *unblinder) protoBlock() (proto.Message, error) {
switch u.b.Version() {
case version.Bellatrix:
return &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{},
},
}, nil
case version.Capella:
return &ethpb.SignedBeaconBlockCapella{
Block: &ethpb.BeaconBlockCapella{
Body: &ethpb.BeaconBlockBodyCapella{},
},
}, nil
case version.Deneb:
return &ethpb.SignedBeaconBlockDeneb{
Block: &ethpb.BeaconBlockDeneb{
Body: &ethpb.BeaconBlockBodyDeneb{},
},
}, nil
default:
return nil, fmt.Errorf("invalid version %s", version.String(u.b.Version()))
}
}
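// Hedged usage sketch, grounded in the tests below: newUnblinder and
// unblindBuilderBlock are package-internal, so this only illustrates the call
// shape. The builder interface name (builder.BlockBuilder) is an assumption
// based on the mock service the tests pass in.
func unblindForProposal(
ctx context.Context,
blk interfaces.SignedBeaconBlock,
bs builder.BlockBuilder,
) (interfaces.SignedBeaconBlock, []*ethpb.BlobSidecar, error) {
u, err := newUnblinder(blk, bs)
if err != nil {
return nil, nil, errors.Wrap(err, "could not create unblinder")
}
return u.unblindBuilderBlock(ctx)
}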


@@ -1,409 +0,0 @@
package validator
import (
"context"
"testing"
"github.com/pkg/errors"
builderTest "github.com/prysmaticlabs/prysm/v4/beacon-chain/builder/testing"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/encoding/ssz"
v1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
)
func Test_unblindBuilderBlock(t *testing.T) {
p := emptyPayload()
p.GasLimit = 123
pCapella := emptyPayloadCapella()
pCapella.GasLimit = 123
pDeneb := emptyPayloadDeneb()
pDeneb.GasLimit = 123
pDeneb.ExcessBlobGas = 456
pDeneb.BlobGasUsed = 789
denebblk, denebsidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, fieldparams.MaxBlobsPerBlock)
denebCommitments, err := denebblk.Block().Body().BlobKzgCommitments()
require.NoError(t, err)
execution, err := denebblk.Block().Body().Execution()
require.NoError(t, err)
denebPayload, err := execution.PbDeneb()
require.NoError(t, err)
blobs := make([][]byte, len(denebsidecars))
for i, sidecar := range denebsidecars {
blobs[i] = sidecar.BlobSidecar.Blob
}
tests := []struct {
name string
blk interfaces.SignedBeaconBlock
mock *builderTest.MockBuilderService
err string
returnedBlk interfaces.SignedBeaconBlock
returnedBlobSidecars []blocks.ROBlob
}{
{
name: "old block version",
blk: func() interfaces.SignedBeaconBlock {
wb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
return wb
}(),
returnedBlk: func() interfaces.SignedBeaconBlock {
wb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
return wb
}(),
},
{
name: "blinded without configured builder",
blk: func() interfaces.SignedBeaconBlock {
wb, err := blocks.NewSignedBeaconBlock(util.NewBlindedBeaconBlockBellatrix())
require.NoError(t, err)
return wb
}(),
mock: &builderTest.MockBuilderService{
HasConfigured: false,
},
err: "builder not configured",
},
{
name: "non-blinded without configured builder",
blk: func() interfaces.SignedBeaconBlock {
b := util.NewBeaconBlockBellatrix()
b.Block.Slot = 1
b.Block.ProposerIndex = 2
b.Block.Body.ExecutionPayload = &v1.ExecutionPayload{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
Transactions: make([][]byte, 0),
GasLimit: 123,
}
wb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
return wb
}(),
mock: &builderTest.MockBuilderService{
HasConfigured: false,
Payload: p,
},
returnedBlk: func() interfaces.SignedBeaconBlock {
b := util.NewBeaconBlockBellatrix()
b.Block.Slot = 1
b.Block.ProposerIndex = 2
b.Block.Body.ExecutionPayload = p
wb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
return wb
}(),
},
{
name: "submit blind block error",
blk: func() interfaces.SignedBeaconBlock {
b := util.NewBlindedBeaconBlockBellatrix()
b.Block.Slot = 1
b.Block.ProposerIndex = 2
wb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
return wb
}(),
mock: &builderTest.MockBuilderService{
Payload: &v1.ExecutionPayload{},
HasConfigured: true,
ErrSubmitBlindedBlock: errors.New("can't submit"),
},
err: "can't submit",
},
{
name: "head and payload root mismatch",
blk: func() interfaces.SignedBeaconBlock {
b := util.NewBlindedBeaconBlockBellatrix()
b.Block.Slot = 1
b.Block.ProposerIndex = 2
wb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
return wb
}(),
mock: &builderTest.MockBuilderService{
HasConfigured: true,
Payload: p,
},
returnedBlk: func() interfaces.SignedBeaconBlock {
b := util.NewBeaconBlockBellatrix()
b.Block.Slot = 1
b.Block.ProposerIndex = 2
b.Block.Body.ExecutionPayload = p
wb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
return wb
}(),
err: "header and payload root do not match",
},
{
name: "can get payload Bellatrix",
blk: func() interfaces.SignedBeaconBlock {
b := util.NewBlindedBeaconBlockBellatrix()
b.Block.Slot = 1
b.Block.ProposerIndex = 2
txRoot, err := ssz.TransactionsRoot([][]byte{})
require.NoError(t, err)
b.Block.Body.ExecutionPayloadHeader = &v1.ExecutionPayloadHeader{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
TransactionsRoot: txRoot[:],
GasLimit: 123,
}
wb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
return wb
}(),
mock: &builderTest.MockBuilderService{
HasConfigured: true,
Payload: p,
},
returnedBlk: func() interfaces.SignedBeaconBlock {
b := util.NewBeaconBlockBellatrix()
b.Block.Slot = 1
b.Block.ProposerIndex = 2
b.Block.Body.ExecutionPayload = p
wb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
return wb
}(),
},
{
name: "can get payload Capella",
blk: func() interfaces.SignedBeaconBlock {
b := util.NewBlindedBeaconBlockCapella()
b.Block.Slot = 1
b.Block.ProposerIndex = 2
b.Block.Body.BlsToExecutionChanges = []*eth.SignedBLSToExecutionChange{
{
Message: &eth.BLSToExecutionChange{
ValidatorIndex: 123,
FromBlsPubkey: []byte{'a'},
ToExecutionAddress: []byte{'a'},
},
Signature: []byte("sig123"),
},
{
Message: &eth.BLSToExecutionChange{
ValidatorIndex: 456,
FromBlsPubkey: []byte{'b'},
ToExecutionAddress: []byte{'b'},
},
Signature: []byte("sig456"),
},
}
txRoot, err := ssz.TransactionsRoot([][]byte{})
require.NoError(t, err)
withdrawalsRoot, err := ssz.WithdrawalSliceRoot([]*v1.Withdrawal{}, fieldparams.MaxWithdrawalsPerPayload)
require.NoError(t, err)
b.Block.Body.ExecutionPayloadHeader = &v1.ExecutionPayloadHeaderCapella{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
TransactionsRoot: txRoot[:],
WithdrawalsRoot: withdrawalsRoot[:],
GasLimit: 123,
}
wb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
return wb
}(),
mock: &builderTest.MockBuilderService{
HasConfigured: true,
PayloadCapella: pCapella,
},
returnedBlk: func() interfaces.SignedBeaconBlock {
b := util.NewBeaconBlockCapella()
b.Block.Slot = 1
b.Block.ProposerIndex = 2
b.Block.Body.BlsToExecutionChanges = []*eth.SignedBLSToExecutionChange{
{
Message: &eth.BLSToExecutionChange{
ValidatorIndex: 123,
FromBlsPubkey: []byte{'a'},
ToExecutionAddress: []byte{'a'},
},
Signature: []byte("sig123"),
},
{
Message: &eth.BLSToExecutionChange{
ValidatorIndex: 456,
FromBlsPubkey: []byte{'b'},
ToExecutionAddress: []byte{'b'},
},
Signature: []byte("sig456"),
},
}
b.Block.Body.ExecutionPayload = pCapella
wb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
return wb
}(),
},
{
name: "can get payload and blobs Deneb",
blk: func() interfaces.SignedBeaconBlock {
blindedBlock, err := denebblk.ToBlinded()
require.NoError(t, err)
b, err := blindedBlock.PbBlindedDenebBlock()
require.NoError(t, err)
wb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
return wb
}(),
mock: &builderTest.MockBuilderService{
HasConfigured: true,
PayloadDeneb: denebPayload,
BlobBundle: &v1.BlobsBundle{
KzgCommitments: denebCommitments,
Proofs: [][]byte{{'d', 0}, {'d', 1}, {'d', 2}, {'d', 3}, {'d', 4}, {'d', 5}},
Blobs: blobs,
},
},
returnedBlk: func() interfaces.SignedBeaconBlock {
b, err := denebblk.PbDenebBlock()
require.NoError(t, err)
wb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
return wb
}(),
returnedBlobSidecars: denebsidecars,
},
{
name: "deneb mismatch commitments count",
blk: func() interfaces.SignedBeaconBlock {
blindedBlock, err := denebblk.ToBlinded()
require.NoError(t, err)
b, err := blindedBlock.PbBlindedDenebBlock()
require.NoError(t, err)
wb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
return wb
}(),
mock: &builderTest.MockBuilderService{
HasConfigured: true,
PayloadDeneb: denebPayload,
BlobBundle: &v1.BlobsBundle{
KzgCommitments: [][]byte{{'c', 0}, {'c', 1}, {'c', 2}, {'c', 3}, {'c', 4}},
Proofs: [][]byte{{'d', 0}, {'d', 1}, {'d', 2}, {'d', 3}, {'d', 4}, {'d', 5}},
Blobs: blobs,
},
},
err: "mismatch commitments count",
},
{
name: "deneb mismatch proofs count",
blk: func() interfaces.SignedBeaconBlock {
blindedBlock, err := denebblk.ToBlinded()
require.NoError(t, err)
b, err := blindedBlock.PbBlindedDenebBlock()
require.NoError(t, err)
wb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
return wb
}(),
mock: &builderTest.MockBuilderService{
HasConfigured: true,
PayloadDeneb: denebPayload,
BlobBundle: &v1.BlobsBundle{
KzgCommitments: [][]byte{{'c', 0}, {'c', 1}, {'c', 2}, {'c', 3}, {'c', 4}, {'c', 5}},
Proofs: [][]byte{{'d', 0}, {'d', 1}, {'d', 2}, {'d', 3}, {'d', 4}},
Blobs: blobs,
},
},
err: "mismatch proofs count",
},
{
name: "deneb different count commitments bundle vs block",
blk: func() interfaces.SignedBeaconBlock {
blindedBlock, err := denebblk.ToBlinded()
require.NoError(t, err)
b, err := blindedBlock.PbBlindedDenebBlock()
require.NoError(t, err)
wb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
return wb
}(),
mock: &builderTest.MockBuilderService{
HasConfigured: true,
PayloadDeneb: denebPayload,
BlobBundle: &v1.BlobsBundle{
KzgCommitments: [][]byte{{'c', 0}, {'c', 1}, {'c', 2}, {'c', 3}, {'c', 4}},
Proofs: [][]byte{{'d', 0}, {'d', 1}, {'d', 2}, {'d', 3}, {'d', 4}},
Blobs: blobs[:5],
},
},
err: "commitment count doesn't match block",
},
{
name: "deneb different value commitments bundle vs block",
blk: func() interfaces.SignedBeaconBlock {
blindedBlock, err := denebblk.ToBlinded()
require.NoError(t, err)
b, err := blindedBlock.PbBlindedDenebBlock()
require.NoError(t, err)
wb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
return wb
}(),
mock: &builderTest.MockBuilderService{
HasConfigured: true,
PayloadDeneb: denebPayload,
BlobBundle: &v1.BlobsBundle{
KzgCommitments: [][]byte{{'c', 0}, {'c', 1}, {'c', 2}, {'c', 3}, {'c', 4}, {'c', 5}},
Proofs: [][]byte{{'d', 0}, {'d', 1}, {'d', 2}, {'d', 3}, {'d', 4}, {'d', 5}},
Blobs: blobs,
},
},
err: "commitment value doesn't match block",
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
unblinder, err := newUnblinder(tc.blk, tc.mock)
require.NoError(t, err)
gotBlk, gotBlobs, err := unblinder.unblindBuilderBlock(context.Background())
if tc.err != "" {
require.ErrorContains(t, tc.err, err)
} else {
require.NoError(t, err)
require.DeepEqual(t, tc.returnedBlk, gotBlk)
if tc.returnedBlobSidecars != nil {
blobs := make([]blocks.ROBlob, len(gotBlobs))
for i := range gotBlobs {
blobs[i], err = blocks.NewROBlob(gotBlobs[i])
require.NoError(t, err)
// TODO: update this check when the generate function is updated for inclusion proofs:
// require.DeepEqual(t, tc.returnedBlobSidecars[i].CommitmentInclusionProof, blobs[i].CommitmentInclusionProof)
require.DeepEqual(t, tc.returnedBlobSidecars[i].SignedBlockHeader, blobs[i].SignedBlockHeader)
require.Equal(t, len(tc.returnedBlobSidecars[i].Blob), len(blobs[i].Blob))
}
}
}
})
}
}


@@ -66,6 +66,7 @@ type ReadOnlyBeaconState interface {
HistoricalSummaries() ([]*ethpb.HistoricalSummary, error)
Slashings() []uint64
FieldReferencesCount() map[string]uint64
RecordStateMetrics()
MarshalSSZ() ([]byte, error)
IsNil() bool
Version() int


@@ -5,6 +5,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native/types"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
multi_value_slice "github.com/prysmaticlabs/prysm/v4/container/multi-value-slice"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
@@ -12,24 +13,26 @@ import (
)
var (
multiValueRandaoMixesCountGauge = promauto.NewGauge(prometheus.GaugeOpts{
Name: "multi_value_randao_mixes_count",
})
multiValueBlockRootsCountGauge = promauto.NewGauge(prometheus.GaugeOpts{
Name: "multi_value_block_roots_count",
})
multiValueStateRootsCountGauge = promauto.NewGauge(prometheus.GaugeOpts{
Name: "multi_value_state_roots_count",
})
multiValueBalancesCountGauge = promauto.NewGauge(prometheus.GaugeOpts{
Name: "multi_value_balances_count",
})
multiValueValidatorsCountGauge = promauto.NewGauge(prometheus.GaugeOpts{
Name: "multi_value_validators_count",
})
multiValueInactivityScoresCountGauge = promauto.NewGauge(prometheus.GaugeOpts{
Name: "multi_value_inactivity_scores_count",
})
multiValueCountGauge = promauto.NewGaugeVec(prometheus.GaugeOpts{
Name: "multi_value_object_count",
Help: "The number of instances that exist for the multivalue slice for a particular field.",
}, []string{"field"})
multiValueIndividualElementsCountGauge = promauto.NewGaugeVec(prometheus.GaugeOpts{
Name: "multi_value_individual_elements_count",
Help: "The number of individual elements that exist for the multivalue slice object.",
}, []string{"field"})
multiValueIndividualElementReferencesCountGauge = promauto.NewGaugeVec(prometheus.GaugeOpts{
Name: "multi_value_individual_element_references_count",
Help: "The number of individual element references that exist for the multivalue slice object.",
}, []string{"field"})
multiValueAppendedElementsCountGauge = promauto.NewGaugeVec(prometheus.GaugeOpts{
Name: "multi_value_appended_elements_count",
Help: "The number of appended elements that exist for the multivalue slice object.",
}, []string{"field"})
multiValueAppendedElementReferencesCountGauge = promauto.NewGaugeVec(prometheus.GaugeOpts{
Name: "multi_value_appended_element_references_count",
Help: "The number of appended element references that exist for the multivalue slice object.",
}, []string{"field"})
)
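// Migration note (hedged): the per-field count gauges above are superseded by
// the label-based vectors, so dashboards query one series per metric filtered
// on the "field" label, e.g. multi_value_object_count{field="<FieldIndex.String()>"}.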
// MultiValueRandaoMixes is a multi-value slice of randao mixes.
@@ -43,7 +46,7 @@ func NewMultiValueRandaoMixes(mixes [][]byte) *MultiValueRandaoMixes {
}
mv := &MultiValueRandaoMixes{}
mv.Init(items)
multiValueRandaoMixesCountGauge.Inc()
multiValueCountGauge.WithLabelValues(types.RandaoMixes.String()).Inc()
runtime.SetFinalizer(mv, randaoMixesFinalizer)
return mv
}
@@ -59,7 +62,7 @@ func NewMultiValueBlockRoots(roots [][]byte) *MultiValueBlockRoots {
}
mv := &MultiValueBlockRoots{}
mv.Init(items)
multiValueBlockRootsCountGauge.Inc()
multiValueCountGauge.WithLabelValues(types.BlockRoots.String()).Inc()
runtime.SetFinalizer(mv, blockRootsFinalizer)
return mv
}
@@ -75,7 +78,7 @@ func NewMultiValueStateRoots(roots [][]byte) *MultiValueStateRoots {
}
mv := &MultiValueStateRoots{}
mv.Init(items)
multiValueStateRootsCountGauge.Inc()
multiValueCountGauge.WithLabelValues(types.StateRoots.String()).Inc()
runtime.SetFinalizer(mv, stateRootsFinalizer)
return mv
}
@@ -89,7 +92,7 @@ func NewMultiValueBalances(balances []uint64) *MultiValueBalances {
copy(items, balances)
mv := &MultiValueBalances{}
mv.Init(items)
multiValueBalancesCountGauge.Inc()
multiValueCountGauge.WithLabelValues(types.Balances.String()).Inc()
runtime.SetFinalizer(mv, balancesFinalizer)
return mv
}
@@ -103,7 +106,7 @@ func NewMultiValueInactivityScores(scores []uint64) *MultiValueInactivityScores
copy(items, scores)
mv := &MultiValueInactivityScores{}
mv.Init(items)
multiValueInactivityScoresCountGauge.Inc()
multiValueCountGauge.WithLabelValues(types.InactivityScores.String()).Inc()
runtime.SetFinalizer(mv, inactivityScoresFinalizer)
return mv
}
@@ -115,31 +118,31 @@ type MultiValueValidators = multi_value_slice.Slice[*ethpb.Validator]
func NewMultiValueValidators(vals []*ethpb.Validator) *MultiValueValidators {
mv := &MultiValueValidators{}
mv.Init(vals)
multiValueValidatorsCountGauge.Inc()
multiValueCountGauge.WithLabelValues(types.Validators.String()).Inc()
runtime.SetFinalizer(mv, validatorsFinalizer)
return mv
}
func randaoMixesFinalizer(m *MultiValueRandaoMixes) {
multiValueRandaoMixesCountGauge.Dec()
multiValueCountGauge.WithLabelValues(types.RandaoMixes.String()).Dec()
}
func blockRootsFinalizer(m *MultiValueBlockRoots) {
multiValueBlockRootsCountGauge.Dec()
multiValueCountGauge.WithLabelValues(types.BlockRoots.String()).Dec()
}
func stateRootsFinalizer(m *MultiValueStateRoots) {
multiValueStateRootsCountGauge.Dec()
multiValueCountGauge.WithLabelValues(types.StateRoots.String()).Dec()
}
func balancesFinalizer(m *MultiValueBalances) {
multiValueBalancesCountGauge.Dec()
multiValueCountGauge.WithLabelValues(types.Balances.String()).Dec()
}
func validatorsFinalizer(m *MultiValueValidators) {
multiValueValidatorsCountGauge.Dec()
multiValueCountGauge.WithLabelValues(types.Validators.String()).Dec()
}
func inactivityScoresFinalizer(m *MultiValueInactivityScores) {
multiValueInactivityScoresCountGauge.Dec()
multiValueCountGauge.WithLabelValues(types.InactivityScores.String()).Dec()
}


@@ -932,6 +932,68 @@ func (b *BeaconState) FieldReferencesCount() map[string]uint64 {
return refMap
}
// RecordStateMetrics records any state-related metrics data.
func (b *BeaconState) RecordStateMetrics() {
b.lock.RLock()
defer b.lock.RUnlock()
// Only run this for nodes running with the experimental state.
if !features.Get().EnableExperimentalState {
return
}
// Validators
if b.validatorsMultiValue != nil {
stats := b.validatorsMultiValue.MultiValueStatistics()
multiValueIndividualElementsCountGauge.WithLabelValues(types.Validators.String()).Set(float64(stats.TotalIndividualElements))
multiValueIndividualElementReferencesCountGauge.WithLabelValues(types.Validators.String()).Set(float64(stats.TotalIndividualElemReferences))
multiValueAppendedElementsCountGauge.WithLabelValues(types.Validators.String()).Set(float64(stats.TotalAppendedElements))
multiValueAppendedElementReferencesCountGauge.WithLabelValues(types.Validators.String()).Set(float64(stats.TotalAppendedElemReferences))
}
// Balances
if b.balancesMultiValue != nil {
stats := b.balancesMultiValue.MultiValueStatistics()
multiValueIndividualElementsCountGauge.WithLabelValues(types.Balances.String()).Set(float64(stats.TotalIndividualElements))
multiValueIndividualElementReferencesCountGauge.WithLabelValues(types.Balances.String()).Set(float64(stats.TotalIndividualElemReferences))
multiValueAppendedElementsCountGauge.WithLabelValues(types.Balances.String()).Set(float64(stats.TotalAppendedElements))
multiValueAppendedElementReferencesCountGauge.WithLabelValues(types.Balances.String()).Set(float64(stats.TotalAppendedElemReferences))
}
// InactivityScores
if b.inactivityScoresMultiValue != nil {
stats := b.inactivityScoresMultiValue.MultiValueStatistics()
multiValueIndividualElementsCountGauge.WithLabelValues(types.InactivityScores.String()).Set(float64(stats.TotalIndividualElements))
multiValueIndividualElementReferencesCountGauge.WithLabelValues(types.InactivityScores.String()).Set(float64(stats.TotalIndividualElemReferences))
multiValueAppendedElementsCountGauge.WithLabelValues(types.InactivityScores.String()).Set(float64(stats.TotalAppendedElements))
multiValueAppendedElementReferencesCountGauge.WithLabelValues(types.InactivityScores.String()).Set(float64(stats.TotalAppendedElemReferences))
}
// BlockRoots
if b.blockRootsMultiValue != nil {
stats := b.blockRootsMultiValue.MultiValueStatistics()
multiValueIndividualElementsCountGauge.WithLabelValues(types.BlockRoots.String()).Set(float64(stats.TotalIndividualElements))
multiValueIndividualElementReferencesCountGauge.WithLabelValues(types.BlockRoots.String()).Set(float64(stats.TotalIndividualElemReferences))
multiValueAppendedElementsCountGauge.WithLabelValues(types.BlockRoots.String()).Set(float64(stats.TotalAppendedElements))
multiValueAppendedElementReferencesCountGauge.WithLabelValues(types.BlockRoots.String()).Set(float64(stats.TotalAppendedElemReferences))
}
// StateRoots
if b.stateRootsMultiValue != nil {
stats := b.stateRootsMultiValue.MultiValueStatistics()
multiValueIndividualElementsCountGauge.WithLabelValues(types.StateRoots.String()).Set(float64(stats.TotalIndividualElements))
multiValueIndividualElementReferencesCountGauge.WithLabelValues(types.StateRoots.String()).Set(float64(stats.TotalIndividualElemReferences))
multiValueAppendedElementsCountGauge.WithLabelValues(types.StateRoots.String()).Set(float64(stats.TotalAppendedElements))
multiValueAppendedElementReferencesCountGauge.WithLabelValues(types.StateRoots.String()).Set(float64(stats.TotalAppendedElemReferences))
}
// RandaoMixes
if b.randaoMixesMultiValue != nil {
stats := b.randaoMixesMultiValue.MultiValueStatistics()
multiValueIndividualElementsCountGauge.WithLabelValues(types.RandaoMixes.String()).Set(float64(stats.TotalIndividualElements))
multiValueIndividualElementReferencesCountGauge.WithLabelValues(types.RandaoMixes.String()).Set(float64(stats.TotalIndividualElemReferences))
multiValueAppendedElementsCountGauge.WithLabelValues(types.RandaoMixes.String()).Set(float64(stats.TotalAppendedElements))
multiValueAppendedElementReferencesCountGauge.WithLabelValues(types.RandaoMixes.String()).Set(float64(stats.TotalAppendedElemReferences))
}
}
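// Hypothetical refactor sketch (not part of this diff): each block above
// records the same four statistics, so a shared helper could collapse the
// repetition. The statistics type name is assumed from the
// MultiValueStatistics() accessor used above.
func recordMultiValueStats(field types.FieldIndex, stats multi_value_slice.MultiValueStatistics) {
l := field.String()
multiValueIndividualElementsCountGauge.WithLabelValues(l).Set(float64(stats.TotalIndividualElements))
multiValueIndividualElementReferencesCountGauge.WithLabelValues(l).Set(float64(stats.TotalIndividualElemReferences))
multiValueAppendedElementsCountGauge.WithLabelValues(l).Set(float64(stats.TotalAppendedElements))
multiValueAppendedElementReferencesCountGauge.WithLabelValues(l).Set(float64(stats.TotalAppendedElemReferences))
}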
// IsNil checks if the state and the underlying proto
// object are nil.
func (b *BeaconState) IsNil() bool {


@@ -15,7 +15,6 @@ go_library(
"fuzz_exports.go", # keep
"log.go",
"metrics.go",
"mock_blob_verifier.go",
"options.go",
"pending_attestations_queue.go",
"pending_blocks_queue.go",


@@ -12,15 +12,18 @@ go_library(
"log.go",
"round_robin.go",
"service.go",
"verification.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync",
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//async/abool:go_default_library",
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/blockchain/kzg:go_default_library",
"//beacon-chain/core/feed/block:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/das:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/filesystem:go_default_library",
"//beacon-chain/p2p:go_default_library",
@@ -38,6 +41,7 @@ go_library(
"//consensus-types/primitives:go_default_library",
"//container/leaky-bucket:go_default_library",
"//crypto/rand:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime:go_default_library",
@@ -69,6 +73,7 @@ go_test(
deps = [
"//async/abool:go_default_library",
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/das:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/filesystem:go_default_library",
"//beacon-chain/db/testing:go_default_library",
@@ -80,6 +85,7 @@ go_test(
"//beacon-chain/startup:go_default_library",
"//beacon-chain/sync:go_default_library",
"//beacon-chain/sync/verify:go_default_library",
"//beacon-chain/verification:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",


@@ -120,7 +120,7 @@ type fetchRequestResponse struct {
pid peer.ID
start primitives.Slot
count uint64
bwb []blocks2.BlockWithVerifiedBlobs
bwb []blocks2.BlockWithROBlobs
err error
}
@@ -263,7 +263,7 @@ func (f *blocksFetcher) handleRequest(ctx context.Context, start primitives.Slot
response := &fetchRequestResponse{
start: start,
count: count,
bwb: []blocks2.BlockWithVerifiedBlobs{},
bwb: []blocks2.BlockWithROBlobs{},
err: nil,
}
@@ -304,7 +304,7 @@ func (f *blocksFetcher) fetchBlocksFromPeer(
ctx context.Context,
start primitives.Slot, count uint64,
peers []peer.ID,
) ([]blocks2.BlockWithVerifiedBlobs, peer.ID, error) {
) ([]blocks2.BlockWithROBlobs, peer.ID, error) {
ctx, span := trace.StartSpan(ctx, "initialsync.fetchBlocksFromPeer")
defer span.End()
@@ -332,20 +332,20 @@ func (f *blocksFetcher) fetchBlocksFromPeer(
return nil, "", errNoPeersAvailable
}
func sortedBlockWithVerifiedBlobSlice(blocks []interfaces.ReadOnlySignedBeaconBlock) ([]blocks2.BlockWithVerifiedBlobs, error) {
rb := make([]blocks2.BlockWithVerifiedBlobs, len(blocks))
func sortedBlockWithVerifiedBlobSlice(blocks []interfaces.ReadOnlySignedBeaconBlock) ([]blocks2.BlockWithROBlobs, error) {
rb := make([]blocks2.BlockWithROBlobs, len(blocks))
for i, b := range blocks {
ro, err := blocks2.NewROBlock(b)
if err != nil {
return nil, err
}
rb[i] = blocks2.BlockWithVerifiedBlobs{Block: ro}
rb[i] = blocks2.BlockWithROBlobs{Block: ro}
}
sort.Sort(blocks2.BlockWithVerifiedBlobsSlice(rb))
sort.Sort(blocks2.BlockWithROBlobsSlice(rb))
return rb, nil
}
func blobRequest(bwb []blocks2.BlockWithVerifiedBlobs, blobWindowStart primitives.Slot) *p2ppb.BlobSidecarsByRangeRequest {
func blobRequest(bwb []blocks2.BlockWithROBlobs, blobWindowStart primitives.Slot) *p2ppb.BlobSidecarsByRangeRequest {
if len(bwb) == 0 {
return nil
}
@@ -360,7 +360,7 @@ func blobRequest(bwb []blocks2.BlockWithVerifiedBlobs, blobWindowStart primitive
}
}
func lowestSlotNeedsBlob(retentionStart primitives.Slot, bwb []blocks2.BlockWithVerifiedBlobs) *primitives.Slot {
func lowestSlotNeedsBlob(retentionStart primitives.Slot, bwb []blocks2.BlockWithROBlobs) *primitives.Slot {
if len(bwb) == 0 {
return nil
}
@@ -398,7 +398,7 @@ func sortBlobs(blobs []blocks.ROBlob) []blocks.ROBlob {
var errBlobVerification = errors.New("peer unable to serve aligned BlobSidecarsByRange and BeaconBlockSidecarsByRange responses")
var errMissingBlobsForBlockCommitments = errors.Wrap(errBlobVerification, "blobs unavailable for processing block with kzg commitments")
func verifyAndPopulateBlobs(bwb []blocks2.BlockWithVerifiedBlobs, blobs []blocks.ROBlob, blobWindowStart primitives.Slot) ([]blocks2.BlockWithVerifiedBlobs, error) {
func verifyAndPopulateBlobs(bwb []blocks2.BlockWithROBlobs, blobs []blocks.ROBlob, blobWindowStart primitives.Slot) ([]blocks2.BlockWithROBlobs, error) {
// Assumes bwb has already been sorted by sortedBlockWithVerifiedBlobSlice.
blobs = sortBlobs(blobs)
blobi := 0
@@ -450,7 +450,7 @@ func missingCommitError(root [32]byte, slot primitives.Slot, missing [][]byte) e
}
// fetchBlobsFromPeer fetches blobs from a single randomly selected peer.
func (f *blocksFetcher) fetchBlobsFromPeer(ctx context.Context, bwb []blocks2.BlockWithVerifiedBlobs, pid peer.ID) ([]blocks2.BlockWithVerifiedBlobs, error) {
func (f *blocksFetcher) fetchBlobsFromPeer(ctx context.Context, bwb []blocks2.BlockWithROBlobs, pid peer.ID) ([]blocks2.BlockWithROBlobs, error) {
ctx, span := trace.StartSpan(ctx, "initialsync.fetchBlobsFromPeer")
defer span.End()
if slots.ToEpoch(f.clock.CurrentSlot()) < params.BeaconConfig().DenebForkEpoch {


@@ -306,9 +306,9 @@ func TestBlocksFetcher_RoundRobin(t *testing.T) {
fetcher.stop()
}()
processFetchedBlocks := func() ([]blocks.BlockWithVerifiedBlobs, error) {
processFetchedBlocks := func() ([]blocks.BlockWithROBlobs, error) {
defer cancel()
var unionRespBlocks []blocks.BlockWithVerifiedBlobs
var unionRespBlocks []blocks.BlockWithROBlobs
for {
select {
@@ -347,7 +347,7 @@ func TestBlocksFetcher_RoundRobin(t *testing.T) {
bwb, err := processFetchedBlocks()
assert.NoError(t, err)
sort.Sort(blocks.BlockWithVerifiedBlobsSlice(bwb))
sort.Sort(blocks.BlockWithROBlobsSlice(bwb))
ss := make([]primitives.Slot, len(bwb))
for i, b := range bwb {
ss[i] = b.Block.Block().Slot()
@@ -454,7 +454,7 @@ func TestBlocksFetcher_handleRequest(t *testing.T) {
}
}()
var bwb []blocks.BlockWithVerifiedBlobs
var bwb []blocks.BlockWithROBlobs
select {
case <-ctx.Done():
t.Error(ctx.Err())
@@ -1015,7 +1015,7 @@ func TestLowestSlotNeedsBlob(t *testing.T) {
func TestBlobRequest(t *testing.T) {
var nilReq *ethpb.BlobSidecarsByRangeRequest
// no blocks
req := blobRequest([]blocks.BlockWithVerifiedBlobs{}, 0)
req := blobRequest([]blocks.BlockWithROBlobs{}, 0)
require.Equal(t, nilReq, req)
blks, _ := util.ExtendBlocksPlusBlobs(t, []blocks.ROBlock{}, 10)
sbbs := make([]interfaces.ReadOnlySignedBeaconBlock, len(blks))
@@ -1047,7 +1047,7 @@ func TestBlobRequest(t *testing.T) {
require.Equal(t, len(allAfter), int(req.Count))
}
func testSequenceBlockWithBlob(t *testing.T, nblocks int) ([]blocks.BlockWithVerifiedBlobs, []blocks.ROBlob) {
func testSequenceBlockWithBlob(t *testing.T, nblocks int) ([]blocks.BlockWithROBlobs, []blocks.ROBlob) {
blks, blobs := util.ExtendBlocksPlusBlobs(t, []blocks.ROBlock{}, nblocks)
sbbs := make([]interfaces.ReadOnlySignedBeaconBlock, len(blks))
for i := range blks {


@@ -23,7 +23,7 @@ import (
// either in DB or initial sync cache.
type forkData struct {
peer peer.ID
bwb []blocks.BlockWithVerifiedBlobs
bwb []blocks.BlockWithROBlobs
}
// nonSkippedSlotAfter checks slots after the given one in an attempt to find a non-empty future slot.


@@ -92,7 +92,7 @@ type blocksQueue struct {
// blocksQueueFetchedData is a data container that is returned from a queue on each step.
type blocksQueueFetchedData struct {
pid peer.ID
bwb []blocks.BlockWithVerifiedBlobs
bwb []blocks.BlockWithROBlobs
}
// newBlocksQueue creates an initialized priority queue.


@@ -263,7 +263,7 @@ func TestBlocksQueue_Loop(t *testing.T) {
highestExpectedSlot: tt.highestExpectedSlot,
})
assert.NoError(t, queue.start())
processBlock := func(b blocks.BlockWithVerifiedBlobs) error {
processBlock := func(b blocks.BlockWithROBlobs) error {
block := b.Block
if !beaconDB.HasBlock(ctx, block.Block().ParentRoot()) {
return fmt.Errorf("%w: %#x", errParentDoesNotExist, block.Block().ParentRoot())
@@ -272,10 +272,10 @@ func TestBlocksQueue_Loop(t *testing.T) {
if err != nil {
return err
}
return mc.ReceiveBlock(ctx, block, root)
return mc.ReceiveBlock(ctx, block, root, nil)
}
var blocks []blocks.BlockWithVerifiedBlobs
var blocks []blocks.BlockWithROBlobs
for data := range queue.fetchedData {
for _, b := range data.bwb {
if err := processBlock(b); err != nil {
@@ -538,7 +538,7 @@ func TestBlocksQueue_onDataReceivedEvent(t *testing.T) {
require.NoError(t, err)
response := &fetchRequestResponse{
pid: "abc",
bwb: []blocks.BlockWithVerifiedBlobs{
bwb: []blocks.BlockWithROBlobs{
{Block: blocks.ROBlock{ReadOnlySignedBeaconBlock: wsb}},
{Block: blocks.ROBlock{ReadOnlySignedBeaconBlock: wsbCopy}},
},
@@ -638,7 +638,7 @@ func TestBlocksQueue_onReadyToSendEvent(t *testing.T) {
queue.smm.machines[256].pid = pidDataParsed
rwsb, err := blocks.NewROBlock(wsb)
require.NoError(t, err)
queue.smm.machines[256].bwb = []blocks.BlockWithVerifiedBlobs{
queue.smm.machines[256].bwb = []blocks.BlockWithROBlobs{
{Block: rwsb},
}
@@ -672,7 +672,7 @@ func TestBlocksQueue_onReadyToSendEvent(t *testing.T) {
queue.smm.machines[320].pid = pidDataParsed
rwsb, err := blocks.NewROBlock(wsb)
require.NoError(t, err)
queue.smm.machines[320].bwb = []blocks.BlockWithVerifiedBlobs{
queue.smm.machines[320].bwb = []blocks.BlockWithROBlobs{
{Block: rwsb},
}
@@ -703,7 +703,7 @@ func TestBlocksQueue_onReadyToSendEvent(t *testing.T) {
queue.smm.machines[320].pid = pidDataParsed
rwsb, err := blocks.NewROBlock(wsb)
require.NoError(t, err)
queue.smm.machines[320].bwb = []blocks.BlockWithVerifiedBlobs{
queue.smm.machines[320].bwb = []blocks.BlockWithROBlobs{
{Block: rwsb},
}


@@ -46,7 +46,7 @@ type stateMachine struct {
start primitives.Slot
state stateID
pid peer.ID
bwb []blocks.BlockWithVerifiedBlobs
bwb []blocks.BlockWithROBlobs
updated time.Time
}
@@ -78,7 +78,7 @@ func (smm *stateMachineManager) addStateMachine(startSlot primitives.Slot) *stat
smm: smm,
start: startSlot,
state: stateNew,
bwb: []blocks.BlockWithVerifiedBlobs{},
bwb: []blocks.BlockWithROBlobs{},
updated: prysmTime.Now(),
}
smm.recalculateMachineAttribs()


@@ -10,8 +10,8 @@ import (
"github.com/paulbellamy/ratecounter"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/das"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/verification"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
@@ -25,10 +25,10 @@ const (
)
// blockReceiverFn defines a block receiving function.
type blockReceiverFn func(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) error
type blockReceiverFn func(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityStore) error
// batchBlockReceiverFn defines a batch block receiving function.
type batchBlockReceiverFn func(ctx context.Context, blks []blocks.ROBlock) error
type batchBlockReceiverFn func(ctx context.Context, blks []blocks.ROBlock, avs das.AvailabilityStore) error
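// For illustration only (hedged): a no-op receiver satisfying the new
// signature, analogous to the stubs used in the tests further below.
var _ blockReceiverFn = func(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock, _ [32]byte, _ das.AvailabilityStore) error {
return nil
}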
// Round Robin sync looks at the latest peer statuses and syncs up to the highest known epoch.
//
@@ -159,47 +159,46 @@ func (s *Service) processFetchedDataRegSync(
ctx context.Context, genesis time.Time, startSlot primitives.Slot, data *blocksQueueFetchedData) {
defer s.updatePeerScorerStats(data.pid, startSlot)
blockReceiver := s.cfg.Chain.ReceiveBlock
invalidBlocks := 0
blksWithoutParentCount := 0
bwb, err := validUnprocessed(ctx, data.bwb, s.cfg.Chain.HeadSlot(), s.isProcessedBlock)
if err != nil {
log.WithError(err).Debug("batch did not contain a valid sequence of unprocessed blocks")
return
}
if len(bwb) == 0 {
return
}
bv := newBlobBatchVerifier(s.newBlobVerifier)
avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv)
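// Hedged reading of the new flow: Persist only stages sidecars in the
// availability store; verification and on-disk persistence happen when the
// chain service runs its data-availability check during block import.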
batchFields := logrus.Fields{
"firstSlot": data.bwb[0].Block.Block().Slot(),
"firstUnprocessed": bwb[0].Block.Block().Slot(),
}
for _, b := range data.bwb {
if len(b.Blobs) > 0 {
verified, err := verification.BlobSidecarSliceNoop(b.Blobs)
if err != nil {
log.WithField("root", b.Block.Root()).WithError(err).Error("blobs failed verification")
continue
}
for i := range verified {
if err := s.cfg.BlobStorage.Save(verified[i]); err != nil {
log.WithError(err).Warn("Failed to save blob sidecar")
}
}
if err := avs.Persist(s.clock.CurrentSlot(), b.Blobs...); err != nil {
log.WithError(err).WithFields(batchFields).WithFields(syncFields(b.Block)).Warn("Batch failure due to BlobSidecar issues")
return
}
if err := s.processBlock(ctx, genesis, b, blockReceiver); err != nil {
if err := s.processBlock(ctx, genesis, b, s.cfg.Chain.ReceiveBlock, avs); err != nil {
switch {
case errors.Is(err, errBlockAlreadyProcessed):
log.WithError(err).Debug("Block is not processed")
invalidBlocks++
log.WithError(err).WithFields(batchFields).WithFields(syncFields(b.Block)).Warn("Skipping already processed block")
continue
case errors.Is(err, errParentDoesNotExist):
blksWithoutParentCount++
invalidBlocks++
log.WithFields(batchFields).WithField("missingParent", fmt.Sprintf("%#x", b.Block.Block().ParentRoot())).
WithFields(syncFields(b.Block)).Debug("Could not process batch blocks due to missing parent")
return
default:
log.WithError(err).Warn("Block is not processed")
log.WithError(err).WithFields(batchFields).WithFields(syncFields(b.Block)).Warn("Block processing failure")
return
}
continue
}
}
if blksWithoutParentCount > 0 {
log.WithFields(logrus.Fields{
"missingParent": fmt.Sprintf("%#x", data.bwb[0].Block.Block().ParentRoot()),
"firstSlot": data.bwb[0].Block.Block().Slot(),
"lastSlot": data.bwb[blksWithoutParentCount-1].Block.Block().Slot(),
}).Debug("Could not process batch blocks due to missing parent")
}
// Add more visible logging if all blocks cannot be processed.
if len(data.bwb) == invalidBlocks {
log.WithField("error", "Range had no valid blocks to process").Warn("Range is not processed")
}
func syncFields(b blocks.ROBlock) logrus.Fields {
return logrus.Fields{
"root": fmt.Sprintf("%#x", b.Root()),
"lastSlot": b.Block().Slot(),
}
}
@@ -260,8 +259,9 @@ func (s *Service) logBatchSyncStatus(genesis time.Time, firstBlk blocks.ROBlock,
func (s *Service) processBlock(
ctx context.Context,
genesis time.Time,
bwb blocks.BlockWithVerifiedBlobs,
bwb blocks.BlockWithROBlobs,
blockReceiver blockReceiverFn,
avs das.AvailabilityStore,
) error {
blk := bwb.Block
blkRoot := blk.Root()
@@ -273,12 +273,12 @@ func (s *Service) processBlock(
if !s.cfg.Chain.HasBlock(ctx, blk.Block().ParentRoot()) {
return fmt.Errorf("%w: (in processBlock, slot=%d) %#x", errParentDoesNotExist, blk.Block().Slot(), blk.Block().ParentRoot())
}
return blockReceiver(ctx, blk, blkRoot)
return blockReceiver(ctx, blk, blkRoot, avs)
}
type processedChecker func(context.Context, blocks.ROBlock) bool
func validUnprocessed(ctx context.Context, bwb []blocks.BlockWithVerifiedBlobs, headSlot primitives.Slot, isProc processedChecker) ([]blocks.BlockWithVerifiedBlobs, error) {
func validUnprocessed(ctx context.Context, bwb []blocks.BlockWithROBlobs, headSlot primitives.Slot, isProc processedChecker) ([]blocks.BlockWithROBlobs, error) {
// use a pointer to avoid confusing the zero-value with the case where the first element is processed.
var processed *int
for i := range bwb {
@@ -309,7 +309,7 @@ func validUnprocessed(ctx context.Context, bwb []blocks.BlockWithVerifiedBlobs,
}
func (s *Service) processBatchedBlocks(ctx context.Context, genesis time.Time,
bwb []blocks.BlockWithVerifiedBlobs, bFunc batchBlockReceiverFn) error {
bwb []blocks.BlockWithROBlobs, bFunc batchBlockReceiverFn) error {
if len(bwb) == 0 {
return errors.New("0 blocks provided into method")
}
@@ -328,32 +328,20 @@ func (s *Service) processBatchedBlocks(ctx context.Context, genesis time.Time,
return fmt.Errorf("%w: %#x (in processBatchedBlocks, slot=%d)",
errParentDoesNotExist, first.Block().ParentRoot(), first.Block().Slot())
}
bv := newBlobBatchVerifier(s.newBlobVerifier)
avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv)
s.logBatchSyncStatus(genesis, first, len(bwb))
blobCount := 0
for _, bb := range bwb {
if len(bb.Blobs) == 0 {
continue
}
verified, err := verification.BlobSidecarSliceNoop(bb.Blobs)
if err != nil {
return errors.Wrapf(err, "blobs for root %#x failed verification", bb.Block.Root())
if err := avs.Persist(s.clock.CurrentSlot(), bb.Blobs...); err != nil {
return err
}
for i := range verified {
if err := s.cfg.BlobStorage.Save(verified[i]); err != nil {
return errors.Wrapf(err, "failed to save blobs for block %#x", bb.Block.Root())
}
}
blobCount += len(bb.Blobs)
}
if blobCount > 0 {
log.WithFields(logrus.Fields{
"startSlot": bwb[0].Block.Block().Slot(),
"endSlot": bwb[len(bwb)-1].Block.Block().Slot(),
"count": blobCount,
}).Info("Processed blob sidecars")
}
return bFunc(ctx, blocks.BlockWithVerifiedBlobsSlice(bwb).ROBlocks())
return bFunc(ctx, blocks.BlockWithROBlobsSlice(bwb).ROBlocks(), avs)
}
// updatePeerScorerStats adjusts monitored metrics for a peer.


@@ -8,6 +8,7 @@ import (
"github.com/paulbellamy/ratecounter"
"github.com/prysmaticlabs/prysm/v4/async/abool"
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/das"
dbtest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
p2pt "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
@@ -371,11 +372,11 @@ func TestService_processBlock(t *testing.T) {
require.NoError(t, err)
rowsb, err := blocks.NewROBlock(wsb)
require.NoError(t, err)
err = s.processBlock(ctx, genesis, blocks.BlockWithVerifiedBlobs{Block: rowsb}, func(
ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) error {
assert.NoError(t, s.cfg.Chain.ReceiveBlock(ctx, block, blockRoot))
err = s.processBlock(ctx, genesis, blocks.BlockWithROBlobs{Block: rowsb}, func(
ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, _ das.AvailabilityStore) error {
assert.NoError(t, s.cfg.Chain.ReceiveBlock(ctx, block, blockRoot, nil))
return nil
})
}, nil)
assert.NoError(t, err)
// Duplicate processing should trigger error.
@@ -383,10 +384,10 @@ func TestService_processBlock(t *testing.T) {
require.NoError(t, err)
rowsb, err = blocks.NewROBlock(wsb)
require.NoError(t, err)
err = s.processBlock(ctx, genesis, blocks.BlockWithVerifiedBlobs{Block: rowsb}, func(
ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) error {
err = s.processBlock(ctx, genesis, blocks.BlockWithROBlobs{Block: rowsb}, func(
ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, _ das.AvailabilityStore) error {
return nil
})
}, nil)
assert.ErrorContains(t, errBlockAlreadyProcessed.Error(), err)
// Continue normal processing, should proceed w/o errors.
@@ -394,11 +395,11 @@ func TestService_processBlock(t *testing.T) {
require.NoError(t, err)
rowsb, err = blocks.NewROBlock(wsb)
require.NoError(t, err)
err = s.processBlock(ctx, genesis, blocks.BlockWithVerifiedBlobs{Block: rowsb}, func(
ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) error {
assert.NoError(t, s.cfg.Chain.ReceiveBlock(ctx, block, blockRoot))
err = s.processBlock(ctx, genesis, blocks.BlockWithROBlobs{Block: rowsb}, func(
ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, _ das.AvailabilityStore) error {
assert.NoError(t, s.cfg.Chain.ReceiveBlock(ctx, block, blockRoot, nil))
return nil
})
}, nil)
assert.NoError(t, err)
assert.Equal(t, primitives.Slot(2), s.cfg.Chain.HeadSlot(), "Unexpected head slot")
})
@@ -429,7 +430,7 @@ func TestService_processBlockBatch(t *testing.T) {
genesis := makeGenesisTime(32)
t.Run("process non-linear batch", func(t *testing.T) {
var batch []blocks.BlockWithVerifiedBlobs
var batch []blocks.BlockWithROBlobs
currBlockRoot := genesisBlkRoot
for i := primitives.Slot(1); i < 10; i++ {
parentRoot := currBlockRoot
@@ -443,11 +444,11 @@ func TestService_processBlockBatch(t *testing.T) {
require.NoError(t, err)
rowsb, err := blocks.NewROBlock(wsb)
require.NoError(t, err)
batch = append(batch, blocks.BlockWithVerifiedBlobs{Block: rowsb})
batch = append(batch, blocks.BlockWithROBlobs{Block: rowsb})
currBlockRoot = blk1Root
}
var batch2 []blocks.BlockWithVerifiedBlobs
var batch2 []blocks.BlockWithROBlobs
for i := primitives.Slot(10); i < 20; i++ {
parentRoot := currBlockRoot
blk1 := util.NewBeaconBlock()
@@ -460,19 +461,19 @@ func TestService_processBlockBatch(t *testing.T) {
require.NoError(t, err)
rowsb, err := blocks.NewROBlock(wsb)
require.NoError(t, err)
batch2 = append(batch2, blocks.BlockWithVerifiedBlobs{Block: rowsb})
batch2 = append(batch2, blocks.BlockWithROBlobs{Block: rowsb})
currBlockRoot = blk1Root
}
cbnormal := func(ctx context.Context, blks []blocks.ROBlock) error {
assert.NoError(t, s.cfg.Chain.ReceiveBlockBatch(ctx, blks))
cbnormal := func(ctx context.Context, blks []blocks.ROBlock, avs das.AvailabilityStore) error {
assert.NoError(t, s.cfg.Chain.ReceiveBlockBatch(ctx, blks, avs))
return nil
}
// Process block normally.
err = s.processBatchedBlocks(ctx, genesis, batch, cbnormal)
assert.NoError(t, err)
cbnil := func(ctx context.Context, blocks []blocks.ROBlock) error {
cbnil := func(ctx context.Context, blocks []blocks.ROBlock, _ das.AvailabilityStore) error {
return nil
}
@@ -480,7 +481,7 @@ func TestService_processBlockBatch(t *testing.T) {
err = s.processBatchedBlocks(ctx, genesis, batch, cbnil)
assert.ErrorContains(t, "block is already processed", err)
var badBatch2 []blocks.BlockWithVerifiedBlobs
var badBatch2 []blocks.BlockWithROBlobs
for i, b := range batch2 {
// create a non-linear batch
if i%3 == 0 && i != 0 {
@@ -675,7 +676,7 @@ func TestService_ValidUnprocessed(t *testing.T) {
require.NoError(t, err)
util.SaveBlock(t, context.Background(), beaconDB, genesisBlk)
var batch []blocks.BlockWithVerifiedBlobs
var batch []blocks.BlockWithROBlobs
currBlockRoot := genesisBlkRoot
for i := primitives.Slot(1); i < 10; i++ {
parentRoot := currBlockRoot
@@ -689,7 +690,7 @@ func TestService_ValidUnprocessed(t *testing.T) {
require.NoError(t, err)
rowsb, err := blocks.NewROBlock(wsb)
require.NoError(t, err)
batch = append(batch, blocks.BlockWithVerifiedBlobs{Block: rowsb})
batch = append(batch, blocks.BlockWithROBlobs{Block: rowsb})
currBlockRoot = blk1Root
}


@@ -17,6 +17,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/verification"
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/runtime"
@@ -47,19 +48,32 @@ type Config struct {
// Service service.
type Service struct {
cfg *Config
ctx context.Context
cancel context.CancelFunc
synced *abool.AtomicBool
chainStarted *abool.AtomicBool
counter *ratecounter.RateCounter
genesisChan chan time.Time
clock *startup.Clock
cfg *Config
ctx context.Context
cancel context.CancelFunc
synced *abool.AtomicBool
chainStarted *abool.AtomicBool
counter *ratecounter.RateCounter
genesisChan chan time.Time
clock *startup.Clock
verifierWaiter *verification.InitializerWaiter
newBlobVerifier verification.NewBlobVerifier
}
// Option is a functional option for the initial-sync Service.
type Option func(*Service)
// WithVerifierWaiter sets the verification.InitializerWaiter
// for the initial-sync Service.
func WithVerifierWaiter(viw *verification.InitializerWaiter) Option {
return func(s *Service) {
s.verifierWaiter = viw
}
}
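// Hedged wiring sketch: callers construct the service with the new option,
// for example (package alias assumed):
//
//	svc := initialsync.NewService(ctx, cfg, initialsync.WithVerifierWaiter(waiter))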
// NewService configures the initial sync service responsible for bringing the node up to the
// latest head of the blockchain.
func NewService(ctx context.Context, cfg *Config) *Service {
func NewService(ctx context.Context, cfg *Config, opts ...Option) *Service {
ctx, cancel := context.WithCancel(ctx)
s := &Service{
cfg: cfg,
@@ -71,7 +85,9 @@ func NewService(ctx context.Context, cfg *Config) *Service {
genesisChan: make(chan time.Time),
clock: startup.NewClock(time.Unix(0, 0), [32]byte{}), // default clock to prevent panic
}
for _, o := range opts {
o(s)
}
return s
}
@@ -86,6 +102,13 @@ func (s *Service) Start() {
s.clock = clock
log.Info("Received state initialized event")
v, err := s.verifierWaiter.WaitForInitializer(s.ctx)
if err != nil {
log.WithError(err).Error("Could not get verification initializer")
return
}
s.newBlobVerifier = newBlobVerifierFromInitializer(v)
gt := clock.GenesisTime()
if gt.IsZero() {
log.Debug("Exiting Initial Sync Service")


@@ -13,6 +13,7 @@ import (
dbtest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
p2pt "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/verification"
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
@@ -158,6 +159,7 @@ func TestService_InitStartStop(t *testing.T) {
StateNotifier: &mock.MockStateNotifier{},
InitialSyncComplete: make(chan struct{}),
})
s.verifierWaiter = verification.NewInitializerWaiter(gs, nil, nil)
time.Sleep(500 * time.Millisecond)
assert.NotNil(t, s)
if tt.setGenesis != nil {
@@ -200,6 +202,7 @@ func TestService_waitForStateInitialization(t *testing.T) {
counter: ratecounter.NewRateCounter(counterSeconds * time.Second),
genesisChan: make(chan time.Time),
}
s.verifierWaiter = verification.NewInitializerWaiter(cs, nil, nil)
return s, cs
}

Some files were not shown because too many files have changed in this diff.