Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-11 06:18:05 -05:00)

Compare commits
11 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 863eee7b40 | |
| | 6d89373583 | |
| | 9a421a2feb | |
| | 4e41d5c610 | |
| | 0b6bea43a8 | |
| | f89afb0fbd | |
| | 3cd2973c92 | |
| | d3e5710a63 | |
| | f40b4f16c2 | |
| | 7fd4f746d6 | |
| | 2362d9f3c2 | |
.github/CODEOWNERS (vendored): 2 changes
@@ -5,4 +5,4 @@
*.bzl @prestonvanloon

# Anyone on prylabs team can approve dependency updates.
deps.bzl @prysmaticlabs/core-team
deps.bzl @prysmaticlabs/core-team
.github/workflows/horusec.yaml (vendored): 2 changes
@@ -19,4 +19,4 @@ jobs:
      - name: Running Security Scan
        run: |
          curl -fsSL https://raw.githubusercontent.com/ZupIT/horusec/main/deployments/scripts/install.sh | bash -s latest
          horusec start -t="10000" -p="./" -e="true" -i="**/crypto/bls/herumi/**, **/**/*_test.go, **/third_party/afl/**, **/crypto/keystore/key.go"
          horusec start -t="10000" -p="./" -e="true" -i="**/crypto/bls/herumi/**, **/**/*_test.go, **/third_party/afl/**, **/crypto/keystore/key.go"
@@ -1,34 +0,0 @@
repos:
  # First run code formatters
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.1.0
    hooks:
      - id: end-of-file-fixer # ensures that a file is either empty, or ends with one newline
        exclude_types: ["proto"]
      - id: mixed-line-ending # ensures that a file doesn't contain a mix of LF and CRLF
      - id: no-commit-to-branch # Protect specific branches (default: main/master) from direct checkins

  - repo: local
    hooks:
      - id: gci
        name: goimports
        entry: bash -c 'command -v gci >/dev/null 2>&1 || go install github.com/daixiang0/gci@latest; gci write --skip-generated -s standard -s default "$@"; goimports -w "$@"' --
        language: golang
        files: \.go$

  - repo: https://github.com/dnephin/pre-commit-golang
    rev: v0.4.0
    hooks:
      - id: go-fmt
        args: [ -w, -s ] # simplify code and write result to (source) file instead of stdout
      - id: go-mod-tidy
        files: go.mod
      - id: golangci-lint

  # Fix bazel build files
  - repo: local
    hooks:
      - id: gazelle
        name: bazel-fix
        entry: .pre-commit/fixbazel.sh
        language: system
@@ -1,3 +0,0 @@
#!/usr/bin/env bash

bazel run //:gazelle -- fix
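
The gazelle invocation that this deleted helper script wrapped can still be run by hand when BUILD files drift out of date; a minimal sketch, assuming a working Bazel setup in the repository root:

```sh
# Regenerate/fix Bazel BUILD files for the Go packages in the repo.
bazel run //:gazelle -- fix
```
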
@@ -175,23 +175,6 @@ $ git push myrepo feature-in-progress-branch -f

**22. Finally, again leave a comment to the Core Contributors on the pull request to let them know that the pull request has been updated.**

### Git hooks and linters

Prysm is configured with [pre-commit](https://pre-commit.com) **githooks** that ensure pull requests adhere to a minimum standard and are consistent. Running the githooks locally while developing is highly recommended for faster feedback.

To install githooks:
- Follow the installation instructions [here](https://pre-commit.com/#installation) to install the `pre-commit` tool.
- Once installed, run `pre-commit install` in the project's root directory. This will set up the hooks.
- Note that you can skip the hooks by committing with `-n`: `git commit -n -m "look mom no githooks"`
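
As a rough end-to-end sketch of the install steps above (assuming a pip-based installation of `pre-commit`; any of the install methods from the pre-commit documentation works just as well):

```sh
# Install the pre-commit tool itself (one of several supported methods).
pip install pre-commit

# From the root of the Prysm checkout, register the git hooks.
pre-commit install

# Optionally, run every hook against the whole tree once instead of
# waiting for the next commit to trigger them.
pre-commit run --all-files
```
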
To update githooks:
```sh
pre-commit clean
```
The **linter** used is [golangci-lint](https://golangci-lint.run/). It runs as part of the githooks and is configured in [.golangci.yml](.golangci.yml).
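
To run the same linter by hand, outside the githooks, a minimal invocation looks like the following (a sketch assuming `golangci-lint` is already installed and on your PATH; it picks up the repository's `.golangci.yml` automatically when run from the project root):

```sh
# Lint every package in the module using the repo's golangci-lint configuration.
golangci-lint run ./...
```
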
## Maintaining CHANGELOG.md

This project follows the changelog guidelines from [keepachangelog.com](https://keepachangelog.com/en/1.1.0/). In order to minimize conflicts and workflow headaches, we chose to implement a changelog management
@@ -672,3 +672,4 @@ may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<http://www.gnu.org/philosophy/why-not-lgpl.html>.
@@ -50,3 +50,4 @@ We reserve the right to revise these Terms, and your rights and obligations are
These Terms constitute the entire agreement between you and Offchain Labs regarding use of Prysm and will supersede all prior agreements whether, written or oral. No usage of trade or other regular practice or method of dealing between the parties will be used to modify, interpret, supplement, or alter the terms of these Terms.

If any portion of these Terms is held invalid or unenforceable, such invalidity or enforceability will not affect the other provisions of these Terms, which will remain in full force and effect, and the invalid or unenforceable portion will be given effect to the greatest extent possible. The failure of a party to require performance of any provision will not affect that party’s right to require performance at any time thereafter, nor will a waiver of any breach or default of these Terms or any provision of these Terms constitute a waiver of any subsequent breach or default or a waiver of the provision itself.
@@ -1,9 +1,10 @@
package httprest

import (
    "net/http"
    "time"

    "net/http"

    "github.com/prysmaticlabs/prysm/v5/api/server/middleware"
)
@@ -128,6 +128,7 @@ go_test(
        "receive_block_test.go",
        "service_norace_test.go",
        "service_test.go",
        "setup_forkchoice_test.go",
        "setup_test.go",
        "weak_subjectivity_checks_test.go",
    ],
@@ -582,6 +582,7 @@ func TestService_IsOptimisticForRoot_StateSummaryRecovered(t *testing.T) {
    br, err := b.Block.HashTreeRoot()
    require.NoError(t, err)
    util.SaveBlock(t, context.Background(), beaconDB, b)
    require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, [32]byte{}))
    _, err = c.IsOptimisticForRoot(ctx, br)
    assert.NoError(t, err)
    summ, err := beaconDB.StateSummary(ctx, br)
@@ -72,7 +72,6 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
|
||||
if arg.attributes == nil {
|
||||
arg.attributes = payloadattribute.EmptyWithVersion(headBlk.Version())
|
||||
}
|
||||
go firePayloadAttributesEvent(ctx, s.cfg.StateNotifier.StateFeed(), arg)
|
||||
payloadID, lastValidHash, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, arg.attributes)
|
||||
if err != nil {
|
||||
switch {
|
||||
@@ -159,6 +158,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
|
||||
log.WithFields(logrus.Fields{
|
||||
"blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(arg.headRoot[:])),
|
||||
"headSlot": headBlk.Slot(),
|
||||
"nextSlot": nextSlot,
|
||||
"payloadID": fmt.Sprintf("%#x", bytesutil.Trunc(payloadID[:])),
|
||||
}).Info("Forkchoice updated with payload attributes for proposal")
|
||||
s.cfg.PayloadIDCache.Set(nextSlot, arg.headRoot, pId)
|
||||
@@ -166,40 +166,19 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
|
||||
log.WithFields(logrus.Fields{
|
||||
"blockHash": fmt.Sprintf("%#x", headPayload.BlockHash()),
|
||||
"slot": headBlk.Slot(),
|
||||
"nextSlot": nextSlot,
|
||||
}).Error("Received nil payload ID on VALID engine response")
|
||||
}
|
||||
return payloadID, nil
|
||||
}
|
||||
|
||||
func firePayloadAttributesEvent(ctx context.Context, f event.SubscriberSender, cfg *fcuConfig) {
|
||||
pidx, err := helpers.BeaconProposerIndex(ctx, cfg.headState)
|
||||
if err != nil {
|
||||
log.WithError(err).
|
||||
WithField("head_root", cfg.headRoot[:]).
|
||||
Error("Could not get proposer index for PayloadAttributes event")
|
||||
return
|
||||
}
|
||||
evd := payloadattribute.EventData{
|
||||
ProposerIndex: pidx,
|
||||
ProposalSlot: cfg.headState.Slot(),
|
||||
ParentBlockRoot: cfg.headRoot[:],
|
||||
Attributer: cfg.attributes,
|
||||
HeadRoot: cfg.headRoot,
|
||||
HeadState: cfg.headState,
|
||||
HeadBlock: cfg.headBlock,
|
||||
}
|
||||
if cfg.headBlock != nil && !cfg.headBlock.IsNil() {
|
||||
headPayload, err := cfg.headBlock.Block().Body().Execution()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get execution payload for head block")
|
||||
return
|
||||
}
|
||||
evd.ParentBlockHash = headPayload.BlockHash()
|
||||
evd.ParentBlockNumber = headPayload.BlockNumber()
|
||||
}
|
||||
func firePayloadAttributesEvent(_ context.Context, f event.SubscriberSender, nextSlot primitives.Slot) {
|
||||
// the fcu args have differing amounts of completeness based on the code path,
|
||||
// and there is work we only want to do if a client is actually listening to the events beacon api endpoint.
|
||||
// temporary solution: just fire a blank event and fill in the details in the api handler.
|
||||
f.Send(&feed.Event{
|
||||
Type: statefeed.PayloadAttributes,
|
||||
Data: evd,
|
||||
Data: payloadattribute.EventData{ProposalSlot: nextSlot},
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -102,6 +102,8 @@ func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuCo
|
||||
log.WithError(err).Error("could not save head")
|
||||
}
|
||||
|
||||
go firePayloadAttributesEvent(ctx, s.cfg.StateNotifier.StateFeed(), s.CurrentSlot()+1)
|
||||
|
||||
// Only need to prune attestations from pool if the head has changed.
|
||||
if err := s.pruneAttsFromPool(s.ctx, args.headState, args.headBlock); err != nil {
|
||||
log.WithError(err).Error("could not prune attestations from pool")
|
||||
|
||||
@@ -4164,4 +4164,4 @@
|
||||
"0xa92039a08b5502d5b211a7744099c9f93fa8c90cedcb1d05e92f01886219dd464eb5fb0337496ad96ed09c987da4e5f019035c5b01cc09b2a18b8a8dd419bc5895388a07e26958f6bd26751929c25f89b8eb4a299d822e2d26fec9ef350e0d3c",
|
||||
"0x92dcc5a1c8c3e1b28b1524e3dd6dbecd63017c9201da9dbe077f1b82adc08c50169f56fc7b5a3b28ec6b89254de3e2fd12838a761053437883c3e01ba616670cea843754548ef84bcc397de2369adcca2ab54cd73c55dc68d87aec3fc2fe4f10"
|
||||
]
|
||||
}
|
||||
}
|
||||
@@ -3,6 +3,7 @@ package blockchain
|
||||
import (
|
||||
"testing"
|
||||
|
||||
mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
|
||||
testDB "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
|
||||
@@ -18,6 +19,7 @@ func testServiceOptsWithDB(t *testing.T) []Option {
|
||||
WithStateGen(stategen.New(beaconDB, fcs)),
|
||||
WithForkChoiceStore(fcs),
|
||||
WithClockSynchronizer(cs),
|
||||
WithStateNotifier(&mock.MockStateNotifier{RecordEvents: true}),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -503,6 +503,10 @@ func (s *Service) pruneCoveredElectraAttsFromPool(ctx context.Context, headState
|
||||
if err = s.cfg.AttestationCache.DeleteCovered(a); err != nil {
|
||||
return errors.Wrap(err, "could not delete covered attestation")
|
||||
}
|
||||
} else if !a.IsAggregated() {
|
||||
if err = s.cfg.AttPool.DeleteUnaggregatedAttestation(a); err != nil {
|
||||
return errors.Wrap(err, "could not delete unaggregated attestation")
|
||||
}
|
||||
} else if err = s.cfg.AttPool.DeleteAggregatedAttestation(a); err != nil {
|
||||
return errors.Wrap(err, "could not delete aggregated attestation")
|
||||
}
|
||||
@@ -723,13 +727,9 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
|
||||
attribute := s.getPayloadAttribute(ctx, headState, s.CurrentSlot()+1, headRoot[:])
|
||||
// return early if we are not proposing next slot
|
||||
if attribute.IsEmpty() {
|
||||
fcuArgs := &fcuConfig{
|
||||
headState: headState,
|
||||
headRoot: headRoot,
|
||||
headBlock: nil,
|
||||
attributes: attribute,
|
||||
}
|
||||
go firePayloadAttributesEvent(ctx, s.cfg.StateNotifier.StateFeed(), fcuArgs)
|
||||
// notifyForkchoiceUpdate fires the payload attribute event. But in this case, we won't
|
||||
// call notifyForkchoiceUpdate, so the event is fired here.
|
||||
go firePayloadAttributesEvent(ctx, s.cfg.StateNotifier.StateFeed(), s.CurrentSlot()+1)
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -7,12 +7,13 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
lightclient "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/light-client"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
lightclient "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/light-client"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
|
||||
|
||||
@@ -50,6 +50,7 @@ import (
|
||||
|
||||
func Test_pruneAttsFromPool_Electra(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
logHook := logTest.NewGlobal()
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
@@ -71,7 +72,7 @@ func Test_pruneAttsFromPool_Electra(t *testing.T) {
|
||||
cb := primitives.NewAttestationCommitteeBits()
|
||||
cb.SetBitAt(0, true)
|
||||
att1 := ðpb.AttestationElectra{
|
||||
AggregationBits: bitfield.Bitlist{0b11110111, 0b00000001},
|
||||
AggregationBits: bitfield.Bitlist{0b10000000, 0b00000001},
|
||||
Data: data,
|
||||
Signature: make([]byte, 96),
|
||||
CommitteeBits: cb,
|
||||
@@ -95,16 +96,15 @@ func Test_pruneAttsFromPool_Electra(t *testing.T) {
|
||||
CommitteeBits: cb,
|
||||
}
|
||||
|
||||
require.NoError(t, s.cfg.AttPool.SaveAggregatedAttestation(att1))
|
||||
require.NoError(t, s.cfg.AttPool.SaveUnaggregatedAttestation(att1))
|
||||
require.NoError(t, s.cfg.AttPool.SaveAggregatedAttestation(att2))
|
||||
require.NoError(t, s.cfg.AttPool.SaveAggregatedAttestation(att3))
|
||||
require.Equal(t, 3, len(s.cfg.AttPool.AggregatedAttestations()))
|
||||
|
||||
cb = primitives.NewAttestationCommitteeBits()
|
||||
cb.SetBitAt(0, true)
|
||||
cb.SetBitAt(1, true)
|
||||
onChainAtt := ðpb.AttestationElectra{
|
||||
AggregationBits: bitfield.Bitlist{0b11110111, 0b11110111, 0b00000001},
|
||||
AggregationBits: bitfield.Bitlist{0b10000000, 0b11110111, 0b00000001},
|
||||
Data: data,
|
||||
Signature: make([]byte, 96),
|
||||
CommitteeBits: cb,
|
||||
@@ -127,7 +127,12 @@ func Test_pruneAttsFromPool_Electra(t *testing.T) {
|
||||
require.Equal(t, 4, len(committees))
|
||||
|
||||
require.NoError(t, s.pruneAttsFromPool(ctx, st, rob))
|
||||
attsInPool := s.cfg.AttPool.AggregatedAttestations()
|
||||
require.LogsDoNotContain(t, logHook, "Could not prune attestations")
|
||||
|
||||
attsInPool, err := s.cfg.AttPool.UnaggregatedAttestations()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, len(attsInPool))
|
||||
attsInPool = s.cfg.AttPool.AggregatedAttestations()
|
||||
require.Equal(t, 1, len(attsInPool))
|
||||
assert.DeepEqual(t, att3, attsInPool[0])
|
||||
}
|
||||
@@ -908,6 +913,8 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestRemoveBlockAttestationsInPool(t *testing.T) {
|
||||
logHook := logTest.NewGlobal()
|
||||
|
||||
genesis, keys := util.DeterministicGenesisState(t, 64)
|
||||
b, err := util.GenerateFullBlock(genesis, keys, util.DefaultBlockGenConfig(), 1)
|
||||
assert.NoError(t, err)
|
||||
@@ -928,6 +935,7 @@ func TestRemoveBlockAttestationsInPool(t *testing.T) {
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.pruneAttsFromPool(context.Background(), nil /* state not needed pre-Electra */, wsb))
|
||||
require.LogsDoNotContain(t, logHook, "Could not prune attestations")
|
||||
require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
|
||||
}
|
||||
|
||||
@@ -1983,6 +1991,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, genesisState, genesisRoot), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, genesisRoot), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, genesisRoot), "Could not save genesis state")
|
||||
|
||||
for i := 1; i < 6; i++ {
|
||||
driftGenesisTime(service, int64(i), 0)
|
||||
@@ -2117,6 +2126,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, genesisState, jroot))
|
||||
service.cfg.ForkChoiceStore.SetBalancesByRooter(service.cfg.StateGen.ActiveNonSlashedBalancesByRoot)
|
||||
require.NoError(t, service.StartFromSavedState(genesisState))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
|
||||
|
||||
// Forkchoice has the genesisRoot loaded at startup
|
||||
require.Equal(t, genesisRoot, service.ensureRootNotZeros(service.cfg.ForkChoiceStore.CachedHeadRoot()))
|
||||
@@ -2126,7 +2136,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.Equal(t, genesisRoot, bytesutil.ToBytes32(headRoot))
|
||||
optimistic, err := service.IsOptimistic(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, optimistic)
|
||||
require.Equal(t, false, optimistic)
|
||||
|
||||
// Check that the node's justified checkpoint does not agree with the
|
||||
// last valid state's justified checkpoint
|
||||
|
||||
@@ -39,6 +39,7 @@ import (
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
prysmTime "github.com/prysmaticlabs/prysm/v5/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Service represents a service that handles the internal
|
||||
@@ -316,32 +317,36 @@ func (s *Service) originRootFromSavedState(ctx context.Context) ([32]byte, error
|
||||
return genesisBlkRoot, nil
|
||||
}
|
||||
|
||||
// initializeHeadFromDB uses the finalized checkpoint and head block found in the database to set the current head.
|
||||
// initializeHeadFromDB uses the finalized checkpoint and head block root from forkchoice to set the current head.
|
||||
// Note that this may block until stategen replays blocks between the finalized and head blocks
|
||||
// if the head sync flag was specified and the gap between the finalized and head blocks is at least 128 epochs long.
|
||||
func (s *Service) initializeHeadFromDB(ctx context.Context, finalizedState state.BeaconState) error {
|
||||
func (s *Service) initializeHead(ctx context.Context, st state.BeaconState) error {
|
||||
cp := s.FinalizedCheckpt()
|
||||
fRoot := [32]byte(cp.Root)
|
||||
finalizedRoot := s.ensureRootNotZeros(fRoot)
|
||||
|
||||
if finalizedState == nil || finalizedState.IsNil() {
|
||||
fRoot := s.ensureRootNotZeros([32]byte(cp.Root))
|
||||
if st == nil || st.IsNil() {
|
||||
return errors.New("finalized state can't be nil")
|
||||
}
|
||||
|
||||
finalizedBlock, err := s.getBlock(ctx, finalizedRoot)
|
||||
s.cfg.ForkChoiceStore.RLock()
|
||||
root := s.cfg.ForkChoiceStore.HighestReceivedBlockRoot()
|
||||
s.cfg.ForkChoiceStore.RUnlock()
|
||||
blk, err := s.cfg.BeaconDB.Block(ctx, root)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized block")
|
||||
return errors.Wrap(err, "could not get head block")
|
||||
}
|
||||
if err := s.setHead(&head{
|
||||
finalizedRoot,
|
||||
finalizedBlock,
|
||||
finalizedState,
|
||||
finalizedBlock.Block().Slot(),
|
||||
false,
|
||||
}); err != nil {
|
||||
if root != fRoot {
|
||||
st, err = s.cfg.StateGen.StateByRoot(ctx, root)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get head state")
|
||||
}
|
||||
}
|
||||
if err := s.setHead(&head{root, blk, st, blk.Block().Slot(), false}); err != nil {
|
||||
return errors.Wrap(err, "could not set head")
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"slot": blk.Block().Slot(),
|
||||
}).Info("Initialized head block from DB")
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -2,28 +2,119 @@ package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/features"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
|
||||
func (s *Service) setupForkchoice(st state.BeaconState) error {
|
||||
if err := s.setupForkchoiceCheckpoints(); err != nil {
|
||||
return errors.Wrap(err, "could not set up forkchoice checkpoints")
|
||||
}
|
||||
if err := s.setupForkchoiceRoot(st); err != nil {
|
||||
if err := s.setupForkchoiceTree(st); err != nil {
|
||||
return errors.Wrap(err, "could not set up forkchoice root")
|
||||
}
|
||||
if err := s.initializeHeadFromDB(s.ctx, st); err != nil {
|
||||
if err := s.initializeHead(s.ctx, st); err != nil {
|
||||
return errors.Wrap(err, "could not initialize head from db")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) startupHeadRoot() [32]byte {
|
||||
headStr := features.Get().ForceHead
|
||||
cp := s.FinalizedCheckpt()
|
||||
fRoot := s.ensureRootNotZeros([32]byte(cp.Root))
|
||||
if headStr == "" {
|
||||
return fRoot
|
||||
}
|
||||
if headStr == "head" {
|
||||
root, err := s.cfg.BeaconDB.HeadBlockRoot()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not get head block root, starting with finalized block as head")
|
||||
return fRoot
|
||||
}
|
||||
log.Infof("Using Head root of %#x", root)
|
||||
return root
|
||||
}
|
||||
root, err := bytesutil.DecodeHexWithLength(headStr, 32)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not parse head root, starting with finalized block as head")
|
||||
return fRoot
|
||||
}
|
||||
return [32]byte(root)
|
||||
}
|
||||
|
||||
func (s *Service) setupForkchoiceTree(st state.BeaconState) error {
|
||||
headRoot := s.startupHeadRoot()
|
||||
cp := s.FinalizedCheckpt()
|
||||
fRoot := s.ensureRootNotZeros([32]byte(cp.Root))
|
||||
if err := s.setupForkchoiceRoot(st); err != nil {
|
||||
return errors.Wrap(err, "could not set up forkchoice root")
|
||||
}
|
||||
if headRoot == fRoot {
|
||||
return nil
|
||||
}
|
||||
blk, err := s.cfg.BeaconDB.Block(s.ctx, headRoot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not get head block, starting with finalized block as head")
|
||||
return nil
|
||||
}
|
||||
if slots.ToEpoch(blk.Block().Slot()) < cp.Epoch {
|
||||
log.WithField("headRoot", fmt.Sprintf("%#x", headRoot)).Error("head block is older than finalized block, starting with finalized block as head")
|
||||
return nil
|
||||
}
|
||||
chain, err := s.buildForkchoiceChain(s.ctx, blk)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not build forkchoice chain, starting with finalized block as head")
|
||||
return nil
|
||||
}
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
defer s.cfg.ForkChoiceStore.Unlock()
|
||||
return s.cfg.ForkChoiceStore.InsertChain(s.ctx, chain)
|
||||
}
|
||||
|
||||
func (s *Service) buildForkchoiceChain(ctx context.Context, head interfaces.ReadOnlySignedBeaconBlock) ([]*forkchoicetypes.BlockAndCheckpoints, error) {
|
||||
chain := []*forkchoicetypes.BlockAndCheckpoints{}
|
||||
cp := s.FinalizedCheckpt()
|
||||
fRoot := s.ensureRootNotZeros([32]byte(cp.Root))
|
||||
jp := s.CurrentJustifiedCheckpt()
|
||||
root, err := head.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get head block root")
|
||||
}
|
||||
for {
|
||||
roblock, err := blocks.NewROBlockWithRoot(head, root)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// This chain sets the justified checkpoint for every block, including some that are older than jp.
|
||||
// This should be however safe for forkchoice at startup. An alternative would be to hook during the
|
||||
// block processing pipeline when setting the head state, to compute the right states for the justified
|
||||
// checkpoint.
|
||||
chain = append(chain, &forkchoicetypes.BlockAndCheckpoints{Block: roblock, JustifiedCheckpoint: jp, FinalizedCheckpoint: cp})
|
||||
root = head.Block().ParentRoot()
|
||||
if root == fRoot {
|
||||
break
|
||||
}
|
||||
head, err = s.cfg.BeaconDB.Block(s.ctx, root)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get block")
|
||||
}
|
||||
if slots.ToEpoch(head.Block().Slot()) < cp.Epoch {
|
||||
return nil, errors.New("head block is not a descendant of the finalized checkpoint")
|
||||
}
|
||||
}
|
||||
return chain, nil
|
||||
}
|
||||
|
||||
func (s *Service) setupForkchoiceRoot(st state.BeaconState) error {
|
||||
cp := s.FinalizedCheckpt()
|
||||
fRoot := s.ensureRootNotZeros([32]byte(cp.Root))
|
||||
|
||||
beacon-chain/blockchain/setup_forkchoice_test.go (new file): 128 lines
@@ -0,0 +1,128 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/features"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
func Test_startupHeadRoot(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx := tr.ctx
|
||||
hook := logTest.NewGlobal()
|
||||
cp := service.FinalizedCheckpt()
|
||||
require.DeepEqual(t, cp.Root, params.BeaconConfig().ZeroHash[:])
|
||||
gr := [32]byte{'r', 'o', 'o', 't'}
|
||||
service.originBlockRoot = gr
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, gr))
|
||||
t.Run("start from finalized", func(t *testing.T) {
|
||||
require.Equal(t, service.startupHeadRoot(), gr)
|
||||
})
|
||||
t.Run("head requested, error path", func(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
ForceHead: "head",
|
||||
})
|
||||
defer resetCfg()
|
||||
require.Equal(t, service.startupHeadRoot(), gr)
|
||||
require.LogsContain(t, hook, "could not get head block root, starting with finalized block as head")
|
||||
})
|
||||
|
||||
st, _ := util.DeterministicGenesisState(t, 64)
|
||||
hr := [32]byte{'h', 'e', 'a', 'd'}
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, hr), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, hr), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, hr))
|
||||
|
||||
t.Run("start from head", func(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
ForceHead: "head",
|
||||
})
|
||||
defer resetCfg()
|
||||
require.Equal(t, service.startupHeadRoot(), hr)
|
||||
})
|
||||
}
|
||||
|
||||
func Test_setupForkchoiceTree_Finalized(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx := tr.ctx
|
||||
|
||||
st, _ := util.DeterministicGenesisState(t, 64)
|
||||
stateRoot, err := st.HashTreeRoot(ctx)
|
||||
require.NoError(t, err, "Could not hash genesis state")
|
||||
|
||||
require.NoError(t, service.saveGenesisData(ctx, st))
|
||||
|
||||
genesis := blocks.NewGenesisBlock(stateRoot[:])
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
|
||||
parentRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, ðpb.Checkpoint{Root: parentRoot[:]}))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveFinalizedCheckpoint(ctx, ðpb.Checkpoint{Root: parentRoot[:]}))
|
||||
require.NoError(t, service.setupForkchoiceTree(st))
|
||||
require.Equal(t, 1, service.cfg.ForkChoiceStore.NodeCount())
|
||||
}
|
||||
|
||||
func Test_setupForkchoiceTree_Head(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx := tr.ctx
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
ForceHead: "head",
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
genesisState, keys := util.DeterministicGenesisState(t, 64)
|
||||
stateRoot, err := genesisState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err, "Could not hash genesis state")
|
||||
genesis := blocks.NewGenesisBlock(stateRoot[:])
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
genesisRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
|
||||
require.NoError(t, service.saveGenesisData(ctx, genesisState))
|
||||
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, genesisState, genesisRoot), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, genesisRoot), "Could not save genesis state")
|
||||
|
||||
st, err := service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(1))
|
||||
require.NoError(t, err)
|
||||
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
|
||||
b, err = util.GenerateFullBlock(postState, keys, util.DefaultBlockGenConfig(), primitives.Slot(2))
|
||||
require.NoError(t, err)
|
||||
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err = b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, preState))
|
||||
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, root))
|
||||
cp := service.FinalizedCheckpt()
|
||||
fRoot := service.ensureRootNotZeros([32]byte(cp.Root))
|
||||
require.NotEqual(t, fRoot, root)
|
||||
require.Equal(t, root, service.startupHeadRoot())
|
||||
require.NoError(t, service.setupForkchoiceTree(st))
|
||||
require.Equal(t, 2, service.cfg.ForkChoiceStore.NodeCount())
|
||||
}
|
||||
@@ -5,14 +5,15 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
light_client "github.com/prysmaticlabs/prysm/v5/consensus-types/light-client"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
lightClient "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/light-client"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
consensustypes "github.com/prysmaticlabs/prysm/v5/consensus-types"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
light_client "github.com/prysmaticlabs/prysm/v5/consensus-types/light-client"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/ssz"
|
||||
v11 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
pb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
|
||||
@@ -110,6 +110,7 @@ type HeadAccessDatabase interface {

    // Block related methods.
    HeadBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, error)
    HeadBlockRoot() ([32]byte, error)
    SaveHeadBlockRoot(ctx context.Context, blockRoot [32]byte) error

    // Genesis operations.
@@ -30,22 +30,32 @@ var errInvalidSlotRange = errors.New("invalid end slot and start slot provided")
|
||||
func (s *Store) Block(ctx context.Context, blockRoot [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.Block")
|
||||
defer span.End()
|
||||
// Return block from cache if it exists.
|
||||
blk, err := s.getBlock(ctx, blockRoot, nil)
|
||||
if errors.Is(err, ErrNotFound) {
|
||||
return nil, nil
|
||||
}
|
||||
return blk, err
|
||||
}
|
||||
|
||||
func (s *Store) getBlock(ctx context.Context, blockRoot [32]byte, tx *bolt.Tx) (interfaces.ReadOnlySignedBeaconBlock, error) {
|
||||
if v, ok := s.blockCache.Get(string(blockRoot[:])); v != nil && ok {
|
||||
return v.(interfaces.ReadOnlySignedBeaconBlock), nil
|
||||
}
|
||||
var blk interfaces.ReadOnlySignedBeaconBlock
|
||||
err := s.db.View(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket(blocksBucket)
|
||||
enc := bkt.Get(blockRoot[:])
|
||||
if enc == nil {
|
||||
return nil
|
||||
}
|
||||
// This method allows the caller to pass in its tx if one is already open.
// Or, if a nil value is used, a transaction will be managed internally.
|
||||
if tx == nil {
|
||||
var err error
|
||||
blk, err = unmarshalBlock(ctx, enc)
|
||||
return err
|
||||
})
|
||||
return blk, err
|
||||
tx, err = s.db.Begin(false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err := tx.Rollback(); err != nil {
|
||||
log.WithError(err).Error("could not rollback read-only getBlock transaction")
|
||||
}
|
||||
}()
|
||||
}
|
||||
return unmarshalBlock(ctx, tx.Bucket(blocksBucket).Get(blockRoot[:]))
|
||||
}
|
||||
|
||||
// OriginCheckpointBlockRoot returns the value written to the db in SaveOriginCheckpointBlockRoot
|
||||
@@ -70,6 +80,21 @@ func (s *Store) OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
|
||||
return root, err
|
||||
}
|
||||
|
||||
// HeadBlockRoot returns the latest canonical block root in the Ethereum Beacon Chain.
|
||||
func (s *Store) HeadBlockRoot() ([32]byte, error) {
|
||||
var root [32]byte
|
||||
err := s.db.View(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket(blocksBucket)
|
||||
headRoot := bkt.Get(headBlockRootKey)
|
||||
if len(headRoot) == 0 {
|
||||
return errors.New("no head block root found")
|
||||
}
|
||||
copy(root[:], headRoot)
|
||||
return nil
|
||||
})
|
||||
return root, err
|
||||
}
|
||||
|
||||
// HeadBlock returns the latest canonical block in the Ethereum Beacon Chain.
|
||||
func (s *Store) HeadBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.HeadBlock")
|
||||
@@ -227,6 +252,21 @@ func (s *Store) DeleteBlock(ctx context.Context, root [32]byte) error {
|
||||
return ErrDeleteJustifiedAndFinalized
|
||||
}
|
||||
|
||||
// Look up the block to find its slot; needed to remove the slot index entry.
|
||||
blk, err := s.getBlock(ctx, root, tx)
|
||||
if err != nil {
|
||||
// getBlock can return ErrNotFound, in which case we won't even try to delete it.
|
||||
if errors.Is(err, ErrNotFound) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
if err := s.deleteSlotIndexEntry(tx, blk.Block().Slot(), root); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.deleteMatchingParentIndex(tx, blk.Block().ParentRoot(), root); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.deleteBlock(tx, root[:]); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -899,6 +939,9 @@ func createBlockIndicesFromFilters(ctx context.Context, f *filters.QueryFilter)
|
||||
|
||||
// unmarshal block from marshaled proto beacon block bytes to versioned beacon block struct type.
|
||||
func unmarshalBlock(_ context.Context, enc []byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
|
||||
if len(enc) == 0 {
|
||||
return nil, errors.Wrap(ErrNotFound, "empty block bytes in db")
|
||||
}
|
||||
var err error
|
||||
enc, err = snappy.Decode(nil, enc)
|
||||
if err != nil {
|
||||
@@ -1050,6 +1093,47 @@ func (s *Store) deleteBlock(tx *bolt.Tx, root []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Store) deleteMatchingParentIndex(tx *bolt.Tx, parent, child [32]byte) error {
|
||||
bkt := tx.Bucket(blockParentRootIndicesBucket)
|
||||
if err := deleteRootIndexEntry(bkt, parent[:], child); err != nil {
|
||||
return errors.Wrap(err, "could not delete parent root index entry")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Store) deleteSlotIndexEntry(tx *bolt.Tx, slot primitives.Slot, root [32]byte) error {
|
||||
key := bytesutil.SlotToBytesBigEndian(slot)
|
||||
bkt := tx.Bucket(blockSlotIndicesBucket)
|
||||
if err := deleteRootIndexEntry(bkt, key, root); err != nil {
|
||||
return errors.Wrap(err, "could not delete slot index entry")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func deleteRootIndexEntry(bkt *bolt.Bucket, key []byte, root [32]byte) error {
|
||||
packed := bkt.Get(key)
|
||||
if len(packed) == 0 {
|
||||
return nil
|
||||
}
|
||||
updated, err := removeRoot(packed, root)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Don't update the value if the root was not found.
|
||||
if bytes.Equal(updated, packed) {
|
||||
return nil
|
||||
}
|
||||
// If there are no other roots in the key, just delete it.
|
||||
if len(updated) == 0 {
|
||||
if err := bkt.Delete(key); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// Update the key with the root removed.
|
||||
return bkt.Put(key, updated)
|
||||
}
|
||||
|
||||
func (s *Store) deleteValidatorHashes(tx *bolt.Tx, root []byte) error {
|
||||
ok, err := s.isStateValidatorMigrationOver()
|
||||
if err != nil {
|
||||
|
||||
@@ -196,9 +196,13 @@ func TestStore_BlocksCRUD(t *testing.T) {
|
||||
blockRoot, err := blk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = db.getBlock(ctx, blockRoot, nil)
|
||||
require.ErrorIs(t, err, ErrNotFound)
|
||||
retrievedBlock, err := db.Block(ctx, blockRoot)
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, nil, retrievedBlock, "Expected nil block")
|
||||
_, err = db.getBlock(ctx, blockRoot, nil)
|
||||
require.ErrorIs(t, err, ErrNotFound)
|
||||
|
||||
require.NoError(t, db.SaveBlock(ctx, blk))
|
||||
assert.Equal(t, true, db.HasBlock(ctx, blockRoot), "Expected block to exist in the db")
|
||||
@@ -214,10 +218,34 @@ func TestStore_BlocksCRUD(t *testing.T) {
|
||||
retrievedPb, err := retrievedBlock.Proto()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, proto.Equal(wantedPb, retrievedPb), "Wanted: %v, received: %v", wanted, retrievedBlock)
|
||||
// Check that the block is in the slot->block index
|
||||
found, roots, err := db.BlockRootsBySlot(ctx, blk.Block().Slot())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, found)
|
||||
require.Equal(t, 1, len(roots))
|
||||
require.Equal(t, blockRoot, roots[0])
|
||||
// Delete the block, then check that it is no longer in the index.
|
||||
|
||||
parent := blk.Block().ParentRoot()
|
||||
testCheckParentIndices(t, db.db, parent, true)
|
||||
require.NoError(t, db.DeleteBlock(ctx, blockRoot))
|
||||
require.NoError(t, err)
|
||||
testCheckParentIndices(t, db.db, parent, false)
|
||||
found, roots, err = db.BlockRootsBySlot(ctx, blk.Block().Slot())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, found)
|
||||
require.Equal(t, 0, len(roots))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testCheckParentIndices(t *testing.T, db *bolt.DB, parent [32]byte, expected bool) {
|
||||
require.NoError(t, db.View(func(tx *bolt.Tx) error {
|
||||
require.Equal(t, expected, tx.Bucket(blockParentRootIndicesBucket).Get(parent[:]) != nil)
|
||||
return nil
|
||||
}))
|
||||
}
|
||||
|
||||
func TestStore_BlocksHandleZeroCase(t *testing.T) {
|
||||
for _, tt := range blockTests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
|
||||
@@ -114,3 +114,27 @@ func splitRoots(b []byte) ([][32]byte, error) {
|
||||
}
|
||||
return rl, nil
|
||||
}
|
||||
|
||||
func removeRoot(roots []byte, root [32]byte) ([]byte, error) {
|
||||
if len(roots) == 0 {
|
||||
return []byte{}, nil
|
||||
}
|
||||
if len(roots) == 32 && bytes.Equal(roots, root[:]) {
|
||||
return []byte{}, nil
|
||||
}
|
||||
if len(roots)%32 != 0 {
|
||||
return nil, errors.Wrapf(errMisalignedRootList, "root list len=%d", len(roots))
|
||||
}
|
||||
|
||||
search := root[:]
|
||||
for i := 0; i <= len(roots)-32; i += 32 {
|
||||
if bytes.Equal(roots[i:i+32], search) {
|
||||
result := make([]byte, len(roots)-32)
|
||||
copy(result, roots[:i])
|
||||
copy(result[i:], roots[i+32:])
|
||||
return result, nil
|
||||
}
|
||||
}
|
||||
|
||||
return roots, nil
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"testing"
|
||||
@@ -195,3 +196,85 @@ func TestSplitRoots(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func tPad(p ...[]byte) []byte {
|
||||
r := make([]byte, 32*len(p))
|
||||
for i, b := range p {
|
||||
copy(r[i*32:], b)
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func TestRemoveRoot(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
roots []byte
|
||||
root [32]byte
|
||||
expect []byte
|
||||
err error
|
||||
}{
|
||||
{
|
||||
name: "empty",
|
||||
roots: []byte{},
|
||||
root: [32]byte{0xde, 0xad, 0xbe, 0xef},
|
||||
expect: []byte{},
|
||||
},
|
||||
{
|
||||
name: "single",
|
||||
roots: tPad([]byte{0xde, 0xad, 0xbe, 0xef}),
|
||||
root: [32]byte{0xde, 0xad, 0xbe, 0xef},
|
||||
expect: []byte{},
|
||||
},
|
||||
{
|
||||
name: "single, different",
|
||||
roots: tPad([]byte{0xde, 0xad, 0xbe, 0xef}),
|
||||
root: [32]byte{0xde, 0xad, 0xbe, 0xee},
|
||||
expect: tPad([]byte{0xde, 0xad, 0xbe, 0xef}),
|
||||
},
|
||||
{
|
||||
name: "multi",
|
||||
roots: tPad([]byte{0xde, 0xad, 0xbe, 0xef}, []byte{0xac, 0x1d, 0xfa, 0xce}),
|
||||
root: [32]byte{0xac, 0x1d, 0xfa, 0xce},
|
||||
expect: tPad([]byte{0xde, 0xad, 0xbe, 0xef}),
|
||||
},
|
||||
{
|
||||
name: "multi, reordered",
|
||||
roots: tPad([]byte{0xac, 0x1d, 0xfa, 0xce}, []byte{0xde, 0xad, 0xbe, 0xef}),
|
||||
root: [32]byte{0xac, 0x1d, 0xfa, 0xce},
|
||||
expect: tPad([]byte{0xde, 0xad, 0xbe, 0xef}),
|
||||
},
|
||||
{
|
||||
name: "multi, 3",
|
||||
roots: tPad([]byte{0xac, 0x1d, 0xfa, 0xce}, []byte{0xbe, 0xef, 0xca, 0xb5}, []byte{0xde, 0xad, 0xbe, 0xef}),
|
||||
root: [32]byte{0xac, 0x1d, 0xfa, 0xce},
|
||||
expect: tPad([]byte{0xbe, 0xef, 0xca, 0xb5}, []byte{0xde, 0xad, 0xbe, 0xef}),
|
||||
},
|
||||
{
|
||||
name: "multi, different",
|
||||
roots: tPad([]byte{0xde, 0xad, 0xbe, 0xef}, []byte{0xac, 0x1d, 0xfa, 0xce}),
|
||||
root: [32]byte{0xac, 0x1d, 0xbe, 0xa7},
|
||||
expect: tPad([]byte{0xde, 0xad, 0xbe, 0xef}, []byte{0xac, 0x1d, 0xfa, 0xce}),
|
||||
},
|
||||
{
|
||||
name: "misaligned",
|
||||
roots: make([]byte, 61),
|
||||
root: [32]byte{0xac, 0x1d, 0xbe, 0xa7},
|
||||
err: errMisalignedRootList,
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
before := make([]byte, len(c.roots))
|
||||
copy(before, c.roots)
|
||||
r, err := removeRoot(c.roots, c.root)
|
||||
if c.err != nil {
|
||||
require.ErrorIs(t, err, c.err)
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(c.expect), len(r))
|
||||
require.Equal(t, true, bytes.Equal(c.expect, r))
|
||||
require.Equal(t, true, bytes.Equal(before, c.roots))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,8 +1,10 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
@@ -19,7 +21,17 @@ func (s *Store) LastValidatedCheckpoint(ctx context.Context) (*ethpb.Checkpoint,
|
||||
if enc == nil {
|
||||
var finErr error
|
||||
checkpoint, finErr = s.FinalizedCheckpoint(ctx)
|
||||
return finErr
|
||||
if finErr != nil {
|
||||
return finErr
|
||||
}
|
||||
if bytes.Equal(checkpoint.Root, params.BeaconConfig().ZeroHash[:]) {
|
||||
bkt = tx.Bucket(blocksBucket)
|
||||
r := bkt.Get(genesisBlockRootKey)
|
||||
if r != nil {
|
||||
checkpoint.Root = r
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
checkpoint = ðpb.Checkpoint{}
|
||||
return decode(ctx, enc, checkpoint)
|
||||
|
||||
@@ -5,15 +5,17 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
dbtest "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
slottest "github.com/prysmaticlabs/prysm/v5/time/slots/testing"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
dbtest "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
go test fuzz v1
|
||||
[]byte("{\"baseFeePerGas\":\"0x7fffffffffffffff\",\"difficulty\":\"0x7fffffffffffffff\",\"extraData\":\"0x\",\"gasLimit\":\"0xffffffffffffffff\",\"gasUsed\":\"0xffffffffffffffff\",\"hash\":\"0xff01ff01ff01ff01ff01ff01ff01ff0100000000000000000000000000000000\",\"logsBloom\":\"0x6a756e6b000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\",\"miner\":\"0x0000000000000000000000000000000000000000\",\"mixHash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\"nonce\":\"0x0000000000000000\",\"number\":\"0x7fffffffffffffff\",\"parentHash\":\"0xff01ff01ff01ff01ff01ff01ff01ff0100000000000000000000000000000000\",\"receiptsRoot\":\"0xff01ff01ff01ff01ff01ff01ff01ff0100000000000000000000000000000000\",\"sha3Uncles\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\"stateRoot\":\"0xff01ff01ff01ff01ff01ff01ff01ff0100000000000000000000000000000000\",\"timestamp\":\"0x64\",\"totalDifficulty\":\"999999999999999999999999999999999999999\",\"transactions\":[{\"type\":\"0x2\",\"nonce\":\"0xffffffffffffffff\",\"gasPrice\":null,\"maxPriorityFeePerGas\":\"0x7fffffffffffffff\",\"maxFeePerGas\":\"0x7fffffffffffffff\",\"gas\":\"0xffffffffffffffff\",\"value\":\"0x7fffffffffffffff\",\"input\":\"0x72616e646f6d\",\"v\":\"0x0\",\"r\":\"0x7fffffffffffffff\",\"s\":\"0x7fffffffffffffff\",\"to\":\"0x095e7baea6a6c7c4c2dfeb977efac326af552d87\",\"chainId\":\"0x7fffffffffffffff\",\"accessList\":[],\"hash\":\"0x26db3ef2c0e5945b24088d6a4165d0bb2959abd848b57891aa041b72518548ab\"},{\"type\":\"0x2\",\"nonce\":\"0xffffffffffffffff\",\"gasPrice\":null,\"maxPriorityFeePerGas\":\"0x7fffffffffffffff\",\"maxFeePerGas\":\"0x7fffffffffffffff\",\"gas\":\"0xfffffffffffaffff\",\"value\":\"0x7fffffffffffffff\",\"input\":\"0x72616e646f6d\",\"v\":\"0x0\",\"r\":\"0x7fffffffffffffff\",\"s\":\"0x7fffffffffffffff\",\"to\":\"0x095e7baea6a6c7c4c2dfeb977efac326af552d87\",\"chainId\":\"0x7fffffffffffffff\",\"accessList\":[],\"hash\":\"0x26db3ef2c0e5945b24088d6a4165d0bb2959abd848b57891aa041b72518548ab\"}],\"transactionsRoot\":\"0x0000000000000000000000000000000000000000000000000000000000000000\"}")
|
||||
[]byte("{\"baseFeePerGas\":\"0x7fffffffffffffff\",\"difficulty\":\"0x7fffffffffffffff\",\"extraData\":\"0x\",\"gasLimit\":\"0xffffffffffffffff\",\"gasUsed\":\"0xffffffffffffffff\",\"hash\":\"0xff01ff01ff01ff01ff01ff01ff01ff0100000000000000000000000000000000\",\"logsBloom\":\"0x6a756e6b000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\",\"miner\":\"0x0000000000000000000000000000000000000000\",\"mixHash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\"nonce\":\"0x0000000000000000\",\"number\":\"0x7fffffffffffffff\",\"parentHash\":\"0xff01ff01ff01ff01ff01ff01ff01ff0100000000000000000000000000000000\",\"receiptsRoot\":\"0xff01ff01ff01ff01ff01ff01ff01ff0100000000000000000000000000000000\",\"sha3Uncles\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\"stateRoot\":\"0xff01ff01ff01ff01ff01ff01ff01ff0100000000000000000000000000000000\",\"timestamp\":\"0x64\",\"totalDifficulty\":\"999999999999999999999999999999999999999\",\"transactions\":[{\"type\":\"0x2\",\"nonce\":\"0xffffffffffffffff\",\"gasPrice\":null,\"maxPriorityFeePerGas\":\"0x7fffffffffffffff\",\"maxFeePerGas\":\"0x7fffffffffffffff\",\"gas\":\"0xffffffffffffffff\",\"value\":\"0x7fffffffffffffff\",\"input\":\"0x72616e646f6d\",\"v\":\"0x0\",\"r\":\"0x7fffffffffffffff\",\"s\":\"0x7fffffffffffffff\",\"to\":\"0x095e7baea6a6c7c4c2dfeb977efac326af552d87\",\"chainId\":\"0x7fffffffffffffff\",\"accessList\":[],\"hash\":\"0x26db3ef2c0e5945b24088d6a4165d0bb2959abd848b57891aa041b72518548ab\"},{\"type\":\"0x2\",\"nonce\":\"0xffffffffffffffff\",\"gasPrice\":null,\"maxPriorityFeePerGas\":\"0x7fffffffffffffff\",\"maxFeePerGas\":\"0x7fffffffffffffff\",\"gas\":\"0xfffffffffffaffff\",\"value\":\"0x7fffffffffffffff\",\"input\":\"0x72616e646f6d\",\"v\":\"0x0\",\"r\":\"0x7fffffffffffffff\",\"s\":\"0x7fffffffffffffff\",\"to\":\"0x095e7baea6a6c7c4c2dfeb977efac326af552d87\",\"chainId\":\"0x7fffffffffffffff\",\"accessList\":[],\"hash\":\"0x26db3ef2c0e5945b24088d6a4165d0bb2959abd848b57891aa041b72518548ab\"}],\"transactionsRoot\":\"0x0000000000000000000000000000000000000000000000000000000000000000\"}")
|
||||
@@ -1,2 +1,2 @@
|
||||
go test fuzz v1
|
||||
[]byte("{\"0000000000000\":{},\"pAYloAdId\":\"\"}")
|
||||
[]byte("{\"0000000000000\":{},\"pAYloAdId\":\"\"}")
|
||||
@@ -252,6 +252,13 @@ func (s *Store) tips() ([][32]byte, []primitives.Slot) {
    return roots, slots
}

func (f *ForkChoice) HighestReceivedBlockRoot() [32]byte {
    if f.store.highestReceivedNode == nil {
        return [32]byte{}
    }
    return f.store.highestReceivedNode.root
}

// HighestReceivedBlockSlot returns the highest slot received by the forkchoice
func (f *ForkChoice) HighestReceivedBlockSlot() primitives.Slot {
    if f.store.highestReceivedNode == nil {
@@ -65,6 +65,7 @@ type FastGetter interface {
|
||||
FinalizedPayloadBlockHash() [32]byte
|
||||
HasNode([32]byte) bool
|
||||
HighestReceivedBlockSlot() primitives.Slot
|
||||
HighestReceivedBlockRoot() [32]byte
|
||||
HighestReceivedBlockDelay() primitives.Slot
|
||||
IsCanonical(root [32]byte) bool
|
||||
IsOptimistic(root [32]byte) (bool, error)
|
||||
|
||||
@@ -114,6 +114,13 @@ func (ro *ROForkChoice) HighestReceivedBlockSlot() primitives.Slot {
|
||||
return ro.getter.HighestReceivedBlockSlot()
|
||||
}
|
||||
|
||||
// HighestReceivedBlockRoot delegates to the underlying forkchoice call, under a lock.
|
||||
func (ro *ROForkChoice) HighestReceivedBlockRoot() [32]byte {
|
||||
ro.l.RLock()
|
||||
defer ro.l.RUnlock()
|
||||
return ro.getter.HighestReceivedBlockRoot()
|
||||
}
|
||||
|
||||
// HighestReceivedBlockDelay delegates to the underlying forkchoice call, under a lock.
|
||||
func (ro *ROForkChoice) HighestReceivedBlockDelay() primitives.Slot {
|
||||
ro.l.RLock()
|
||||
|
||||
@@ -29,6 +29,7 @@ const (
|
||||
unrealizedJustifiedPayloadBlockHashCalled
|
||||
nodeCountCalled
|
||||
highestReceivedBlockSlotCalled
|
||||
highestReceivedBlockRootCalled
|
||||
highestReceivedBlockDelayCalled
|
||||
receivedBlocksLastEpochCalled
|
||||
weightCalled
|
||||
@@ -252,6 +253,11 @@ func (ro *mockROForkchoice) HighestReceivedBlockSlot() primitives.Slot {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (ro *mockROForkchoice) HighestReceivedBlockRoot() [32]byte {
|
||||
ro.calls = append(ro.calls, highestReceivedBlockRootCalled)
|
||||
return [32]byte{}
|
||||
}
|
||||
|
||||
func (ro *mockROForkchoice) HighestReceivedBlockDelay() primitives.Slot {
|
||||
ro.calls = append(ro.calls, highestReceivedBlockDelayCalled)
|
||||
return 0
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// SaveUnaggregatedAttestation saves an unaggregated attestation in cache.
|
||||
@@ -60,7 +61,8 @@ func (c *AttCaches) UnaggregatedAttestations() ([]ethpb.Att, error) {
|
||||
for _, att := range unAggregatedAtts {
|
||||
seen, err := c.hasSeenBit(att)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
log.WithError(err).Debug("Could not check if unaggregated attestation's bit has been seen. Attestation will not be returned")
|
||||
continue
|
||||
}
|
||||
if !seen {
|
||||
atts = append(atts, att.Clone())
|
||||
@@ -137,7 +139,7 @@ func (c *AttCaches) DeleteUnaggregatedAttestation(att ethpb.Att) error {
|
||||
}
|
||||
|
||||
if err := c.insertSeenBit(att); err != nil {
|
||||
return err
|
||||
log.WithError(err).Debug("Could not insert seen bit of unaggregated attestation. Attestation will be deleted")
|
||||
}
|
||||
|
||||
id, err := attestation.NewId(att, attestation.Full)
|
||||
@@ -163,7 +165,12 @@ func (c *AttCaches) DeleteSeenUnaggregatedAttestations() (int, error) {
|
||||
if att == nil || att.IsNil() || att.IsAggregated() {
|
||||
continue
|
||||
}
|
||||
if seen, err := c.hasSeenBit(att); err == nil && seen {
|
||||
seen, err := c.hasSeenBit(att)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Could not check if unaggregated attestation's bit has been seen. Attestation will be deleted")
|
||||
seen = true
|
||||
}
|
||||
if seen {
|
||||
delete(c.unAggregatedAtt, r)
|
||||
count++
|
||||
}
|
||||
|
||||
@@ -17,6 +17,24 @@ import (
    "github.com/prysmaticlabs/prysm/v5/testing/util"
)

func TestKV_Unaggregated_UnaggregatedAttestations(t *testing.T) {
    t.Run("not returned when hasSeenBit fails", func(t *testing.T) {
        att := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b101}})
        id, err := attestation.NewId(att, attestation.Data)
        require.NoError(t, err)

        cache := NewAttCaches()
        require.NoError(t, cache.SaveUnaggregatedAttestation(att))
        cache.seenAtt.Delete(id.String())
        // cache a bitlist whose length is different from the attestation bitlist's length
        cache.seenAtt.Set(id.String(), []bitfield.Bitlist{{0b1001}}, c.DefaultExpiration)

        atts, err := cache.UnaggregatedAttestations()
        require.NoError(t, err)
        assert.Equal(t, 0, len(atts))
    })
}

func TestKV_Unaggregated_SaveUnaggregatedAttestation(t *testing.T) {
    tests := []struct {
        name string
@@ -155,6 +173,21 @@ func TestKV_Unaggregated_DeleteUnaggregatedAttestation(t *testing.T) {
        require.NoError(t, err)
        assert.DeepEqual(t, []ethpb.Att{}, returned)
    })

    t.Run("deleted when insertSeenBit fails", func(t *testing.T) {
        att := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b101}})
        id, err := attestation.NewId(att, attestation.Data)
        require.NoError(t, err)

        cache := NewAttCaches()
        require.NoError(t, cache.SaveUnaggregatedAttestation(att))
        cache.seenAtt.Delete(id.String())
        // cache a bitlist whose length is different from the attestation bitlist's length
        cache.seenAtt.Set(id.String(), []bitfield.Bitlist{{0b1001}}, c.DefaultExpiration)

        require.NoError(t, cache.DeleteUnaggregatedAttestation(att))
        assert.Equal(t, 0, len(cache.unAggregatedAtt), "Attestation was not deleted")
    })
}

func TestKV_Unaggregated_DeleteSeenUnaggregatedAttestations(t *testing.T) {
@@ -232,6 +265,23 @@ func TestKV_Unaggregated_DeleteSeenUnaggregatedAttestations(t *testing.T) {
        require.NoError(t, err)
        assert.DeepEqual(t, []ethpb.Att{}, returned)
    })

    t.Run("deleted when hasSeenBit fails", func(t *testing.T) {
        att := util.HydrateAttestation(&ethpb.Attestation{Data: &ethpb.AttestationData{Slot: 1}, AggregationBits: bitfield.Bitlist{0b101}})
        id, err := attestation.NewId(att, attestation.Data)
        require.NoError(t, err)

        cache := NewAttCaches()
        require.NoError(t, cache.SaveUnaggregatedAttestation(att))
        cache.seenAtt.Delete(id.String())
        // cache a bitlist whose length is different from the attestation bitlist's length
        cache.seenAtt.Set(id.String(), []bitfield.Bitlist{{0b1001}}, c.DefaultExpiration)

        count, err := cache.DeleteSeenUnaggregatedAttestations()
        require.NoError(t, err)
        assert.Equal(t, 1, count)
        assert.Equal(t, 0, len(cache.unAggregatedAtt), "Attestation was not deleted")
    })
}

func TestKV_Unaggregated_UnaggregatedAttestationsBySlotIndex(t *testing.T) {
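The tests above force hasSeenBit to fail by seeding the seen-attestation cache with a bitlist whose length differs from the attestation's aggregation bits, since comparing bitlists of different lengths is an error. A simplified, hypothetical length-checked contains helper (not go-bitfield's implementation) illustrating why the mismatch surfaces as an error:

```go
package main

import (
	"errors"
	"fmt"
)

// bitlist is a simplified SSZ-style bitlist: the highest set bit of the last
// byte marks the length. This is a hypothetical helper, not go-bitfield.
type bitlist []byte

func bitLen(b bitlist) int {
	if len(b) == 0 {
		return 0
	}
	msb := 0
	for i := 7; i >= 0; i-- {
		if b[len(b)-1]&(1<<uint(i)) != 0 {
			msb = i
			break
		}
	}
	return 8*(len(b)-1) + msb
}

// contains reports whether every bit set in sub is also set in super, and
// errors out on a length mismatch, which is what the tests above provoke.
func contains(super, sub bitlist) (bool, error) {
	if bitLen(super) != bitLen(sub) {
		return false, errors.New("bitlists are different lengths")
	}
	for i := range sub {
		if sub[i]&^super[i] != 0 {
			return false, nil
		}
	}
	return true, nil
}

func main() {
	_, err := contains(bitlist{0b1001}, bitlist{0b101}) // lengths 3 and 2
	fmt.Println(err)                                    // bitlists are different lengths
}
```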
@@ -2,4 +2,4 @@

set -e

chown prysm-beacon:prysm-beacon /etc/prysm/beacon-chain.yaml
chown prysm-beacon:prysm-beacon /etc/prysm/beacon-chain.yaml
@@ -10,4 +10,4 @@ getent passwd $SERVICE_USER > /dev/null || useradd -s /bin/false --no-create-hom
# Create directories
mkdir -p /etc/prysm
mkdir -p /var/lib/prysm
install -d -m 0700 -o $SERVICE_USER -g $SERVICE_USER /var/lib/prysm/beacon-chain
install -d -m 0700 -o $SERVICE_USER -g $SERVICE_USER /var/lib/prysm/beacon-chain
@@ -34,4 +34,4 @@ RestrictSUIDSGID=yes
RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX AF_NETLINK

[Install]
WantedBy=multi-user.target
WantedBy=multi-user.target
@@ -633,7 +633,7 @@ func (s *Server) SubmitBLSToExecutionChanges(w http.ResponseWriter, r *http.Requ
            toBroadcast = append(toBroadcast, sbls)
        }
    }
    go s.broadcastBLSChanges(ctx, toBroadcast)
    go s.broadcastBLSChanges(context.Background(), toBroadcast)
    if len(failures) > 0 {
        failuresErr := &server.IndexedVerificationFailureError{
            Code: http.StatusBadRequest,
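The hunk above (and the changelog entry for pvl_bls-ctx.md further down) switches the broadcast goroutine from the request context to context.Background(), because the request context is canceled as soon as the handler returns and would cancel the in-flight broadcast. A hedged sketch of the same pattern with a plain net/http handler and a hypothetical broadcast function:

```go
package main

import (
	"context"
	"log"
	"net/http"
	"time"
)

// broadcast is a hypothetical stand-in for broadcastBLSChanges.
func broadcast(ctx context.Context, msg string) {
	select {
	case <-time.After(100 * time.Millisecond): // pretend to publish to the network
		log.Println("broadcast:", msg)
	case <-ctx.Done():
		log.Println("broadcast canceled:", ctx.Err())
	}
}

func handler(w http.ResponseWriter, r *http.Request) {
	// Wrong: go broadcast(r.Context(), "change"), since the request context is
	// canceled as soon as this handler returns, killing the broadcast.
	go broadcast(context.Background(), "change") // detached from the request lifetime
	w.WriteHeader(http.StatusOK)
}

func main() {
	http.HandleFunc("/eth/v1/beacon/pool/bls_to_execution_changes", handler)
	log.Fatal(http.ListenAndServe("localhost:8080", nil))
}
```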
@@ -160,6 +160,8 @@ func TestGetSpec(t *testing.T) {
    config.MaxTransactionsPerPayload = 99
    config.FieldElementsPerBlob = 100
    config.KzgCommitmentInclusionProofDepth = 101
    config.BlobsidecarSubnetCount = 102
    config.BlobsidecarSubnetCountElectra = 103

    var dbp [4]byte
    copy(dbp[:], []byte{'0', '0', '0', '1'})
@@ -198,7 +200,7 @@ func TestGetSpec(t *testing.T) {
    data, ok := resp.Data.(map[string]interface{})
    require.Equal(t, true, ok)

    assert.Equal(t, 168, len(data))
    assert.Equal(t, 170, len(data))
    for k, v := range data {
        t.Run(k, func(t *testing.T) {
            switch k {
@@ -559,6 +561,10 @@ func TestGetSpec(t *testing.T) {
                assert.Equal(t, "100", v)
            case "KZG_COMMITMENT_INCLUSION_PROOF_DEPTH":
                assert.Equal(t, "101", v)
            case "BLOB_SIDECAR_SUBNET_COUNT":
                assert.Equal(t, "102", v)
            case "BLOB_SIDECAR_SUBNET_COUNT_ELECTRA":
                assert.Equal(t, "103", v)
            default:
                t.Errorf("Incorrect key: %s", k)
            }
@@ -18,7 +18,8 @@ go_library(
        "//beacon-chain/core/feed/operation:go_default_library",
        "//beacon-chain/core/feed/state:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/time:go_default_library",
        "//beacon-chain/core/transition:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/payload-attribute:go_default_library",
@@ -58,6 +59,7 @@ go_test(
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/payload-attribute:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/eth/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//testing/require:go_default_library",
@@ -7,6 +7,7 @@ import (
    "fmt"
    "io"
    "net/http"
    "runtime/debug"
    "strconv"
    "time"

@@ -20,7 +21,8 @@ import (
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/operation"
    statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
    chaintime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
    payloadattribute "github.com/prysmaticlabs/prysm/v5/consensus-types/payload-attribute"
@@ -352,9 +354,18 @@ func writeLazyReaderWithRecover(w *streamingResponseWriterController, lr lazyRea
        if r := recover(); r != nil {
            log.WithField("panic", r).Error("Recovered from panic while writing event to client.")
            err = errWriterUnusable
            debug.PrintStack()
        }
    }()
    if lr == nil {
        log.Warn("Event stream skipping a nil lazy event reader callback")
        return nil
    }
    r := lr()
    if r == nil {
        log.Warn("Event stream skipping a nil event reader")
        return nil
    }
    out, err := io.ReadAll(r)
    if err != nil {
        return err
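The writeLazyReaderWithRecover hunk above guards event writes with a recover that records the panic, prints the stack, and converts it into a named return error, and it now also skips nil lazy readers. A small sketch of that recover-into-error pattern, using standard-library types instead of Prysm's streaming writer:

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"runtime/debug"
	"strings"
)

var errWriterUnusable = errors.New("writer is unusable")

// writeWithRecover converts a panic during the write into a named return error,
// prints the stack for debugging, and skips nil lazy readers entirely.
func writeWithRecover(w io.Writer, lr func() io.Reader) (err error) {
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("recovered from panic while writing event:", r)
			debug.PrintStack()
			err = errWriterUnusable
		}
	}()
	if lr == nil {
		return nil // nothing to write
	}
	r := lr()
	if r == nil {
		return nil
	}
	_, err = io.Copy(w, r)
	return err
}

func main() {
	fmt.Println(writeWithRecover(io.Discard, func() io.Reader { return strings.NewReader("event\n") })) // <nil>
	fmt.Println(writeWithRecover(io.Discard, func() io.Reader { panic("boom") }))                       // writer is unusable
}
```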
@@ -600,27 +611,14 @@ func (s *Server) lazyReaderForEvent(ctx context.Context, event *feed.Event, topi

var errUnsupportedPayloadAttribute = errors.New("cannot compute payload attributes pre-Bellatrix")

func (s *Server) computePayloadAttributes(ctx context.Context, ev payloadattribute.EventData) (payloadattribute.Attributer, error) {
    v := ev.HeadState.Version()
func (s *Server) computePayloadAttributes(ctx context.Context, st state.ReadOnlyBeaconState, root [32]byte, proposer primitives.ValidatorIndex, timestamp uint64, randao []byte) (payloadattribute.Attributer, error) {
    v := st.Version()
    if v < version.Bellatrix {
        return nil, errors.Wrapf(errUnsupportedPayloadAttribute, "%s is not supported", version.String(v))
    }

    t, err := slots.ToTime(ev.HeadState.GenesisTime(), ev.HeadState.Slot())
    if err != nil {
        return nil, errors.Wrap(err, "could not get head state slot time")
    }
    timestamp := uint64(t.Unix())
    prevRando, err := helpers.RandaoMix(ev.HeadState, chaintime.CurrentEpoch(ev.HeadState))
    if err != nil {
        return nil, errors.Wrap(err, "could not get head state randao mix")
    }
    proposerIndex, err := helpers.BeaconProposerIndex(ctx, ev.HeadState)
    if err != nil {
        return nil, errors.Wrap(err, "could not get head state proposer index")
    }
    feeRecpt := params.BeaconConfig().DefaultFeeRecipient.Bytes()
    tValidator, exists := s.TrackedValidatorsCache.Validator(proposerIndex)
    tValidator, exists := s.TrackedValidatorsCache.Validator(proposer)
    if exists {
        feeRecpt = tValidator.FeeRecipient[:]
    }
@@ -628,34 +626,30 @@ func (s *Server) computePayloadAttributes(ctx context.Context, ev payloadattribu
    if v == version.Bellatrix {
        return payloadattribute.New(&engine.PayloadAttributes{
            Timestamp: timestamp,
            PrevRandao: prevRando,
            PrevRandao: randao,
            SuggestedFeeRecipient: feeRecpt,
        })
    }

    w, _, err := ev.HeadState.ExpectedWithdrawals()
    w, _, err := st.ExpectedWithdrawals()
    if err != nil {
        return nil, errors.Wrap(err, "could not get withdrawals from head state")
    }
    if v == version.Capella {
        return payloadattribute.New(&engine.PayloadAttributesV2{
            Timestamp: timestamp,
            PrevRandao: prevRando,
            PrevRandao: randao,
            SuggestedFeeRecipient: feeRecpt,
            Withdrawals: w,
        })
    }

    pr, err := ev.HeadBlock.Block().HashTreeRoot()
    if err != nil {
        return nil, errors.Wrap(err, "could not compute head block root")
    }
    return payloadattribute.New(&engine.PayloadAttributesV3{
        Timestamp: timestamp,
        PrevRandao: prevRando,
        PrevRandao: randao,
        SuggestedFeeRecipient: feeRecpt,
        Withdrawals: w,
        ParentBeaconBlockRoot: pr[:],
        ParentBeaconBlockRoot: root[:],
    })
}
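The refactor above moves the timestamp, randao and proposer inputs out of computePayloadAttributes; per the kasey_event-slot-fix.md changelog entry further down, the timestamp is now derived from the proposal slot rather than the head slot. A sketch of that slot-to-timestamp arithmetic; secondsPerSlot and the genesis value are illustrative inputs, not read from Prysm's config:

```go
package main

import (
	"fmt"
	"time"
)

// Illustrative values only: mainnet uses 12-second slots; the genesis time below
// is just an example input, not read from Prysm's params package.
const secondsPerSlot = 12

type Slot uint64

// slotStart returns the wall-clock start time of a slot for a given genesis time.
func slotStart(genesis time.Time, s Slot) time.Time {
	return genesis.Add(time.Duration(uint64(s)*secondsPerSlot) * time.Second)
}

func main() {
	genesis := time.Unix(1_606_824_023, 0)
	headSlot, proposalSlot := Slot(100), Slot(101)
	fmt.Println("head slot start:    ", slotStart(genesis, headSlot).Unix())
	// The payload-attribute event should carry the proposal slot's timestamp.
	fmt.Println("proposal slot start:", slotStart(genesis, proposalSlot).Unix())
}
```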
@@ -665,37 +659,75 @@ type asyncPayloadAttrData struct {
    err error
}

var zeroRoot [32]byte

// needsFill allows tests to provide filled EventData values. An ordinary event data value fired by the blockchain package will have
// all of the checked fields empty, so the logical short circuit should hit immediately.
func needsFill(ev payloadattribute.EventData) bool {
    return ev.HeadState == nil || ev.HeadState.IsNil() || ev.HeadState.LatestBlockHeader() == nil ||
        ev.HeadBlock == nil || ev.HeadBlock.IsNil() ||
        ev.HeadRoot == zeroRoot || len(ev.ParentBlockRoot) == 0 || len(ev.ParentBlockHash) == 0 ||
        ev.Attributer == nil || ev.Attributer.IsEmpty()
}

func (s *Server) fillEventData(ctx context.Context, ev payloadattribute.EventData) (payloadattribute.EventData, error) {
    if ev.HeadBlock == nil || ev.HeadBlock.IsNil() {
        hb, err := s.HeadFetcher.HeadBlock(ctx)
        if err != nil {
            return ev, errors.Wrap(err, "Could not look up head block")
        }
        root, err := hb.Block().HashTreeRoot()
        if err != nil {
            return ev, errors.Wrap(err, "Could not compute head block root")
        }
        if ev.HeadRoot != root {
            return ev, errors.Wrap(err, "head root changed before payload attribute event handler execution")
        }
        ev.HeadBlock = hb
        payload, err := hb.Block().Body().Execution()
        if err != nil {
            return ev, errors.Wrap(err, "Could not get execution payload for head block")
        }
        ev.ParentBlockHash = payload.BlockHash()
        ev.ParentBlockNumber = payload.BlockNumber()
    var err error

    if !needsFill(ev) {
        return ev, nil
    }

    attr := ev.Attributer
    if attr == nil || attr.IsEmpty() {
        attr, err := s.computePayloadAttributes(ctx, ev)
        if err != nil {
            return ev, errors.Wrap(err, "Could not compute payload attributes")
        }
        ev.Attributer = attr
    ev.HeadState, err = s.HeadFetcher.HeadState(ctx)
    if err != nil {
        return ev, errors.Wrap(err, "could not get head state")
    }
    return ev, nil

    ev.HeadBlock, err = s.HeadFetcher.HeadBlock(ctx)
    if err != nil {
        return ev, errors.Wrap(err, "could not look up head block")
    }
    ev.HeadRoot, err = ev.HeadBlock.Block().HashTreeRoot()
    if err != nil {
        return ev, errors.Wrap(err, "could not compute head block root")
    }
    pr := ev.HeadBlock.Block().ParentRoot()
    ev.ParentBlockRoot = pr[:]

    hsr, err := ev.HeadState.LatestBlockHeader().HashTreeRoot()
    if err != nil {
        return ev, errors.Wrap(err, "could not compute latest block header root")
    }

    pse := slots.ToEpoch(ev.ProposalSlot)
    st := ev.HeadState
    if slots.ToEpoch(st.Slot()) != pse {
        st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, hsr[:], ev.ProposalSlot)
        if err != nil {
            return ev, errors.Wrap(err, "could not run process blocks on head state into the proposal slot epoch")
        }
    }
    ev.ProposerIndex, err = helpers.BeaconProposerIndexAtSlot(ctx, st, ev.ProposalSlot)
    if err != nil {
        return ev, errors.Wrap(err, "failed to compute proposer index")
    }
    randao, err := helpers.RandaoMix(st, pse)
    if err != nil {
        return ev, errors.Wrap(err, "could not get head state randado")
    }

    payload, err := ev.HeadBlock.Block().Body().Execution()
    if err != nil {
        return ev, errors.Wrap(err, "could not get execution payload for head block")
    }
    ev.ParentBlockHash = payload.BlockHash()
    ev.ParentBlockNumber = payload.BlockNumber()

    t, err := slots.ToTime(st.GenesisTime(), ev.ProposalSlot)
    if err != nil {
        return ev, errors.Wrap(err, "could not get head state slot time")
    }
    ev.Attributer, err = s.computePayloadAttributes(ctx, st, hsr, ev.ProposerIndex, uint64(t.Unix()), randao)
    return ev, err
}
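fillEventData above now short-circuits via needsFill, so fully populated events (such as those built by tests) are returned untouched and only missing fields trigger head-block/state lookups. A toy sketch of that fill-only-when-needed shape with stand-in fields:

```go
package main

import "fmt"

// eventData is a stand-in for payloadattribute.EventData with only the fields
// needed to show the short-circuit; the real type has more.
type eventData struct {
	headRoot   [32]byte
	parentRoot []byte
	parentHash []byte
}

func needsFill(ev eventData) bool {
	return ev.headRoot == [32]byte{} || len(ev.parentRoot) == 0 || len(ev.parentHash) == 0
}

// fill performs the (here: pretend) expensive lookups only when something is missing,
// so already-populated events, e.g. those constructed by tests, pass through unchanged.
func fill(ev eventData) eventData {
	if !needsFill(ev) {
		return ev
	}
	ev.headRoot = [32]byte{1}
	ev.parentRoot = []byte{2}
	ev.parentHash = []byte{3}
	return ev
}

func main() {
	fmt.Println(needsFill(eventData{}), needsFill(fill(eventData{}))) // true false
}
```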
// This event stream is intended to be used by builders and relays.
@@ -704,10 +736,7 @@ func (s *Server) payloadAttributesReader(ctx context.Context, ev payloadattribut
    ctx, cancel := context.WithTimeout(ctx, payloadAttributeTimeout)
    edc := make(chan asyncPayloadAttrData)
    go func() {
        d := asyncPayloadAttrData{
            version: version.String(ev.HeadState.Version()),
        }

        d := asyncPayloadAttrData{}
        defer func() {
            edc <- d
        }()
@@ -716,6 +745,7 @@ func (s *Server) payloadAttributesReader(ctx context.Context, ev payloadattribut
            d.err = errors.Wrap(err, "Could not fill event data")
            return
        }
        d.version = version.String(ev.HeadBlock.Version())
        attributesBytes, err := marshalAttributes(ev.Attributer)
        if err != nil {
            d.err = errors.Wrap(err, "errors marshaling payload attributes to json")
@@ -2,6 +2,7 @@ package events

import (
    "context"
    "encoding/binary"
    "fmt"
    "io"
    "math"
@@ -24,6 +25,7 @@ import (
    "github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
    payloadattribute "github.com/prysmaticlabs/prysm/v5/consensus-types/payload-attribute"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
    ethpb "github.com/prysmaticlabs/prysm/v5/proto/eth/v1"
    eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v5/testing/require"
@@ -557,6 +559,110 @@ func TestStreamEvents_OperationsEvents(t *testing.T) {
    })
}

func TestFillEventData(t *testing.T) {
    ctx := context.Background()
    t.Run("AlreadyFilledData_ShouldShortCircuitWithoutError", func(t *testing.T) {
        st, err := util.NewBeaconStateBellatrix()
        require.NoError(t, err)
        b, err := blocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlockBellatrix(&eth.SignedBeaconBlockBellatrix{}))
        require.NoError(t, err)
        attributor, err := payloadattribute.New(&enginev1.PayloadAttributes{
            Timestamp: uint64(time.Now().Unix()),
        })
        require.NoError(t, err)
        alreadyFilled := payloadattribute.EventData{
            HeadState: st,
            HeadBlock: b,
            HeadRoot: [32]byte{1, 2, 3},
            Attributer: attributor,
            ParentBlockRoot: []byte{1, 2, 3},
            ParentBlockHash: []byte{4, 5, 6},
        }
        srv := &Server{} // No real HeadFetcher needed here since it won't be called.
        result, err := srv.fillEventData(ctx, alreadyFilled)
        require.NoError(t, err)
        require.DeepEqual(t, alreadyFilled, result)
    })
    t.Run("Electra PartialData_ShouldFetchHeadStateAndBlock", func(t *testing.T) {
        st, err := util.NewBeaconStateElectra()
        require.NoError(t, err)
        valCount := 10
        setActiveValidators(t, st, valCount)
        inactivityScores := make([]uint64, valCount)
        for i := range inactivityScores {
            inactivityScores[i] = 10
        }
        require.NoError(t, st.SetInactivityScores(inactivityScores))
        b, err := blocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlockElectra(&eth.SignedBeaconBlockElectra{}))
        require.NoError(t, err)
        attributor, err := payloadattribute.New(&enginev1.PayloadAttributes{
            Timestamp: uint64(time.Now().Unix()),
        })
        require.NoError(t, err)
        // Create an event data object missing certain fields:
        partial := payloadattribute.EventData{
            // The presence of a nil HeadState, nil HeadBlock, zeroed HeadRoot, etc.
            // will cause fillEventData to try to fill the values.
            ProposalSlot: 42, // different epoch from current slot
            Attributer: attributor, // Must be Bellatrix or later
        }
        currentSlot := primitives.Slot(0)
        // to avoid slot processing
        require.NoError(t, st.SetSlot(currentSlot+1))
        mockChainService := &mockChain.ChainService{
            Root: make([]byte, 32),
            State: st,
            Block: b,
            Slot: &currentSlot,
        }

        stn := mockChain.NewEventFeedWrapper()
        opn := mockChain.NewEventFeedWrapper()
        srv := &Server{
            StateNotifier: &mockChain.SimpleNotifier{Feed: stn},
            OperationNotifier: &mockChain.SimpleNotifier{Feed: opn},
            HeadFetcher: mockChainService,
            ChainInfoFetcher: mockChainService,
            TrackedValidatorsCache: cache.NewTrackedValidatorsCache(),
            EventWriteTimeout: testEventWriteTimeout,
        }

        filled, err := srv.fillEventData(ctx, partial)
        require.NoError(t, err, "expected successful fill of partial event data")

        // Verify that fields have been updated from the mock data:
        require.NotNil(t, filled.HeadState, "HeadState should be assigned")
        require.NotNil(t, filled.HeadBlock, "HeadBlock should be assigned")
        require.NotEqual(t, [32]byte{}, filled.HeadRoot, "HeadRoot should no longer be zero")
        require.NotEmpty(t, filled.ParentBlockRoot, "ParentBlockRoot should be filled")
        require.NotEmpty(t, filled.ParentBlockHash, "ParentBlockHash should be filled")
        require.Equal(t, uint64(0), filled.ParentBlockNumber, "ParentBlockNumber must match mock block")

        // Check that a valid Attributer was set:
        require.NotNil(t, filled.Attributer, "Should have a valid payload attributes object")
        require.Equal(t, false, filled.Attributer.IsEmpty(), "Attributer should not be empty after fill")
    })
}

func setActiveValidators(t *testing.T, st state.BeaconState, count int) {
    balances := make([]uint64, count)
    validators := make([]*eth.Validator, 0, count)
    for i := 0; i < count; i++ {
        pubKey := make([]byte, params.BeaconConfig().BLSPubkeyLength)
        binary.LittleEndian.PutUint64(pubKey, uint64(i))
        balances[i] = uint64(i)
        validators = append(validators, &eth.Validator{
            PublicKey: pubKey,
            ActivationEpoch: 0,
            ExitEpoch: params.BeaconConfig().FarFutureEpoch,
            WithdrawalCredentials: make([]byte, 32),
        })
    }

    require.NoError(t, st.SetValidators(validators))
    require.NoError(t, st.SetBalances(balances))
}

func TestStuckReaderScenarios(t *testing.T) {
    cases := []struct {
        name string
@@ -636,6 +636,16 @@ func (s *Server) ProduceSyncCommitteeContribution(w http.ResponseWriter, r *http
    ctx, span := trace.StartSpan(r.Context(), "validator.ProduceSyncCommitteeContribution")
    defer span.End()

    isOptimistic, err := s.OptimisticModeFetcher.IsOptimistic(ctx)
    if err != nil {
        httputil.HandleError(w, err.Error(), http.StatusInternalServerError)
        return
    }
    if isOptimistic {
        httputil.HandleError(w, "Beacon node is currently syncing and not serving request on that endpoint", http.StatusServiceUnavailable)
        return
    }

    _, index, ok := shared.UintFromQuery(w, r, "subcommittee_index", true)
    if !ok {
        return
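The hunk above adds an optimistic-sync guard to ProduceSyncCommitteeContribution: the endpoint returns 503 while the node is optimistic. A sketch of the same guard as a plain net/http handler; optimisticModeFetcher here is a stand-in interface, not Prysm's:

```go
package main

import (
	"context"
	"log"
	"net/http"
)

// optimisticModeFetcher is a stand-in for the fetcher used in the hunk above.
type optimisticModeFetcher interface {
	IsOptimistic(ctx context.Context) (bool, error)
}

type server struct {
	fetcher optimisticModeFetcher
}

// produceContribution refuses to serve while the node is optimistic, mirroring
// the guard added to ProduceSyncCommitteeContribution.
func (s *server) produceContribution(w http.ResponseWriter, r *http.Request) {
	isOptimistic, err := s.fetcher.IsOptimistic(r.Context())
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if isOptimistic {
		http.Error(w, "Beacon node is currently syncing and not serving request on that endpoint", http.StatusServiceUnavailable)
		return
	}
	w.WriteHeader(http.StatusOK) // the real handler builds the contribution here
}

type alwaysSynced struct{}

func (alwaysSynced) IsOptimistic(context.Context) (bool, error) { return false, nil }

func main() {
	s := &server{fetcher: alwaysSynced{}}
	http.HandleFunc("/eth/v1/validator/sync_committee_contribution", s.produceContribution)
	log.Fatal(http.ListenAndServe("localhost:8080", nil))
}
```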
@@ -1584,7 +1584,8 @@ func TestProduceSyncCommitteeContribution(t *testing.T) {
                SyncCommitteeIndices: []primitives.CommitteeIndex{0},
            },
        },
        SyncCommitteePool: syncCommitteePool,
        SyncCommitteePool: syncCommitteePool,
        OptimisticModeFetcher: &mockChain.ChainService{},
    }
    t.Run("ok", func(t *testing.T) {
        url := "http://example.com?slot=1&subcommittee_index=1&beacon_block_root=0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
@@ -1672,7 +1673,8 @@ func TestProduceSyncCommitteeContribution(t *testing.T) {
                SyncCommitteeIndices: []primitives.CommitteeIndex{0},
            },
        },
        SyncCommitteePool: syncCommitteePool,
        SyncCommitteePool: syncCommitteePool,
        OptimisticModeFetcher: &mockChain.ChainService{},
    }
    server.ProduceSyncCommitteeContribution(writer, request)
    assert.Equal(t, http.StatusNotFound, writer.Code)
@@ -1680,6 +1682,26 @@ func TestProduceSyncCommitteeContribution(t *testing.T) {
        require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp2))
        require.ErrorContains(t, "No subcommittee messages found", errors.New(writer.Body.String()))
    })
    t.Run("Optimistic returns 503", func(t *testing.T) {
        url := "http://example.com?slot=1&subcommittee_index=1&beacon_block_root=0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
        request := httptest.NewRequest(http.MethodGet, url, nil)
        writer := httptest.NewRecorder()
        writer.Body = &bytes.Buffer{}
        syncCommitteePool = synccommittee.NewStore()
        server = Server{
            CoreService: &core.Service{
                HeadFetcher: &mockChain.ChainService{
                    SyncCommitteeIndices: []primitives.CommitteeIndex{0},
                },
            },
            SyncCommitteePool: syncCommitteePool,
            OptimisticModeFetcher: &mockChain.ChainService{
                Optimistic: true,
            },
        }
        server.ProduceSyncCommitteeContribution(writer, request)
        assert.Equal(t, http.StatusServiceUnavailable, writer.Code)
    })
}

func TestServer_RegisterValidator(t *testing.T) {
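The new subtest above drives the handler with an optimistic chain service and asserts a 503. The same check can be exercised against the stand-in handler from the previous sketch using net/http/httptest:

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
)

type optimisticModeFetcher interface {
	IsOptimistic(ctx context.Context) (bool, error)
}

type alwaysOptimistic struct{}

func (alwaysOptimistic) IsOptimistic(context.Context) (bool, error) { return true, nil }

// handler wraps the optimistic guard so it can be driven directly by httptest.
func handler(f optimisticModeFetcher) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if opt, err := f.IsOptimistic(r.Context()); err != nil || opt {
			http.Error(w, "Beacon node is currently syncing", http.StatusServiceUnavailable)
			return
		}
		w.WriteHeader(http.StatusOK)
	}
}

func main() {
	req := httptest.NewRequest(http.MethodGet, "http://example.com?slot=1&subcommittee_index=1", nil)
	rec := httptest.NewRecorder()
	handler(alwaysOptimistic{})(rec, req)
	fmt.Println(rec.Code == http.StatusServiceUnavailable) // true
}
```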
@@ -21,4 +21,4 @@ between states.
  `finalizerCleanup()` (applies only to multi-value slice fields).
- If the field is a slice, add it to the field map in `types.go`.
- If the field is a slice, update the `fieldConverters()` function in `/beacon-chain/state/fieldtrie/field_trie_helpers.go`. The exact implementation will vary
  depending on a few factors (is the field similar to an existing one, is it a multi-value slice etc.)
  depending on a few factors (is the field similar to an existing one, is it a multi-value slice etc.)
@@ -7,12 +7,13 @@ import (

    "github.com/libp2p/go-libp2p/core/network"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    "github.com/trailofbits/go-mutexasserts"

    "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
    p2ptypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
    "github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
    leakybucket "github.com/prysmaticlabs/prysm/v5/container/leaky-bucket"
    "github.com/sirupsen/logrus"
    "github.com/trailofbits/go-mutexasserts"
)

const defaultBurstLimit = 5

@@ -15,6 +15,8 @@ import (
    "github.com/libp2p/go-libp2p/core/peer"
    gcache "github.com/patrickmn/go-cache"
    "github.com/pkg/errors"
    "github.com/trailofbits/go-mutexasserts"

    "github.com/prysmaticlabs/prysm/v5/async"
    "github.com/prysmaticlabs/prysm/v5/async/abool"
    "github.com/prysmaticlabs/prysm/v5/async/event"
@@ -45,7 +47,6 @@ import (
    "github.com/prysmaticlabs/prysm/v5/runtime"
    prysmTime "github.com/prysmaticlabs/prysm/v5/time"
    "github.com/prysmaticlabs/prysm/v5/time/slots"
    "github.com/trailofbits/go-mutexasserts"
)

var _ runtime.Service = (*Service)(nil)
@@ -1,3 +1,3 @@
### Added

- Add SSZ support to light client finality and optimistic APIs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14836)
- Add SSZ support to light client finality and optimistic APIs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14836)
@@ -1,3 +0,0 @@
### Added

- Add pre-commit hooks for faster feedback loop
3 changelog/james-prysm_add-op-check-sync-committee.md Normal file
@@ -0,0 +1,3 @@
### Fixed

- /eth/v1/validator/sync_committee_contribution should check for optimistic status and return a 503 if it's optimistic.
@@ -1,3 +1,4 @@
### Ignored

- Cleanup single attestation code for readability.

@@ -1,4 +1,4 @@
### Fixed

- refactored publish block and block ssz functions to fix gocognit
- refactored publish blinded block and blinded block ssz to correctly deal with version headers and sent blocks
- refactored publish blinded block and blinded block ssz to correctly deal with version headers and sent blocks
@@ -4,4 +4,4 @@

### Fixed

- fixed gocognit in block conversions between json and proto types
- fixed gocognit in block conversions between json and proto types
@@ -1,3 +1,3 @@
### Added

- add log to committee index when committeebits are not the expected length of 1
- add log to committee index when committeebits are not the expected length of 1
@@ -1,3 +1,3 @@
### Changed

- deprecate beacon api endpoints based on [3.0.0 release](https://github.com/ethereum/beacon-APIs/pull/506) for electra
- deprecate beacon api endpoints based on [3.0.0 release](https://github.com/ethereum/beacon-APIs/pull/506) for electra
@@ -1,3 +1,3 @@
### Fixed

- fixed max and target blob per block from static to dynamic values
- fixed max and target blob per block from static to dynamic values
3 changelog/james-prysm_fix-blobl-subnet-config.md Normal file
@@ -0,0 +1,3 @@
### Fixed

- fixed /eth/v1/config/spec displays BLOB_SIDECAR_SUBNET_COUNT,BLOB_SIDECAR_SUBNET_COUNT_ELECTRA
4 changelog/james-prysm_fix-builder-e2e.md Normal file
@@ -0,0 +1,4 @@
### Fixed

- Fixes printing superfluous response.WriteHeader call from error in builder.
- Fixes e2e run with builder having wrong gaslimit header due to not being set on eth1 nodes.
@@ -1,3 +1,3 @@
### Fixed

- adding in content type and accept headers for builder API call on registration
- adding in content type and accept headers for builder API call on registration
@@ -1,3 +1,3 @@
### Fixed

- cosmetic fix for post electra validator logs displaying attestation committee information correctly.
- cosmetic fix for post electra validator logs displaying attestation committee information correctly.
@@ -1,3 +1,3 @@
### Fixed

- fix inserting the wrong committee index into the seen cache for electra attestations
- fix inserting the wrong committee index into the seen cache for electra attestations
@@ -1,3 +1,3 @@
### Changed

- execution requests errors on ssz length have been improved
- execution requests errors on ssz length have been improved
3 changelog/james-prysm_update-default-gas-limit.md Normal file
@@ -0,0 +1,3 @@
### Changed

- Updated default gas limit from 30M to 36M
@@ -1,3 +1,3 @@
### Added

- enable web3signer E2E for electra
- enable web3signer E2E for electra
2 changelog/kasey_delete-block-idx.md Normal file
@@ -0,0 +1,2 @@
### Fixed
- Ensure that deleting a block from the database clears its entry in the slot->root db index.
2 changelog/kasey_event-slot-fix.md Normal file
@@ -0,0 +1,2 @@
### Fixed
- Fixed a bug in the event stream handler when processing payload attribute events where the timestamp and slot of the event would be based on the head rather than the current slot.
@@ -1,2 +1,2 @@
### Fixed
- Fixed the `bazel run //:gazelle` command in `DEPENDENCIES.md`.
- Fixed the `bazel run //:gazelle` command in `DEPENDENCIES.md`.
@@ -1,3 +1,3 @@
### Removed

- Remove Fulu state and block
- Remove Fulu state and block
@@ -1,2 +1,2 @@
### Removed
- Removed the log summarizing all started services.
- Removed the log summarizing all started services.
@@ -5,4 +5,4 @@

### Changed

- Tracked validators cache: Remove validators from the cache if not seen after 1 hour.
- Tracked validators cache: Remove validators from the cache if not seen after 1 hour.
@@ -1,3 +1,3 @@
## Changed

- `--validators-registration-batch-size`: Change default value from `0` to `200`.
- `--validators-registration-batch-size`: Change default value from `0` to `200`.
@@ -1,3 +1,3 @@
### Added

- Added deposit request testing for electra.
- Added deposit request testing for electra.
@@ -1,4 +1,4 @@
### Added

- Enable multiclient E2E for electra
- Enable Scenario E2E tests with electra
- Enable Scenario E2E tests with electra
@@ -1,3 +1,3 @@
### Fixed

- Check for the correct attester slashing type during gossip validation.
- Check for the correct attester slashing type during gossip validation.
@@ -1,3 +1,3 @@
### Fixed

- Allow any block type to be unmarshaled rather than only phase0 blocks in `slotByBlockRoot`.
- Allow any block type to be unmarshaled rather than only phase0 blocks in `slotByBlockRoot`.
@@ -1,3 +1,3 @@
### Fixed

- Only check for electra related engine methods if electra is active.
- Only check for electra related engine methods if electra is active.
@@ -1,3 +1,3 @@
### Fixed

- Fix E2E Deposit Activation Evaluator for Electra.
- Fix E2E Deposit Activation Evaluator for Electra.
@@ -1,3 +1,3 @@
### Fixed

- Fix E2E Process Deposit Evaluator for Electra.
- Fix E2E Process Deposit Evaluator for Electra.
@@ -1,3 +1,3 @@
### Added

- Log execution requests in each block.
- Log execution requests in each block.
@@ -1,3 +1,3 @@
### Changed

- Updates blst to v3.14.0 and fixes the references in our deps.bzl file.
- Updates blst to v3.14.0 and fixes the references in our deps.bzl file.
3 changelog/potuz_last_validated.md Normal file
@@ -0,0 +1,3 @@
### Ignored

- When starting a node, check that the last validated checkpoint has zero as root and return the genesis block root
@@ -1,3 +1,3 @@
### Ignored

- Add logs for RPC handlers added/removed at forks.
- Add logs for RPC handlers added/removed at forks.
3 changelog/potuz_sync_from_head.md Normal file
@@ -0,0 +1,3 @@
### Added

- Added a feature flag to sync from an arbitrary beacon block root at startup.
3 changelog/pvl_bls-ctx.md Normal file
@@ -0,0 +1,3 @@
### Fixed

- Broadcasting BLS to execution changes should not use the request context in a go routine. Use context.Background() for the broadcasting go routine.
@@ -1,3 +1,3 @@
### Fixed

- Decompose Electra block attestations to prevent redundant packing.
- Decompose Electra block attestations to prevent redundant packing.
3 changelog/radek_ignore-bit-error.md Normal file
@@ -0,0 +1,3 @@
### Changed

- Ignore errors from `hasSeenBit` and don't pack unaggregated attestations.
3 changelog/radek_minor-attestation-tweaks.md Normal file
@@ -0,0 +1,3 @@
### Fixed

- Handle unaggregated attestations when decomposing Electra block attestations.
@@ -1,3 +1,3 @@
### Fixed

- Dedicated processing of `SingleAttestation` in the monitor service.
- Dedicated processing of `SingleAttestation` in the monitor service.
@@ -1,3 +1,3 @@
### Changed

- Don't use MaxCover for Electra on-chain attestations.
- Don't use MaxCover for Electra on-chain attestations.
@@ -1,3 +1,3 @@
### Ignored

- Rename files in `beacon-chain/operations/slashings`.
- Rename files in `beacon-chain/operations/slashings`.
@@ -1,3 +1,3 @@
### Added

- Add endpoint for getting pending deposits.
- Add endpoint for getting pending deposits.
@@ -1,3 +1,3 @@
### Added

- Add acceptable address types for static peers
- Add acceptable address types for static peers
@@ -1,3 +1,3 @@
### Added

- Add request hash to header for builder: executable data to block
- Add request hash to header for builder: executable data to block
@@ -1,3 +1,3 @@
### Ignored

- Add blobs by range electra test
- Add blobs by range electra test
@@ -1,3 +1,3 @@
### Ignored

- Add more debugging information to mismatch fork digest error message
- Add more debugging information to mismatch fork digest error message
@@ -1,3 +1,3 @@
### Changed

- Validate blob sidecar re-order signature and bad parent block.
- Validate blob sidecar re-order signature and bad parent block.
@@ -1,3 +1,3 @@
### Ignored

- Add more debugging information to validate range debug message
- Add more debugging information to validate range debug message
@@ -1 +1 @@
[{"message":{"validator_index":"0","from_bls_pubkey":"0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c","to_execution_address":"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b"},"signature":"0xb6e640f0fc58e9f22585dbf434b6a0e8fc36b98e2f2a963e158716cfc84034141289f7898027de1ec56754937f1a837e01c7b066a6a56af7a379f8aec823d050788a5ecc799e9bc39f73d45b7c389c961cbaace61823e4c7bf2f93bd06c03127"},{"message":{"validator_index":"1","from_bls_pubkey":"0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b","to_execution_address":"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b"},"signature":"0xa97103e15d3dbdaa75fb15cea782e4a11329eea77d155864ec682d7907b3b70c7771960bef7be1b1c4e08fe735888b950c1a22053f6049b35736f48e6dd018392efa3896c9e427ea4e100e86e9131b5ea2673388a4bf188407a630ba405b7dc5"}]
[{"message":{"validator_index":"0","from_bls_pubkey":"0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c","to_execution_address":"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b"},"signature":"0xb6e640f0fc58e9f22585dbf434b6a0e8fc36b98e2f2a963e158716cfc84034141289f7898027de1ec56754937f1a837e01c7b066a6a56af7a379f8aec823d050788a5ecc799e9bc39f73d45b7c389c961cbaace61823e4c7bf2f93bd06c03127"},{"message":{"validator_index":"1","from_bls_pubkey":"0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b","to_execution_address":"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b"},"signature":"0xa97103e15d3dbdaa75fb15cea782e4a11329eea77d155864ec682d7907b3b70c7771960bef7be1b1c4e08fe735888b950c1a22053f6049b35736f48e6dd018392efa3896c9e427ea4e100e86e9131b5ea2673388a4bf188407a630ba405b7dc5"}]
Some files were not shown because too many files have changed in this diff.