Compare commits


1 Commit

Author       SHA1        Message    Date
Raul Jordan  18fbc8aecb  sync logs  2022-08-17 21:31:49 -04:00
215 changed files with 2526 additions and 5182 deletions

View File

@@ -33,7 +33,7 @@ jobs:
- name: Run Gosec Security Scanner
run: | # https://github.com/securego/gosec/issues/469
export PATH=$PATH:$(go env GOPATH)/bin
go install github.com/securego/gosec/v2/cmd/gosec@v2.12.0
go install github.com/securego/gosec/v2/cmd/gosec@latest
gosec -exclude=G307 -exclude-dir=crypto/bls/herumi ./...
lint:
@@ -53,7 +53,6 @@ jobs:
uses: golangci/golangci-lint-action@v3
with:
version: v1.47.2
args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number
build:
name: Build

View File

@@ -13,7 +13,7 @@ linters:
enable:
- gofmt
- goimports
- unused
- deadcode
- errcheck
- gosimple
- gocognit

View File

@@ -2,8 +2,8 @@
[![Build status](https://badge.buildkite.com/b555891daf3614bae4284dcf365b2340cefc0089839526f096.svg?branch=master)](https://buildkite.com/prysmatic-labs/prysm)
[![Go Report Card](https://goreportcard.com/badge/github.com/prysmaticlabs/prysm)](https://goreportcard.com/report/github.com/prysmaticlabs/prysm)
[![Consensus_Spec_Version 1.2.0-rc.3](https://img.shields.io/badge/Consensus%20Spec%20Version-v1.2.0.rc.3-blue.svg)](https://github.com/ethereum/consensus-specs/tree/v1.2.0-rc.3)
[![Execution_API_Version 1.0.0-beta.1](https://img.shields.io/badge/Execution%20API%20Version-v1.0.0.beta.1-blue.svg)](https://github.com/ethereum/execution-apis/tree/v1.0.0-beta.1/src/engine)
[![Consensus_Spec_Version 1.2.0-rc.1](https://img.shields.io/badge/Consensus%20Spec%20Version-v1.2.0.rc.1-blue.svg)](https://github.com/ethereum/consensus-specs/tree/v1.2.0-rc.1)
[![Execution_API_Version 1.0.0-alpha.9](https://img.shields.io/badge/Execution%20API%20Version-v1.0.0.alpha.9-blue.svg)](https://github.com/ethereum/execution-apis/tree/v1.0.0-alpha.9/src/engine)
[![Discord](https://user-images.githubusercontent.com/7288322/34471967-1df7808a-efbb-11e7-9088-ed0b04151291.png)](https://discord.gg/CTYGPUJ)
[![GitPOAP Badge](https://public-api.gitpoap.io/v1/repo/prysmaticlabs/prysm/badge)](https://www.gitpoap.io/gh/prysmaticlabs/prysm)

View File

@@ -28,7 +28,7 @@ load("@com_grail_bazel_toolchain//toolchain:rules.bzl", "llvm_toolchain")
llvm_toolchain(
name = "llvm_toolchain",
llvm_version = "13.0.1",
llvm_version = "10.0.0",
)
load("@llvm_toolchain//:toolchains.bzl", "llvm_register_toolchains")
@@ -215,7 +215,7 @@ filegroup(
url = "https://github.com/eth-clients/slashing-protection-interchange-tests/archive/b8413ca42dc92308019d0d4db52c87e9e125c4e9.tar.gz",
)
consensus_spec_version = "v1.2.0-rc.3"
consensus_spec_version = "v1.2.0-rc.2"
bls_test_version = "v0.1.1"
@@ -231,7 +231,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "18ca21497f41042cdbe60e2333b100d218b2994fb514964b9deb23daf615a12f",
sha256 = "eff52923eebbed6e37a5282db5290abe67c68d6aa54302e3db2b0718c3edf867",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)
@@ -247,7 +247,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "47b8f6fabe39b4a69f13054ba74e26ab51581ddbd359c18cf0f03317474e299c",
sha256 = "6183d39d40ae659347e8bcfa435cbbe6de8c19ab327b61d47f906bb087bc7a67",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)
@@ -263,7 +263,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "a061efc05429b169393c32dc2633a948269461b0fe681f11d41e170a880dcc71",
sha256 = "894d16608d7d37a8f6206165e6c2b6ffcc45b13152b5f411e9283b005ca9793f",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)
@@ -278,7 +278,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "753d51c6a6cc6df101c897e4bea77f73b271f50aeda74440f412514d4bd88a86",
sha256 = "006674e5d7eee613f1155e154ab97f6b57589ec92e6e3e5f7affd2b53581e907",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)

View File

@@ -21,13 +21,14 @@ import (
// OriginData represents the BeaconState and SignedBeaconBlock necessary to start an empty Beacon Node
// using Checkpoint Sync.
type OriginData struct {
sb []byte
bb []byte
st state.BeaconState
b interfaces.SignedBeaconBlock
vu *detect.VersionedUnmarshaler
br [32]byte
sr [32]byte
wsd *WeakSubjectivityData
sb []byte
bb []byte
st state.BeaconState
b interfaces.SignedBeaconBlock
vu *detect.VersionedUnmarshaler
br [32]byte
sr [32]byte
}
// SaveBlock saves the downloaded block to a unique file in the given path.

View File

@@ -95,6 +95,8 @@ func WithTimeout(timeout time.Duration) ClientOpt {
// Client provides a collection of helper methods for calling the Eth Beacon Node API endpoints.
type Client struct {
hc *http.Client
host string
scheme string
baseURL *url.URL
}
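
For context on the two endpoint representations in this hunk: holding a single *url.URL lets request URLs be derived with the standard library instead of concatenating scheme and host by hand. A minimal sketch with illustrative values (not Prysm's actual helper):

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Base endpoint held as a *url.URL, as on one side of the hunk above.
	base := &url.URL{Scheme: "http", Host: "localhost:3500"}
	// Derive a full endpoint URL with ResolveReference rather than
	// concatenating scheme + host + path strings.
	full := base.ResolveReference(&url.URL{Path: "/eth/v1/node/version"})
	fmt.Println(full.String()) // http://localhost:3500/eth/v1/node/version
}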

View File

@@ -274,6 +274,9 @@ func non200Err(response *http.Response) error {
if err != nil {
body = "(Unable to read response body.)"
} else {
if jsonErr := json.Unmarshal(bodyBytes, &errMessage); jsonErr != nil {
return errors.Wrap(jsonErr, "unable to read response body")
}
body = "response body:\n" + string(bodyBytes)
}
msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
@@ -282,25 +285,13 @@ func non200Err(response *http.Response) error {
log.WithError(ErrNoContent).Debug(msg)
return ErrNoContent
case 400:
if jsonErr := json.Unmarshal(bodyBytes, &errMessage); jsonErr != nil {
return errors.Wrap(jsonErr, "unable to read response body")
}
log.WithError(ErrBadRequest).Debug(msg)
return errors.Wrap(ErrBadRequest, errMessage.Message)
case 404:
if jsonErr := json.Unmarshal(bodyBytes, &errMessage); jsonErr != nil {
return errors.Wrap(jsonErr, "unable to read response body")
}
log.WithError(ErrNotFound).Debug(msg)
return errors.Wrap(ErrNotFound, errMessage.Message)
case 500:
if jsonErr := json.Unmarshal(bodyBytes, &errMessage); jsonErr != nil {
return errors.Wrap(jsonErr, "unable to read response body")
}
log.WithError(ErrNotOK).Debug(msg)
return errors.Wrap(ErrNotOK, errMessage.Message)
default:
log.WithError(ErrNotOK).Debug(msg)
return errors.Wrap(ErrNotOK, fmt.Sprintf("unsupported error code: %d", response.StatusCode))
return errors.Wrap(ErrNotOK, errMessage.Message)
}
}
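
The hunks above consolidate the per-status json.Unmarshal calls into a single decode before the switch. A minimal sketch of that consolidated shape, assuming errMessage is a struct with a Message field (names are illustrative):

// Sketch only: decode the error body once, then branch on the status code.
var errMessage struct {
	Message string `json:"message"`
}
if jsonErr := json.Unmarshal(bodyBytes, &errMessage); jsonErr != nil {
	return errors.Wrap(jsonErr, "unable to read response body")
}
switch response.StatusCode {
case 400:
	return errors.Wrap(ErrBadRequest, errMessage.Message)
case 404:
	return errors.Wrap(ErrNotFound, errMessage.Message)
default:
	return errors.Wrap(ErrNotOK, errMessage.Message)
}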

View File

@@ -144,23 +144,6 @@ func TestClient_GetHeader(t *testing.T) {
_, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
require.ErrorIs(t, err, ErrNotOK)
hc = &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, expectedPath, r.URL.Path)
return &http.Response{
StatusCode: http.StatusNoContent,
Body: io.NopCloser(bytes.NewBuffer([]byte("No header is available."))),
Request: r.Clone(ctx),
}, nil
}),
}
c = &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
_, err = c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
require.ErrorIs(t, err, ErrNoContent)
hc = &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, expectedPath, r.URL.Path)

View File

@@ -89,7 +89,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
}
return payloadID, nil
case execution.ErrInvalidPayloadStatus:
forkchoiceUpdatedInvalidNodeCount.Inc()
newPayloadInvalidNodeCount.Inc()
headRoot := arg.headRoot
if len(lastValidHash) == 0 {
lastValidHash = defaultLatestValidHash

View File

@@ -1155,18 +1155,6 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
require.Equal(t, false, optimistic)
require.DeepEqual(t, validCheckpoint.Root, cp.Root)
require.Equal(t, validCheckpoint.Epoch, cp.Epoch)
// Checkpoint with a lower epoch
oldCp, err := service.cfg.BeaconDB.FinalizedCheckpoint(ctx)
require.NoError(t, err)
invalidCp := &ethpb.Checkpoint{
Epoch: oldCp.Epoch - 1,
}
// Nothing should happen as we no-op on an invalid checkpoint.
require.NoError(t, service.updateFinalized(ctx, invalidCp))
got, err := service.cfg.BeaconDB.FinalizedCheckpoint(ctx)
require.NoError(t, err)
require.DeepEqual(t, oldCp, got)
}
func TestService_removeInvalidBlockAndState(t *testing.T) {

View File

@@ -17,7 +17,6 @@ import (
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v3/math"
ethpbv1 "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"
"github.com/prysmaticlabs/prysm/v3/time/slots"
"github.com/sirupsen/logrus"
@@ -88,6 +87,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
return nil
}
// A chain re-org occurred, so we fire an event notifying the rest of the services.
s.headLock.RLock()
oldHeadBlock, err := s.headBlock()
if err != nil {
@@ -98,23 +98,12 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
headSlot := s.HeadSlot()
newHeadSlot := headBlock.Block().Slot()
newStateRoot := headBlock.Block().StateRoot()
// A chain re-org occurred, so we fire an event notifying the rest of the services.
if bytesutil.ToBytes32(headBlock.Block().ParentRoot()) != oldHeadRoot {
commonRoot, forkSlot, err := s.ForkChoicer().CommonAncestor(ctx, oldHeadRoot, newHeadRoot)
if err != nil {
log.WithError(err).Error("Could not find common ancestor root")
commonRoot = params.BeaconConfig().ZeroHash
}
log.WithFields(logrus.Fields{
"newSlot": fmt.Sprintf("%d", newHeadSlot),
"newRoot": fmt.Sprintf("%#x", newHeadRoot),
"oldSlot": fmt.Sprintf("%d", headSlot),
"oldRoot": fmt.Sprintf("%#x", oldHeadRoot),
"commonAncestorRoot": fmt.Sprintf("%#x", commonRoot),
"distance": headSlot + newHeadSlot - 2*forkSlot,
"depth": math.Max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot)),
}).Info("Chain reorg occurred")
"newSlot": fmt.Sprintf("%d", newHeadSlot),
"oldSlot": fmt.Sprintf("%d", headSlot),
}).Debug("Chain reorg occurred")
absoluteSlotDifference := slots.AbsoluteValueSlotDifference(newHeadSlot, headSlot)
isOptimistic, err := s.IsOptimistic(ctx)
if err != nil {
return errors.Wrap(err, "could not check if node is optimistically synced")
@@ -123,7 +112,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
Type: statefeed.Reorg,
Data: &ethpbv1.EventChainReorg{
Slot: newHeadSlot,
Depth: math.Max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot)),
Depth: absoluteSlotDifference,
OldHeadBlock: oldHeadRoot[:],
NewHeadBlock: newHeadRoot[:],
OldHeadState: oldStateRoot,
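
One side of the hunk above computes reorg depth as an absolute slot difference. A hypothetical sketch of such a helper, assuming only that types.Slot is an unsigned integer (not necessarily Prysm's exact implementation):

// |a - b| computed without underflowing an unsigned slot type.
func absoluteSlotDifference(a, b types.Slot) uint64 {
	if a > b {
		return uint64(a - b)
	}
	return uint64(b - a)
}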
@@ -344,7 +333,7 @@ func (s *Service) notifyNewHeadEvent(
// This saves the attestations between `orphanedRoot` and the common ancestor root that is derived using `newHeadRoot`.
// It also filters out attestations that are older than one epoch as a defense so that invalid attestations don't flow into the attestation pool.
func (s *Service) saveOrphanedAtts(ctx context.Context, orphanedRoot [32]byte, newHeadRoot [32]byte) error {
commonAncestorRoot, _, err := s.ForkChoicer().CommonAncestor(ctx, newHeadRoot, orphanedRoot)
commonAncestorRoot, err := s.ForkChoicer().CommonAncestorRoot(ctx, newHeadRoot, orphanedRoot)
switch {
// Exit early if there's no common ancestor and root doesn't exist, there would be nothing to save.
case errors.Is(err, forkchoice.ErrUnknownCommonAncestor):

View File

@@ -150,8 +150,6 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
assert.DeepEqual(t, newHeadSignedBlock, pb, "Head did not change")
assert.DeepSSZEqual(t, headState.CloneInnerState(), service.headState(ctx).CloneInnerState(), "Head did not change")
require.LogsContain(t, hook, "Chain reorg occurred")
require.LogsContain(t, hook, "distance=1")
require.LogsContain(t, hook, "depth=1")
}
func TestCacheJustifiedStateBalances_CanCache(t *testing.T) {

View File

@@ -8,7 +8,6 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v3/config/params"
consensusBlocks "github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
@@ -52,15 +51,10 @@ func logStateTransitionData(b interfaces.BeaconBlock) error {
}
log = log.WithField("payloadHash", fmt.Sprintf("%#x", bytesutil.Trunc(p.BlockHash())))
txs, err := p.Transactions()
switch {
case errors.Is(err, consensusBlocks.ErrUnsupportedGetter):
case err != nil:
if err != nil {
return err
default:
log = log.WithField("txCount", len(txs))
txsPerSlotCount.Set(float64(len(txs)))
}
log = log.WithField("txCount", len(txs))
}
log.Info("Finished applying state transition")
return nil

View File

@@ -158,47 +158,10 @@ var (
Name: "forkchoice_updated_optimistic_node_count",
Help: "Count the number of optimistic nodes after forkchoiceUpdated EE call",
})
forkchoiceUpdatedInvalidNodeCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "forkchoice_updated_invalid_node_count",
Help: "Count the number of invalid nodes after forkchoiceUpdated EE call",
})
txsPerSlotCount = promauto.NewGauge(prometheus.GaugeOpts{
Name: "txs_per_slot_count",
Help: "Count the number of txs per slot",
})
missedPayloadIDFilledCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "missed_payload_id_filled_count",
Help: "",
})
onBlockProcessingTime = promauto.NewSummary(prometheus.SummaryOpts{
Name: "on_block_processing_milliseconds",
Help: "Total time in milliseconds to complete a call to onBlock()",
})
stateTransitionProcessingTime = promauto.NewSummary(prometheus.SummaryOpts{
Name: "state_transition_processing_milliseconds",
Help: "Total time to call a state transition in onBlock()",
})
processAttsElapsedTime = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "process_attestations_milliseconds",
Help: "Captures latency for process attestations (forkchoice) in milliseconds",
Buckets: []float64{1, 5, 20, 100, 500, 1000},
},
)
newAttHeadElapsedTime = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "new_att_head_milliseconds",
Help: "Captures latency for new attestation head in milliseconds",
Buckets: []float64{1, 5, 20, 100, 500, 1000},
},
)
newBlockHeadElapsedTime = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "new_block_head_milliseconds",
Help: "Captures latency for new block head in milliseconds",
Buckets: []float64{1, 5, 20, 100, 500, 1000},
},
)
)
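
The timing hunks in the neighboring files all follow the same start/Observe pattern these summaries and histograms backed. A minimal sketch of that pattern, with an illustrative metric name:

var exampleOpTime = promauto.NewSummary(prometheus.SummaryOpts{
	Name: "example_op_milliseconds",
	Help: "Total time in milliseconds to complete exampleOp()",
})

func exampleOp() {
	start := time.Now()
	defer func() {
		// Record elapsed wall time in milliseconds, mirroring how
		// onBlockProcessingTime is observed in onBlock() below.
		exampleOpTime.Observe(float64(time.Since(start).Milliseconds()))
	}()
	// ... work ...
}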
// reportSlotMetrics reports slot related metrics.

View File

@@ -98,7 +98,6 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
if err := consensusblocks.BeaconBlockIsNil(signed); err != nil {
return invalidBlock{error: err}
}
startTime := time.Now()
b := signed.Block()
preState, err := s.getBlockPreState(ctx, b)
@@ -116,13 +115,10 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
if err != nil {
return err
}
stateTransitionStartTime := time.Now()
postState, err := transition.ExecuteStateTransition(ctx, preState, signed)
if err != nil {
return invalidBlock{error: err}
}
stateTransitionProcessingTime.Observe(float64(time.Since(stateTransitionStartTime).Milliseconds()))
postStateVersion, postStateHeader, err := getStateVersionAndPayload(postState)
if err != nil {
return err
@@ -143,9 +139,6 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
if err := s.insertBlockToForkchoiceStore(ctx, signed.Block(), blockRoot, postState); err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", signed.Block().Slot())
}
if err := s.handleBlockAttestations(ctx, signed.Block(), postState); err != nil {
return errors.Wrap(err, "could not handle block's attestations")
}
s.InsertSlashingsToForkChoiceStore(ctx, signed.Block().Body().AttesterSlashings())
if isValidPayload {
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, blockRoot); err != nil {
@@ -186,14 +179,10 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
msg := fmt.Sprintf("could not read balances for state w/ justified checkpoint %#x", justified.Root)
return errors.Wrap(err, msg)
}
start := time.Now()
headRoot, err := s.cfg.ForkChoiceStore.Head(ctx, balances)
if err != nil {
log.WithError(err).Warn("Could not update head")
}
newBlockHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
if err := s.notifyEngineIfChangedHead(ctx, headRoot); err != nil {
return err
}
@@ -270,11 +259,7 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
}
defer reportAttestationInclusion(b)
if err := s.handleEpochBoundary(ctx, postState); err != nil {
return err
}
onBlockProcessingTime.Observe(float64(time.Since(startTime).Milliseconds()))
return nil
return s.handleEpochBoundary(ctx, postState)
}
func getStateVersionAndPayload(st state.BeaconState) (int, *enginev1.ExecutionPayloadHeader, error) {
@@ -531,29 +516,6 @@ func (s *Service) insertBlockToForkchoiceStore(ctx context.Context, blk interfac
return nil
}
// This feeds the attestations included in the block into the fork choice store. It allows the fork choice
// store to gain information on the most current chain.
func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.BeaconBlock, st state.BeaconState) error {
// Feed in block's attestations to fork choice store.
for _, a := range blk.Body().Attestations() {
committee, err := helpers.BeaconCommitteeFromState(ctx, st, a.Data.Slot, a.Data.CommitteeIndex)
if err != nil {
return err
}
indices, err := attestation.AttestingIndices(a.AggregationBits, committee)
if err != nil {
return err
}
r := bytesutil.ToBytes32(a.Data.BeaconBlockRoot)
if s.cfg.ForkChoiceStore.HasNode(r) {
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indices, r, a.Data.Target.Epoch)
} else if err := s.cfg.AttPool.SaveBlockAttestation(a); err != nil {
return err
}
}
return nil
}
// InsertSlashingsToForkChoiceStore inserts attester slashing indices to fork choice store.
// To call this function, it is the caller's responsibility to ensure the slashing object is valid.
func (s *Service) InsertSlashingsToForkChoiceStore(ctx context.Context, slashings []*ethpb.AttesterSlashing) {

View File

@@ -135,21 +135,11 @@ func (s *Service) verifyBlkFinalizedSlot(b interfaces.BeaconBlock) error {
}
// updateFinalized saves the init sync blocks, finalized checkpoint, migrates
// to cold old states and saves the last validated checkpoint to DB. It returns
// early if the new checkpoint is older than the one on db.
// to cold old states and saves the last validated checkpoint to DB
func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) error {
ctx, span := trace.StartSpan(ctx, "blockChain.updateFinalized")
defer span.End()
// return early if new checkpoint is not newer than the one in DB
currentFinalized, err := s.cfg.BeaconDB.FinalizedCheckpoint(ctx)
if err != nil {
return err
}
if cp.Epoch <= currentFinalized.Epoch {
return nil
}
// Blocks need to be saved so that we can retrieve finalized block from
// DB when migrating states.
if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
@@ -277,7 +267,7 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
if len(pendingNodes) == 1 {
return nil
}
if root != s.ensureRootNotZeros(finalized.Root) && !s.ForkChoicer().HasNode(root) {
if root != s.ensureRootNotZeros(finalized.Root) {
return errNotDescendantOfFinalized
}
return s.cfg.ForkChoiceStore.InsertOptimisticChain(ctx, pendingNodes)

View File

@@ -3006,9 +3006,10 @@ func TestStore_NoViableHead_Reboot_DoublyLinkedTree(t *testing.T) {
headRoot, err := service.HeadRoot(ctx)
require.NoError(t, err)
require.Equal(t, genesisRoot, bytesutil.ToBytes32(headRoot))
// The node is optimistic now.
optimistic, err := service.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, false, optimistic)
require.Equal(t, true, optimistic)
require.Equal(t, false, service.ForkChoicer().AllTipsAreInvalid())
// Check that the node's justified checkpoint does not agree with the
@@ -3229,9 +3230,10 @@ func TestStore_NoViableHead_Reboot_Protoarray(t *testing.T) {
headRoot, err := service.HeadRoot(ctx)
require.NoError(t, err)
require.Equal(t, genesisRoot, bytesutil.ToBytes32(headRoot))
// The node is optimistic now
optimistic, err := service.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, false, optimistic)
require.Equal(t, true, optimistic)
require.Equal(t, false, service.ForkChoicer().AllTipsAreInvalid())
// Check that the node's justified checkpoint does not agree with the
@@ -3312,75 +3314,6 @@ func TestStore_NoViableHead_Reboot_Protoarray(t *testing.T) {
require.Equal(t, false, service.ForkChoicer().AllTipsAreInvalid())
}
func TestOnBlock_HandleBlockAttestations(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
opts := []Option{
WithDatabase(beaconDB),
WithAttestationPool(attestations.NewPool()),
WithStateGen(stategen.New(beaconDB)),
WithForkChoiceStore(doublylinkedtree.New()),
WithStateNotifier(&mock.MockStateNotifier{}),
}
service, err := NewService(ctx, opts...)
require.NoError(t, err)
st, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := st.HashTreeRoot(ctx)
require.NoError(t, err, "Could not hash genesis state")
require.NoError(t, service.saveGenesisData(ctx, st))
genesis := blocks.NewGenesisBlock(stateRoot[:])
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
parentRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err, "Could not get signing root")
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
st, err = service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.onBlock(ctx, wsb, root))
st, err = service.HeadState(ctx)
require.NoError(t, err)
b, err = util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 2)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
// prepare another block that is not inserted
st3, err := transition.ExecuteStateTransition(ctx, st, wsb)
require.NoError(t, err)
b3, err := util.GenerateFullBlock(st3, keys, util.DefaultBlockGenConfig(), 3)
require.NoError(t, err)
wsb3, err := consensusblocks.NewSignedBeaconBlock(b3)
require.NoError(t, err)
require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
a := wsb.Block().Body().Attestations()[0]
r := bytesutil.ToBytes32(a.Data.BeaconBlockRoot)
require.Equal(t, true, service.cfg.ForkChoiceStore.HasNode(r))
require.Equal(t, 1, len(wsb.Block().Body().Attestations()))
a3 := wsb3.Block().Body().Attestations()[0]
r3 := bytesutil.ToBytes32(a3.Data.BeaconBlockRoot)
require.Equal(t, false, service.cfg.ForkChoiceStore.HasNode(r3))
require.NoError(t, service.handleBlockAttestations(ctx, wsb.Block(), st)) // fine to use the same committee as st
require.Equal(t, 0, service.cfg.AttPool.ForkchoiceAttestationCount())
require.NoError(t, service.handleBlockAttestations(ctx, wsb3.Block(), st3)) // fine to use the same committee as st
require.Equal(t, 1, len(service.cfg.AttPool.BlockAttestations()))
}
// Helper function to simulate the block being on time or delayed for proposer
// boost. It alters the genesisTime tracked by the store.
func driftGenesisTime(s *Service, slot int64, delay int64) {

View File

@@ -147,22 +147,17 @@ func (s *Service) UpdateHead(ctx context.Context) error {
s.processAttestationsLock.Lock()
defer s.processAttestationsLock.Unlock()
start := time.Now()
s.processAttestations(ctx)
processAttsElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
justified := s.ForkChoicer().JustifiedCheckpoint()
balances, err := s.justifiedBalances.get(ctx, justified.Root)
if err != nil {
return err
}
start = time.Now()
newHeadRoot, err := s.cfg.ForkChoiceStore.Head(ctx, balances)
if err != nil {
log.WithError(err).Warn("Resolving fork due to new attestation")
}
newAttHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
s.headLock.RLock()
if s.headRoot() != newHeadRoot {
log.WithFields(logrus.Fields{

View File

@@ -150,6 +150,11 @@ func (s *Service) handlePostBlockOperations(b interfaces.BeaconBlock) error {
return err
}
// Add block attestations to the fork choice pool to compute head.
if err := s.cfg.AttPool.SaveBlockAttestations(b.Body().Attestations()); err != nil {
log.WithError(err).Error("Could not save block attestations for fork choice")
return nil
}
// Mark block exits as seen so we don't include same ones in future blocks.
for _, e := range b.Body().VoluntaryExits() {
s.cfg.ExitPool.MarkIncluded(e)

View File

@@ -76,9 +76,9 @@ func TestService_ReceiveBlock(t *testing.T) {
),
},
check: func(t *testing.T, s *Service) {
if baCount := len(s.cfg.AttPool.BlockAttestations()); baCount != 0 {
if baCount := len(s.cfg.AttPool.BlockAttestations()); baCount != 2 {
t.Errorf("Did not get the correct number of block attestations saved to the pool. "+
"Got %d but wanted %d", baCount, 0)
"Got %d but wanted %d", baCount, 2)
}
},
},

View File

@@ -231,15 +231,14 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
if err := forkChoicer.InsertNode(s.ctx, st, fRoot); err != nil {
return errors.Wrap(err, "could not insert finalized block to forkchoice")
}
if !features.Get().EnableStartOptimistic {
lastValidatedCheckpoint, err := s.cfg.BeaconDB.LastValidatedCheckpoint(s.ctx)
if err != nil {
return errors.Wrap(err, "could not get last validated checkpoint")
}
if bytes.Equal(finalized.Root, lastValidatedCheckpoint.Root) {
if err := forkChoicer.SetOptimisticToValid(s.ctx, fRoot); err != nil {
return errors.Wrap(err, "could not set finalized block as validated")
}
lastValidatedCheckpoint, err := s.cfg.BeaconDB.LastValidatedCheckpoint(s.ctx)
if err != nil {
return errors.Wrap(err, "could not get last validated checkpoint")
}
if bytes.Equal(finalized.Root, lastValidatedCheckpoint.Root) {
if err := forkChoicer.SetOptimisticToValid(s.ctx, fRoot); err != nil {
return errors.Wrap(err, "could not set finalized block as validated")
}
}
// not attempting to save initial sync blocks here, because there shouldn't be any until

View File

@@ -26,7 +26,6 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state/stategen"
v1 "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/v1"
"github.com/prysmaticlabs/prysm/v3/config/features"
"github.com/prysmaticlabs/prysm/v3/config/params"
consensusblocks "github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
@@ -529,45 +528,3 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
require.Equal(b, true, s.cfg.ForkChoiceStore.HasNode(r), "Block is not in fork choice store")
}
}
func TestChainService_EverythingOptimistic(t *testing.T) {
resetFn := features.InitWithReset(&features.Flags{
EnableStartOptimistic: true,
})
defer resetFn()
beaconDB := testDB.SetupDB(t)
ctx := context.Background()
genesis := util.NewBeaconBlock()
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
util.SaveBlock(t, ctx, beaconDB, genesis)
finalizedSlot := params.BeaconConfig().SlotsPerEpoch*2 + 1
headBlock := util.NewBeaconBlock()
headBlock.Block.Slot = finalizedSlot
headBlock.Block.ParentRoot = bytesutil.PadTo(genesisRoot[:], 32)
headState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, headState.SetSlot(finalizedSlot))
require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:]))
headRoot, err := headBlock.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
util.SaveBlock(t, ctx, beaconDB, headBlock)
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
attSrv, err := attestations.NewService(ctx, &attestations.Config{})
require.NoError(t, err)
stateGen := stategen.New(beaconDB)
c, err := NewService(ctx, WithDatabase(beaconDB), WithStateGen(stateGen), WithAttestationService(attSrv), WithStateNotifier(&mock.MockStateNotifier{}), WithFinalizedStateAtStartUp(headState))
require.NoError(t, err)
require.NoError(t, stateGen.SaveState(ctx, headRoot, headState))
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
require.NoError(t, c.StartFromSavedState(headState))
require.Equal(t, true, c.cfg.ForkChoiceStore.HasNode(headRoot))
op, err := c.cfg.ForkChoiceStore.IsOptimistic(headRoot)
require.NoError(t, err)
require.Equal(t, true, op)
}

View File

@@ -30,7 +30,8 @@ type WeakSubjectivityVerifier struct {
// NewWeakSubjectivityVerifier validates a checkpoint, and if valid, uses it to initialize a weak subjectivity verifier.
func NewWeakSubjectivityVerifier(wsc *ethpb.Checkpoint, db weakSubjectivityDB) (*WeakSubjectivityVerifier, error) {
if wsc == nil || len(wsc.Root) == 0 || wsc.Epoch == 0 {
log.Debug("--weak-subjectivity-checkpoint not provided")
log.Info("--weak-subjectivity-checkpoint not provided. Prysm recommends providing a weak subjectivity checkpoint " +
"for nodes synced from genesis, or manual verification of block and state roots for checkpoint sync nodes.")
return &WeakSubjectivityVerifier{
enabled: false,
}, nil

View File

@@ -2,6 +2,7 @@ package builder
import (
"context"
"fmt"
"time"
"github.com/pkg/errors"
@@ -65,12 +66,12 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
// Is the builder up?
if err := s.c.Status(ctx); err != nil {
log.WithError(err).Error("Failed to check builder status")
} else {
log.WithField("endpoint", c.NodeURL()).Info("Builder has been configured")
log.Warn("Outsourcing block construction to external builders adds non-trivial delay to block propagation time. " +
"Builder-constructed blocks or fallback blocks may get orphaned. Use at your own risk!")
return nil, fmt.Errorf("could not connect to builder: %v", err)
}
log.WithField("endpoint", c.NodeURL()).Info("Builder has been configured")
log.Warn("Outsourcing block construction to external builders adds non-trivial delay to block propagation time. " +
"Builder-constructed blocks or fallback blocks may get orphaned. Use at your own risk!")
}
return s, nil
}

View File

@@ -185,21 +185,7 @@ func (dc *DepositCache) AllDepositContainers(ctx context.Context) []*ethpb.Depos
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
// Make a shallow copy of the deposits and return that. This way, the
// caller can safely iterate over the returned list of deposits without
// the possibility of new deposits showing up. If we were to return the
// list without a copy, when a new deposit is added to the cache, it
// would also be present in the returned value. This could result in a
// race condition if the list is being iterated over.
//
// It's not necessary to make a deep copy of this list because the
// deposits in the cache should never be modified. It is still possible
// for the caller to modify one of the underlying deposits and modify
// the cache, but that's not a race condition. Also, a deep copy would
// take too long and use too much memory.
deposits := make([]*ethpb.DepositContainer, len(dc.deposits))
copy(deposits, dc.deposits)
return deposits
return dc.deposits
}
// AllDeposits returns a list of historical deposits until the given block number

View File

@@ -55,7 +55,7 @@ func (dc *DepositCache) PendingDeposits(ctx context.Context, untilBlk *big.Int)
depositCntrs := dc.PendingContainers(ctx, untilBlk)
deposits := make([]*ethpb.Deposit, 0, len(depositCntrs))
var deposits []*ethpb.Deposit
for _, dep := range depositCntrs {
deposits = append(deposits, dep.Deposit)
}
@@ -71,7 +71,7 @@ func (dc *DepositCache) PendingContainers(ctx context.Context, untilBlk *big.Int
dc.depositsLock.RLock()
defer dc.depositsLock.RUnlock()
depositCntrs := make([]*ethpb.DepositContainer, 0, len(dc.pendingDeposits))
var depositCntrs []*ethpb.DepositContainer
for _, ctnr := range dc.pendingDeposits {
if untilBlk == nil || untilBlk.Uint64() >= ctnr.Eth1BlockHeight {
depositCntrs = append(depositCntrs, ctnr)
@@ -139,7 +139,7 @@ func (dc *DepositCache) PrunePendingDeposits(ctx context.Context, merkleTreeInde
dc.depositsLock.Lock()
defer dc.depositsLock.Unlock()
cleanDeposits := make([]*ethpb.DepositContainer, 0, len(dc.pendingDeposits))
var cleanDeposits []*ethpb.DepositContainer
for _, dp := range dc.pendingDeposits {
if dp.Index >= merkleTreeIndex {
cleanDeposits = append(cleanDeposits, dp)

View File

@@ -7,7 +7,6 @@ go_library(
"beacon_committee.go",
"block.go",
"genesis.go",
"metrics.go",
"randao.go",
"rewards_penalties.go",
"shuffle.go",

View File

@@ -51,11 +51,10 @@ func ValidateSlotTargetEpoch(data *ethpb.AttestationData) error {
// committee count as an argument allows cheaper computation at run time.
//
// Spec pseudocode definition:
//
// def is_aggregator(state: BeaconState, slot: Slot, index: CommitteeIndex, slot_signature: BLSSignature) -> bool:
// committee = get_beacon_committee(state, slot, index)
// modulo = max(1, len(committee) // TARGET_AGGREGATORS_PER_COMMITTEE)
// return bytes_to_uint64(hash(slot_signature)[0:8]) % modulo == 0
func IsAggregator(committeeCount uint64, slotSig []byte) (bool, error) {
modulo := uint64(1)
if committeeCount/params.BeaconConfig().TargetAggregatorsPerCommittee > 1 {
@@ -69,10 +68,9 @@ func IsAggregator(committeeCount uint64, slotSig []byte) (bool, error) {
// AggregateSignature returns the aggregated signature of the input attestations.
//
// Spec pseudocode definition:
//
// def get_aggregate_signature(attestations: Sequence[Attestation]) -> BLSSignature:
// signatures = [attestation.signature for attestation in attestations]
// return bls.Aggregate(signatures)
func AggregateSignature(attestations []*ethpb.Attestation) (bls.Signature, error) {
sigs := make([]bls.Signature, len(attestations))
var err error
@@ -97,15 +95,14 @@ func IsAggregated(attestation *ethpb.Attestation) bool {
//
// Spec pseudocode definition:
// def compute_subnet_for_attestation(committees_per_slot: uint64, slot: Slot, committee_index: CommitteeIndex) -> uint64:
// """
// Compute the correct subnet for an attestation for Phase 0.
// Note, this mimics expected future behavior where attestations will be mapped to their shard subnet.
// """
// slots_since_epoch_start = uint64(slot % SLOTS_PER_EPOCH)
// committees_since_epoch_start = committees_per_slot * slots_since_epoch_start
//
// """
// Compute the correct subnet for an attestation for Phase 0.
// Note, this mimics expected future behavior where attestations will be mapped to their shard subnet.
// """
// slots_since_epoch_start = uint64(slot % SLOTS_PER_EPOCH)
// committees_since_epoch_start = committees_per_slot * slots_since_epoch_start
//
// return uint64((committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT)
// return uint64((committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT)
func ComputeSubnetForAttestation(activeValCount uint64, att *ethpb.Attestation) uint64 {
return ComputeSubnetFromCommitteeAndSlot(activeValCount, att.Data.CommitteeIndex, att.Data.Slot)
}
@@ -115,15 +112,14 @@ func ComputeSubnetForAttestation(activeValCount uint64, att *ethpb.Attestation)
//
// Spec pseudocode definition:
// def compute_subnet_for_attestation(committees_per_slot: uint64, slot: Slot, committee_index: CommitteeIndex) -> uint64:
// """
// Compute the correct subnet for an attestation for Phase 0.
// Note, this mimics expected future behavior where attestations will be mapped to their shard subnet.
// """
// slots_since_epoch_start = uint64(slot % SLOTS_PER_EPOCH)
// committees_since_epoch_start = committees_per_slot * slots_since_epoch_start
//
// """
// Compute the correct subnet for an attestation for Phase 0.
// Note, this mimics expected future behavior where attestations will be mapped to their shard subnet.
// """
// slots_since_epoch_start = uint64(slot % SLOTS_PER_EPOCH)
// committees_since_epoch_start = committees_per_slot * slots_since_epoch_start
//
// return uint64((committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT)
// return uint64((committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT)
func ComputeSubnetFromCommitteeAndSlot(activeValCount uint64, comIdx types.CommitteeIndex, attSlot types.Slot) uint64 {
slotSinceStart := slots.SinceEpochStarts(attSlot)
comCount := SlotCommitteeCount(activeValCount)
@@ -137,15 +133,13 @@ func ComputeSubnetFromCommitteeAndSlot(activeValCount uint64, comIdx types.Commi
// slots.
//
// Example:
//
// ATTESTATION_PROPAGATION_SLOT_RANGE = 5
// clockDisparity = 24 seconds
// current_slot = 100
// invalid_attestation_slot = 92
// invalid_attestation_slot = 103
// valid_attestation_slot = 98
// valid_attestation_slot = 101
// The attestation must be within the range of 95 to 102 in the example above.
func ValidateAttestationTime(attSlot types.Slot, genesisTime time.Time, clockDisparity time.Duration) error {
if err := slots.ValidateClock(attSlot, uint64(genesisTime.Unix())); err != nil {
@@ -176,19 +170,13 @@ func ValidateAttestationTime(attSlot types.Slot, genesisTime time.Time, clockDis
lowerBounds := lowerTime.Add(-clockDisparity)
// Verify attestation slot within the time range.
attError := fmt.Errorf(
"attestation slot %d not within attestation propagation range of %d to %d (current slot)",
attSlot,
lowerBoundsSlot,
currentSlot,
)
if attTime.Before(lowerBounds) {
attReceivedTooEarlyCount.Inc()
return attError
}
if attTime.After(upperBounds) {
attReceivedTooLateCount.Inc()
return attError
if attTime.Before(lowerBounds) || attTime.After(upperBounds) {
return fmt.Errorf(
"attestation slot %d not within attestation propagation range of %d to %d (current slot)",
attSlot,
lowerBoundsSlot,
currentSlot,
)
}
return nil
}
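
A minimal sketch of the window check the Example above describes, using plain integers (not Prysm's API; 12-second slots assumed, so 24 s of clock disparity is two slots):

// Illustrative only: with rangeSlots = 5, disparitySlots = 2 and
// currentSlot = 100, the tolerated window is [95, 102]: slots 98 and 101
// pass while 92 and 103 are rejected.
func withinPropagationWindow(attSlot, currentSlot, rangeSlots, disparitySlots int) bool {
	lower := currentSlot - rangeSlots     // 100 - 5 = 95
	upper := currentSlot + disparitySlots // 100 + 2 = 102
	return attSlot >= lower && attSlot <= upper
}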

View File

@@ -16,7 +16,7 @@ func UpdateGenesisEth1Data(state state.BeaconState, deposits []*ethpb.Deposit, e
return nil, errors.New("no eth1data provided for genesis state")
}
leaves := make([][]byte, 0, len(deposits))
var leaves [][]byte
for _, deposit := range deposits {
if deposit == nil || deposit.Data == nil {
return nil, fmt.Errorf("nil deposit or deposit with nil data cannot be processed: %v", deposit)

View File

@@ -1,17 +0,0 @@
package helpers
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
var (
attReceivedTooEarlyCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "attestation_too_early_total",
Help: "Increased when an attestation is considered too early",
})
attReceivedTooLateCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "attestation_too_late_total",
Help: "Increased when an attestation is considered too late",
})
)

View File

@@ -57,7 +57,6 @@ go_library(
"//monitoring/tracing:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_dgraph_io_ristretto//:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",

View File

@@ -55,14 +55,6 @@ var (
Name: "validator_entry_cache_delete_total",
Help: "The total number of cache deletes on the validator entry cache.",
})
stateReadingTime = promauto.NewSummary(prometheus.SummaryOpts{
Name: "db_beacon_state_reading_milliseconds",
Help: "Milliseconds it takes to read a beacon state from the DB",
})
stateSavingTime = promauto.NewSummary(prometheus.SummaryOpts{
Name: "db_beacon_state_saving_milliseconds",
Help: "Milliseconds it takes to save a beacon state to the DB",
})
)
// BlockCacheSize specifies 1000 slots worth of blocks cached, which

View File

@@ -20,7 +20,6 @@ import (
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v3/monitoring/tracing"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/time"
"github.com/prysmaticlabs/prysm/v3/time/slots"
bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace"
@@ -31,7 +30,6 @@ import (
func (s *Store) State(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.State")
defer span.End()
startTime := time.Now()
enc, err := s.stateBytes(ctx, blockRoot)
if err != nil {
return nil, err
@@ -46,12 +44,7 @@ func (s *Store) State(ctx context.Context, blockRoot [32]byte) (state.BeaconStat
return nil, valErr
}
st, err := s.unmarshalState(ctx, enc, valEntries)
if err != nil {
return nil, err
}
stateReadingTime.Observe(float64(time.Since(startTime).Milliseconds()))
return st, err
return s.unmarshalState(ctx, enc, valEntries)
}
// StateOrError is just like State(), except it only returns a non-error response
@@ -134,7 +127,6 @@ func (s *Store) SaveStates(ctx context.Context, states []state.ReadOnlyBeaconSta
if states == nil {
return errors.New("nil state")
}
startTime := time.Now()
multipleEncs := make([][]byte, len(states))
for i, st := range states {
stateBytes, err := marshalState(ctx, st)
@@ -144,7 +136,7 @@ func (s *Store) SaveStates(ctx context.Context, states []state.ReadOnlyBeaconSta
multipleEncs[i] = stateBytes
}
if err := s.db.Update(func(tx *bolt.Tx) error {
return s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(stateBucket)
for i, rt := range blockRoots {
indicesByBucket := createStateIndicesFromStateSlot(ctx, states[i].Slot())
@@ -156,11 +148,7 @@ func (s *Store) SaveStates(ctx context.Context, states []state.ReadOnlyBeaconSta
}
}
return nil
}); err != nil {
return err
}
stateSavingTime.Observe(float64(time.Since(startTime).Milliseconds()))
return nil
})
}
type withValidators interface {
@@ -772,10 +760,8 @@ func createStateIndicesFromStateSlot(ctx context.Context, slot types.Slot) map[s
// Only the following states would be kept:
// 1.) state_slot % archived_interval == 0. (e.g. archived_interval=2048, states with slot 2048, 4096... etc)
// 2.) archived_interval - archived_interval/3 < state_slot % archived_interval
// (e.g. archived_interval=2048, states with slots after 1365).
// This is to tolerate skip slots. Not every state lies on the boundary.
// 3.) state with current finalized root
// 4.) unfinalized States
func (s *Store) CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint types.Slot) error {
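
A worked sketch of retention rules 1 and 2 from the comment above (an illustrative standalone predicate, not the function's actual body):

// With archivedInterval = 2048: rule 1 keeps boundary slots (2048, 4096, ...),
// and rule 2 keeps slots in the last third of each interval, i.e. remainders
// above 2048 - 2048/3 = 1366, matching the "slots after 1365" example.
func keptByArchiveRules(slot, archivedInterval uint64) bool {
	rem := slot % archivedInterval
	return rem == 0 || rem > archivedInterval-archivedInterval/3
}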

View File

@@ -24,18 +24,12 @@ var (
configMismatchLog = "Configuration mismatch between your execution client and Prysm. " +
"Please check your execution client and restart it with the proper configuration. If this is not done, " +
"your node will not be able to complete the proof-of-stake transition"
needsEnginePortLog = "Could not check execution client configuration. " +
"You are probably connecting to your execution client on the wrong port. For the Ethereum " +
"merge, you will need to connect to your " +
"execution client on port 8551 rather than 8545. This is known as the 'engine API' port and needs to be " +
authenticated if connecting via HTTP. See our documentation on how to set this up here
"https://docs.prylabs.network/docs/execution-node/authentication"
)
// Checks the transition configuration between Prysm and the connected execution node to ensure
// there are no differences in terminal block difficulty and block hash.
// If there are any discrepancies, we must log errors to ensure users can resolve
// the problem and be ready for the merge transition.
func (s *Service) checkTransitionConfiguration(
ctx context.Context, blockNotifications chan *feed.Event,
) {
@@ -54,14 +48,10 @@ func (s *Service) checkTransitionConfiguration(
}
err := s.ExchangeTransitionConfiguration(ctx, cfg)
if err != nil {
switch {
case errors.Is(err, ErrConfigMismatch):
if errors.Is(err, ErrConfigMismatch) {
log.WithError(err).Fatal(configMismatchLog)
case errors.Is(err, ErrMethodNotFound):
log.WithError(err).Error(needsEnginePortLog)
default:
log.WithError(err).Error("Could not check configuration values between execution and consensus client")
}
log.WithError(err).Error("Could not check configuration values between execution and consensus client")
}
// We poll the execution client to see if the transition configuration has changed.
@@ -125,9 +115,6 @@ func (s *Service) handleExchangeConfigurationError(err error) {
s.runError = err
log.WithError(err).Error(configMismatchLog)
return
} else if errors.Is(err, ErrMethodNotFound) {
log.WithError(err).Error(needsEnginePortLog)
return
}
log.WithError(err).Error("Could not check configuration values between execution and consensus client")
}

View File

@@ -537,31 +537,22 @@ func handleRPCError(err error) error {
}
switch e.ErrorCode() {
case -32700:
errParseCount.Inc()
return ErrParse
case -32600:
errInvalidRequestCount.Inc()
return ErrInvalidRequest
case -32601:
errMethodNotFoundCount.Inc()
return ErrMethodNotFound
case -32602:
errInvalidParamsCount.Inc()
return ErrInvalidParams
case -32603:
errInternalCount.Inc()
return ErrInternal
case -38001:
errUnknownPayloadCount.Inc()
return ErrUnknownPayload
case -38002:
errInvalidForkchoiceStateCount.Inc()
return ErrInvalidForkchoiceState
case -38003:
errInvalidPayloadAttributesCount.Inc()
return ErrInvalidPayloadAttributes
case -32000:
errServerErrorCount.Inc()
// Only -32000 status codes are data errors in the RPC specification.
errWithData, ok := err.(rpc.DataError)
if !ok {

View File

@@ -403,10 +403,6 @@ func (s *Service) processBlockInBatch(ctx context.Context, currentBlockNum uint6
}
}
s.latestEth1DataLock.RLock()
lastReqBlock := s.latestEth1Data.LastRequestedBlock
s.latestEth1DataLock.RUnlock()
for _, filterLog := range logs {
if filterLog.BlockNumber > currentBlockNum {
if err := s.checkHeaderRange(ctx, currentBlockNum, filterLog.BlockNumber-1, headersMap, requestHeaders); err != nil {
@@ -419,13 +415,6 @@ func (s *Service) processBlockInBatch(ctx context.Context, currentBlockNum uint6
currentBlockNum = filterLog.BlockNumber
}
if err := s.ProcessLog(ctx, filterLog); err != nil {
// In the event the execution client gives us a garbled/bad log
// we reset the last requested block to the previous valid block range. This
// prevents the beacon from advancing processing of logs to another range
// in the event of an execution client failure.
s.latestEth1DataLock.Lock()
s.latestEth1Data.LastRequestedBlock = lastReqBlock
s.latestEth1DataLock.Unlock()
return 0, 0, err
}
}

View File

@@ -31,42 +31,6 @@ var (
Buckets: []float64{25, 50, 100, 200, 500, 1000, 2000, 4000},
},
)
errParseCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "execution_parse_error_count",
Help: "The number of errors that occurred while parsing execution payload",
})
errInvalidRequestCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "execution_invalid_request_count",
Help: "The number of errors that occurred due to invalid request",
})
errMethodNotFoundCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "execution_method_not_found_count",
Help: "The number of errors that occurred due to method not found",
})
errInvalidParamsCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "execution_invalid_params_count",
Help: "The number of errors that occurred due to invalid params",
})
errInternalCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "execution_internal_error_count",
Help: "The number of errors that occurred due to internal error",
})
errUnknownPayloadCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "execution_unknown_payload_count",
Help: "The number of errors that occurred due to unknown payload",
})
errInvalidForkchoiceStateCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "execution_invalid_forkchoice_state_count",
Help: "The number of errors that occurred due to invalid forkchoice state",
})
errInvalidPayloadAttributesCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "execution_invalid_payload_attributes_count",
Help: "The number of errors that occurred due to invalid payload attributes",
})
errServerErrorCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "execution_server_error_count",
Help: "The number of errors that occurred due to server error",
})
reconstructedExecutionPayloadCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "reconstructed_execution_payload_count",
Help: "Count the number of execution payloads that are reconstructed using JSON-RPC from payload headers",

View File

@@ -36,14 +36,6 @@ func WithHttpEndpointAndJWTSecret(endpointString string, secret []byte) Option {
}
}
// WithHeaders adds headers to the execution node JSON-RPC requests.
func WithHeaders(headers []string) Option {
return func(s *Service) error {
s.cfg.headers = headers
return nil
}
}
// WithDepositContractAddress for the deposit contract.
func WithDepositContractAddress(addr common.Address) Option {
return func(s *Service) error {

View File

@@ -4,7 +4,6 @@ import (
"context"
"fmt"
"net/url"
"strings"
"time"
"github.com/ethereum/go-ethereum/ethclient"
@@ -38,13 +37,7 @@ func (s *Service) setupExecutionClientConnections(ctx context.Context, currEndpo
// Ensure we have the correct chain and deposit IDs.
if err := ensureCorrectExecutionChain(ctx, fetcher); err != nil {
client.Close()
errStr := err.Error()
if strings.Contains(errStr, "401 Unauthorized") {
errStr = "could not verify execution chain ID as your connection is not authenticated. " +
"If connecting to your execution client via HTTP, you will need to set up JWT authentication. " +
"See our documentation here https://docs.prylabs.network/docs/execution-node/authentication"
}
return errors.Wrap(err, errStr)
return errors.Wrap(err, "could not make initial request to verify execution chain ID")
}
s.updateConnectedETH1(true)
s.runError = nil
@@ -72,6 +65,7 @@ func (s *Service) pollConnectionStatus(ctx context.Context) {
currClient := s.rpcClient
if err := s.setupExecutionClientConnections(ctx, s.cfg.currHttpEndpoint); err != nil {
errorLogger(err, "Could not connect to execution client endpoint")
s.retryExecutionClientConnection(ctx, err)
continue
}
// Close previous client, if connection was successful.
@@ -120,7 +114,7 @@ func (s *Service) newRPCClientWithAuth(ctx context.Context, endpoint network.End
if err != nil {
return nil, err
}
case "", "ipc":
case "":
client, err = gethRPC.DialIPC(ctx, endpoint.Url)
if err != nil {
return nil, err
@@ -135,16 +129,6 @@ func (s *Service) newRPCClientWithAuth(ctx context.Context, endpoint network.End
}
client.SetHeader("Authorization", header)
}
for _, h := range s.cfg.headers {
if h != "" {
keyValue := strings.Split(h, "=")
if len(keyValue) < 2 {
log.Warnf("Incorrect HTTP header flag format. Skipping %v", keyValue[0])
continue
}
client.SetHeader(keyValue[0], strings.Join(keyValue[1:], "="))
}
}
return client, nil
}

View File

@@ -66,6 +66,10 @@ var (
logThreshold = 8
// period to log chainstart related information
logPeriod = 1 * time.Minute
// threshold of how old we will accept an eth1 node's head to be.
eth1Threshold = 20 * time.Minute
// error when eth1 node is too far behind.
errFarBehind = errors.Errorf("eth1 head is more than %s behind from current wall clock time", eth1Threshold.String())
)
// ChainStartFetcher retrieves information pertaining to the chain start event
@@ -124,7 +128,6 @@ type config struct {
eth1HeaderReqLimit uint64
beaconNodeStatsUpdater BeaconNodeStatsUpdater
currHttpEndpoint network.Endpoint
headers []string
finalizedStateAtStartup state.BeaconState
}
@@ -313,6 +316,11 @@ func (s *Service) updateBeaconNodeStats() {
s.cfg.beaconNodeStatsUpdater.Update(bs)
}
func (s *Service) updateCurrHttpEndpoint(endpoint network.Endpoint) {
s.cfg.currHttpEndpoint = endpoint
s.updateBeaconNodeStats()
}
func (s *Service) updateConnectedETH1(state bool) {
s.connectedETH1 = state
s.updateBeaconNodeStats()
@@ -600,6 +608,11 @@ func (s *Service) run(done <-chan struct{}) {
log.WithError(err).Debug("Could not fetch latest eth1 header")
continue
}
if eth1HeadIsBehind(head.Time) {
s.pollConnectionStatus(s.ctx)
log.WithError(errFarBehind).Debug("Could not get an up to date eth1 header")
continue
}
s.processBlockHeader(head)
s.handleETH1FollowDistance()
case <-chainstartTicker.C:
@@ -825,3 +838,11 @@ func dedupEndpoints(endpoints []string) []string {
}
return newEndpoints
}
// Checks if the provided timestamp is beyond the prescribed bound from
// the current wall clock time.
func eth1HeadIsBehind(timestamp uint64) bool {
timeout := prysmTime.Now().Add(-eth1Threshold)
// check that web3 client is syncing
return time.Unix(int64(timestamp), 0).Before(timeout) // lint:ignore uintcast -- timestamp will not exceed int64 in your lifetime.
}

View File

@@ -146,7 +146,7 @@ func TestStart_OK(t *testing.T) {
WithDepositContractAddress(testAcc.ContractAddr),
WithDatabase(beaconDB),
)
require.NoError(t, err, "unable to setup execution service")
require.NoError(t, err, "unable to setup web3 ETH1.0 chain service")
web3Service = setDefaultMocks(web3Service)
web3Service.rpcClient = &mockExecution.RPCClient{Backend: testAcc.Backend}
web3Service.depositContractCaller, err = contracts.NewDepositContractCaller(testAcc.ContractAddr, testAcc.Backend)
@@ -156,7 +156,7 @@ func TestStart_OK(t *testing.T) {
web3Service.Start()
if len(hook.Entries) > 0 {
msg := hook.LastEntry().Message
want := "Could not connect to execution endpoint"
want := "Could not connect to ETH1.0 chain RPC client"
if strings.Contains(want, msg) {
t.Errorf("incorrect log, expected %s, got %s", want, msg)
}
@@ -752,6 +752,15 @@ func TestService_ValidateDepositContainers(t *testing.T) {
}
}
func TestTimestampIsChecked(t *testing.T) {
timestamp := uint64(time.Now().Unix())
assert.Equal(t, false, eth1HeadIsBehind(timestamp))
// Give an older timestamp beyond the threshold.
timestamp = uint64(time.Now().Add(-eth1Threshold).Add(-1 * time.Minute).Unix())
assert.Equal(t, true, eth1HeadIsBehind(timestamp))
}
func TestETH1Endpoints(t *testing.T) {
server, firstEndpoint, err := mockExecution.SetupRPCServer()
require.NoError(t, err)

View File

@@ -18,7 +18,6 @@ go_library(
"//beacon-chain/state:go_default_library",
"//config/fieldparams:go_default_library",
"//consensus-types/primitives:go_default_library",
"//proto/eth/v1:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

View File

@@ -31,7 +31,6 @@ go_library(
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
@@ -63,14 +62,12 @@ go_test(
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/v3:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",

View File

@@ -3,7 +3,6 @@ package doublylinkedtree
import (
"context"
"fmt"
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/blocks"
@@ -15,7 +14,6 @@ import (
"github.com/prysmaticlabs/prysm/v3/config/params"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
v1 "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/runtime/version"
"github.com/prysmaticlabs/prysm/v3/time/slots"
@@ -84,8 +82,7 @@ func (f *ForkChoice) Head(
jc := f.JustifiedCheckpoint()
fc := f.FinalizedCheckpoint()
currentEpoch := slots.EpochsSinceGenesis(time.Unix(int64(f.store.genesisTime), 0))
if err := f.store.treeRootNode.updateBestDescendant(ctx, jc.Epoch, fc.Epoch, currentEpoch); err != nil {
if err := f.store.treeRootNode.updateBestDescendant(ctx, jc.Epoch, fc.Epoch); err != nil {
return [32]byte{}, errors.Wrap(err, "could not update best descendant")
}
return f.store.head(ctx)
@@ -186,15 +183,11 @@ func (f *ForkChoice) updateCheckpoints(ctx context.Context, jc, fc *ethpb.Checkp
return err
}
jcRoot := bytesutil.ToBytes32(jc.Root)
// Release the checkpoints lock here because
// AncestorRoot acquires a lock on nodes and that can
// cause a double lock.
f.store.checkpointsLock.Unlock()
root, err := f.AncestorRoot(ctx, jcRoot, jSlot)
if err != nil {
f.store.checkpointsLock.Unlock()
return err
}
f.store.checkpointsLock.Lock()
if root == currentRoot {
f.store.prevJustifiedCheckpoint = f.store.justifiedCheckpoint
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
@@ -303,8 +296,7 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot types
}
// updateBalances updates the balances that directly voted for each block taking into account the
// validators' latest votes. This function requires a lock in Store.nodesLock
// and votesLock
// validators' latest votes. This function requires a lock in Store.nodesLock.
func (f *ForkChoice) updateBalances(newBalances []uint64) error {
for index, vote := range f.votes {
// Skip if validator has been slashed
@@ -432,9 +424,6 @@ func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root, parentRoo
// store-tracked list. Votes from these validators are not accounted for
// in forkchoice.
func (f *ForkChoice) InsertSlashedIndex(_ context.Context, index types.ValidatorIndex) {
f.votesLock.RLock()
defer f.votesLock.RUnlock()
f.store.nodesLock.Lock()
defer f.store.nodesLock.Unlock()
// return early if the index was already included:
@@ -444,6 +433,8 @@ func (f *ForkChoice) InsertSlashedIndex(_ context.Context, index types.Validator
f.store.slashedIndices[index] = true
// Subtract last vote from this equivocating validator
f.votesLock.RLock()
defer f.votesLock.RUnlock()
if index >= types.ValidatorIndex(len(f.balances)) {
return
@@ -493,31 +484,30 @@ func (f *ForkChoice) UpdateFinalizedCheckpoint(fc *forkchoicetypes.Checkpoint) e
}
// CommonAncestorRoot returns the common ancestor root between the two block roots r1 and r2.
func (f *ForkChoice) CommonAncestor(ctx context.Context, r1 [32]byte, r2 [32]byte) ([32]byte, types.Slot, error) {
func (f *ForkChoice) CommonAncestorRoot(ctx context.Context, r1 [32]byte, r2 [32]byte) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "doublelinkedtree.CommonAncestorRoot")
defer span.End()
// Do nothing if the input roots are the same.
if r1 == r2 {
return r1, nil
}
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
n1, ok := f.store.nodeByRoot[r1]
if !ok || n1 == nil {
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
}
// Do nothing if the input roots are the same.
if r1 == r2 {
return r1, n1.slot, nil
}
n2, ok := f.store.nodeByRoot[r2]
if !ok || n2 == nil {
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
}
for {
if ctx.Err() != nil {
return [32]byte{}, 0, ctx.Err()
return [32]byte{}, ctx.Err()
}
if n1.slot > n2.slot {
n1 = n1.parent
@@ -525,17 +515,17 @@ func (f *ForkChoice) CommonAncestor(ctx context.Context, r1 [32]byte, r2 [32]byt
// This should not happen at runtime as the finalized
// node has to be a common ancestor
if n1 == nil {
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
}
} else {
n2 = n2.parent
// Reached the end of the tree without finding a common ancestor.
if n2 == nil {
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
}
}
if n1 == n2 {
return n1.root, n1.slot, nil
return n1.root, nil
}
}
}
@@ -616,52 +606,3 @@ func (f *ForkChoice) JustifiedPayloadBlockHash() [32]byte {
}
return node.payloadHash
}
// ForkChoiceDump returns a full dump of forkchoice.
func (f *ForkChoice) ForkChoiceDump(ctx context.Context) (*v1.ForkChoiceResponse, error) {
jc := &v1.Checkpoint{
Epoch: f.store.justifiedCheckpoint.Epoch,
Root: f.store.justifiedCheckpoint.Root[:],
}
bjc := &v1.Checkpoint{
Epoch: f.store.bestJustifiedCheckpoint.Epoch,
Root: f.store.bestJustifiedCheckpoint.Root[:],
}
ujc := &v1.Checkpoint{
Epoch: f.store.unrealizedJustifiedCheckpoint.Epoch,
Root: f.store.unrealizedJustifiedCheckpoint.Root[:],
}
fc := &v1.Checkpoint{
Epoch: f.store.finalizedCheckpoint.Epoch,
Root: f.store.finalizedCheckpoint.Root[:],
}
ufc := &v1.Checkpoint{
Epoch: f.store.unrealizedFinalizedCheckpoint.Epoch,
Root: f.store.unrealizedFinalizedCheckpoint.Root[:],
}
nodes := make([]*v1.ForkChoiceNode, 0, f.NodeCount())
var err error
if f.store.treeRootNode != nil {
nodes, err = f.store.treeRootNode.nodeTreeDump(ctx, nodes)
if err != nil {
return nil, err
}
}
var headRoot [32]byte
if f.store.headNode != nil {
headRoot = f.store.headNode.root
}
resp := &v1.ForkChoiceResponse{
JustifiedCheckpoint: jc,
BestJustifiedCheckpoint: bjc,
UnrealizedJustifiedCheckpoint: ujc,
FinalizedCheckpoint: fc,
UnrealizedFinalizedCheckpoint: ufc,
ProposerBoostRoot: f.store.proposerBoostRoot[:],
PreviousProposerBoostRoot: f.store.previousProposerBoostRoot[:],
HeadRoot: headRoot[:],
ForkchoiceNodes: nodes,
}
return resp, nil
}
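A minimal usage sketch for the dump above, assuming f is a populated ForkChoice; the field names are the ones constructed in ForkChoiceDump, and error handling is abbreviated.

resp, err := f.ForkChoiceDump(ctx)
if err != nil {
	return err
}
// nodeTreeDump appends parents before their children, so a linear scan
// prints the tree in parent-first order.
for _, n := range resp.ForkchoiceNodes {
	fmt.Printf("slot=%d root=%#x weight=%d optimistic=%t\n",
		n.Slot, n.Root, n.Weight, n.ExecutionOptimistic)
}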

View File

@@ -208,7 +208,7 @@ func TestForkChoice_IsCanonicalReorg(t *testing.T) {
require.Equal(t, uint64(10), f.store.nodeByRoot[[32]byte{'1'}].weight)
require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'2'}].weight)
require.NoError(t, f.store.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
require.NoError(t, f.store.treeRootNode.updateBestDescendant(ctx, 1, 1))
require.DeepEqual(t, [32]byte{'3'}, f.store.treeRootNode.bestDescendant.root)
f.store.nodesLock.Unlock()
@@ -408,85 +408,73 @@ func TestStore_CommonAncestor(t *testing.T) {
r1 [32]byte
r2 [32]byte
wantRoot [32]byte
wantSlot types.Slot
}{
{
name: "Common ancestor between c and b is a",
r1: [32]byte{'c'},
r2: [32]byte{'b'},
wantRoot: [32]byte{'a'},
wantSlot: 0,
},
{
name: "Common ancestor between c and d is a",
r1: [32]byte{'c'},
r2: [32]byte{'d'},
wantRoot: [32]byte{'a'},
wantSlot: 0,
},
{
name: "Common ancestor between c and e is a",
r1: [32]byte{'c'},
r2: [32]byte{'e'},
wantRoot: [32]byte{'a'},
wantSlot: 0,
},
{
name: "Common ancestor between g and f is c",
r1: [32]byte{'g'},
r2: [32]byte{'f'},
wantRoot: [32]byte{'c'},
wantSlot: 2,
},
{
name: "Common ancestor between f and h is c",
r1: [32]byte{'f'},
r2: [32]byte{'h'},
wantRoot: [32]byte{'c'},
wantSlot: 2,
},
{
name: "Common ancestor between g and h is c",
r1: [32]byte{'g'},
r2: [32]byte{'h'},
wantRoot: [32]byte{'c'},
wantSlot: 2,
},
{
name: "Common ancestor between b and h is a",
r1: [32]byte{'b'},
r2: [32]byte{'h'},
wantRoot: [32]byte{'a'},
wantSlot: 0,
},
{
name: "Common ancestor between e and h is a",
r1: [32]byte{'e'},
r2: [32]byte{'h'},
wantRoot: [32]byte{'a'},
wantSlot: 0,
},
{
name: "Common ancestor between i and f is c",
r1: [32]byte{'i'},
r2: [32]byte{'f'},
wantRoot: [32]byte{'c'},
wantSlot: 2,
},
{
name: "Common ancestor between e and h is a",
r1: [32]byte{'j'},
r2: [32]byte{'g'},
wantRoot: [32]byte{'c'},
wantSlot: 2,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
gotRoot, gotSlot, err := f.CommonAncestor(ctx, tc.r1, tc.r2)
gotRoot, err := f.CommonAncestorRoot(ctx, tc.r1, tc.r2)
require.NoError(t, err)
require.Equal(t, tc.wantRoot, gotRoot)
require.Equal(t, tc.wantSlot, gotSlot)
})
}
@@ -509,53 +497,46 @@ func TestStore_CommonAncestor(t *testing.T) {
r1 [32]byte
r2 [32]byte
wantRoot [32]byte
wantSlot types.Slot
}{
{
name: "Common ancestor between a and b is a",
r1: [32]byte{'a'},
r2: [32]byte{'b'},
wantRoot: [32]byte{'a'},
wantSlot: 0,
},
{
name: "Common ancestor between b and d is b",
r1: [32]byte{'d'},
r2: [32]byte{'b'},
wantRoot: [32]byte{'b'},
wantSlot: 1,
},
{
name: "Common ancestor between d and a is a",
r1: [32]byte{'d'},
r2: [32]byte{'a'},
wantRoot: [32]byte{'a'},
wantSlot: 0,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
gotRoot, gotSlot, err := f.CommonAncestor(ctx, tc.r1, tc.r2)
gotRoot, err := f.CommonAncestorRoot(ctx, tc.r1, tc.r2)
require.NoError(t, err)
require.Equal(t, tc.wantRoot, gotRoot)
require.Equal(t, tc.wantSlot, gotSlot)
})
}
// Equal inputs should return the same root.
r, s, err := f.CommonAncestor(ctx, [32]byte{'b'}, [32]byte{'b'})
r, err := f.CommonAncestorRoot(ctx, [32]byte{'b'}, [32]byte{'b'})
require.NoError(t, err)
require.Equal(t, [32]byte{'b'}, r)
require.Equal(t, types.Slot(1), s)
// Requesting finalized root (last node) should return the same root.
r, s, err = f.CommonAncestor(ctx, [32]byte{'a'}, [32]byte{'a'})
r, err = f.CommonAncestorRoot(ctx, [32]byte{'a'}, [32]byte{'a'})
require.NoError(t, err)
require.Equal(t, [32]byte{'a'}, r)
require.Equal(t, types.Slot(0), s)
// Requesting unknown root
_, _, err = f.CommonAncestor(ctx, [32]byte{'a'}, [32]byte{'z'})
_, err = f.CommonAncestorRoot(ctx, [32]byte{'a'}, [32]byte{'z'})
require.ErrorIs(t, err, forkchoice.ErrUnknownCommonAncestor)
_, _, err = f.CommonAncestor(ctx, [32]byte{'z'}, [32]byte{'a'})
_, err = f.CommonAncestorRoot(ctx, [32]byte{'z'}, [32]byte{'a'})
require.ErrorIs(t, err, forkchoice.ErrUnknownCommonAncestor)
n := &Node{
slot: 100,
@@ -569,7 +550,7 @@ func TestStore_CommonAncestor(t *testing.T) {
f.store.nodeByRoot[[32]byte{'y'}] = n
// broken link
_, _, err = f.CommonAncestor(ctx, [32]byte{'y'}, [32]byte{'a'})
_, err = f.CommonAncestorRoot(ctx, [32]byte{'y'}, [32]byte{'a'})
require.ErrorIs(t, err, forkchoice.ErrUnknownCommonAncestor)
}

View File

@@ -5,10 +5,8 @@ import (
"context"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v3/config/features"
"github.com/prysmaticlabs/prysm/v3/config/params"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
v1 "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"
)
// depth returns the length of the path to the root of Fork Choice
@@ -42,9 +40,8 @@ func (n *Node) applyWeightChanges(ctx context.Context) error {
return nil
}
// updateBestDescendant updates the best descendant of this node and its
// children. This function assumes the caller has a lock on Store.nodesLock
func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finalizedEpoch, currentEpoch types.Epoch) error {
// updateBestDescendant updates the best descendant of this node and its children.
func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finalizedEpoch types.Epoch) error {
if ctx.Err() != nil {
return ctx.Err()
}
@@ -60,10 +57,10 @@ func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finaliz
if child == nil {
return errors.Wrap(ErrNilNode, "could not update best descendant")
}
if err := child.updateBestDescendant(ctx, justifiedEpoch, finalizedEpoch, currentEpoch); err != nil {
if err := child.updateBestDescendant(ctx, justifiedEpoch, finalizedEpoch); err != nil {
return err
}
childLeadsToViableHead := child.leadsToViableHead(justifiedEpoch, finalizedEpoch, currentEpoch)
childLeadsToViableHead := child.leadsToViableHead(justifiedEpoch, finalizedEpoch)
if childLeadsToViableHead && !hasViableDescendant {
// The child leads to a viable head, but the current
// parent's best child doesn't.
@@ -98,24 +95,18 @@ func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finaliz
// viableForHead returns true if the node is viable as head.
// Any node whose finalized or justified epoch differs from
// the ones in the fork choice store is not viable for head.
func (n *Node) viableForHead(justifiedEpoch, finalizedEpoch, currentEpoch types.Epoch) bool {
func (n *Node) viableForHead(justifiedEpoch, finalizedEpoch types.Epoch) bool {
justified := justifiedEpoch == n.justifiedEpoch || justifiedEpoch == 0
finalized := finalizedEpoch == n.finalizedEpoch || finalizedEpoch == 0
if features.Get().EnableDefensivePull && !justified && justifiedEpoch+1 == currentEpoch {
if n.unrealizedJustifiedEpoch+1 >= currentEpoch {
justified = true
finalized = true
}
}
return justified && finalized
}
func (n *Node) leadsToViableHead(justifiedEpoch, finalizedEpoch, currentEpoch types.Epoch) bool {
func (n *Node) leadsToViableHead(justifiedEpoch, finalizedEpoch types.Epoch) bool {
if n.bestDescendant == nil {
return n.viableForHead(justifiedEpoch, finalizedEpoch, currentEpoch)
return n.viableForHead(justifiedEpoch, finalizedEpoch)
}
return n.bestDescendant.viableForHead(justifiedEpoch, finalizedEpoch, currentEpoch)
return n.bestDescendant.viableForHead(justifiedEpoch, finalizedEpoch)
}
// setNodeAndParentValidated sets the current node and all the ancestors as validated (i.e. non-optimistic).
@@ -124,48 +115,10 @@ func (n *Node) setNodeAndParentValidated(ctx context.Context) error {
return ctx.Err()
}
if !n.optimistic {
if !n.optimistic || n.parent == nil {
return nil
}
n.optimistic = false
if n.parent == nil {
return nil
}
n.optimistic = false
return n.parent.setNodeAndParentValidated(ctx)
}
// nodeTreeDump appends to the given list all the nodes descending from this one
func (n *Node) nodeTreeDump(ctx context.Context, nodes []*v1.ForkChoiceNode) ([]*v1.ForkChoiceNode, error) {
if ctx.Err() != nil {
return nil, ctx.Err()
}
var parentRoot [32]byte
if n.parent != nil {
parentRoot = n.parent.root
}
thisNode := &v1.ForkChoiceNode{
Slot: n.slot,
Root: n.root[:],
ParentRoot: parentRoot[:],
JustifiedEpoch: n.justifiedEpoch,
FinalizedEpoch: n.finalizedEpoch,
UnrealizedJustifiedEpoch: n.unrealizedJustifiedEpoch,
UnrealizedFinalizedEpoch: n.unrealizedFinalizedEpoch,
Balance: n.balance,
Weight: n.weight,
ExecutionOptimistic: n.optimistic,
ExecutionPayload: n.payloadHash[:],
Timestamp: n.timestamp,
}
nodes = append(nodes, thisNode)
var err error
for _, child := range n.children {
nodes, err = child.nodeTreeDump(ctx, nodes)
if err != nil {
return nil, err
}
}
return nodes, nil
}

View File

@@ -6,7 +6,6 @@ import (
"github.com/prysmaticlabs/prysm/v3/config/params"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
v1 "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"
"github.com/prysmaticlabs/prysm/v3/testing/assert"
"github.com/prysmaticlabs/prysm/v3/testing/require"
)
@@ -114,7 +113,7 @@ func TestNode_UpdateBestDescendant_HigherWeightChild(t *testing.T) {
s := f.store
s.nodeByRoot[indexToHash(1)].weight = 100
s.nodeByRoot[indexToHash(2)].weight = 200
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1))
assert.Equal(t, 2, len(s.treeRootNode.children))
assert.Equal(t, s.treeRootNode.children[1], s.treeRootNode.bestDescendant)
@@ -134,7 +133,7 @@ func TestNode_UpdateBestDescendant_LowerWeightChild(t *testing.T) {
s := f.store
s.nodeByRoot[indexToHash(1)].weight = 200
s.nodeByRoot[indexToHash(2)].weight = 100
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1))
assert.Equal(t, 2, len(s.treeRootNode.children))
assert.Equal(t, s.treeRootNode.children[0], s.treeRootNode.bestDescendant)
@@ -174,7 +173,7 @@ func TestNode_ViableForHead(t *testing.T) {
{&Node{finalizedEpoch: 3, justifiedEpoch: 4}, 4, 3, true},
}
for _, tc := range tests {
got := tc.n.viableForHead(tc.justifiedEpoch, tc.finalizedEpoch, 5)
got := tc.n.viableForHead(tc.justifiedEpoch, tc.finalizedEpoch)
assert.Equal(t, tc.want, got)
}
}
@@ -198,17 +197,15 @@ func TestNode_LeadsToViableHead(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.Equal(t, true, f.store.treeRootNode.leadsToViableHead(4, 3, 5))
require.Equal(t, true, f.store.nodeByRoot[indexToHash(5)].leadsToViableHead(4, 3, 5))
require.Equal(t, false, f.store.nodeByRoot[indexToHash(2)].leadsToViableHead(4, 3, 5))
require.Equal(t, false, f.store.nodeByRoot[indexToHash(4)].leadsToViableHead(4, 3, 5))
require.Equal(t, true, f.store.treeRootNode.leadsToViableHead(4, 3))
require.Equal(t, true, f.store.nodeByRoot[indexToHash(5)].leadsToViableHead(4, 3))
require.Equal(t, false, f.store.nodeByRoot[indexToHash(2)].leadsToViableHead(4, 3))
require.Equal(t, false, f.store.nodeByRoot[indexToHash(4)].leadsToViableHead(4, 3))
}
func TestNode_SetFullyValidated(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
storeNodes := make([]*Node, 6)
storeNodes[0] = f.store.treeRootNode
// insert blocks in the fork pattern (optimistic status in parentheses)
//
// 0 (false) -- 1 (false) -- 2 (false) -- 3 (true) -- 4 (true)
@@ -218,25 +215,20 @@ func TestNode_SetFullyValidated(t *testing.T) {
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
storeNodes[1] = f.store.nodeByRoot[blkRoot]
require.NoError(t, f.SetOptimisticToValid(ctx, params.BeaconConfig().ZeroHash))
state, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
storeNodes[2] = f.store.nodeByRoot[blkRoot]
require.NoError(t, f.SetOptimisticToValid(ctx, indexToHash(1)))
state, blkRoot, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
storeNodes[3] = f.store.nodeByRoot[blkRoot]
state, blkRoot, err = prepareForkchoiceState(ctx, 4, indexToHash(4), indexToHash(3), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
storeNodes[4] = f.store.nodeByRoot[blkRoot]
state, blkRoot, err = prepareForkchoiceState(ctx, 5, indexToHash(5), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
storeNodes[5] = f.store.nodeByRoot[blkRoot]
opt, err := f.IsOptimistic(indexToHash(5))
require.NoError(t, err)
@@ -261,22 +253,4 @@ func TestNode_SetFullyValidated(t *testing.T) {
opt, err = f.IsOptimistic(indexToHash(3))
require.NoError(t, err)
require.Equal(t, false, opt)
respNodes := make([]*v1.ForkChoiceNode, 0)
respNodes, err = f.store.treeRootNode.nodeTreeDump(ctx, respNodes)
require.NoError(t, err)
require.Equal(t, len(respNodes), f.NodeCount())
for i, respNode := range respNodes {
require.Equal(t, storeNodes[i].slot, respNode.Slot)
require.DeepEqual(t, storeNodes[i].root[:], respNode.Root)
require.Equal(t, storeNodes[i].balance, respNode.Balance)
require.Equal(t, storeNodes[i].weight, respNode.Weight)
require.Equal(t, storeNodes[i].optimistic, respNode.ExecutionOptimistic)
require.Equal(t, storeNodes[i].justifiedEpoch, respNode.JustifiedEpoch)
require.Equal(t, storeNodes[i].unrealizedJustifiedEpoch, respNode.UnrealizedJustifiedEpoch)
require.Equal(t, storeNodes[i].finalizedEpoch, respNode.FinalizedEpoch)
require.Equal(t, storeNodes[i].unrealizedFinalizedEpoch, respNode.UnrealizedFinalizedEpoch)
require.Equal(t, storeNodes[i].timestamp, respNode.Timestamp)
}
}

View File

@@ -41,14 +41,15 @@ func (f *ForkChoice) NewSlot(ctx context.Context, slot types.Slot) error {
}
// Update store.justified_checkpoint if there is a better checkpoint on the store.finalized_checkpoint chain
f.store.checkpointsLock.RLock()
f.store.checkpointsLock.Lock()
bjcp := f.store.bestJustifiedCheckpoint
jcp := f.store.justifiedCheckpoint
fcp := f.store.finalizedCheckpoint
f.store.checkpointsLock.RUnlock()
if bjcp.Epoch > jcp.Epoch {
finalizedSlot, err := slots.EpochStart(fcp.Epoch)
if err != nil {
f.store.checkpointsLock.Unlock()
return err
}
@@ -58,15 +59,15 @@ func (f *ForkChoice) NewSlot(ctx context.Context, slot types.Slot) error {
// loop call here.
r, err := f.AncestorRoot(ctx, bjcp.Root, finalizedSlot)
if err != nil {
f.store.checkpointsLock.Unlock()
return err
}
if r == fcp.Root {
f.store.checkpointsLock.Lock()
f.store.prevJustifiedCheckpoint = jcp
f.store.justifiedCheckpoint = bjcp
f.store.checkpointsLock.Unlock()
}
}
f.store.checkpointsLock.Unlock()
if !features.Get().DisablePullTips {
f.updateUnrealizedCheckpoints()
}
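The two locking disciplines interleaved in this hunk make different trade-offs. A minimal sketch of both, assuming a store guarded by a sync.RWMutex; the type and field names are placeholders, not the package's own:

package sketch

import "sync"

type checkpoint struct{ Epoch uint64 }

type cpStore struct {
	mu                                      sync.RWMutex
	bestJustified, justified, prevJustified *checkpoint
}

// snapshotThenWrite reads under an RLock and upgrades to a write lock only
// for the update. Readers stay cheap, but another writer can slip in between
// RUnlock and Lock, so the check may be stale by the time the write lands.
func (s *cpStore) snapshotThenWrite() {
	s.mu.RLock()
	bjcp, jcp := s.bestJustified, s.justified
	s.mu.RUnlock()
	if bjcp.Epoch > jcp.Epoch {
		s.mu.Lock()
		s.prevJustified, s.justified = jcp, bjcp
		s.mu.Unlock()
	}
}

// holdAcross keeps the write lock for the whole read-check-write sequence.
// The decision and the update are atomic, but every early return must
// release the lock explicitly, as the hunk above does before each return.
func (s *cpStore) holdAcross() {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.bestJustified.Epoch > s.justified.Epoch {
		s.prevJustified, s.justified = s.justified, s.bestJustified
	}
}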

View File

@@ -389,14 +389,3 @@ func TestSetOptimisticToInvalid_ForkAtMerge_bis(t *testing.T) {
})
require.DeepEqual(t, roots, [][32]byte{{'b'}, {'c'}, {'d'}, {'e'}})
}
func TestSetOptimisticToValid(t *testing.T) {
f := setup(1, 1)
op, err := f.IsOptimistic([32]byte{})
require.NoError(t, err)
require.Equal(t, true, op)
require.NoError(t, f.SetOptimisticToValid(context.Background(), [32]byte{}))
op, err = f.IsOptimistic([32]byte{})
require.NoError(t, err)
require.Equal(t, false, op)
}

View File

@@ -95,8 +95,8 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
if bestDescendant == nil {
bestDescendant = justifiedNode
}
currentEpoch := slots.EpochsSinceGenesis(time.Unix(int64(s.genesisTime), 0))
if !bestDescendant.viableForHead(s.justifiedCheckpoint.Epoch, s.finalizedCheckpoint.Epoch, currentEpoch) {
if !bestDescendant.viableForHead(s.justifiedCheckpoint.Epoch, s.finalizedCheckpoint.Epoch) {
s.allTipsAreInvalid = true
return [32]byte{}, fmt.Errorf("head at slot %d with weight %d is not eligible, finalizedEpoch, justified Epoch %d, %d != %d, %d",
bestDescendant.slot, bestDescendant.weight/10e9, bestDescendant.finalizedEpoch, bestDescendant.justifiedEpoch, s.finalizedCheckpoint.Epoch, s.justifiedCheckpoint.Epoch)
@@ -142,7 +142,6 @@ func (s *Store) insert(ctx context.Context,
unrealizedFinalizedEpoch: finalizedEpoch,
optimistic: true,
payloadHash: payloadHash,
timestamp: uint64(time.Now().Unix()),
}
s.nodeByPayload[payloadHash] = n
@@ -171,11 +170,8 @@ func (s *Store) insert(ctx context.Context,
}
// Update best descendants
s.checkpointsLock.RLock()
jEpoch := s.justifiedCheckpoint.Epoch
fEpoch := s.finalizedCheckpoint.Epoch
s.checkpointsLock.RUnlock()
if err := s.treeRootNode.updateBestDescendant(ctx, jEpoch, fEpoch, slots.ToEpoch(currentSlot)); err != nil {
if err := s.treeRootNode.updateBestDescendant(ctx,
s.justifiedCheckpoint.Epoch, s.finalizedCheckpoint.Epoch); err != nil {
return n, err
}
}

View File

@@ -59,7 +59,6 @@ type Node struct {
weight uint64 // weight of this node: the total balance including children
bestDescendant *Node // bestDescendant node of this node.
optimistic bool // whether the block has been fully validated or not
timestamp uint64 // The timestamp when the node was inserted.
}
// Vote defines an individual validator's vote.

View File

@@ -5,7 +5,6 @@ import (
"testing"
forkchoicetypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v3/config/features"
"github.com/prysmaticlabs/prysm/v3/config/params"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/testing/require"
@@ -199,36 +198,31 @@ func TestStore_NoDeadLock(t *testing.T) {
// D justifies and comes late.
//
func TestStore_ForkNextEpoch(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnableDefensivePull: true,
})
defer resetCfg()
f := setup(0, 0)
ctx := context.Background()
// Epoch 1 blocks (D does not arrive)
state, blkRoot, err := prepareForkchoiceState(ctx, 92, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 93, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 94, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
// Epoch 2 blocks
state, blkRoot, err = prepareForkchoiceState(ctx, 96, [32]byte{'e'}, [32]byte{'c'}, [32]byte{'E'}, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'c'}, [32]byte{'E'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 97, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 98, [32]byte{'g'}, [32]byte{'f'}, [32]byte{'G'}, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 106, [32]byte{'g'}, [32]byte{'f'}, [32]byte{'G'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 99, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 107, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
@@ -240,25 +234,16 @@ func TestStore_ForkNextEpoch(t *testing.T) {
require.Equal(t, types.Epoch(0), f.JustifiedCheckpoint().Epoch)
// D arrives late, D is head
state, blkRoot, err = prepareForkchoiceState(ctx, 95, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.NoError(t, f.store.setUnrealizedJustifiedEpoch([32]byte{'d'}, 2))
f.store.unrealizedJustifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 2}
require.NoError(t, f.store.setUnrealizedJustifiedEpoch([32]byte{'d'}, 1))
f.store.unrealizedJustifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 1}
f.updateUnrealizedCheckpoints()
headRoot, err = f.Head(ctx, []uint64{100})
require.NoError(t, err)
require.Equal(t, [32]byte{'d'}, headRoot)
require.Equal(t, types.Epoch(2), f.JustifiedCheckpoint().Epoch)
require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'d'}].weight)
require.Equal(t, uint64(100), f.store.nodeByRoot[[32]byte{'h'}].weight)
// Set current epoch to 3, and H's unrealized checkpoint. Check it's head
driftGenesisTime(f, 99, 0)
require.NoError(t, f.store.setUnrealizedJustifiedEpoch([32]byte{'h'}, 2))
headRoot, err = f.Head(ctx, []uint64{100})
require.NoError(t, err)
require.Equal(t, [32]byte{'h'}, headRoot)
require.Equal(t, types.Epoch(2), f.JustifiedCheckpoint().Epoch)
require.Equal(t, types.Epoch(1), f.JustifiedCheckpoint().Epoch)
require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'d'}].weight)
require.Equal(t, uint64(100), f.store.nodeByRoot[[32]byte{'h'}].weight)
}
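A note on the removed lines: driftGenesisTime(f, 99, 0) is this package's test helper for moving the wall clock. Rewinding genesis so the current slot is 99 puts the clock in epoch 3 (99 / 32 = 3 with mainnet's 32-slot epochs, an assumption about the active config), which is what lets H's unrealized justified epoch of 2 pass the defensive-pull viability check when the head is recomputed.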

View File

@@ -7,7 +7,6 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
v1 "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"
)
// ForkChoicer represents the full fork choice interface composed of all the sub-interfaces.
@@ -52,7 +51,7 @@ type Getter interface {
ProposerBoost() [fieldparams.RootLength]byte
HasParent(root [32]byte) bool
AncestorRoot(ctx context.Context, root [32]byte, slot types.Slot) ([32]byte, error)
CommonAncestor(ctx context.Context, root1 [32]byte, root2 [32]byte) ([32]byte, types.Slot, error)
CommonAncestorRoot(ctx context.Context, root1 [32]byte, root2 [32]byte) ([32]byte, error)
IsCanonical(root [32]byte) bool
FinalizedCheckpoint() *forkchoicetypes.Checkpoint
FinalizedPayloadBlockHash() [32]byte
@@ -63,7 +62,6 @@ type Getter interface {
NodeCount() int
HighestReceivedBlockSlot() types.Slot
ReceivedBlocksLastEpoch() (uint64, error)
ForkChoiceDump(context.Context) (*v1.ForkChoiceResponse, error)
}
// Setter allows to set forkchoice information

View File

@@ -32,7 +32,6 @@ go_library(
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
@@ -64,7 +63,6 @@ go_test(
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/v3:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",

View File

@@ -10,9 +10,7 @@ import (
)
// This computes validator balance delta from validator votes.
// It returns a list of deltas that represents the difference between old
// balances and new balances. This function assumes the caller holds a lock in
// Store.nodesLock and Store.votesLock
// It returns a list of deltas that represents the difference between old balances and new balances.
func computeDeltas(
ctx context.Context,
count int,

View File

@@ -41,14 +41,15 @@ func (f *ForkChoice) NewSlot(ctx context.Context, slot types.Slot) error {
}
// Update store.justified_checkpoint if there is a better checkpoint on the store.finalized_checkpoint chain
f.store.checkpointsLock.RLock()
f.store.checkpointsLock.Lock()
bjcp := f.store.bestJustifiedCheckpoint
jcp := f.store.justifiedCheckpoint
fcp := f.store.finalizedCheckpoint
f.store.checkpointsLock.RUnlock()
if bjcp.Epoch > jcp.Epoch {
finalizedSlot, err := slots.EpochStart(fcp.Epoch)
if err != nil {
f.store.checkpointsLock.Unlock()
return err
}
@@ -58,15 +59,15 @@ func (f *ForkChoice) NewSlot(ctx context.Context, slot types.Slot) error {
// loop call here.
r, err := f.AncestorRoot(ctx, bjcp.Root, finalizedSlot)
if err != nil {
f.store.checkpointsLock.Unlock()
return err
}
if r == fcp.Root {
f.store.checkpointsLock.Lock()
f.store.prevJustifiedCheckpoint = jcp
f.store.justifiedCheckpoint = bjcp
f.store.checkpointsLock.Unlock()
}
}
f.store.checkpointsLock.Unlock()
if !features.Get().DisablePullTips {
f.updateUnrealizedCheckpoints()
}

View File

@@ -17,7 +17,6 @@ import (
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
pmath "github.com/prysmaticlabs/prysm/v3/math"
v1 "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/runtime/version"
"github.com/prysmaticlabs/prysm/v3/time/slots"
@@ -189,15 +188,11 @@ func (f *ForkChoice) updateCheckpoints(ctx context.Context, jc, fc *ethpb.Checkp
return err
}
jcRoot := bytesutil.ToBytes32(jc.Root)
// release the checkpoints lock here because
// AncestorRoot takes a lock on nodes and that can lead
// to double locks
f.store.checkpointsLock.Unlock()
root, err := f.AncestorRoot(ctx, jcRoot, jSlot)
if err != nil {
f.store.checkpointsLock.Unlock()
return err
}
f.store.checkpointsLock.Lock()
if root == currentRoot {
f.store.prevJustifiedCheckpoint = f.store.justifiedCheckpoint
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
@@ -282,51 +277,47 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot types
}
// CommonAncestorRoot returns the common ancestor root between the two block roots r1 and r2.
func (f *ForkChoice) CommonAncestor(ctx context.Context, r1 [32]byte, r2 [32]byte) ([32]byte, types.Slot, error) {
func (f *ForkChoice) CommonAncestorRoot(ctx context.Context, r1 [32]byte, r2 [32]byte) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "protoArray.CommonAncestorRoot")
defer span.End()
f.store.nodesLock.RLock()
defer f.store.nodesLock.RUnlock()
// Do nothing if the two input roots are the same.
if r1 == r2 {
return r1, nil
}
i1, ok := f.store.nodesIndices[r1]
if !ok || i1 >= uint64(len(f.store.nodes)) {
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
}
// Do nothing if the two input roots are the same.
if r1 == r2 {
n1 := f.store.nodes[i1]
return r1, n1.slot, nil
return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
}
i2, ok := f.store.nodesIndices[r2]
if !ok || i2 >= uint64(len(f.store.nodes)) {
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
}
for {
if ctx.Err() != nil {
return [32]byte{}, 0, ctx.Err()
return [32]byte{}, ctx.Err()
}
if i1 > i2 {
n1 := f.store.nodes[i1]
i1 = n1.parent
// Reached the end of the tree without finding a common ancestor.
if i1 >= uint64(len(f.store.nodes)) {
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
}
} else {
n2 := f.store.nodes[i2]
i2 = n2.parent
// Reached the end of the tree without finding a common ancestor.
if i2 >= uint64(len(f.store.nodes)) {
return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
}
}
if i1 == i2 {
n1 := f.store.nodes[i1]
return n1.root, n1.slot, nil
return n1.root, nil
}
}
}
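A usage sketch for the variant that also returns the slot, assuming f is a populated ForkChoice and headA, headB are roots it may or may not track:

root, slot, err := f.CommonAncestor(ctx, headA, headB)
switch {
case errors.Is(err, forkchoice.ErrUnknownCommonAncestor):
	// At least one root is not on the tracked tree, e.g. pruned past finalization.
case err != nil:
	// Context cancellation or another failure.
default:
	fmt.Printf("fork point at slot %d, root %#x\n", slot, root)
}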
@@ -415,12 +406,8 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
if !s.viableForHead(bestNode) {
s.allTipsAreInvalid = true
s.checkpointsLock.RLock()
jEpoch := s.justifiedCheckpoint.Epoch
fEpoch := s.finalizedCheckpoint.Epoch
s.checkpointsLock.RUnlock()
return [32]byte{}, fmt.Errorf("head at slot %d with weight %d is not eligible, finalizedEpoch %d != %d, justifiedEpoch %d != %d",
bestNode.slot, bestNode.weight/10e9, bestNode.finalizedEpoch, fEpoch, bestNode.justifiedEpoch, jEpoch)
bestNode.slot, bestNode.weight/10e9, bestNode.finalizedEpoch, s.finalizedCheckpoint.Epoch, bestNode.justifiedEpoch, s.justifiedCheckpoint.Epoch)
}
s.allTipsAreInvalid = false
@@ -439,8 +426,7 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
return bestNode.root, nil
}
// updateCanonicalNodes updates the canonical nodes mapping given the input
// block root. This function assumes the caller holds a lock in Store.nodesLock
// updateCanonicalNodes updates the canonical nodes mapping given the input block root.
func (s *Store) updateCanonicalNodes(ctx context.Context, root [32]byte) error {
ctx, span := trace.StartSpan(ctx, "protoArrayForkChoice.updateCanonicalNodes")
defer span.End()
@@ -562,14 +548,14 @@ func (s *Store) insert(ctx context.Context,
if slot > s.highestReceivedSlot {
s.highestReceivedSlot = slot
}
return n, nil
}
// applyWeightChanges iterates backwards through the nodes in store. It checks each node's parent
// and its best child. For each node, it updates the weight with the input delta and
// back-propagates the node's delta to its parent's delta. After scoring changes,
// the best child is then updated along with the best descendant. This function
// assumes the caller holds a lock in Store.nodesLock
// the best child is then updated along with the best descendant.
func (s *Store) applyWeightChanges(
ctx context.Context, newBalances []uint64, delta []int,
) error {
@@ -885,15 +871,6 @@ func (s *Store) viableForHead(node *Node) bool {
// It's also viable if we are in genesis epoch.
justified := s.justifiedCheckpoint.Epoch == node.justifiedEpoch || s.justifiedCheckpoint.Epoch == 0
finalized := s.finalizedCheckpoint.Epoch == node.finalizedEpoch || s.finalizedCheckpoint.Epoch == 0
if features.Get().EnableDefensivePull {
currentEpoch := slots.EpochsSinceGenesis(time.Unix(int64(s.genesisTime), 0))
if !justified && s.justifiedCheckpoint.Epoch+1 == currentEpoch {
if node.unrealizedJustifiedEpoch+1 >= currentEpoch {
justified = true
finalized = true
}
}
}
return justified && finalized
}
@@ -923,8 +900,6 @@ func (f *ForkChoice) Tips() ([][32]byte, []types.Slot) {
// store-tracked list. Votes from these validators are not accounted for
// in forkchoice.
func (f *ForkChoice) InsertSlashedIndex(ctx context.Context, index types.ValidatorIndex) {
f.votesLock.RLock()
defer f.votesLock.RUnlock()
f.store.nodesLock.Lock()
defer f.store.nodesLock.Unlock()
// return early if the index was already included:
@@ -934,6 +909,9 @@ func (f *ForkChoice) InsertSlashedIndex(ctx context.Context, index types.Validat
f.store.slashedIndices[index] = true
// Subtract last vote from this equivocating validator
f.votesLock.RLock()
defer f.votesLock.RUnlock()
if index >= types.ValidatorIndex(len(f.balances)) {
return
}
@@ -1091,7 +1069,3 @@ func (f *ForkChoice) ReceivedBlocksLastEpoch() (uint64, error) {
}
return count, nil
}
func (*ForkChoice) ForkChoiceDump(_ context.Context) (*v1.ForkChoiceResponse, error) {
return nil, errors.New("ForkChoiceDump is not supported by protoarray")
}

View File

@@ -7,7 +7,6 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice"
forkchoicetypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v3/config/features"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
@@ -678,85 +677,73 @@ func TestStore_CommonAncestor(t *testing.T) {
r1 [32]byte
r2 [32]byte
wantRoot [32]byte
wantSlot types.Slot
}{
{
name: "Common ancestor between c and b is a",
r1: [32]byte{'c'},
r2: [32]byte{'b'},
wantRoot: [32]byte{'a'},
wantSlot: 0,
},
{
name: "Common ancestor between c and d is a",
r1: [32]byte{'c'},
r2: [32]byte{'d'},
wantRoot: [32]byte{'a'},
wantSlot: 0,
},
{
name: "Common ancestor between c and e is a",
r1: [32]byte{'c'},
r2: [32]byte{'e'},
wantRoot: [32]byte{'a'},
wantSlot: 0,
},
{
name: "Common ancestor between g and f is c",
r1: [32]byte{'g'},
r2: [32]byte{'f'},
wantRoot: [32]byte{'c'},
wantSlot: 2,
},
{
name: "Common ancestor between f and h is c",
r1: [32]byte{'f'},
r2: [32]byte{'h'},
wantRoot: [32]byte{'c'},
wantSlot: 2,
},
{
name: "Common ancestor between g and h is c",
r1: [32]byte{'g'},
r2: [32]byte{'h'},
wantRoot: [32]byte{'c'},
wantSlot: 2,
},
{
name: "Common ancestor between b and h is a",
r1: [32]byte{'b'},
r2: [32]byte{'h'},
wantRoot: [32]byte{'a'},
wantSlot: 0,
},
{
name: "Common ancestor between e and h is a",
r1: [32]byte{'e'},
r2: [32]byte{'h'},
wantRoot: [32]byte{'a'},
wantSlot: 0,
},
{
name: "Common ancestor between i and f is c",
r1: [32]byte{'i'},
r2: [32]byte{'f'},
wantRoot: [32]byte{'c'},
wantSlot: 2,
},
{
name: "Common ancestor between e and h is a",
r1: [32]byte{'j'},
r2: [32]byte{'g'},
wantRoot: [32]byte{'c'},
wantSlot: 2,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
gotRoot, gotSlot, err := f.CommonAncestor(ctx, tc.r1, tc.r2)
gotRoot, err := f.CommonAncestorRoot(ctx, tc.r1, tc.r2)
require.NoError(t, err)
require.Equal(t, tc.wantRoot, gotRoot)
require.Equal(t, tc.wantSlot, gotSlot)
})
}
@@ -779,59 +766,52 @@ func TestStore_CommonAncestor(t *testing.T) {
r1 [32]byte
r2 [32]byte
wantRoot [32]byte
wantSlot types.Slot
}{
{
name: "Common ancestor between a and b is a",
r1: [32]byte{'a'},
r2: [32]byte{'b'},
wantRoot: [32]byte{'a'},
wantSlot: 0,
},
{
name: "Common ancestor between b and d is b",
r1: [32]byte{'d'},
r2: [32]byte{'b'},
wantRoot: [32]byte{'b'},
wantSlot: 1,
},
{
name: "Common ancestor between d and a is a",
r1: [32]byte{'d'},
r2: [32]byte{'a'},
wantRoot: [32]byte{'a'},
wantSlot: 0,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
gotRoot, gotSlot, err := f.CommonAncestor(ctx, tc.r1, tc.r2)
gotRoot, err := f.CommonAncestorRoot(ctx, tc.r1, tc.r2)
require.NoError(t, err)
require.Equal(t, tc.wantRoot, gotRoot)
require.Equal(t, tc.wantSlot, gotSlot)
})
}
// Equal inputs should return the same root.
r, s, err := f.CommonAncestor(ctx, [32]byte{'b'}, [32]byte{'b'})
r, err := f.CommonAncestorRoot(ctx, [32]byte{'b'}, [32]byte{'b'})
require.NoError(t, err)
require.Equal(t, [32]byte{'b'}, r)
require.Equal(t, types.Slot(1), s)
// Requesting finalized root (last node) should return the same root.
r, s, err = f.CommonAncestor(ctx, [32]byte{'a'}, [32]byte{'a'})
r, err = f.CommonAncestorRoot(ctx, [32]byte{'a'}, [32]byte{'a'})
require.NoError(t, err)
require.Equal(t, [32]byte{'a'}, r)
require.Equal(t, types.Slot(0), s)
// Requesting unknown root
_, _, err = f.CommonAncestor(ctx, [32]byte{'a'}, [32]byte{'z'})
_, err = f.CommonAncestorRoot(ctx, [32]byte{'a'}, [32]byte{'z'})
require.ErrorIs(t, err, forkchoice.ErrUnknownCommonAncestor)
_, _, err = f.CommonAncestor(ctx, [32]byte{'z'}, [32]byte{'a'})
_, err = f.CommonAncestorRoot(ctx, [32]byte{'z'}, [32]byte{'a'})
require.ErrorIs(t, err, forkchoice.ErrUnknownCommonAncestor)
state, blkRoot, err = prepareForkchoiceState(ctx, 100, [32]byte{'y'}, [32]byte{'z'}, [32]byte{}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
// broken link
_, _, err = f.CommonAncestor(ctx, [32]byte{'y'}, [32]byte{'a'})
_, err = f.CommonAncestorRoot(ctx, [32]byte{'y'}, [32]byte{'a'})
require.ErrorIs(t, err, forkchoice.ErrUnknownCommonAncestor)
}
@@ -888,43 +868,6 @@ func TestStore_ViableForHead(t *testing.T) {
}
}
func TestStore_ViableForHead_DefensivePull(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnableDefensivePull: true,
})
defer resetCfg()
tests := []struct {
n *Node
justifiedEpoch types.Epoch
finalizedEpoch types.Epoch
currentEpoch types.Epoch
want bool
}{
{&Node{}, 0, 0, 0, true},
{&Node{}, 1, 0, 1, false},
{&Node{}, 0, 1, 1, false},
{&Node{finalizedEpoch: 1, justifiedEpoch: 1}, 1, 1, 1, true},
{&Node{finalizedEpoch: 1, justifiedEpoch: 1}, 2, 2, 2, false},
{&Node{finalizedEpoch: 3, justifiedEpoch: 4}, 4, 3, 3, true},
{&Node{unrealizedFinalizedEpoch: 3, unrealizedJustifiedEpoch: 4}, 3, 2, 4, true},
{&Node{unrealizedFinalizedEpoch: 2, unrealizedJustifiedEpoch: 3}, 3, 2, 4, true},
{&Node{unrealizedFinalizedEpoch: 1, unrealizedJustifiedEpoch: 2}, 3, 2, 4, false},
}
for _, tc := range tests {
jc := &forkchoicetypes.Checkpoint{Epoch: tc.justifiedEpoch}
fc := &forkchoicetypes.Checkpoint{Epoch: tc.finalizedEpoch}
currentTime := uint64(time.Now().Unix())
driftSeconds := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
s := &Store{
justifiedCheckpoint: jc,
finalizedCheckpoint: fc,
genesisTime: currentTime - driftSeconds*uint64(tc.currentEpoch),
}
assert.Equal(t, tc.want, s.viableForHead(tc.n))
}
}
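The genesisTime arithmetic in the removed test is worth unpacking. With mainnet parameters (32 slots per epoch, 12 seconds per slot, both assumptions about the active config), driftSeconds is 384, so a case with currentEpoch = 4 places genesis 1536 seconds in the past and the wall clock lands in epoch 4:

// driftSeconds = SlotsPerEpoch * SecondsPerSlot = 32 * 12 = 384
driftSeconds := uint64(32) * 12
genesisTime := uint64(time.Now().Unix()) - driftSeconds*4
// slots.EpochsSinceGenesis(time.Unix(int64(genesisTime), 0)) now reports epoch 4.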
func TestStore_HasParent(t *testing.T) {
tests := []struct {
m map[[32]byte]uint64

View File

@@ -26,6 +26,7 @@ go_library(
"//beacon-chain/db/slasherkv:go_default_library",
"//beacon-chain/deterministic-genesis:go_default_library",
"//beacon-chain/execution:go_default_library",
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
"//beacon-chain/forkchoice/protoarray:go_default_library",
"//beacon-chain/gateway:go_default_library",

View File

@@ -28,6 +28,7 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/db/slasherkv"
interopcoldstart "github.com/prysmaticlabs/prysm/v3/beacon-chain/deterministic-genesis"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/execution"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice"
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/gateway"
@@ -99,12 +100,14 @@ type BeaconNode struct {
stateFeed *event.Feed
blockFeed *event.Feed
opFeed *event.Feed
forkChoiceStore forkchoice.ForkChoicer
stateGen *stategen.State
collector *bcnodeCollector
slasherBlockHeadersFeed *event.Feed
slasherAttestationsFeed *event.Feed
finalizedStateAtStartUp state.BeaconState
serviceFlagOpts *serviceFlagOpts
blockchainFlagOpts []blockchain.Option
GenesisInitializer genesis.Initializer
CheckpointInitializer checkpoint.Initializer
}
@@ -226,6 +229,9 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
return nil, err
}
log.Debugln("Starting Fork Choice")
beacon.startForkChoice()
log.Debugln("Registering Blockchain Service")
if err := beacon.registerBlockchainService(); err != nil {
return nil, err
@@ -349,6 +355,14 @@ func (b *BeaconNode) Close() {
close(b.stop)
}
func (b *BeaconNode) startForkChoice() {
if !features.Get().DisableForkchoiceDoublyLinkedTree {
b.forkChoiceStore = doublylinkedtree.New()
} else {
b.forkChoiceStore = protoarray.New()
}
}
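Constructing the store once in startForkChoice and injecting it through blockchain.WithForkChoiceStore (rather than building a fresh instance inside registerBlockchainService, as the other side of this hunk does) means the node holds a single forkchoice instance that any future service can share.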
func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
baseDir := cliCtx.String(cmd.DataDirFlag.Name)
dbPath := filepath.Join(baseDir, kv.BeaconNodeDbDirName)
@@ -595,19 +609,13 @@ func (b *BeaconNode) registerBlockchainService() error {
blockchain.WithSlashingPool(b.slashingsPool),
blockchain.WithP2PBroadcaster(b.fetchP2P()),
blockchain.WithStateNotifier(b),
blockchain.WithForkChoiceStore(b.forkChoiceStore),
blockchain.WithAttestationService(attService),
blockchain.WithStateGen(b.stateGen),
blockchain.WithSlasherAttestationsFeed(b.slasherAttestationsFeed),
blockchain.WithFinalizedStateAtStartUp(b.finalizedStateAtStartUp),
blockchain.WithProposerIdsCache(b.proposerIdsCache),
)
if features.Get().DisableForkchoiceDoublyLinkedTree {
opts = append(opts, blockchain.WithForkChoiceStore(protoarray.New()))
} else {
opts = append(opts, blockchain.WithForkChoiceStore(doublylinkedtree.New()))
}
blockchainService, err := blockchain.NewService(b.ctx, opts...)
if err != nil {
return errors.Wrap(err, "could not register blockchain service")
@@ -835,7 +843,7 @@ func (b *BeaconNode) registerRPCService() error {
return b.services.RegisterService(rpcService)
}
func (b *BeaconNode) registerPrometheusService(_ *cli.Context) error {
func (b *BeaconNode) registerPrometheusService(cliCtx *cli.Context) error {
var additionalHandlers []prometheus.Handler
var p *p2p.Service
if err := b.services.FetchService(&p); err != nil {

View File

@@ -179,14 +179,14 @@ func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMs
}
return errors.New("failed to find peers for subnet")
}(); err != nil {
log.WithError(err).Error("Failed to find peers")
log.WithError(err).Debug("Failed to find peers")
tracing.AnnotateError(span, err)
}
}
// In the event our sync message is outdated and beyond the
// acceptable threshold, we exit early and do not broadcast it.
if err := altair.ValidateSyncMessageTime(sMsg.Slot, s.genesisTime, params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
log.WithError(err).Warn("Sync Committee Message is too old to broadcast, discarding it")
log.WithError(err).Debug("Sync committee message is too old to broadcast, discarding it")
return
}

View File

@@ -1,39 +1,20 @@
package p2p
import (
"strings"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
var (
knownAgentVersions = []string{
"lighthouse",
"nimbus",
"prysm",
"teku",
"js-libp2p",
"rust-libp2p",
}
p2pPeerCount = promauto.NewGaugeVec(prometheus.GaugeOpts{
Name: "p2p_peer_count",
Help: "The number of peers in a given state.",
},
[]string{"state"})
connectedPeersCount = promauto.NewGaugeVec(prometheus.GaugeOpts{
Name: "connected_libp2p_peers",
Help: "Tracks the total number of connected libp2p peers by agent string",
},
[]string{"agent"},
)
avgScoreConnectedClients = promauto.NewGaugeVec(prometheus.GaugeOpts{
Name: "connected_libp2p_peers_average_scores",
Help: "Tracks the overall p2p scores of connected libp2p peers by agent string",
},
[]string{"agent"},
)
totalPeerCount = promauto.NewGauge(prometheus.GaugeOpts{
Name: "libp2p_peers",
Help: "Tracks the total number of libp2p peers",
})
repeatPeerConnections = promauto.NewCounter(prometheus.CounterOpts{
Name: "p2p_repeat_attempts",
Help: "The number of repeat attempts the connection handler is triggered for a peer.",
@@ -65,60 +46,10 @@ var (
)
func (s *Service) updateMetrics() {
connectedPeers := s.peers.Connected()
p2pPeerCount.WithLabelValues("Connected").Set(float64(len(connectedPeers)))
totalPeerCount.Set(float64(len(s.peers.Connected())))
p2pPeerCount.WithLabelValues("Connected").Set(float64(len(s.peers.Connected())))
p2pPeerCount.WithLabelValues("Disconnected").Set(float64(len(s.peers.Disconnected())))
p2pPeerCount.WithLabelValues("Connecting").Set(float64(len(s.peers.Connecting())))
p2pPeerCount.WithLabelValues("Disconnecting").Set(float64(len(s.peers.Disconnecting())))
p2pPeerCount.WithLabelValues("Bad").Set(float64(len(s.peers.Bad())))
store := s.Host().Peerstore()
numConnectedPeersByClient := make(map[string]float64)
peerScoresByClient := make(map[string][]float64)
for i := 0; i < len(connectedPeers); i++ {
p := connectedPeers[i]
pid, err := peer.Decode(p.String())
if err != nil {
log.WithError(err).Debug("Could not decode peer string")
continue
}
// Get the agent data.
rawAgent, err := store.Get(pid, "AgentVersion")
agent, ok := rawAgent.(string)
if err != nil || !ok {
agent = "unknown"
}
foundName := "unknown"
for _, knownAgent := range knownAgentVersions {
// If the agent string matches one of our known agents, we set
// the value to our own, sanitized string.
if strings.Contains(strings.ToLower(agent), knownAgent) {
foundName = knownAgent
}
}
numConnectedPeersByClient[foundName] += 1
// Get peer scoring data.
overallScore := s.peers.Scorers().Score(pid)
peerScoresByClient[foundName] = append(peerScoresByClient[foundName], overallScore)
}
for agent, total := range numConnectedPeersByClient {
connectedPeersCount.WithLabelValues(agent).Set(total)
}
for agent, scoringData := range peerScoresByClient {
avgScore := average(scoringData)
avgScoreConnectedClients.WithLabelValues(agent).Set(avgScore)
}
}
func average(xs []float64) float64 {
if len(xs) == 0 {
return 0
}
total := 0.0
for _, v := range xs {
total += v
}
return total / float64(len(xs))
}
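For reference, the removed average helper guards the empty-slice case: average(nil) returns 0 instead of dividing by zero, and average([]float64{72, 74, 76}) yields 74.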

View File

@@ -245,7 +245,9 @@ func (s *Service) Start() {
})
async.RunEvery(s.ctx, 30*time.Minute, s.Peers().Prune)
async.RunEvery(s.ctx, params.BeaconNetworkConfig().RespTimeout, s.updateMetrics)
async.RunEvery(s.ctx, refreshRate, s.RefreshENR)
async.RunEvery(s.ctx, refreshRate, func() {
s.RefreshENR()
})
async.RunEvery(s.ctx, 1*time.Minute, func() {
log.WithFields(logrus.Fields{
"inbound": len(s.peers.InboundConnected()),

View File

@@ -6,6 +6,12 @@ datadir: /var/lib/prysm/beacon
# http-web3provider: ETH1 API endpoint, e.g. http://localhost:8545 for a local geth service on the default port
http-web3provider: http://localhost:8545
# fallback-web3provider: List of backup ETH1 API endpoints, used if above is not working
# For example:
# fallback-web3provider:
# - https://mainnet.infura.io/v3/YOUR-PROJECT-ID
# - https://eth-mainnet.alchemyapi.io/v2/YOUR-PROJECT-ID
# Optional tuning parameters
# For full list, see https://docs.prylabs.network/docs/prysm-usage/parameters

View File

@@ -50,7 +50,6 @@ func (_ *BeaconEndpointFactory) Paths() []string {
"/eth/v2/debug/beacon/states/{state_id}",
"/eth/v1/debug/beacon/heads",
"/eth/v2/debug/beacon/heads",
"/eth/v1/debug/beacon/forkchoice",
"/eth/v1/config/fork_schedule",
"/eth/v1/config/deposit_contract",
"/eth/v1/config/spec",
@@ -186,8 +185,6 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
endpoint.GetResponse = &forkChoiceHeadsResponseJson{}
case "/eth/v2/debug/beacon/heads":
endpoint.GetResponse = &v2ForkChoiceHeadsResponseJson{}
case "/eth/v1/debug/beacon/forkchoice":
endpoint.GetResponse = &forkchoiceResponse{}
case "/eth/v1/config/fork_schedule":
endpoint.GetResponse = &forkScheduleResponseJson{}
case "/eth/v1/config/deposit_contract":

View File

@@ -277,18 +277,6 @@ type submitContributionAndProofsRequestJson struct {
Data []*signedContributionAndProofJson `json:"data"`
}
type forkchoiceResponse struct {
JustifiedCheckpoint *checkpointJson `json:"justified_checkpoint"`
FinalizedCheckpoint *checkpointJson `json:"finalized_checkpoint"`
BestJustifiedCheckpoint *checkpointJson `json:"best_justified_checkpoint"`
UnrealizedJustifiedCheckpoint *checkpointJson `json:"unrealized_justified_checkpoint"`
UnrealizedFinalizedCheckpoint *checkpointJson `json:"unrealized_finalized_checkpoint"`
ProposerBoostRoot string `json:"proposer_boost_root" hex:"true"`
PreviousProposerBoostRoot string `json:"previous_proposer_boost_root" hex:"true"`
HeadRoot string `json:"head_root" hex:"true"`
ForkChoiceNodes []*forkChoiceNodeJson `json:"forkchoice_nodes"`
}
//----------------
// Reusable types.
//----------------
@@ -793,28 +781,14 @@ type validatorRegistrationJson struct {
}
type signedValidatorRegistrationJson struct {
Message *validatorRegistrationJson `json:"message"`
Signature string `json:"signature" hex:"true"`
Message validatorRegistrationJson `json:"message"`
Signature string `json:"signature" hex:"true"`
}
type signedValidatorRegistrationsRequestJson struct {
Registrations []*signedValidatorRegistrationJson `json:"registrations"`
}
type forkChoiceNodeJson struct {
Slot string `json:"slot"`
Root string `json:"root" hex:"true"`
ParentRoot string `json:"parent_root" hex:"true"`
JustifiedEpoch string `json:"justified_epoch"`
FinalizedEpoch string `json:"finalized_epoch"`
UnrealizedJustifiedEpoch string `json:"unrealized_justified_epoch"`
UnrealizedFinalizedEpoch string `json:"unrealized_finalized_epoch"`
Balance string `json:"balance"`
Weight string `json:"weight"`
ExecutionOptimistic bool `json:"execution_optimistic"`
ExecutionPayload string `json:"execution_payload" hex:"true"`
}
//----------------
// SSZ
// ---------------

View File

@@ -23,7 +23,6 @@ go_library(
"//beacon-chain/core/feed/block:go_default_library",
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/filters:go_default_library",
"//beacon-chain/execution:go_default_library",
@@ -83,7 +82,6 @@ go_test(
"//api/grpc:go_default_library",
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/execution/testing:go_default_library",

View File

@@ -722,24 +722,66 @@ func (bs *Server) ListBlockAttestations(ctx context.Context, req *ethpbv1.BlockR
return nil, err
}
v1Alpha1Attestations := blk.Block().Body().Attestations()
v1Attestations := make([]*ethpbv1.Attestation, 0, len(v1Alpha1Attestations))
for _, att := range v1Alpha1Attestations {
migratedAtt := migration.V1Alpha1AttestationToV1(att)
v1Attestations = append(v1Attestations, migratedAtt)
_, err = blk.PbPhase0Block()
if err != nil && !errors.Is(err, blocks.ErrUnsupportedGetter) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
root, err := blk.Block().HashTreeRoot()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get block root: %v", err)
if err == nil {
v1Blk, err := migration.SignedBeaconBlock(blk)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
return &ethpbv1.BlockAttestationsResponse{
Data: v1Blk.Block.Body.Attestations,
ExecutionOptimistic: false,
}, nil
}
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if block is optimistic: %v", err)
altairBlk, err := blk.PbAltairBlock()
if err != nil && !errors.Is(err, blocks.ErrUnsupportedGetter) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
return &ethpbv1.BlockAttestationsResponse{
Data: v1Attestations,
ExecutionOptimistic: isOptimistic,
}, nil
if err == nil {
if altairBlk == nil {
return nil, status.Errorf(codes.Internal, "Nil block")
}
v2Blk, err := migration.V1Alpha1BeaconBlockAltairToV2(altairBlk.Block)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
return &ethpbv1.BlockAttestationsResponse{
Data: v2Blk.Body.Attestations,
ExecutionOptimistic: false,
}, nil
}
bellatrixBlock, err := blk.PbBellatrixBlock()
if err != nil && !errors.Is(err, blocks.ErrUnsupportedGetter) {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
if err == nil {
if bellatrixBlock == nil {
return nil, status.Errorf(codes.Internal, "Nil block")
}
v2Blk, err := migration.V1Alpha1BeaconBlockBellatrixToV2(bellatrixBlock.Block)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
root, err := blk.Block().HashTreeRoot()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get block root: %v", err)
}
isOptimistic, err := bs.OptimisticModeFetcher.IsOptimisticForRoot(ctx, root)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check if block is optimistic: %v", err)
}
return &ethpbv1.BlockAttestationsResponse{
Data: v2Blk.Body.Attestations,
ExecutionOptimistic: isOptimistic,
}, nil
}
return nil, status.Errorf(codes.Internal, "Could not get signed beacon block: %v", err)
}
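A self-contained sketch of the getter-probing pattern the rewritten handler relies on, with stand-in types; blocks.ErrUnsupportedGetter is mimicked by a local sentinel, and the point illustrated is the fork order (phase0 first, then Altair, with Bellatrix following the same shape):

package main

import (
	"errors"
	"fmt"
)

// ErrUnsupportedGetter stands in for blocks.ErrUnsupportedGetter.
var ErrUnsupportedGetter = errors.New("unsupported getter")

// block is a stand-in for the SignedBeaconBlock interface: each
// version-specific getter succeeds only for the matching fork.
type block struct{ version string }

func (b block) PbPhase0Block() (string, error) {
	if b.version != "phase0" {
		return "", ErrUnsupportedGetter
	}
	return "phase0 payload", nil
}

func (b block) PbAltairBlock() (string, error) {
	if b.version != "altair" {
		return "", ErrUnsupportedGetter
	}
	return "altair payload", nil
}

// attestations mimics the handler's control flow: try each getter in
// fork order, treat ErrUnsupportedGetter as "try the next fork", and
// surface any other error immediately.
func attestations(b block) (string, error) {
	p, err := b.PbPhase0Block()
	if err != nil && !errors.Is(err, ErrUnsupportedGetter) {
		return "", err
	}
	if err == nil {
		return p, nil
	}
	a, err := b.PbAltairBlock()
	if err != nil && !errors.Is(err, ErrUnsupportedGetter) {
		return "", err
	}
	if err == nil {
		return a, nil
	}
	return "", errors.New("unknown block version")
}

func main() {
	out, err := attestations(block{version: "altair"})
	fmt.Println(out, err)
}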
func (bs *Server) blockFromBlockID(ctx context.Context, blockId []byte) (interfaces.SignedBeaconBlock, error) {

View File

@@ -1857,11 +1857,8 @@ func TestServer_ListBlockAttestations(t *testing.T) {
v1Block, err := migration.V1Alpha1ToV1SignedBlock(tt.want)
require.NoError(t, err)
blkAtts := blk.Data
if len(blkAtts) == 0 {
blkAtts = nil
}
if !reflect.DeepEqual(blkAtts, v1Block.Block.Body.Attestations) {
if !reflect.DeepEqual(blk.Data, v1Block.Block.Body.Attestations) {
t.Error("Expected attestations to equal")
}
})
@@ -1964,11 +1961,7 @@ func TestServer_ListBlockAttestations(t *testing.T) {
v1Block, err := migration.V1Alpha1BeaconBlockAltairToV2(tt.want.Block)
require.NoError(t, err)
blkAtts := blk.Data
if len(blkAtts) == 0 {
blkAtts = nil
}
if !reflect.DeepEqual(blkAtts, v1Block.Body.Attestations) {
if !reflect.DeepEqual(blk.Data, v1Block.Body.Attestations) {
t.Error("Expected attestations to equal")
}
})
@@ -2071,11 +2064,7 @@ func TestServer_ListBlockAttestations(t *testing.T) {
v1Block, err := migration.V1Alpha1BeaconBlockBellatrixToV2(tt.want.Block)
require.NoError(t, err)
blkAtts := blk.Data
if len(blkAtts) == 0 {
blkAtts = nil
}
if !reflect.DeepEqual(blkAtts, v1Block.Body.Attestations) {
if !reflect.DeepEqual(blk.Data, v1Block.Body.Attestations) {
t.Error("Expected attestations to equal")
}
})

View File

@@ -8,7 +8,6 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed/operation"
corehelpers "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/rpc/eth/helpers"
"github.com/prysmaticlabs/prysm/v3/config/features"
"github.com/prysmaticlabs/prysm/v3/crypto/bls"
@@ -165,10 +164,6 @@ func (bs *Server) SubmitAttesterSlashing(ctx context.Context, req *ethpbv1.Attes
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
}
headState, err = transition.ProcessSlotsIfPossible(ctx, headState, req.Attestation_1.Data.Slot)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not process slots: %v", err)
}
alphaSlashing := migration.V1AttSlashingToV1Alpha1(req)
err = blocks.VerifyAttesterSlashing(ctx, headState, alphaSlashing)
@@ -221,10 +216,6 @@ func (bs *Server) SubmitProposerSlashing(ctx context.Context, req *ethpbv1.Propo
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
}
headState, err = transition.ProcessSlotsIfPossible(ctx, headState, req.SignedHeader_1.Message.Slot)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not process slots: %v", err)
}
alphaSlashing := migration.V1ProposerSlashingToV1Alpha1(req)
err = blocks.VerifyProposerSlashing(headState, alphaSlashing)
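The removed lines above dropped the pre-verification slot advance. A minimal sketch, with stand-in types and a hypothetical fork slot, of why advancing the head state to the message's slot matters when the signing domain changes at a fork boundary:

package main

import "fmt"

// Stand-in for the beacon state; real code uses
// transition.ProcessSlotsIfPossible on state.BeaconState.
type state struct{ slot, fork uint64 }

const altairForkSlot = 32 // hypothetical: fork activates at epoch 1

// processSlotsIfPossible advances the state only when the target slot
// is ahead, mirroring the guard the removed helper provided.
func processSlotsIfPossible(s state, target uint64) state {
	if target > s.slot {
		s.slot = target
		if s.slot >= altairForkSlot {
			s.fork = 1 // signing domain changes at the fork
		}
	}
	return s
}

func main() {
	head := state{slot: 31, fork: 0}
	msgSlot := uint64(32) // message signed under the post-fork domain
	advanced := processSlotsIfPossible(head, msgSlot)
	fmt.Printf("verify with fork version %d, not %d\n", advanced.fork, head.fork)
}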
@@ -278,14 +269,6 @@ func (bs *Server) SubmitVoluntaryExit(ctx context.Context, req *ethpbv1.SignedVo
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
}
s, err := slots.EpochStart(req.Message.Epoch)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get epoch from message: %v", err)
}
headState, err = transition.ProcessSlotsIfPossible(ctx, headState, s)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not process slots: %v", err)
}
validator, err := headState.ValidatorAtIndexReadOnly(req.Message.ValidatorIndex)
if err != nil {

View File

@@ -11,7 +11,6 @@ import (
grpcutil "github.com/prysmaticlabs/prysm/v3/api/grpc"
blockchainmock "github.com/prysmaticlabs/prysm/v3/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/attestations"
slashingsmock "github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/slashings/mock"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/voluntaryexits/mock"
@@ -445,80 +444,6 @@ func TestSubmitAttesterSlashing_Ok(t *testing.T) {
assert.Equal(t, true, broadcaster.BroadcastCalled)
}
func TestSubmitAttesterSlashing_AcrossFork(t *testing.T) {
ctx := context.Background()
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.AltairForkEpoch = 1
params.OverrideBeaconConfig(config)
bs, keys := util.DeterministicGenesisState(t, 1)
slashing := &ethpbv1.AttesterSlashing{
Attestation_1: &ethpbv1.IndexedAttestation{
AttestingIndices: []uint64{0},
Data: &ethpbv1.AttestationData{
Slot: params.BeaconConfig().SlotsPerEpoch,
Index: 1,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot1"), 32),
Source: &ethpbv1.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("sourceroot1"), 32),
},
Target: &ethpbv1.Checkpoint{
Epoch: 10,
Root: bytesutil.PadTo([]byte("targetroot1"), 32),
},
},
Signature: make([]byte, 96),
},
Attestation_2: &ethpbv1.IndexedAttestation{
AttestingIndices: []uint64{0},
Data: &ethpbv1.AttestationData{
Slot: params.BeaconConfig().SlotsPerEpoch,
Index: 1,
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot2"), 32),
Source: &ethpbv1.Checkpoint{
Epoch: 1,
Root: bytesutil.PadTo([]byte("sourceroot2"), 32),
},
Target: &ethpbv1.Checkpoint{
Epoch: 10,
Root: bytesutil.PadTo([]byte("targetroot2"), 32),
},
},
Signature: make([]byte, 96),
},
}
newBs := bs.Copy()
newBs, err := transition.ProcessSlots(ctx, newBs, params.BeaconConfig().SlotsPerEpoch)
require.NoError(t, err)
for _, att := range []*ethpbv1.IndexedAttestation{slashing.Attestation_1, slashing.Attestation_2} {
sb, err := signing.ComputeDomainAndSign(newBs, att.Data.Target.Epoch, att.Data, params.BeaconConfig().DomainBeaconAttester, keys[0])
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
att.Signature = sig.Marshal()
}
broadcaster := &p2pMock.MockBroadcaster{}
s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
SlashingsPool: &slashingsmock.PoolMock{},
Broadcaster: broadcaster,
}
_, err = s.SubmitAttesterSlashing(ctx, slashing)
require.NoError(t, err)
pendingSlashings := s.SlashingsPool.PendingAttesterSlashings(ctx, bs, true)
require.Equal(t, 1, len(pendingSlashings))
assert.DeepEqual(t, migration.V1AttSlashingToV1Alpha1(slashing), pendingSlashings[0])
assert.Equal(t, true, broadcaster.BroadcastCalled)
}
func TestSubmitAttesterSlashing_InvalidSlashing(t *testing.T) {
ctx := context.Background()
bs, err := util.NewBeaconState()
@@ -626,68 +551,6 @@ func TestSubmitProposerSlashing_Ok(t *testing.T) {
assert.Equal(t, true, broadcaster.BroadcastCalled)
}
func TestSubmitProposerSlashing_AcrossFork(t *testing.T) {
ctx := context.Background()
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.AltairForkEpoch = 1
params.OverrideBeaconConfig(config)
bs, keys := util.DeterministicGenesisState(t, 1)
slashing := &ethpbv1.ProposerSlashing{
SignedHeader_1: &ethpbv1.SignedBeaconBlockHeader{
Message: &ethpbv1.BeaconBlockHeader{
Slot: params.BeaconConfig().SlotsPerEpoch,
ProposerIndex: 0,
ParentRoot: bytesutil.PadTo([]byte("parentroot1"), 32),
StateRoot: bytesutil.PadTo([]byte("stateroot1"), 32),
BodyRoot: bytesutil.PadTo([]byte("bodyroot1"), 32),
},
Signature: make([]byte, 96),
},
SignedHeader_2: &ethpbv1.SignedBeaconBlockHeader{
Message: &ethpbv1.BeaconBlockHeader{
Slot: params.BeaconConfig().SlotsPerEpoch,
ProposerIndex: 0,
ParentRoot: bytesutil.PadTo([]byte("parentroot2"), 32),
StateRoot: bytesutil.PadTo([]byte("stateroot2"), 32),
BodyRoot: bytesutil.PadTo([]byte("bodyroot2"), 32),
},
Signature: make([]byte, 96),
},
}
newBs := bs.Copy()
newBs, err := transition.ProcessSlots(ctx, newBs, params.BeaconConfig().SlotsPerEpoch)
require.NoError(t, err)
for _, h := range []*ethpbv1.SignedBeaconBlockHeader{slashing.SignedHeader_1, slashing.SignedHeader_2} {
sb, err := signing.ComputeDomainAndSign(
newBs,
slots.ToEpoch(h.Message.Slot),
h.Message,
params.BeaconConfig().DomainBeaconProposer,
keys[0],
)
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
h.Signature = sig.Marshal()
}
broadcaster := &p2pMock.MockBroadcaster{}
s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
SlashingsPool: &slashingsmock.PoolMock{},
Broadcaster: broadcaster,
}
_, err = s.SubmitProposerSlashing(ctx, slashing)
require.NoError(t, err)
}
func TestSubmitProposerSlashing_InvalidSlashing(t *testing.T) {
ctx := context.Background()
bs, err := util.NewBeaconState()
@@ -767,47 +630,6 @@ func TestSubmitVoluntaryExit_Ok(t *testing.T) {
assert.Equal(t, true, broadcaster.BroadcastCalled)
}
func TestSubmitVoluntaryExit_AcrossFork(t *testing.T) {
ctx := context.Background()
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.AltairForkEpoch = params.BeaconConfig().ShardCommitteePeriod + 1
params.OverrideBeaconConfig(config)
bs, keys := util.DeterministicGenesisState(t, 1)
// Satisfy activity time required before exiting.
require.NoError(t, bs.SetSlot(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().ShardCommitteePeriod))))
exit := &ethpbv1.SignedVoluntaryExit{
Message: &ethpbv1.VoluntaryExit{
Epoch: params.BeaconConfig().ShardCommitteePeriod + 1,
ValidatorIndex: 0,
},
Signature: make([]byte, 96),
}
newBs := bs.Copy()
newBs, err := transition.ProcessSlots(ctx, newBs, params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().ShardCommitteePeriod)+1))
require.NoError(t, err)
sb, err := signing.ComputeDomainAndSign(newBs, exit.Message.Epoch, exit.Message, params.BeaconConfig().DomainVoluntaryExit, keys[0])
require.NoError(t, err)
sig, err := bls.SignatureFromBytes(sb)
require.NoError(t, err)
exit.Signature = sig.Marshal()
broadcaster := &p2pMock.MockBroadcaster{}
s := &Server{
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
VoluntaryExitsPool: &mock.PoolMock{},
Broadcaster: broadcaster,
}
_, err = s.SubmitVoluntaryExit(ctx, exit)
require.NoError(t, err)
}
func TestSubmitVoluntaryExit_InvalidValidatorIndex(t *testing.T) {
ctx := context.Background()

View File

@@ -31,8 +31,6 @@ go_test(
deps = [
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/rpc/testutil:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
@@ -41,7 +39,6 @@ go_test(
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@io_bazel_rules_go//proto/wkt:empty_go_proto",
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
],
)

View File

@@ -140,8 +140,3 @@ func (ds *Server) ListForkChoiceHeadsV2(ctx context.Context, _ *emptypb.Empty) (
return resp, nil
}
// GetForkChoice returns a dump of the fork choice store.
func (ds *Server) GetForkChoice(ctx context.Context, _ *emptypb.Empty) (*ethpbv1.ForkChoiceResponse, error) {
return ds.ForkFetcher.ForkChoicer().ForkChoiceDump(ctx)
}

View File

@@ -4,11 +4,8 @@ import (
"context"
"testing"
"github.com/golang/protobuf/ptypes/empty"
blockchainmock "github.com/prysmaticlabs/prysm/v3/beacon-chain/blockchain/testing"
dbTest "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/rpc/testutil"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
@@ -240,18 +237,3 @@ func TestListForkChoiceHeadsV2(t *testing.T) {
}
})
}
func TestServer_GetForkChoice(t *testing.T) {
store := doublylinkedtree.New()
fRoot := [32]byte{'a'}
jRoot := [32]byte{'b'}
fc := &forkchoicetypes.Checkpoint{Epoch: 2, Root: fRoot}
jc := &forkchoicetypes.Checkpoint{Epoch: 3, Root: jRoot}
require.NoError(t, store.UpdateFinalizedCheckpoint(fc))
require.NoError(t, store.UpdateJustifiedCheckpoint(jc))
bs := &Server{ForkFetcher: &blockchainmock.ChainService{ForkChoiceStore: store}}
res, err := bs.GetForkChoice(context.Background(), &empty.Empty{})
require.NoError(t, err)
require.Equal(t, types.Epoch(3), res.JustifiedCheckpoint.Epoch, "Did not get wanted justified epoch")
require.Equal(t, types.Epoch(2), res.FinalizedCheckpoint.Epoch, "Did not get wanted finalized epoch")
}

View File

@@ -16,5 +16,4 @@ type Server struct {
HeadFetcher blockchain.HeadFetcher
StateFetcher statefetcher.Fetcher
OptimisticModeFetcher blockchain.OptimisticModeFetcher
ForkFetcher blockchain.ForkFetcher
}

View File

@@ -14,7 +14,6 @@ go_library(
"//beacon-chain/cache:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db/kv:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/synccommittee:go_default_library",
"//beacon-chain/p2p:go_default_library",
@@ -54,13 +53,11 @@ go_test(
"//beacon-chain/builder/testing:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/execution/testing:go_default_library",
"//beacon-chain/forkchoice/protoarray:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/attestations/mock:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
@@ -75,7 +72,6 @@ go_test(
"//beacon-chain/sync/initial-sync/testing:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
@@ -90,7 +86,6 @@ go_test(
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)

View File

@@ -16,7 +16,6 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/db/kv"
rpchelpers "github.com/prysmaticlabs/prysm/v3/beacon-chain/rpc/eth/helpers"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
statev1 "github.com/prysmaticlabs/prysm/v3/beacon-chain/state/v1"
@@ -195,11 +194,11 @@ func (vs *Server) GetProposerDuties(ctx context.Context, req *ethpbv1.ProposerDu
// where `epoch` is described as `epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD <= current_epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD + 1`.
//
// Algorithm:
// - Get the last valid epoch. This is the last epoch of the next sync committee period.
// - Get the state for the requested epoch. If it's a future epoch from the current sync committee period
// or an epoch from the next sync committee period, then get the current state.
// - Get the state's current sync committee. If it's an epoch from the next sync committee period, then get the next sync committee.
// - Get duties.
// - Get the last valid epoch. This is the last epoch of the next sync committee period.
// - Get the state for the requested epoch. If it's a future epoch from the current sync committee period
// or an epoch from the next sync committee period, then get the current state.
// - Get the state's current sync committee. If it's an epoch from the next sync committee period, then get the next sync committee.
// - Get duties.
func (vs *Server) GetSyncCommitteeDuties(ctx context.Context, req *ethpbv2.SyncCommitteeDutiesRequest) (*ethpbv2.SyncCommitteeDutiesResponse, error) {
ctx, span := trace.StartSpan(ctx, "validator.GetSyncCommitteeDuties")
defer span.End()
@@ -404,11 +403,7 @@ func (vs *Server) ProduceBlockV2SSZ(ctx context.Context, req *ethpbv1.ProduceBlo
// ProduceBlindedBlock requests the beacon node to produce a valid unsigned blinded beacon block,
// which can then be signed by a proposer and submitted.
//
// Under the following conditions, this endpoint will return an error.
// - The node is syncing or in optimistic mode (after Bellatrix).
// - The builder is not configured (after Bellatrix).
// - The relayer circuit breaker is activated (after Bellatrix).
// - The relayer responded with an error (after Bellatrix).
// Pre-Bellatrix, this endpoint will return a regular block.
func (vs *Server) ProduceBlindedBlock(ctx context.Context, req *ethpbv1.ProduceBlockRequest) (*ethpbv2.ProduceBlindedBlockResponse, error) {
ctx, span := trace.StartSpan(ctx, "validator.ProduceBlindedBlock")
defer span.End()
@@ -417,76 +412,57 @@ func (vs *Server) ProduceBlindedBlock(ctx context.Context, req *ethpbv1.ProduceB
// We simply return the error because it's already a gRPC error.
return nil, err
}
v1alpha1req := &ethpbalpha.BlockRequest{
Slot: req.Slot,
RandaoReveal: req.RandaoReveal,
Graffiti: req.Graffiti,
}
// Before Bellatrix, return a normal block.
if req.Slot < types.Slot(params.BeaconConfig().BellatrixForkEpoch)*params.BeaconConfig().SlotsPerEpoch {
v1alpha1resp, err := vs.V1Alpha1Server.GetBeaconBlock(ctx, v1alpha1req)
v1alpha1resp, err := vs.V1Alpha1Server.GetBeaconBlock(ctx, v1alpha1req)
if err != nil {
// We simply return err because it's already of a gRPC error type.
return nil, err
}
phase0Block, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Phase0)
if ok {
block, err := migration.V1Alpha1ToV1Block(phase0Block.Phase0)
if err != nil {
// We simply return err because it's already of a gRPC error type.
return nil, err
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
}
phase0Block, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Phase0)
if ok {
block, err := migration.V1Alpha1ToV1Block(phase0Block.Phase0)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
}
return &ethpbv2.ProduceBlindedBlockResponse{
Version: ethpbv2.Version_PHASE0,
Data: &ethpbv2.BlindedBeaconBlockContainer{
Block: &ethpbv2.BlindedBeaconBlockContainer_Phase0Block{Phase0Block: block},
},
}, nil
return &ethpbv2.ProduceBlindedBlockResponse{
Version: ethpbv2.Version_PHASE0,
Data: &ethpbv2.BlindedBeaconBlockContainer{
Block: &ethpbv2.BlindedBeaconBlockContainer_Phase0Block{Phase0Block: block},
},
}, nil
}
altairBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Altair)
if ok {
block, err := migration.V1Alpha1BeaconBlockAltairToV2(altairBlock.Altair)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
}
altairBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Altair)
if ok {
block, err := migration.V1Alpha1BeaconBlockAltairToV2(altairBlock.Altair)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
}
return &ethpbv2.ProduceBlindedBlockResponse{
Version: ethpbv2.Version_ALTAIR,
Data: &ethpbv2.BlindedBeaconBlockContainer{
Block: &ethpbv2.BlindedBeaconBlockContainer_AltairBlock{AltairBlock: block},
},
}, nil
return &ethpbv2.ProduceBlindedBlockResponse{
Version: ethpbv2.Version_ALTAIR,
Data: &ethpbv2.BlindedBeaconBlockContainer{
Block: &ethpbv2.BlindedBeaconBlockContainer_AltairBlock{AltairBlock: block},
},
}, nil
}
bellatrixBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Bellatrix)
if ok {
block, err := migration.V1Alpha1BeaconBlockBellatrixToV2Blinded(bellatrixBlock.Bellatrix)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
}
return &ethpbv2.ProduceBlindedBlockResponse{
Version: ethpbv2.Version_BELLATRIX,
Data: &ethpbv2.BlindedBeaconBlockContainer{
Block: &ethpbv2.BlindedBeaconBlockContainer_BellatrixBlock{BellatrixBlock: block},
},
}, nil
}
// After Bellatrix, return a blinded block.
optimistic, err := vs.OptimisticModeFetcher.IsOptimistic(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not determine if the node is an optimistic node: %v", err)
}
if optimistic {
return nil, status.Errorf(codes.Unavailable, "The node is currently optimistic and cannot serve validators")
}
altairBlk, err := vs.V1Alpha1Server.BuildAltairBeaconBlock(ctx, v1alpha1req)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
}
ok, b, err := vs.V1Alpha1Server.GetAndBuildBlindBlock(ctx, altairBlk)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare blind beacon block: %v", err)
}
if !ok {
return nil, status.Error(codes.Unavailable, "Builder is not available due to misconfiguration or circuit breaker")
}
blk, err := migration.V1Alpha1BeaconBlockBlindedBellatrixToV2Blinded(b.GetBlindedBellatrix())
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
}
return &ethpbv2.ProduceBlindedBlockResponse{
Version: ethpbv2.Version_BELLATRIX,
Data: &ethpbv2.BlindedBeaconBlockContainer{
Block: &ethpbv2.BlindedBeaconBlockContainer_BellatrixBlock{BellatrixBlock: blk},
},
}, nil
return nil, status.Error(codes.InvalidArgument, "Unsupported block type")
}
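A tiny sketch of the slot gate used above, with both constants hard-coded as assumptions (real code reads the fork epoch and slots-per-epoch from params.BeaconConfig()):

package main

import "fmt"

const slotsPerEpoch = 32          // hypothetical
const bellatrixForkEpoch = 144896 // hypothetical mainnet-style value

// preBellatrix reproduces the guard above: compare the requested slot
// against the first slot of the Bellatrix fork epoch.
func preBellatrix(slot uint64) bool {
	return slot < bellatrixForkEpoch*slotsPerEpoch
}

func main() {
	fmt.Println(preBellatrix(bellatrixForkEpoch*slotsPerEpoch - 1)) // true
	fmt.Println(preBellatrix(bellatrixForkEpoch * slotsPerEpoch))   // false
}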
// ProduceBlindedBlockSSZ requests the beacon node to produce a valid unsigned blinded beacon block,
@@ -570,24 +546,7 @@ func (vs *Server) PrepareBeaconProposer(
defer span.End()
var feeRecipients []common.Address
var validatorIndices []types.ValidatorIndex
newRecipients := make([]*ethpbv1.PrepareBeaconProposerRequest_FeeRecipientContainer, 0, len(request.Recipients))
for _, r := range request.Recipients {
f, err := vs.V1Alpha1Server.BeaconDB.FeeRecipientByValidatorID(ctx, r.ValidatorIndex)
switch {
case errors.Is(err, kv.ErrNotFoundFeeRecipient):
newRecipients = append(newRecipients, r)
case err != nil:
return nil, status.Errorf(codes.Internal, "Could not get fee recipient by validator index: %v", err)
default:
}
if common.BytesToAddress(r.FeeRecipient) != f {
newRecipients = append(newRecipients, r)
}
}
if len(newRecipients) == 0 {
return &emptypb.Empty{}, nil
}
for _, recipientContainer := range newRecipients {
for _, recipientContainer := range request.Recipients {
recipient := hexutil.Encode(recipientContainer.FeeRecipient)
if !common.IsHexAddress(recipient) {
return nil, status.Errorf(codes.InvalidArgument, fmt.Sprintf("Invalid fee recipient address: %v", recipient))
@@ -1035,6 +994,19 @@ func v1ValidatorStatusToV1Alpha1(valStatus ethpbv1.ValidatorStatus) ethpbalpha.V
}
}
func (vs *Server) v1BeaconBlock(ctx context.Context, req *ethpbv1.ProduceBlockRequest) (*ethpbv1.BeaconBlock, error) {
v1alpha1req := &ethpbalpha.BlockRequest{
Slot: req.Slot,
RandaoReveal: req.RandaoReveal,
Graffiti: req.Graffiti,
}
v1alpha1resp, err := vs.V1Alpha1Server.GetBeaconBlock(ctx, v1alpha1req)
if err != nil {
return nil, err
}
return migration.V1Alpha1ToV1Block(v1alpha1resp.GetPhase0())
}
func syncCommitteeDutiesLastValidEpoch(currentEpoch types.Epoch) types.Epoch {
currentSyncPeriodIndex := currentEpoch / params.BeaconConfig().EpochsPerSyncCommitteePeriod
// Return the last epoch of the next sync committee.
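The hunk ends mid-function, but the computation the GetSyncCommitteeDuties doc comment describes can be sketched standalone; the period length is hard-coded as an assumption (the real value comes from params.BeaconConfig().EpochsPerSyncCommitteePeriod):

package main

import "fmt"

const epochsPerSyncCommitteePeriod = 256 // hypothetical

// lastValidEpoch returns the last epoch of the *next* sync committee
// period, i.e. the largest epoch satisfying
// epoch/PERIOD <= current/PERIOD + 1.
func lastValidEpoch(currentEpoch uint64) uint64 {
	currentPeriod := currentEpoch / epochsPerSyncCommitteePeriod
	return (currentPeriod+2)*epochsPerSyncCommitteePeriod - 1
}

func main() {
	fmt.Println(lastValidEpoch(0))   // 511: anywhere in period 0 allows up to the end of period 1
	fmt.Println(lastValidEpoch(300)) // 767: period 1, so up to the end of period 2
}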

View File

@@ -13,13 +13,11 @@ import (
builderTest "github.com/prysmaticlabs/prysm/v3/beacon-chain/builder/testing"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/signing"
coreTime "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition"
dbutil "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
mockExecution "github.com/prysmaticlabs/prysm/v3/beacon-chain/execution/testing"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/protoarray"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/attestations/mock"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/slashings"
@@ -34,7 +32,6 @@ import (
mockSync "github.com/prysmaticlabs/prysm/v3/beacon-chain/sync/initial-sync/testing"
fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/crypto/bls"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
@@ -47,7 +44,6 @@ import (
"github.com/prysmaticlabs/prysm/v3/testing/require"
"github.com/prysmaticlabs/prysm/v3/testing/util"
"github.com/prysmaticlabs/prysm/v3/time/slots"
logTest "github.com/sirupsen/logrus/hooks/test"
"google.golang.org/protobuf/proto"
)
@@ -1845,7 +1841,7 @@ func TestProduceBlindedBlock(t *testing.T) {
assert.DeepEqual(t, aggregatedSig, blk.Body.SyncAggregate.SyncCommitteeSignature)
})
t.Run("Can get blind block from builder service", func(t *testing.T) {
t.Run("Bellatrix", func(t *testing.T) {
db := dbutil.SetupDB(t)
ctx := context.Background()
@@ -1853,8 +1849,6 @@ func TestProduceBlindedBlock(t *testing.T) {
bc := params.BeaconConfig().Copy()
bc.AltairForkEpoch = types.Epoch(0)
bc.BellatrixForkEpoch = types.Epoch(1)
bc.MaxBuilderConsecutiveMissedSlots = params.BeaconConfig().SlotsPerEpoch + 1
bc.MaxBuilderEpochMissedSlots = params.BeaconConfig().SlotsPerEpoch
params.OverrideBeaconConfig(bc)
beaconState, privKeys := util.DeterministicGenesisStateBellatrix(t, params.BeaconConfig().SyncCommitteeSize)
@@ -1875,56 +1869,14 @@ func TestProduceBlindedBlock(t *testing.T) {
require.NoError(t, db.SaveState(ctx, beaconState, parentRoot), "Could not save genesis state")
require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save head block root")
fb := util.HydrateSignedBeaconBlockBellatrix(&ethpbalpha.SignedBeaconBlockBellatrix{})
fb.Block.Body.ExecutionPayload.GasLimit = 123
wfb, err := blocks.NewSignedBeaconBlock(fb)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, wfb), "Could not save block")
r, err := wfb.Block().HashTreeRoot()
require.NoError(t, err)
sk, err := bls.RandKey()
require.NoError(t, err)
ti := time.Unix(0, 0)
ts, err := slots.ToTime(uint64(ti.Unix()), 33)
require.NoError(t, err)
require.NoError(t, beaconState.SetGenesisTime(uint64(ti.Unix())))
random, err := helpers.RandaoMix(beaconState, coreTime.CurrentEpoch(beaconState))
require.NoError(t, err)
bid := &ethpbalpha.BuilderBid{
Header: &enginev1.ExecutionPayloadHeader{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: random,
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
TransactionsRoot: make([]byte, fieldparams.RootLength),
BlockNumber: 1,
Timestamp: uint64(ts.Unix()),
},
Pubkey: sk.PublicKey().Marshal(),
Value: bytesutil.PadTo([]byte{1, 2, 3}, 32),
}
d := params.BeaconConfig().DomainApplicationBuilder
domain, err := signing.ComputeDomain(d, nil, nil)
require.NoError(t, err)
sr, err := signing.ComputeSigningRoot(bid, domain)
require.NoError(t, err)
sBid := &ethpbalpha.SignedBuilderBid{
Message: bid,
Signature: sk.Sign(sr[:]).Marshal(),
}
v1Alpha1Server := &v1alpha1validator.Server{
BeaconDB: db,
ForkFetcher: &mockChain.ChainService{ForkChoiceStore: protoarray.New()},
TimeFetcher: &mockChain.ChainService{
Genesis: ti,
ExecutionEngineCaller: &mockExecution.EngineClient{
ExecutionBlock: &enginev1.ExecutionBlock{
TotalDifficulty: "0x1",
},
},
HeadFetcher: &mockChain.ChainService{State: beaconState, Root: parentRoot[:], Block: wfb},
TimeFetcher: &mockChain.ChainService{},
HeadFetcher: &mockChain.ChainService{State: beaconState, Root: parentRoot[:]},
OptimisticModeFetcher: &mockChain.ChainService{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
BlockReceiver: &mockChain.ChainService{},
@@ -1939,15 +1891,6 @@ func TestProduceBlindedBlock(t *testing.T) {
StateGen: stategen.New(db),
SyncCommitteePool: synccommittee.NewStore(),
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
BlockBuilder: &builderTest.MockBuilderService{
HasConfigured: true,
Bid: sBid,
},
FinalizationFetcher: &mockChain.ChainService{
FinalizedCheckPoint: &ethpbalpha.Checkpoint{
Root: r[:],
},
},
}
proposerSlashings := make([]*ethpbalpha.ProposerSlashing, params.BeaconConfig().MaxProposerSlashings)
@@ -2005,10 +1948,8 @@ func TestProduceBlindedBlock(t *testing.T) {
require.NoError(t, v1Alpha1Server.SyncCommitteePool.SaveSyncCommitteeContribution(contribution))
v1Server := &Server{
V1Alpha1Server: v1Alpha1Server,
SyncChecker: &mockSync.Sync{IsSyncing: false},
TimeFetcher: &mockChain.ChainService{},
OptimisticModeFetcher: &mockChain.ChainService{},
V1Alpha1Server: v1Alpha1Server,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
randaoReveal, err := util.RandaoReveal(beaconState, 1, privKeys)
require.NoError(t, err)
@@ -3680,89 +3621,6 @@ func TestPrepareBeaconProposer(t *testing.T) {
})
}
}
func TestProposer_PrepareBeaconProposerOverlapping(t *testing.T) {
hook := logTest.NewGlobal()
db := dbutil.SetupDB(t)
ctx := context.Background()
v1Server := &v1alpha1validator.Server{
BeaconDB: db,
}
proposerServer := &Server{V1Alpha1Server: v1Server}
// New validator
f := bytesutil.PadTo([]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF}, fieldparams.FeeRecipientLength)
req := &ethpbv1.PrepareBeaconProposerRequest{
Recipients: []*ethpbv1.PrepareBeaconProposerRequest_FeeRecipientContainer{
{FeeRecipient: f, ValidatorIndex: 1},
},
}
_, err := proposerServer.PrepareBeaconProposer(ctx, req)
require.NoError(t, err)
require.LogsContain(t, hook, "Updated fee recipient addresses for validator indices")
// Same validator
hook.Reset()
_, err = proposerServer.PrepareBeaconProposer(ctx, req)
require.NoError(t, err)
require.LogsDoNotContain(t, hook, "Updated fee recipient addresses for validator indices")
// Same validator with different fee recipient
hook.Reset()
f = bytesutil.PadTo([]byte{0x01, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF}, fieldparams.FeeRecipientLength)
req = &ethpbv1.PrepareBeaconProposerRequest{
Recipients: []*ethpbv1.PrepareBeaconProposerRequest_FeeRecipientContainer{
{FeeRecipient: f, ValidatorIndex: 1},
},
}
_, err = proposerServer.PrepareBeaconProposer(ctx, req)
require.NoError(t, err)
require.LogsContain(t, hook, "Updated fee recipient addresses for validator indices")
// More than one validator
hook.Reset()
f = bytesutil.PadTo([]byte{0x01, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF}, fieldparams.FeeRecipientLength)
req = &ethpbv1.PrepareBeaconProposerRequest{
Recipients: []*ethpbv1.PrepareBeaconProposerRequest_FeeRecipientContainer{
{FeeRecipient: f, ValidatorIndex: 1},
{FeeRecipient: f, ValidatorIndex: 2},
},
}
_, err = proposerServer.PrepareBeaconProposer(ctx, req)
require.NoError(t, err)
require.LogsContain(t, hook, "Updated fee recipient addresses for validator indices")
// Same validators
hook.Reset()
_, err = proposerServer.PrepareBeaconProposer(ctx, req)
require.NoError(t, err)
require.LogsDoNotContain(t, hook, "Updated fee recipient addresses for validator indices")
}
func BenchmarkServer_PrepareBeaconProposer(b *testing.B) {
db := dbutil.SetupDB(b)
ctx := context.Background()
v1Server := &v1alpha1validator.Server{
BeaconDB: db,
}
proposerServer := &Server{V1Alpha1Server: v1Server}
f := bytesutil.PadTo([]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF}, fieldparams.FeeRecipientLength)
recipients := make([]*ethpbv1.PrepareBeaconProposerRequest_FeeRecipientContainer, 0)
for i := 0; i < 10000; i++ {
recipients = append(recipients, &ethpbv1.PrepareBeaconProposerRequest_FeeRecipientContainer{FeeRecipient: f, ValidatorIndex: types.ValidatorIndex(i)})
}
req := &ethpbv1.PrepareBeaconProposerRequest{
Recipients: recipients,
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := proposerServer.PrepareBeaconProposer(ctx, req)
if err != nil {
b.Fatal(err)
}
}
}
func TestServer_SubmitValidatorRegistrations(t *testing.T) {
type args struct {

View File

@@ -754,6 +754,8 @@ func (bs *Server) GetValidatorPerformance(
beforeTransitionBalances := make([]uint64, 0, responseCap)
afterTransitionBalances := make([]uint64, 0, responseCap)
effectiveBalances := make([]uint64, 0, responseCap)
inclusionSlots := make([]types.Slot, 0, responseCap)
inclusionDistances := make([]types.Slot, 0, responseCap)
correctlyVotedSource := make([]bool, 0, responseCap)
correctlyVotedTarget := make([]bool, 0, responseCap)
correctlyVotedHead := make([]bool, 0, responseCap)
@@ -787,6 +789,8 @@ func (bs *Server) GetValidatorPerformance(
if headState.Version() == version.Phase0 {
correctlyVotedSource = append(correctlyVotedSource, summary.IsPrevEpochAttester)
inclusionSlots = append(inclusionSlots, summary.InclusionSlot)
inclusionDistances = append(inclusionDistances, summary.InclusionDistance)
} else {
correctlyVotedSource = append(correctlyVotedSource, summary.IsPrevEpochSourceAttester)
inactivityScores = append(inactivityScores, summary.InactivityScore)
@@ -802,7 +806,9 @@ func (bs *Server) GetValidatorPerformance(
BalancesBeforeEpochTransition: beforeTransitionBalances,
BalancesAfterEpochTransition: afterTransitionBalances,
MissingValidators: missingValidators,
InactivityScores: inactivityScores, // Only populated in Altair
InclusionSlots: inclusionSlots, // Only populated in phase0
InclusionDistances: inclusionDistances, // Only populated in phase 0
InactivityScores: inactivityScores, // Only populated in Altair
}, nil
}
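A compact sketch of the fork-gated response population shown above, with stand-in types; the intent is that phase0 responses carry inclusion data while Altair and later carry inactivity scores, and the unused slice stays nil:

package main

import "fmt"

const (
	phase0 = iota
	altair
)

// summary stands in for the per-validator precompute summary.
type summary struct {
	inclusionSlot   uint64
	inactivityScore uint64
}

func main() {
	version := altair
	s := summary{inclusionSlot: 42, inactivityScore: 7}

	// Mirror the handler: populate only the fields that exist for the
	// state's fork; the other slice stays nil so clients can tell
	// which fork produced the reply.
	var inclusionSlots []uint64
	var inactivityScores []uint64
	if version == phase0 {
		inclusionSlots = append(inclusionSlots, s.inclusionSlot)
	} else {
		inactivityScores = append(inactivityScores, s.inactivityScore)
	}
	fmt.Println(inclusionSlots, inactivityScores) // [] [7]
}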

View File

@@ -1846,9 +1846,12 @@ func TestGetValidatorPerformance_OK(t *testing.T) {
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
farFuture := params.BeaconConfig().FarFutureSlot
want := &ethpb.ValidatorPerformanceResponse{
PublicKeys: [][]byte{publicKey2[:], publicKey3[:]},
CurrentEffectiveBalances: []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
InclusionSlots: []types.Slot{farFuture, farFuture},
InclusionDistances: []types.Slot{farFuture, farFuture},
CorrectlyVotedSource: []bool{false, false},
CorrectlyVotedTarget: []bool{false, false},
CorrectlyVotedHead: []bool{false, false},
@@ -1915,9 +1918,12 @@ func TestGetValidatorPerformance_Indices(t *testing.T) {
require.NoError(t, err)
_, err = precompute.ProcessRewardsAndPenaltiesPrecompute(c, bp, vp, precompute.AttestationsDelta, precompute.ProposersDelta)
require.NoError(t, err)
farFuture := params.BeaconConfig().FarFutureSlot
want := &ethpb.ValidatorPerformanceResponse{
PublicKeys: [][]byte{publicKey2[:], publicKey3[:]},
CurrentEffectiveBalances: []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
InclusionSlots: []types.Slot{farFuture, farFuture},
InclusionDistances: []types.Slot{farFuture, farFuture},
CorrectlyVotedSource: []bool{false, false},
CorrectlyVotedTarget: []bool{false, false},
CorrectlyVotedHead: []bool{false, false},
@@ -1985,9 +1991,12 @@ func TestGetValidatorPerformance_IndicesPubkeys(t *testing.T) {
require.NoError(t, err)
_, err = precompute.ProcessRewardsAndPenaltiesPrecompute(c, bp, vp, precompute.AttestationsDelta, precompute.ProposersDelta)
require.NoError(t, err)
farFuture := params.BeaconConfig().FarFutureSlot
want := &ethpb.ValidatorPerformanceResponse{
PublicKeys: [][]byte{publicKey2[:], publicKey3[:]},
CurrentEffectiveBalances: []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
InclusionSlots: []types.Slot{farFuture, farFuture},
InclusionDistances: []types.Slot{farFuture, farFuture},
CorrectlyVotedSource: []bool{false, false},
CorrectlyVotedTarget: []bool{false, false},
CorrectlyVotedHead: []bool{false, false},
@@ -2056,6 +2065,8 @@ func TestGetValidatorPerformanceAltair_OK(t *testing.T) {
want := &ethpb.ValidatorPerformanceResponse{
PublicKeys: [][]byte{publicKey2[:], publicKey3[:]},
CurrentEffectiveBalances: []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
InclusionSlots: nil,
InclusionDistances: nil,
CorrectlyVotedSource: []bool{false, false},
CorrectlyVotedTarget: []bool{false, false},
CorrectlyVotedHead: []bool{false, false},
@@ -2124,6 +2135,8 @@ func TestGetValidatorPerformanceBellatrix_OK(t *testing.T) {
want := &ethpb.ValidatorPerformanceResponse{
PublicKeys: [][]byte{publicKey2[:], publicKey3[:]},
CurrentEffectiveBalances: []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
InclusionSlots: nil,
InclusionDistances: nil,
CorrectlyVotedSource: []bool{false, false},
CorrectlyVotedTarget: []bool{false, false},
CorrectlyVotedHead: []bool{false, false},

View File

@@ -53,7 +53,6 @@ go_library(
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/sync:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
@@ -65,7 +64,6 @@ go_library(
"//crypto/hash:go_default_library",
"//crypto/rand:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz:go_default_library",
"//monitoring/tracing:go_default_library",
"//network/forks:go_default_library",
"//proto/engine/v1:go_default_library",

View File

@@ -14,7 +14,6 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed"
blockfeed "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/feed/block"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/db/kv"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
@@ -82,26 +81,7 @@ func (vs *Server) PrepareBeaconProposer(
defer span.End()
var feeRecipients []common.Address
var validatorIndices []types.ValidatorIndex
newRecipients := make([]*ethpb.PrepareBeaconProposerRequest_FeeRecipientContainer, 0, len(request.Recipients))
for _, r := range request.Recipients {
f, err := vs.BeaconDB.FeeRecipientByValidatorID(ctx, r.ValidatorIndex)
switch {
case errors.Is(err, kv.ErrNotFoundFeeRecipient):
newRecipients = append(newRecipients, r)
case err != nil:
return nil, status.Errorf(codes.Internal, "Could not get fee recipient by validator index: %v", err)
default:
}
if common.BytesToAddress(r.FeeRecipient) != f {
newRecipients = append(newRecipients, r)
}
}
if len(newRecipients) == 0 {
return &emptypb.Empty{}, nil
}
for _, recipientContainer := range newRecipients {
for _, recipientContainer := range request.Recipients {
recipient := hexutil.Encode(recipientContainer.FeeRecipient)
if !common.IsHexAddress(recipient) {
return nil, status.Errorf(codes.InvalidArgument, fmt.Sprintf("Invalid fee recipient address: %v", recipient))
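The lines removed above performed a read-before-write dedup against the fee-recipient bucket. A standalone sketch of that filter, with toy types standing in for the DB and kv.ErrNotFoundFeeRecipient; the not-found case short-circuits here so a new index is appended only once:

package main

import (
	"errors"
	"fmt"
)

// errNotFound stands in for kv.ErrNotFoundFeeRecipient.
var errNotFound = errors.New("fee recipient not found")

// db is a toy stand-in for the fee-recipient bucket keyed by
// validator index.
type db map[uint64]string

func (d db) feeRecipient(idx uint64) (string, error) {
	r, ok := d[idx]
	if !ok {
		return "", errNotFound
	}
	return r, nil
}

type container struct {
	index     uint64
	recipient string
}

// filterNew keeps only containers whose recipient is absent from, or
// different to, what the DB already holds, so unchanged requests can
// return early without logging or persisting anything.
func filterNew(d db, in []container) ([]container, error) {
	out := make([]container, 0, len(in))
	for _, c := range in {
		stored, err := d.feeRecipient(c.index)
		switch {
		case errors.Is(err, errNotFound):
			out = append(out, c)
			continue
		case err != nil:
			return nil, err
		}
		if stored != c.recipient {
			out = append(out, c)
		}
	}
	return out, nil
}

func main() {
	d := db{1: "0xaaaa"}
	got, _ := filterNew(d, []container{
		{index: 1, recipient: "0xaaaa"}, // unchanged: dropped
		{index: 1, recipient: "0xbbbb"}, // changed: kept
		{index: 2, recipient: "0xcccc"}, // new: kept
	})
	fmt.Println(got)
}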

View File

@@ -15,8 +15,8 @@ import (
"go.opencensus.io/trace"
)
func (vs *Server) BuildAltairBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (*ethpb.BeaconBlockAltair, error) {
ctx, span := trace.StartSpan(ctx, "ProposerServer.BuildAltairBeaconBlock")
func (vs *Server) buildAltairBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (*ethpb.BeaconBlockAltair, error) {
ctx, span := trace.StartSpan(ctx, "ProposerServer.buildAltairBeaconBlock")
defer span.End()
blkData, err := vs.buildPhase0BlockData(ctx, req)
if err != nil {
@@ -55,7 +55,7 @@ func (vs *Server) BuildAltairBeaconBlock(ctx context.Context, req *ethpb.BlockRe
func (vs *Server) getAltairBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (*ethpb.BeaconBlockAltair, error) {
ctx, span := trace.StartSpan(ctx, "ProposerServer.getAltairBeaconBlock")
defer span.End()
blk, err := vs.BuildAltairBeaconBlock(ctx, req)
blk, err := vs.buildAltairBeaconBlock(ctx, req)
if err != nil {
return nil, fmt.Errorf("could not build block data: %v", err)
}

View File

@@ -4,7 +4,6 @@ import (
"bytes"
"context"
"fmt"
"math/big"
"time"
"github.com/pkg/errors"
@@ -20,7 +19,6 @@ import (
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v3/encoding/ssz"
enginev1 "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/runtime/version"
@@ -39,14 +37,14 @@ var builderGetPayloadMissCount = promauto.NewCounter(prometheus.CounterOpts{
const blockBuilderTimeout = 1 * time.Second
func (vs *Server) getBellatrixBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (*ethpb.GenericBeaconBlock, error) {
altairBlk, err := vs.BuildAltairBeaconBlock(ctx, req)
altairBlk, err := vs.buildAltairBeaconBlock(ctx, req)
if err != nil {
return nil, err
}
registered, err := vs.validatorRegistered(ctx, altairBlk.ProposerIndex)
if registered && err == nil {
builderReady, b, err := vs.GetAndBuildBlindBlock(ctx, altairBlk)
builderReady, b, err := vs.getAndBuildBlindBlock(ctx, altairBlk)
if err != nil {
// In the event of an error, the node should fall back to default execution engine for building block.
log.WithError(err).Error("Failed to build a block from external builder, falling " +
@@ -110,7 +108,6 @@ func (vs *Server) getPayloadHeaderFromBuilder(ctx context.Context, slot types.Sl
if blocks.IsPreBellatrixVersion(b.Version()) {
return nil, nil
}
h, err := b.Block().Body().Execution()
if err != nil {
return nil, err
@@ -123,24 +120,6 @@ func (vs *Server) getPayloadHeaderFromBuilder(ctx context.Context, slot types.Sl
if err != nil {
return nil, err
}
if bid == nil || bid.Message == nil {
return nil, errors.New("builder returned nil bid")
}
v := new(big.Int).SetBytes(bytesutil.ReverseByteOrder(bid.Message.Value))
if v.String() == "0" {
return nil, errors.New("builder returned header with 0 bid amount")
}
emptyRoot, err := ssz.TransactionsRoot([][]byte{})
if err != nil {
return nil, err
}
if bytesutil.ToBytes32(bid.Message.Header.TransactionsRoot) == emptyRoot {
return nil, errors.New("builder returned header with an empty tx root")
}
if !bytes.Equal(bid.Message.Header.ParentHash, h.BlockHash()) {
return nil, fmt.Errorf("incorrect parent hash %#x != %#x", bid.Message.Header.ParentHash, h.BlockHash())
}
@@ -158,7 +137,7 @@ func (vs *Server) getPayloadHeaderFromBuilder(ctx context.Context, slot types.Sl
}
log.WithFields(logrus.Fields{
"value": v.String(),
"bid": bytesutil.BytesToUint64BigEndian(bid.Message.Value),
"builderPubKey": fmt.Sprintf("%#x", bid.Message.Pubkey),
"blockHash": fmt.Sprintf("%#x", bid.Message.Header.BlockHash),
}).Info("Received header with bid")
@@ -378,10 +357,10 @@ func (vs *Server) circuitBreakBuilder(s types.Slot) (bool, error) {
return false, nil
}
// GetAndBuildBlindBlock builds a blind block from the builder network. Returns a boolean status, built block and error.
// Get and build a blind block from the builder network. Returns a boolean status, built block and error.
// If the status is false, building the header block via the builder is disallowed.
// This routine is time limited by `blockBuilderTimeout`.
func (vs *Server) GetAndBuildBlindBlock(ctx context.Context, b *ethpb.BeaconBlockAltair) (bool, *ethpb.GenericBeaconBlock, error) {
func (vs *Server) getAndBuildBlindBlock(ctx context.Context, b *ethpb.BeaconBlockAltair) (bool, *ethpb.GenericBeaconBlock, error) {
// No-op. Builder is not defined: the user did not specify a builder URL, so we should use the local EE.
if vs.BlockBuilder == nil || !vs.BlockBuilder.Configured() {
return false, nil, nil
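The hunk above removes several bid sanity checks. A self-contained sketch of those checks with stand-in types; the empty transactions root is a zero-filled placeholder here, whereas the real code derives it via ssz.TransactionsRoot:

package main

import (
	"bytes"
	"errors"
	"fmt"
	"math/big"
)

// bid stands in for the builder's SignedBuilderBid message.
type bid struct {
	value      []byte // little-endian on the wire
	txRoot     []byte
	parentHash []byte
}

// reverse flips byte order, mimicking bytesutil.ReverseByteOrder.
func reverse(b []byte) []byte {
	out := make([]byte, len(b))
	for i := range b {
		out[i] = b[len(b)-1-i]
	}
	return out
}

// validate mirrors the removed checks: non-nil bid, non-zero value,
// non-empty transactions root, and a parent hash matching the local
// head's block hash.
func validate(b *bid, emptyTxRoot, headBlockHash []byte) error {
	if b == nil {
		return errors.New("builder returned nil bid")
	}
	v := new(big.Int).SetBytes(reverse(b.value))
	if v.Sign() == 0 {
		return errors.New("builder returned header with 0 bid amount")
	}
	if bytes.Equal(b.txRoot, emptyTxRoot) {
		return errors.New("builder returned header with an empty tx root")
	}
	if !bytes.Equal(b.parentHash, headBlockHash) {
		return fmt.Errorf("incorrect parent hash %#x != %#x", b.parentHash, headBlockHash)
	}
	return nil
}

func main() {
	emptyRoot := make([]byte, 32) // placeholder for the derived empty tx root
	head := []byte{0x01}
	err := validate(&bid{value: []byte{0}, txRoot: emptyRoot, parentHash: head}, emptyRoot, head)
	fmt.Println(err) // 0 bid amount
}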

View File

@@ -97,40 +97,6 @@ func TestServer_buildHeaderBlock(t *testing.T) {
}
func TestServer_getPayloadHeader(t *testing.T) {
emptyRoot, err := ssz.TransactionsRoot([][]byte{})
require.NoError(t, err)
ti, err := slots.ToTime(uint64(time.Now().Unix()), 0)
require.NoError(t, err)
sk, err := bls.RandKey()
require.NoError(t, err)
bid := &ethpb.BuilderBid{
Header: &v1.ExecutionPayloadHeader{
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
TransactionsRoot: bytesutil.PadTo([]byte{1}, fieldparams.RootLength),
ParentHash: params.BeaconConfig().ZeroHash[:],
Timestamp: uint64(ti.Unix()),
},
Pubkey: sk.PublicKey().Marshal(),
Value: bytesutil.PadTo([]byte{1, 2, 3}, 32),
}
d := params.BeaconConfig().DomainApplicationBuilder
domain, err := signing.ComputeDomain(d, nil, nil)
require.NoError(t, err)
sr, err := signing.ComputeSigningRoot(bid, domain)
require.NoError(t, err)
sBid := &ethpb.SignedBuilderBid{
Message: bid,
Signature: sk.Sign(sr[:]).Marshal(),
}
require.NoError(t, err)
tests := []struct {
name string
head interfaces.SignedBeaconBlock
@@ -165,7 +131,7 @@ func TestServer_getPayloadHeader(t *testing.T) {
err: "can't get header",
},
{
name: "0 bid",
name: "get header correct",
mock: &builderTest.MockBuilderService{
Bid: &ethpb.SignedBuilderBid{
Message: &ethpb.BuilderBid{
@@ -174,6 +140,7 @@ func TestServer_getPayloadHeader(t *testing.T) {
},
},
},
ErrGetHeader: errors.New("can't get header"),
},
fetcher: &blockchainTest.ChainService{
Block: func() interfaces.SignedBeaconBlock {
@@ -182,55 +149,18 @@ func TestServer_getPayloadHeader(t *testing.T) {
return wb
}(),
},
err: "builder returned header with 0 bid amount",
},
{
name: "invalid tx root",
mock: &builderTest.MockBuilderService{
Bid: &ethpb.SignedBuilderBid{
Message: &ethpb.BuilderBid{
Value: []byte{1},
Header: &v1.ExecutionPayloadHeader{
BlockNumber: 123,
TransactionsRoot: emptyRoot[:],
},
},
},
returnedHeader: &v1.ExecutionPayloadHeader{
BlockNumber: 123,
},
fetcher: &blockchainTest.ChainService{
Block: func() interfaces.SignedBeaconBlock {
wb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockBellatrix())
require.NoError(t, err)
return wb
}(),
},
err: "builder returned header with an empty tx root",
},
{
name: "can get header",
mock: &builderTest.MockBuilderService{
Bid: sBid,
},
fetcher: &blockchainTest.ChainService{
Block: func() interfaces.SignedBeaconBlock {
wb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockBellatrix())
require.NoError(t, err)
return wb
}(),
},
returnedHeader: bid.Header,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
vs := &Server{BlockBuilder: tc.mock, HeadFetcher: tc.fetcher, TimeFetcher: &blockchainTest.ChainService{
Genesis: time.Now(),
}}
vs := &Server{BlockBuilder: tc.mock, HeadFetcher: tc.fetcher}
h, err := vs.getPayloadHeaderFromBuilder(context.Background(), 0, 0)
if tc.err != "" {
if err != nil {
require.ErrorContains(t, tc.err, err)
} else {
require.NoError(t, err)
require.DeepEqual(t, tc.returnedHeader, h)
}
})
@@ -420,20 +350,20 @@ func TestServer_getAndBuildHeaderBlock(t *testing.T) {
vs := &Server{}
// Nil builder
ready, _, err := vs.GetAndBuildBlindBlock(ctx, nil)
ready, _, err := vs.getAndBuildBlindBlock(ctx, nil)
require.NoError(t, err)
require.Equal(t, false, ready)
// Not configured
vs.BlockBuilder = &builderTest.MockBuilderService{}
ready, _, err = vs.GetAndBuildBlindBlock(ctx, nil)
ready, _, err = vs.getAndBuildBlindBlock(ctx, nil)
require.NoError(t, err)
require.Equal(t, false, ready)
// Block is not ready
vs.BlockBuilder = &builderTest.MockBuilderService{HasConfigured: true}
vs.FinalizationFetcher = &blockchainTest.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{}}
ready, _, err = vs.GetAndBuildBlindBlock(ctx, nil)
ready, _, err = vs.getAndBuildBlindBlock(ctx, nil)
require.NoError(t, err)
require.Equal(t, false, ready)
@@ -450,7 +380,7 @@ func TestServer_getAndBuildHeaderBlock(t *testing.T) {
vs.HeadFetcher = &blockchainTest.ChainService{Block: wb1}
vs.BlockBuilder = &builderTest.MockBuilderService{HasConfigured: true, ErrGetHeader: errors.New("could not get payload")}
vs.ForkFetcher = &blockchainTest.ChainService{ForkChoiceStore: protoarray.New()}
ready, _, err = vs.GetAndBuildBlindBlock(ctx, &ethpb.BeaconBlockAltair{})
ready, _, err = vs.getAndBuildBlindBlock(ctx, &ethpb.BeaconBlockAltair{})
require.ErrorContains(t, "could not get payload", err)
require.Equal(t, false, ready)
@@ -526,7 +456,7 @@ func TestServer_getAndBuildHeaderBlock(t *testing.T) {
vs.BlockBuilder = &builderTest.MockBuilderService{HasConfigured: true, Bid: sBid}
vs.TimeFetcher = &blockchainTest.ChainService{Genesis: time.Now()}
vs.ForkFetcher = &blockchainTest.ChainService{ForkChoiceStore: protoarray.New()}
ready, builtBlk, err := vs.GetAndBuildBlindBlock(ctx, altairBlk.Block)
ready, builtBlk, err := vs.getAndBuildBlindBlock(ctx, altairBlk.Block)
require.NoError(t, err)
require.Equal(t, true, ready)
require.DeepEqual(t, h, builtBlk.GetBlindedBellatrix().Body.ExecutionPayloadHeader)

View File

@@ -8,7 +8,6 @@ import (
fastssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/config/features"
"github.com/prysmaticlabs/prysm/v3/config/params"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/crypto/hash"
@@ -107,9 +106,6 @@ func (vs *Server) canonicalEth1Data(
canonicalEth1Data = beaconState.Eth1Data()
eth1BlockHash = bytesutil.ToBytes32(beaconState.Eth1Data().BlockHash)
}
if features.Get().DisableStakinContractCheck && eth1BlockHash == [32]byte{} {
return canonicalEth1Data, new(big.Int).SetInt64(0), nil
}
_, canonicalEth1DataHeight, err := vs.Eth1BlockFetcher.BlockExists(ctx, eth1BlockHash)
if err != nil {
return nil, nil, errors.Wrap(err, "could not fetch eth1data height")

View File

@@ -2357,84 +2357,6 @@ func TestProposer_PrepareBeaconProposer(t *testing.T) {
}
}
func TestProposer_PrepareBeaconProposerOverlapping(t *testing.T) {
hook := logTest.NewGlobal()
db := dbutil.SetupDB(t)
ctx := context.Background()
proposerServer := &Server{BeaconDB: db}
// New validator
f := bytesutil.PadTo([]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF}, fieldparams.FeeRecipientLength)
req := &ethpb.PrepareBeaconProposerRequest{
Recipients: []*ethpb.PrepareBeaconProposerRequest_FeeRecipientContainer{
{FeeRecipient: f, ValidatorIndex: 1},
},
}
_, err := proposerServer.PrepareBeaconProposer(ctx, req)
require.NoError(t, err)
require.LogsContain(t, hook, "Updated fee recipient addresses for validator indices")
// Same validator
hook.Reset()
_, err = proposerServer.PrepareBeaconProposer(ctx, req)
require.NoError(t, err)
require.LogsDoNotContain(t, hook, "Updated fee recipient addresses for validator indices")
// Same validator with different fee recipient
hook.Reset()
f = bytesutil.PadTo([]byte{0x01, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF}, fieldparams.FeeRecipientLength)
req = &ethpb.PrepareBeaconProposerRequest{
Recipients: []*ethpb.PrepareBeaconProposerRequest_FeeRecipientContainer{
{FeeRecipient: f, ValidatorIndex: 1},
},
}
_, err = proposerServer.PrepareBeaconProposer(ctx, req)
require.NoError(t, err)
require.LogsContain(t, hook, "Updated fee recipient addresses for validator indices")
// More than one validator
hook.Reset()
f = bytesutil.PadTo([]byte{0x01, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF}, fieldparams.FeeRecipientLength)
req = &ethpb.PrepareBeaconProposerRequest{
Recipients: []*ethpb.PrepareBeaconProposerRequest_FeeRecipientContainer{
{FeeRecipient: f, ValidatorIndex: 1},
{FeeRecipient: f, ValidatorIndex: 2},
},
}
_, err = proposerServer.PrepareBeaconProposer(ctx, req)
require.NoError(t, err)
require.LogsContain(t, hook, "Updated fee recipient addresses for validator indices")
// Same validators
hook.Reset()
_, err = proposerServer.PrepareBeaconProposer(ctx, req)
require.NoError(t, err)
require.LogsDoNotContain(t, hook, "Updated fee recipient addresses for validator indices")
}
func BenchmarkServer_PrepareBeaconProposer(b *testing.B) {
db := dbutil.SetupDB(b)
ctx := context.Background()
proposerServer := &Server{BeaconDB: db}
f := bytesutil.PadTo([]byte{0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF, 0x01, 0xFF}, fieldparams.FeeRecipientLength)
recipients := make([]*ethpb.PrepareBeaconProposerRequest_FeeRecipientContainer, 0)
for i := 0; i < 10000; i++ {
recipients = append(recipients, &ethpb.PrepareBeaconProposerRequest_FeeRecipientContainer{FeeRecipient: f, ValidatorIndex: types.ValidatorIndex(i)})
}
req := &ethpb.PrepareBeaconProposerRequest{
Recipients: recipients,
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := proposerServer.PrepareBeaconProposer(ctx, req)
if err != nil {
b.Fatal(err)
}
}
}
func TestProposer_SubmitValidatorRegistrations(t *testing.T) {
ctx := context.Background()
proposerServer := &Server{}

View File

@@ -123,9 +123,6 @@ func (vs *Server) ValidatorIndex(ctx context.Context, req *ethpb.ValidatorIndexR
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not determine head state: %v", err)
}
if st == nil || st.IsNil() {
return nil, status.Errorf(codes.Internal, "head state is empty")
}
index, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(req.PublicKey))
if !ok {
return nil, status.Errorf(codes.NotFound, "Could not find validator index for public key %#x", req.PublicKey)

View File

@@ -48,18 +48,6 @@ func TestValidatorIndex_OK(t *testing.T) {
assert.NoError(t, err, "Could not get validator index")
}
func TestValidatorIndex_StateEmpty(t *testing.T) {
Server := &Server{
HeadFetcher: &mockChain.ChainService{},
}
pubKey := pubKey(1)
req := &ethpb.ValidatorIndexRequest{
PublicKey: pubKey,
}
_, err := Server.ValidatorIndex(context.Background(), req)
assert.ErrorContains(t, "head state is empty", err)
}
func TestWaitForActivation_ContextClosed(t *testing.T) {
beaconState, err := v1.InitializeFromProto(&ethpb.BeaconState{
Slot: 0,

View File

@@ -30,14 +30,13 @@ var errParticipation = status.Errorf(codes.Internal, "Failed to obtain epoch par
// ValidatorStatus returns the validator status of the current epoch.
// The status response can be one of the following:
//
// DEPOSITED - validator's deposit has been recognized by Ethereum 1, not yet recognized by Ethereum.
// PENDING - validator is in Ethereum's activation queue.
// ACTIVE - validator is active.
// EXITING - validator has initiated an exit request, or has dropped below the ejection balance and is being kicked out.
// EXITED - validator is no longer validating.
// SLASHING - validator has been kicked out due to meeting a slashing condition.
// UNKNOWN_STATUS - validator does not have a known status in the network.
// DEPOSITED - validator's deposit has been recognized by Ethereum 1, not yet recognized by Ethereum.
// PENDING - validator is in Ethereum's activation queue.
// ACTIVE - validator is active.
// EXITING - validator has initiated an exit request, or has dropped below the ejection balance and is being kicked out.
// EXITED - validator is no longer validating.
// SLASHING - validator has been kicked out due to meeting a slashing condition.
// UNKNOWN_STATUS - validator does not have a known status in the network.
func (vs *Server) ValidatorStatus(
ctx context.Context,
req *ethpb.ValidatorStatusRequest,
@@ -364,6 +363,15 @@ func (vs *Server) validatorStatus(
}
}
func (vs *Server) retrieveAfterEpochTransition(ctx context.Context, epoch types.Epoch) (state.BeaconState, error) {
endSlot, err := slots.EpochEnd(epoch)
if err != nil {
return nil, err
}
// replay to first slot of following epoch
return vs.ReplayerBuilder.ReplayerForSlot(endSlot).ReplayToSlot(ctx, endSlot+1)
}
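
retrieveAfterEpochTransition above relies on a small slot-arithmetic detail: to observe a state after an epoch's transition, replay through the epoch's last slot and then advance one slot further. A self-contained sketch of the arithmetic (mainnet constant assumed, mirroring what slots.EpochEnd computes):

package main

import "fmt"

// slotsPerEpoch is the mainnet value; minimal-config networks use 8.
const slotsPerEpoch = 32

// epochEnd mirrors slots.EpochEnd: the last slot of the given epoch.
func epochEnd(epoch uint64) uint64 {
	return (epoch+1)*slotsPerEpoch - 1
}

func main() {
	epoch := uint64(10)
	end := epochEnd(epoch) // slot 351
	// Replaying to end+1 crosses the epoch boundary, so the returned state
	// has the epoch transition (rewards, penalties, queue updates) applied.
	fmt.Printf("replay epoch %d through slot %d, then seek slot %d\n", epoch, end, end+1)
}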
func checkValidatorsAreRecent(headEpoch types.Epoch, req *ethpb.DoppelGangerRequest) (bool, *ethpb.DoppelGangerResponse) {
validatorsAreRecent := true
resp := &ethpb.DoppelGangerResponse{

View File

@@ -351,7 +351,6 @@ func (s *Service) Start() {
ReplayerBuilder: ch,
},
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
ForkFetcher: s.cfg.ForkFetcher,
}
ethpbv1alpha1.RegisterDebugServer(s.grpcServer, debugServer)
ethpbservice.RegisterBeaconDebugServer(s.grpcServer, debugServerV1)

View File

@@ -239,8 +239,7 @@ func (s *State) latestAncestor(ctx context.Context, blockRoot [32]byte) (state.B
// Is the state the genesis state?
parentRoot := bytesutil.ToBytes32(b.Block().ParentRoot())
if parentRoot == params.BeaconConfig().ZeroHash {
s, err := s.beaconDB.GenesisState(ctx)
return s, errors.Wrap(err, "could not get genesis state")
return s.beaconDB.GenesisState(ctx)
}
// Return an error if slot hasn't been covered by checkpoint sync.
@@ -269,13 +268,12 @@ func (s *State) latestAncestor(ctx context.Context, blockRoot [32]byte) (state.B
// Does the state exist in the DB?
if s.beaconDB.HasState(ctx, parentRoot) {
s, err := s.beaconDB.State(ctx, parentRoot)
return s, errors.Wrap(err, "failed to retrieve state from db")
return s.beaconDB.State(ctx, parentRoot)
}
b, err = s.beaconDB.Block(ctx, parentRoot)
if err != nil {
return nil, errors.Wrap(err, "failed to retrieve block from db")
return nil, err
}
if b == nil || b.IsNil() {
return nil, errUnknownBlock
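
One subtlety in the two variants shown above: with github.com/pkg/errors, writing return s, errors.Wrap(err, "...") behaves identically to a plain return on the success path, because Wrap is a no-op on a nil error. A quick demonstration (pkg/errors dependency assumed):

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func main() {
	// Wrap returns nil when err is nil, so annotating every return path
	// cannot turn a successful DB read into an error.
	fmt.Println(errors.Wrap(nil, "could not get genesis state")) // <nil>

	dbErr := errors.New("db closed")
	fmt.Println(errors.Wrap(dbErr, "could not get genesis state"))
	// prints: could not get genesis state: db closed
}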

View File

@@ -13,16 +13,4 @@ var (
Buckets: []float64{64, 256, 1024, 2048, 4096},
},
)
replayBlocksSummary = promauto.NewSummary(
prometheus.SummaryOpts{
Name: "replay_blocks_milliseconds",
Help: "Time it took to replay blocks",
},
)
replayToSlotSummary = promauto.NewSummary(
prometheus.SummaryOpts{
Name: "replay_to_slot_milliseconds",
Help: "Time it took to replay to slot",
},
)
)
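
For context on these summary collectors: promauto registers a metric with the default registerer at declaration time, which is why adding or removing one is just adding or deleting its var. A minimal sketch of the observe-a-duration pattern (Prometheus client library assumed):

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// Declaring through promauto registers the summary immediately.
var replayBlocksSummary = promauto.NewSummary(prometheus.SummaryOpts{
	Name: "replay_blocks_milliseconds",
	Help: "Time it took to replay blocks",
})

func main() {
	start := time.Now()
	time.Sleep(5 * time.Millisecond) // stand-in for the replay work
	replayBlocksSummary.Observe(float64(time.Since(start).Milliseconds()))
}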

View File

@@ -63,6 +63,7 @@ type chainer interface {
}
type stateReplayer struct {
s state.BeaconState
target types.Slot
method retrievalMethod
chainer chainer
@@ -119,7 +120,6 @@ func (rs *stateReplayer) ReplayBlocks(ctx context.Context) (state.BeaconState, e
log.WithFields(logrus.Fields{
"duration": duration,
}).Debug("Finished calling process_blocks on all blocks in ReplayBlocks")
replayBlocksSummary.Observe(float64(duration.Milliseconds()))
return s, nil
}
@@ -151,14 +151,14 @@ func (rs *stateReplayer) ReplayToSlot(ctx context.Context, replayTo types.Slot)
// err will be handled after the bookend log
s, err = ReplayProcessSlots(ctx, s, replayTo)
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("ReplayToSlot failed to seek to slot %d after applying blocks", replayTo))
}
duration := time.Since(start)
log.WithFields(logrus.Fields{
"duration": duration,
}).Debug("time spent in process_slots")
replayToSlotSummary.Observe(float64(duration.Milliseconds()))
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("ReplayToSlot failed to seek to slot %d after applying blocks", replayTo))
}
return s, nil
}

View File

@@ -131,7 +131,7 @@ func (s *Service) processFetchedData(
// Use Batch Block Verify to process and verify batches directly.
if err := s.processBatchedBlocks(ctx, genesis, data.blocks, s.cfg.Chain.ReceiveBlockBatch); err != nil {
log.WithError(err).Warn("Skip processing batched blocks")
log.WithError(err).Warn("Batch is not processed")
}
}
@@ -260,7 +260,7 @@ func (s *Service) processBatchedBlocks(ctx context.Context, genesis time.Time,
headSlot := s.cfg.Chain.HeadSlot()
for headSlot >= firstBlock.Block().Slot() && s.isProcessedBlock(ctx, firstBlock, blkRoot) {
if len(blks) == 1 {
return fmt.Errorf("headSlot:%d, blockSlot:%d , root %#x:%w", headSlot, firstBlock.Block().Slot(), blkRoot, errBlockAlreadyProcessed)
return errors.New("no good blocks in batch")
}
blks = blks[1:]
firstBlock = blks[0]
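
The loop shown here trims already-processed blocks off the front of a batch before handing the remainder to the chain. A simplified, self-contained sketch of that control flow (types hypothetical), using the wrapped sentinel-error form shown above:

package main

import (
	"errors"
	"fmt"
)

var errBlockAlreadyProcessed = errors.New("block is already processed")

type block struct{ slot uint64 }

// trimProcessed pops stale blocks off the front of the batch; if the whole
// batch turns out to be processed already, it surfaces a typed error that
// callers can match with errors.Is.
func trimProcessed(blks []block, headSlot uint64, processed func(block) bool) ([]block, error) {
	for len(blks) > 0 && headSlot >= blks[0].slot && processed(blks[0]) {
		if len(blks) == 1 {
			return nil, fmt.Errorf("headSlot:%d, blockSlot:%d: %w", headSlot, blks[0].slot, errBlockAlreadyProcessed)
		}
		blks = blks[1:]
	}
	return blks, nil
}

func main() {
	batch := []block{{slot: 10}, {slot: 11}, {slot: 12}}
	rest, err := trimProcessed(batch, 11, func(b block) bool { return b.slot <= 11 })
	fmt.Println(rest, err) // [{12}] <nil>
}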

View File

@@ -457,7 +457,7 @@ func TestService_processBlockBatch(t *testing.T) {
ctx context.Context, blocks []interfaces.SignedBeaconBlock, blockRoots [][32]byte) error {
return nil
})
assert.ErrorContains(t, "block is already processed", err)
assert.ErrorContains(t, "no good blocks in batch", err)
var badBatch2 []interfaces.SignedBeaconBlock
for i, b := range batch2 {

View File

@@ -89,38 +89,6 @@ var (
Buckets: []float64{250, 500, 1000, 1500, 2000, 4000, 8000, 16000},
},
)
// Attestation processing granular error tracking.
attBadBlockCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "gossip_attestation_bad_block_total",
Help: "Increased when a gossip attestation references a bad block",
})
attBadLmdConsistencyCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "gossip_attestation_bad_lmd_consistency_total",
Help: "Increased when a gossip attestation has bad LMD GHOST consistency",
})
attBadSelectionProofCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "gossip_attestation_bad_selection_proof_total",
Help: "Increased when a gossip attestation has a bad selection proof",
})
attBadSignatureBatchCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "gossip_attestation_bad_signature_batch_total",
Help: "Increased when a gossip attestation has a bad signature batch",
})
// Attestation and block gossip verification performance.
aggregateAttestationVerificationGossipSummary = promauto.NewSummary(
prometheus.SummaryOpts{
Name: "gossip_aggregate_attestation_verification_milliseconds",
Help: "Time to verify gossiped attestations",
},
)
blockVerificationGossipSummary = promauto.NewSummary(
prometheus.SummaryOpts{
Name: "gossip_block_verification_milliseconds",
Help: "Time to verify gossiped blocks",
},
)
)
func (s *Service) updateMetrics() {

View File

@@ -190,7 +190,7 @@ func (s *Service) writeBlockRangeToStream(ctx context.Context, startSlot, endSlo
continue
}
if chunkErr := s.chunkBlockWriter(stream, b); chunkErr != nil {
log.WithError(chunkErr).Debug("Could not send a chunked response")
log.WithError(chunkErr).Error("Could not send a chunked response")
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
tracing.AnnotateError(span, chunkErr)
return chunkErr

View File

@@ -178,7 +178,6 @@ func (s *Service) Start() {
s.processPendingBlocksQueue()
s.processPendingAttsQueue()
s.maintainPeerStatuses()
s.resyncIfBehind()
// Update sync metrics.
async.RunEvery(s.ctx, syncMetricsInterval, s.updateMetrics)

View File

@@ -20,7 +20,6 @@ import (
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v3/monitoring/tracing"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
prysmTime "github.com/prysmaticlabs/prysm/v3/time"
"github.com/prysmaticlabs/prysm/v3/time/slots"
"go.opencensus.io/trace"
)
@@ -28,7 +27,6 @@ import (
// validateAggregateAndProof verifies the aggregated signature and the selection proof is valid before forwarding to the
// network and downstream services.
func (s *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
receivedTime := prysmTime.Now()
if pid == s.cfg.p2p.PeerID() {
return pubsub.ValidationAccept, nil
}
@@ -42,6 +40,16 @@ func (s *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, ms
return pubsub.ValidationIgnore, nil
}
// We should not attempt to process this message if the node is running in optimistic mode.
// We just ignore in p2p so that the peer is not penalized.
optimistic, err := s.cfg.chain.IsOptimistic(ctx)
if err != nil {
return pubsub.ValidationReject, err
}
if optimistic {
return pubsub.ValidationIgnore, nil
}
raw, err := s.decodePubsubMessage(msg)
if err != nil {
tracing.AnnotateError(span, err)
@@ -77,11 +85,8 @@ func (s *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, ms
// Attestation's slot is within ATTESTATION_PROPAGATION_SLOT_RANGE and early attestation
// processing tolerance.
if err := helpers.ValidateAttestationTime(
m.Message.Aggregate.Data.Slot,
s.cfg.chain.GenesisTime(),
earlyAttestationProcessingTolerance,
); err != nil {
if err := helpers.ValidateAttestationTime(m.Message.Aggregate.Data.Slot, s.cfg.chain.GenesisTime(),
earlyAttestationProcessingTolerance); err != nil {
tracing.AnnotateError(span, err)
return pubsub.ValidationIgnore, err
}
@@ -94,7 +99,6 @@ func (s *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, ms
if s.hasBadBlock(bytesutil.ToBytes32(m.Message.Aggregate.Data.BeaconBlockRoot)) ||
s.hasBadBlock(bytesutil.ToBytes32(m.Message.Aggregate.Data.Target.Root)) ||
s.hasBadBlock(bytesutil.ToBytes32(m.Message.Aggregate.Data.Source.Root)) {
attBadBlockCount.Inc()
return pubsub.ValidationReject, errors.New("bad block referenced in attestation data")
}
@@ -120,8 +124,6 @@ func (s *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, ms
msg.ValidatorData = m
aggregateAttestationVerificationGossipSummary.Observe(float64(prysmTime.Since(receivedTime).Milliseconds()))
return pubsub.ValidationAccept, nil
}
@@ -135,7 +137,6 @@ func (s *Service) validateAggregatedAtt(ctx context.Context, signed *ethpb.Signe
// but it's invalid in the spirit of the protocol. Here we choose safety over profit.
if err := s.cfg.chain.VerifyLmdFfgConsistency(ctx, signed.Message.Aggregate); err != nil {
tracing.AnnotateError(span, err)
attBadLmdConsistencyCount.Inc()
return pubsub.ValidationReject, err
}
@@ -177,7 +178,6 @@ func (s *Service) validateAggregatedAtt(ctx context.Context, signed *ethpb.Signe
if err != nil {
wrappedErr := errors.Wrapf(err, "Could not validate selection for validator %d", signed.Message.AggregatorIndex)
tracing.AnnotateError(span, wrappedErr)
attBadSelectionProofCount.Inc()
return pubsub.ValidationReject, wrappedErr
}
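
A note on the ignore-versus-reject choices threaded through this validator: in libp2p pubsub, ignoring drops a message without affecting the sender's gossip score, while rejecting penalizes the peer. That is why the optimistic-mode gate ignores (the sender did nothing wrong, the local node simply cannot verify yet) while a provably bad block reference rejects. A local sketch with stand-in constants (the real ones come from github.com/libp2p/go-libp2p-pubsub):

package main

import "fmt"

type ValidationResult int

const (
	ValidationAccept ValidationResult = iota
	ValidationIgnore                  // drop quietly; the peer is not penalized
	ValidationReject                  // drop and count against the peer's score
)

// gate mirrors the shape of the checks above: conditions the local node
// cannot verify lead to ignore, provably invalid messages lead to reject.
func gate(optimistic, referencesBadBlock bool) ValidationResult {
	if optimistic {
		return ValidationIgnore
	}
	if referencesBadBlock {
		return ValidationReject
	}
	return ValidationAccept
}

func main() {
	fmt.Println(gate(true, false)) // 1: ignore while syncing optimistically
	fmt.Println(gate(false, true)) // 2: reject a message citing a bad block
}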

Some files were not shown because too many files have changed in this diff.