Compare commits

...

32 Commits

Author SHA1 Message Date
Nishant Das
6f0eb4750e Change Default Timeout To 30 Seconds (#11487) 2022-09-22 22:59:36 +08:00
Nishant Das
653ea3b030 Fix Gossipsub Parameter (#11425)
(cherry picked from commit fbe591c363)
2022-09-09 10:47:29 -05:00
Potuz
17e1986063 Prune during on_tick (#11387)
* Prune during on_tick

* add unit test

* fix tests

Co-authored-by: terencechain <terence@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
(cherry picked from commit fc509cc220)
2022-09-09 08:50:57 -05:00
terencechain
8627fe72e8 Remove activation/exit queue metrics (#11389)
* Remove activation/exit queue metrics

* Gaz

* Rm unused vars

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-09-01 19:11:25 +00:00
Raul Jordan
65bf3d0fa8 Fix Div By 0 in Small Helper (#11390) 2022-09-01 18:26:28 +00:00
Raul Jordan
a5da9aedd4 Add in P2P Metrics for Mainnet (#11386)
* connected peers gauge vec

* build

* add in gossip metric

* clean
2022-09-01 18:00:54 +00:00
Potuz
e1ab034d25 Defensive pulls protoarray (#11385)
* Defensive pull tips protoarray

* unit test

* gaz

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-09-01 15:05:44 +00:00
Potuz
84bc8f3d64 Fix fillInMissingBlocks (#11353)
* Fix fillInMissingBlocks

Only check that the chain's parent is in forkchoice, rather than
requiring it to be the finalized checkpoint. Forkchoice already
guarantees that the chain will be a descendant of the finalized checkpoint.

* ensure root is not zero

* fix tests

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-09-01 14:40:32 +00:00
Radosław Kapka
c4deb84012 Simplify BeaconBlockIsNil() (#11373)
* Simplify `BeaconBlockIsNil()`

* remove unused code

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2022-09-01 03:40:20 +00:00
kasey
488e19e428 less ominous --weak-subjectivity-checkpoint warning (#11362)
* fix #11361

* change log level to debug

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2022-09-01 01:56:56 +00:00
Raul Jordan
bcaae1c440 Performance Metrics for Prysm (#11377)
* atts performance and blocks

* idiomatic observe

* all attestation related errors

* block metrics

* db metrics

* metrics func

* rem old metrics

* naming

* rem metric

* rem unneeded

* fix

* fix up

* rev

* fix

* rem
2022-09-01 01:26:19 +00:00
terencechain
587ba83aca Better batch block processing warning (#11372)
* Better batch block warning

* Fix test

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-09-01 00:57:55 +00:00
terencechain
091f16b26c Don't hard shutdown if mev-boost / relay is not available (#11380)
* Don't hard shutdown if mev-boost / relay is not available

* Add else
2022-08-31 23:58:27 +00:00
Potuz
fb9626fdd7 Feature flag to disregard deposit contract (#11370)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-08-31 22:35:59 +00:00
terencechain
c638e114db Add new metrics (#11374)
* Better batch block warning

* New metrics

* Revert "Better batch block warning"

This reverts commit e21fcfcebe.

* More metrics

* Add activation and exit queues

* Gaz
2022-08-31 18:05:50 -04:00
terencechain
b1e08307ed Fix time to duty to round slot number (#11371)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-08-31 19:54:44 +00:00
kasey
cac5d0f234 giving commands clearer names per issue #11287 (#11360)
* giving commands clearer names per issue #11287

* mark the top-level help text for cpt deprecated

Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
2022-08-31 13:18:35 -05:00
james-prysm
52d48b328f Improve Validator Index RPC Error Handling (#11363)
* adding in nil check for head

* adding changes based on feedback

* Update beacon-chain/rpc/prysm/v1alpha1/validator/server_test.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2022-08-31 16:42:49 +00:00
Nishant Das
9729b2ec77 Remove The Header Time Check (#11329)
* remove the check

* remove function and tests

* dead code

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-08-31 14:54:29 +00:00
terencechain
7aa3776aa6 Log tx count only on payload (#11368) 2022-08-31 14:38:44 +02:00
Potuz
760c71ef77 Only update finalized checkpoint in DB if it's newer (#11356)
* Only update finalized checkpoint in DB if it's newer

Do not save to DB a finalized checkpoint that is older than the
current one.

* Add a test

* Add more strict condition check

* Revert params.SetupTestConfigCleanupWithLock(t)

* Remove new line

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2022-08-31 00:16:22 +00:00
james-prysm
6c209db3ca fixing json unmarshalling (#11357)
* fixing json unmarshalling

* adding unit test for no content

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-08-30 19:31:23 +00:00
Raul Jordan
0725905797 Informative Errors on Execution Client Connection Issues (#11359)
* add err auth help

* error working

* add err auth fix
2022-08-30 19:09:42 +00:00
terencechain
166f8a1eb6 Log correct header value (#11354)
* Log correct header value

* gaz

* Go fmt

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-08-30 16:23:21 +00:00
Radosław Kapka
85896e994e Explain the purpose of deprecatedBeaconFlags (#11355) 2022-08-30 15:47:36 +00:00
Nishant Das
4a00b295ed Pin Fuzzbuzz to Go 1.18 (#11350) 2022-08-30 10:18:23 +02:00
Potuz
d2b39e9697 Defensive pull tips, doubly-linked-tree (#11175)
* Defensive pull tips, doubly-linked-tree

* feature flag

* gaz

Co-authored-by: terencechain <terence@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-08-30 00:48:25 +00:00
Shem Leong
97dc86e742 Support passing of headers to all Engine API calls (#11330)
* Support passing of headers to all Engine API calls

* Update execution headers example

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2022-08-29 23:34:29 +00:00
terencechain
cff3b99918 Fix can propose blind block (#11346) 2022-08-29 13:30:28 -07:00
terencechain
be9847f23c Remove unused code (#11345) 2022-08-29 18:03:03 +00:00
Håvard Anda Estensen
4796827d22 Replace deprecated linter deadcode with unused (#11334)
* Replace deprecated linter deadcode with unused

* Ignore unused warnings

* Print filename and line number when linting fails

* Fix path

* Remove unused methods

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: terencechain <terence@prysmaticlabs.com>
2022-08-29 12:45:25 -04:00
Preston Van Loon
57b7e0b572 db: Wrap errors in db.fetchAncestor to better identify unmarshalling issues (#11342)
* db: Wrap errors in db.fetchAncestor to better identify unmarshalling issues. See #11327

* Wrap genesis state fetch, just in case

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2022-08-29 16:08:03 +00:00
94 changed files with 880 additions and 366 deletions

View File

@@ -53,6 +53,7 @@ jobs:
uses: golangci/golangci-lint-action@v3
with:
version: v1.47.2
args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number
build:
name: Build

View File

@@ -13,7 +13,7 @@ linters:
enable:
- gofmt
- goimports
- deadcode
- unused
- errcheck
- gosimple
- gocognit

View File

@@ -21,14 +21,13 @@ import (
// OriginData represents the BeaconState and SignedBeaconBlock necessary to start an empty Beacon Node
// using Checkpoint Sync.
type OriginData struct {
wsd *WeakSubjectivityData
sb []byte
bb []byte
st state.BeaconState
b interfaces.SignedBeaconBlock
vu *detect.VersionedUnmarshaler
br [32]byte
sr [32]byte
sb []byte
bb []byte
st state.BeaconState
b interfaces.SignedBeaconBlock
vu *detect.VersionedUnmarshaler
br [32]byte
sr [32]byte
}
// SaveBlock saves the downloaded block to a unique file in the given path.

View File

@@ -95,8 +95,6 @@ func WithTimeout(timeout time.Duration) ClientOpt {
// Client provides a collection of helper methods for calling the Eth Beacon Node API endpoints.
type Client struct {
hc *http.Client
host string
scheme string
baseURL *url.URL
}

View File

@@ -274,9 +274,6 @@ func non200Err(response *http.Response) error {
if err != nil {
body = "(Unable to read response body.)"
} else {
if jsonErr := json.Unmarshal(bodyBytes, &errMessage); jsonErr != nil {
return errors.Wrap(jsonErr, "unable to read response body")
}
body = "response body:\n" + string(bodyBytes)
}
msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
@@ -285,13 +282,25 @@ func non200Err(response *http.Response) error {
log.WithError(ErrNoContent).Debug(msg)
return ErrNoContent
case 400:
if jsonErr := json.Unmarshal(bodyBytes, &errMessage); jsonErr != nil {
return errors.Wrap(jsonErr, "unable to read response body")
}
log.WithError(ErrBadRequest).Debug(msg)
return errors.Wrap(ErrBadRequest, errMessage.Message)
case 404:
if jsonErr := json.Unmarshal(bodyBytes, &errMessage); jsonErr != nil {
return errors.Wrap(jsonErr, "unable to read response body")
}
log.WithError(ErrNotFound).Debug(msg)
return errors.Wrap(ErrNotFound, errMessage.Message)
default:
case 500:
if jsonErr := json.Unmarshal(bodyBytes, &errMessage); jsonErr != nil {
return errors.Wrap(jsonErr, "unable to read response body")
}
log.WithError(ErrNotOK).Debug(msg)
return errors.Wrap(ErrNotOK, errMessage.Message)
default:
log.WithError(ErrNotOK).Debug(msg)
return errors.Wrap(ErrNotOK, fmt.Sprintf("unsupported error code: %d", response.StatusCode))
}
}
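As an aside on the pattern above: because each branch wraps a package-level sentinel with errors.Wrap, callers can still classify failures with errors.Is after the message has been decorated. A minimal, self-contained sketch of that caller-side classification — the sentinels here are redeclared stand-ins, not the package's exported values:

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// Stand-ins for the sentinel errors used by non200Err above; redeclared
// here so the sketch compiles on its own.
var (
	ErrNoContent  = errors.New("no content")  // 204
	ErrBadRequest = errors.New("bad request") // 400
	ErrNotFound   = errors.New("not found")   // 404
	ErrNotOK      = errors.New("did not receive 2xx response from API")
)

// classify shows that errors.Wrap preserves the cause chain, so
// errors.Is still matches the sentinel behind the decorated message.
func classify(err error) string {
	switch {
	case errors.Is(err, ErrNoContent):
		return "204: no payload available; caller may fall back"
	case errors.Is(err, ErrBadRequest):
		return "400: fix the request before retrying"
	case errors.Is(err, ErrNotFound):
		return "404: endpoint or resource missing"
	case errors.Is(err, ErrNotOK):
		return "other non-2xx status"
	default:
		return "unclassified"
	}
}

func main() {
	err := errors.Wrap(ErrBadRequest, "Invalid block: missing signature")
	fmt.Println(classify(err)) // 400: fix the request before retrying
}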

View File

@@ -144,6 +144,23 @@ func TestClient_GetHeader(t *testing.T) {
_, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
require.ErrorIs(t, err, ErrNotOK)
hc = &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, expectedPath, r.URL.Path)
return &http.Response{
StatusCode: http.StatusNoContent,
Body: io.NopCloser(bytes.NewBuffer([]byte("No header is available."))),
Request: r.Clone(ctx),
}, nil
}),
}
c = &Client{
hc: hc,
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
}
_, err = c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
require.ErrorIs(t, err, ErrNoContent)
hc = &http.Client{
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
require.Equal(t, expectedPath, r.URL.Path)

View File

@@ -89,7 +89,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
}
return payloadID, nil
case execution.ErrInvalidPayloadStatus:
newPayloadInvalidNodeCount.Inc()
forkchoiceUpdatedInvalidNodeCount.Inc()
headRoot := arg.headRoot
if len(lastValidHash) == 0 {
lastValidHash = defaultLatestValidHash

View File

@@ -1155,6 +1155,18 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
require.Equal(t, false, optimistic)
require.DeepEqual(t, validCheckpoint.Root, cp.Root)
require.Equal(t, validCheckpoint.Epoch, cp.Epoch)
// Checkpoint with a lower epoch
oldCp, err := service.cfg.BeaconDB.FinalizedCheckpoint(ctx)
require.NoError(t, err)
invalidCp := &ethpb.Checkpoint{
Epoch: oldCp.Epoch - 1,
}
// Nothing should happen as we no-op on an invalid checkpoint.
require.NoError(t, service.updateFinalized(ctx, invalidCp))
got, err := service.cfg.BeaconDB.FinalizedCheckpoint(ctx)
require.NoError(t, err)
require.DeepEqual(t, oldCp, got)
}
func TestService_removeInvalidBlockAndState(t *testing.T) {

View File

@@ -8,6 +8,7 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v3/config/params"
consensusBlocks "github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
@@ -51,10 +52,15 @@ func logStateTransitionData(b interfaces.BeaconBlock) error {
}
log = log.WithField("payloadHash", fmt.Sprintf("%#x", bytesutil.Trunc(p.BlockHash())))
txs, err := p.Transactions()
if err != nil {
switch {
case errors.Is(err, consensusBlocks.ErrUnsupportedGetter):
case err != nil:
return err
default:
log = log.WithField("txCount", len(txs))
txsPerSlotCount.Set(float64(len(txs)))
}
log = log.WithField("txCount", len(txs))
}
log.Info("Finished applying state transition")
return nil
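The switch just above is the idiom for version-gated getters: on pre-Bellatrix blocks Transactions() reports an unsupported-getter error, which is treated as skip-the-field rather than a failure. A self-contained sketch of the same idiom, with a stand-in sentinel and block type rather than the real consensus-types API:

package main

import (
	"errors"
	"fmt"
)

// errUnsupportedGetter stands in for consensusBlocks.ErrUnsupportedGetter:
// the field does not exist for this block version, which is not a failure.
var errUnsupportedGetter = errors.New("unsupported getter")

// block is hypothetical; nil txs models a pre-Bellatrix block with no
// execution payload.
type block struct{ txs []string }

func (b *block) Transactions() ([]string, error) {
	if b.txs == nil {
		return nil, errUnsupportedGetter
	}
	return b.txs, nil
}

func logBlock(b *block) error {
	txs, err := b.Transactions()
	switch {
	case errors.Is(err, errUnsupportedGetter):
		// Older block version: silently skip the field.
	case err != nil:
		return err // a real failure
	default:
		fmt.Println("txCount:", len(txs))
	}
	return nil
}

func main() {
	_ = logBlock(&block{})                              // prints nothing
	_ = logBlock(&block{txs: []string{"0x01", "0x02"}}) // txCount: 2
}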

View File

@@ -158,10 +158,47 @@ var (
Name: "forkchoice_updated_optimistic_node_count",
Help: "Count the number of optimistic nodes after forkchoiceUpdated EE call",
})
forkchoiceUpdatedInvalidNodeCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "forkchoice_updated_invalid_node_count",
Help: "Count the number of invalid nodes after forkchoiceUpdated EE call",
})
txsPerSlotCount = promauto.NewGauge(prometheus.GaugeOpts{
Name: "txs_per_slot_count",
Help: "Count the number of txs per slot",
})
missedPayloadIDFilledCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "missed_payload_id_filled_count",
Help: "",
})
onBlockProcessingTime = promauto.NewSummary(prometheus.SummaryOpts{
Name: "on_block_processing_milliseconds",
Help: "Total time in milliseconds to complete a call to onBlock()",
})
stateTransitionProcessingTime = promauto.NewSummary(prometheus.SummaryOpts{
Name: "state_transition_processing_milliseconds",
Help: "Total time to call a state transition in onBlock()",
})
processAttsElapsedTime = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "process_attestations_milliseconds",
Help: "Captures latency for process attestations (forkchoice) in milliseconds",
Buckets: []float64{1, 5, 20, 100, 500, 1000},
},
)
newAttHeadElapsedTime = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "new_att_head_milliseconds",
Help: "Captures latency for new attestation head in milliseconds",
Buckets: []float64{1, 5, 20, 100, 500, 1000},
},
)
newBlockHeadElapsedTime = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "new_block_head_milliseconds",
Help: "Captures latency for new block head in milliseconds",
Buckets: []float64{1, 5, 20, 100, 500, 1000},
},
)
)
// reportSlotMetrics reports slot related metrics.
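For completeness: metrics declared with promauto, as above, self-register against the default registry at package init, so exposing them only requires serving the standard handler. A hedged, minimal sketch using client_golang's promhttp — the metric name and port here are illustrative, not Prysm's:

package main

import (
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// promauto registers the metric with the default registry at init, so
// serving promhttp.Handler() is all that is needed for scraping.
var exampleProcessingTime = promauto.NewSummary(prometheus.SummaryOpts{
	Name: "example_processing_milliseconds",
	Help: "Total time in milliseconds for an example operation",
})

func main() {
	start := time.Now()
	time.Sleep(25 * time.Millisecond) // the work being measured
	exampleProcessingTime.Observe(float64(time.Since(start).Milliseconds()))

	http.Handle("/metrics", promhttp.Handler())
	_ = http.ListenAndServe(":8080", nil)
}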

View File

@@ -98,6 +98,7 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
if err := consensusblocks.BeaconBlockIsNil(signed); err != nil {
return invalidBlock{error: err}
}
startTime := time.Now()
b := signed.Block()
preState, err := s.getBlockPreState(ctx, b)
@@ -115,10 +116,13 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
if err != nil {
return err
}
stateTransitionStartTime := time.Now()
postState, err := transition.ExecuteStateTransition(ctx, preState, signed)
if err != nil {
return invalidBlock{error: err}
}
stateTransitionProcessingTime.Observe(float64(time.Since(stateTransitionStartTime).Milliseconds()))
postStateVersion, postStateHeader, err := getStateVersionAndPayload(postState)
if err != nil {
return err
@@ -182,10 +186,14 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
msg := fmt.Sprintf("could not read balances for state w/ justified checkpoint %#x", justified.Root)
return errors.Wrap(err, msg)
}
start := time.Now()
headRoot, err := s.cfg.ForkChoiceStore.Head(ctx, balances)
if err != nil {
log.WithError(err).Warn("Could not update head")
}
newBlockHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
if err := s.notifyEngineIfChangedHead(ctx, headRoot); err != nil {
return err
}
@@ -262,7 +270,11 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
}
defer reportAttestationInclusion(b)
return s.handleEpochBoundary(ctx, postState)
if err := s.handleEpochBoundary(ctx, postState); err != nil {
return err
}
onBlockProcessingTime.Observe(float64(time.Since(startTime).Milliseconds()))
return nil
}
func getStateVersionAndPayload(st state.BeaconState) (int, *enginev1.ExecutionPayloadHeader, error) {

View File

@@ -135,11 +135,21 @@ func (s *Service) verifyBlkFinalizedSlot(b interfaces.BeaconBlock) error {
}
// updateFinalized saves the init sync blocks, finalized checkpoint, migrates
// to cold old states and saves the last validated checkpoint to DB
// to cold old states and saves the last validated checkpoint to DB. It returns
// early if the new checkpoint is older than the one in DB.
func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) error {
ctx, span := trace.StartSpan(ctx, "blockChain.updateFinalized")
defer span.End()
// return early if new checkpoint is not newer than the one in DB
currentFinalized, err := s.cfg.BeaconDB.FinalizedCheckpoint(ctx)
if err != nil {
return err
}
if cp.Epoch <= currentFinalized.Epoch {
return nil
}
// Blocks need to be saved so that we can retrieve finalized block from
// DB when migrating states.
if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
@@ -267,7 +277,7 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
if len(pendingNodes) == 1 {
return nil
}
if root != s.ensureRootNotZeros(finalized.Root) {
if root != s.ensureRootNotZeros(finalized.Root) && !s.ForkChoicer().HasNode(root) {
return errNotDescendantOfFinalized
}
return s.cfg.ForkChoiceStore.InsertOptimisticChain(ctx, pendingNodes)

View File

@@ -1170,6 +1170,7 @@ func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
require.NoError(t, fcs.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Root: service.originBlockRoot}))
testState := gs.Copy()
for i := types.Slot(1); i <= 4*params.BeaconConfig().SlotsPerEpoch; i++ {

View File

@@ -147,17 +147,22 @@ func (s *Service) UpdateHead(ctx context.Context) error {
s.processAttestationsLock.Lock()
defer s.processAttestationsLock.Unlock()
start := time.Now()
s.processAttestations(ctx)
processAttsElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
justified := s.ForkChoicer().JustifiedCheckpoint()
balances, err := s.justifiedBalances.get(ctx, justified.Root)
if err != nil {
return err
}
start = time.Now()
newHeadRoot, err := s.cfg.ForkChoiceStore.Head(ctx, balances)
if err != nil {
log.WithError(err).Warn("Resolving fork due to new attestation")
}
newAttHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
s.headLock.RLock()
if s.headRoot() != newHeadRoot {
log.WithFields(logrus.Fields{

View File

@@ -30,8 +30,7 @@ type WeakSubjectivityVerifier struct {
// NewWeakSubjectivityVerifier validates a checkpoint, and if valid, uses it to initialize a weak subjectivity verifier.
func NewWeakSubjectivityVerifier(wsc *ethpb.Checkpoint, db weakSubjectivityDB) (*WeakSubjectivityVerifier, error) {
if wsc == nil || len(wsc.Root) == 0 || wsc.Epoch == 0 {
log.Info("--weak-subjectivity-checkpoint not provided. Prysm recommends providing a weak subjectivity checkpoint " +
"for nodes synced from genesis, or manual verification of block and state roots for checkpoint sync nodes.")
log.Debug("--weak-subjectivity-checkpoint not provided")
return &WeakSubjectivityVerifier{
enabled: false,
}, nil

View File

@@ -2,7 +2,6 @@ package builder
import (
"context"
"fmt"
"time"
"github.com/pkg/errors"
@@ -66,12 +65,12 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
// Is the builder up?
if err := s.c.Status(ctx); err != nil {
return nil, fmt.Errorf("could not connect to builder: %v", err)
log.WithError(err).Error("Failed to check builder status")
} else {
log.WithField("endpoint", c.NodeURL()).Info("Builder has been configured")
log.Warn("Outsourcing block construction to external builders adds non-trivial delay to block propagation time. " +
"Builder-constructed blocks or fallback blocks may get orphaned. Use at your own risk!")
}
log.WithField("endpoint", c.NodeURL()).Info("Builder has been configured")
log.Warn("Outsourcing block construction to external builders adds non-trivial delay to block propagation time. " +
"Builder-constructed blocks or fallback blocks may get orphaned. Use at your own risk!")
}
return s, nil
}

View File

@@ -7,6 +7,7 @@ go_library(
"beacon_committee.go",
"block.go",
"genesis.go",
"metrics.go",
"randao.go",
"rewards_penalties.go",
"shuffle.go",

View File

@@ -51,10 +51,11 @@ func ValidateSlotTargetEpoch(data *ethpb.AttestationData) error {
// committee count as an argument allows cheaper computation at run time.
//
// Spec pseudocode definition:
// def is_aggregator(state: BeaconState, slot: Slot, index: CommitteeIndex, slot_signature: BLSSignature) -> bool:
// committee = get_beacon_committee(state, slot, index)
// modulo = max(1, len(committee) // TARGET_AGGREGATORS_PER_COMMITTEE)
// return bytes_to_uint64(hash(slot_signature)[0:8]) % modulo == 0
//
// def is_aggregator(state: BeaconState, slot: Slot, index: CommitteeIndex, slot_signature: BLSSignature) -> bool:
// committee = get_beacon_committee(state, slot, index)
// modulo = max(1, len(committee) // TARGET_AGGREGATORS_PER_COMMITTEE)
// return bytes_to_uint64(hash(slot_signature)[0:8]) % modulo == 0
func IsAggregator(committeeCount uint64, slotSig []byte) (bool, error) {
modulo := uint64(1)
if committeeCount/params.BeaconConfig().TargetAggregatorsPerCommittee > 1 {
@@ -68,9 +69,10 @@ func IsAggregator(committeeCount uint64, slotSig []byte) (bool, error) {
// AggregateSignature returns the aggregated signature of the input attestations.
//
// Spec pseudocode definition:
// def get_aggregate_signature(attestations: Sequence[Attestation]) -> BLSSignature:
// signatures = [attestation.signature for attestation in attestations]
// return bls.Aggregate(signatures)
//
// def get_aggregate_signature(attestations: Sequence[Attestation]) -> BLSSignature:
// signatures = [attestation.signature for attestation in attestations]
// return bls.Aggregate(signatures)
func AggregateSignature(attestations []*ethpb.Attestation) (bls.Signature, error) {
sigs := make([]bls.Signature, len(attestations))
var err error
@@ -95,14 +97,15 @@ func IsAggregated(attestation *ethpb.Attestation) bool {
//
// Spec pseudocode definition:
// def compute_subnet_for_attestation(committees_per_slot: uint64, slot: Slot, committee_index: CommitteeIndex) -> uint64:
// """
// Compute the correct subnet for an attestation for Phase 0.
// Note, this mimics expected future behavior where attestations will be mapped to their shard subnet.
// """
// slots_since_epoch_start = uint64(slot % SLOTS_PER_EPOCH)
// committees_since_epoch_start = committees_per_slot * slots_since_epoch_start
//
// return uint64((committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT)
// """
// Compute the correct subnet for an attestation for Phase 0.
// Note, this mimics expected future behavior where attestations will be mapped to their shard subnet.
// """
// slots_since_epoch_start = uint64(slot % SLOTS_PER_EPOCH)
// committees_since_epoch_start = committees_per_slot * slots_since_epoch_start
//
// return uint64((committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT)
func ComputeSubnetForAttestation(activeValCount uint64, att *ethpb.Attestation) uint64 {
return ComputeSubnetFromCommitteeAndSlot(activeValCount, att.Data.CommitteeIndex, att.Data.Slot)
}
@@ -112,14 +115,15 @@ func ComputeSubnetForAttestation(activeValCount uint64, att *ethpb.Attestation)
//
// Spec pseudocode definition:
// def compute_subnet_for_attestation(committees_per_slot: uint64, slot: Slot, committee_index: CommitteeIndex) -> uint64:
// """
// Compute the correct subnet for an attestation for Phase 0.
// Note, this mimics expected future behavior where attestations will be mapped to their shard subnet.
// """
// slots_since_epoch_start = uint64(slot % SLOTS_PER_EPOCH)
// committees_since_epoch_start = committees_per_slot * slots_since_epoch_start
//
// return uint64((committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT)
// """
// Compute the correct subnet for an attestation for Phase 0.
// Note, this mimics expected future behavior where attestations will be mapped to their shard subnet.
// """
// slots_since_epoch_start = uint64(slot % SLOTS_PER_EPOCH)
// committees_since_epoch_start = committees_per_slot * slots_since_epoch_start
//
// return uint64((committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT)
func ComputeSubnetFromCommitteeAndSlot(activeValCount uint64, comIdx types.CommitteeIndex, attSlot types.Slot) uint64 {
slotSinceStart := slots.SinceEpochStarts(attSlot)
comCount := SlotCommitteeCount(activeValCount)
@@ -133,13 +137,15 @@ func ComputeSubnetFromCommitteeAndSlot(activeValCount uint64, comIdx types.Commi
// slots.
//
// Example:
// ATTESTATION_PROPAGATION_SLOT_RANGE = 5
// clockDisparity = 24 seconds
// current_slot = 100
// invalid_attestation_slot = 92
// invalid_attestation_slot = 103
// valid_attestation_slot = 98
// valid_attestation_slot = 101
//
// ATTESTATION_PROPAGATION_SLOT_RANGE = 5
// clockDisparity = 24 seconds
// current_slot = 100
// invalid_attestation_slot = 92
// invalid_attestation_slot = 103
// valid_attestation_slot = 98
// valid_attestation_slot = 101
//
// The attestation must be within the range of 95 to 102 in the example above.
func ValidateAttestationTime(attSlot types.Slot, genesisTime time.Time, clockDisparity time.Duration) error {
if err := slots.ValidateClock(attSlot, uint64(genesisTime.Unix())); err != nil {
@@ -170,13 +176,19 @@ func ValidateAttestationTime(attSlot types.Slot, genesisTime time.Time, clockDis
lowerBounds := lowerTime.Add(-clockDisparity)
// Verify attestation slot within the time range.
if attTime.Before(lowerBounds) || attTime.After(upperBounds) {
return fmt.Errorf(
"attestation slot %d not within attestation propagation range of %d to %d (current slot)",
attSlot,
lowerBoundsSlot,
currentSlot,
)
attError := fmt.Errorf(
"attestation slot %d not within attestation propagation range of %d to %d (current slot)",
attSlot,
lowerBoundsSlot,
currentSlot,
)
if attTime.Before(lowerBounds) {
attReceivedTooEarlyCount.Inc()
return attError
}
if attTime.After(upperBounds) {
attReceivedTooLateCount.Inc()
return attError
}
return nil
}

View File

@@ -0,0 +1,17 @@
package helpers
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
var (
attReceivedTooEarlyCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "attestation_too_early_total",
Help: "Increased when an attestation is considered too early",
})
attReceivedTooLateCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "attestation_too_late_total",
Help: "Increased when an attestation is considered too late",
})
)

View File

@@ -57,6 +57,7 @@ go_library(
"//monitoring/tracing:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_dgraph_io_ristretto//:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",

View File

@@ -55,6 +55,14 @@ var (
Name: "validator_entry_cache_delete_total",
Help: "The total number of cache deletes on the validator entry cache.",
})
stateReadingTime = promauto.NewSummary(prometheus.SummaryOpts{
Name: "db_beacon_state_reading_milliseconds",
Help: "Milliseconds it takes to read a beacon state from the DB",
})
stateSavingTime = promauto.NewSummary(prometheus.SummaryOpts{
Name: "db_beacon_state_saving_milliseconds",
Help: "Milliseconds it takes to save a beacon state to the DB",
})
)
// BlockCacheSize specifies 1000 slots worth of blocks cached, which

View File

@@ -20,6 +20,7 @@ import (
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v3/monitoring/tracing"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v3/time"
"github.com/prysmaticlabs/prysm/v3/time/slots"
bolt "go.etcd.io/bbolt"
"go.opencensus.io/trace"
@@ -30,6 +31,7 @@ import (
func (s *Store) State(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "BeaconDB.State")
defer span.End()
startTime := time.Now()
enc, err := s.stateBytes(ctx, blockRoot)
if err != nil {
return nil, err
@@ -44,7 +46,12 @@ func (s *Store) State(ctx context.Context, blockRoot [32]byte) (state.BeaconStat
return nil, valErr
}
return s.unmarshalState(ctx, enc, valEntries)
st, err := s.unmarshalState(ctx, enc, valEntries)
if err != nil {
return nil, err
}
stateReadingTime.Observe(float64(time.Since(startTime).Milliseconds()))
return st, err
}
// StateOrError is just like State(), except it only returns a non-error response
@@ -127,6 +134,7 @@ func (s *Store) SaveStates(ctx context.Context, states []state.ReadOnlyBeaconSta
if states == nil {
return errors.New("nil state")
}
startTime := time.Now()
multipleEncs := make([][]byte, len(states))
for i, st := range states {
stateBytes, err := marshalState(ctx, st)
@@ -136,7 +144,7 @@ func (s *Store) SaveStates(ctx context.Context, states []state.ReadOnlyBeaconSta
multipleEncs[i] = stateBytes
}
return s.db.Update(func(tx *bolt.Tx) error {
if err := s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(stateBucket)
for i, rt := range blockRoots {
indicesByBucket := createStateIndicesFromStateSlot(ctx, states[i].Slot())
@@ -148,7 +156,11 @@ func (s *Store) SaveStates(ctx context.Context, states []state.ReadOnlyBeaconSta
}
}
return nil
})
}); err != nil {
return err
}
stateSavingTime.Observe(float64(time.Since(startTime).Milliseconds()))
return nil
}
type withValidators interface {
@@ -760,8 +772,10 @@ func createStateIndicesFromStateSlot(ctx context.Context, slot types.Slot) map[s
// Only following states would be kept:
// 1.) state_slot % archived_interval == 0. (e.g. archived_interval=2048, states with slot 2048, 4096... etc)
// 2.) archived_interval - archived_interval/3 < state_slot % archived_interval
// (e.g. archived_interval=2048, states with slots after 1365).
// This is to tolerate skip slots. Not every state lays on the boundary.
//
// (e.g. archived_interval=2048, states with slots after 1365).
// This is to tolerate skip slots. Not every state lays on the boundary.
//
// 3.) state with current finalized root
// 4.) unfinalized States
func (s *Store) CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint types.Slot) error {

View File

@@ -24,12 +24,18 @@ var (
configMismatchLog = "Configuration mismatch between your execution client and Prysm. " +
"Please check your execution client and restart it with the proper configuration. If this is not done, " +
"your node will not be able to complete the proof-of-stake transition"
needsEnginePortLog = "Could not check execution client configuration. " +
"You are probably connecting to your execution client on the wrong port. For the Ethereum " +
"merge, you will need to connect to your " +
"execution client on port 8551 rather than 8545. This is known as the 'engine API' port and needs to be " +
"authenticated if connecting via HTTP. See our documentation on how to set up this up here " +
"https://docs.prylabs.network/docs/execution-node/authentication"
)
// Checks the transition configuration between Prysm and the connected execution node to ensure
// there are no differences in terminal block difficulty and block hash.
// If there are any discrepancies, we must log errors to ensure users can resolve
//the problem and be ready for the merge transition.
// the problem and be ready for the merge transition.
func (s *Service) checkTransitionConfiguration(
ctx context.Context, blockNotifications chan *feed.Event,
) {
@@ -48,10 +54,14 @@ func (s *Service) checkTransitionConfiguration(
}
err := s.ExchangeTransitionConfiguration(ctx, cfg)
if err != nil {
if errors.Is(err, ErrConfigMismatch) {
switch {
case errors.Is(err, ErrConfigMismatch):
log.WithError(err).Fatal(configMismatchLog)
case errors.Is(err, ErrMethodNotFound):
log.WithError(err).Error(needsEnginePortLog)
default:
log.WithError(err).Error("Could not check configuration values between execution and consensus client")
}
log.WithError(err).Error("Could not check configuration values between execution and consensus client")
}
// We poll the execution client to see if the transition configuration has changed.
@@ -115,6 +125,9 @@ func (s *Service) handleExchangeConfigurationError(err error) {
s.runError = err
log.WithError(err).Error(configMismatchLog)
return
} else if errors.Is(err, ErrMethodNotFound) {
log.WithError(err).Error(needsEnginePortLog)
return
}
log.WithError(err).Error("Could not check configuration values between execution and consensus client")
}

View File

@@ -537,22 +537,31 @@ func handleRPCError(err error) error {
}
switch e.ErrorCode() {
case -32700:
errParseCount.Inc()
return ErrParse
case -32600:
errInvalidRequestCount.Inc()
return ErrInvalidRequest
case -32601:
errMethodNotFoundCount.Inc()
return ErrMethodNotFound
case -32602:
errInvalidParamsCount.Inc()
return ErrInvalidParams
case -32603:
errInternalCount.Inc()
return ErrInternal
case -38001:
errUnknownPayloadCount.Inc()
return ErrUnknownPayload
case -38002:
errInvalidForkchoiceStateCount.Inc()
return ErrInvalidForkchoiceState
case -38003:
errInvalidPayloadAttributesCount.Inc()
return ErrInvalidPayloadAttributes
case -32000:
errServerErrorCount.Inc()
// Only -32000 status codes are data errors in the RPC specification.
errWithData, ok := err.(rpc.DataError)
if !ok {
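The hunk is cut off here, but the type assertion visible above is the standard way to reach the data field of a -32000 error: go-ethereum's rpc.DataError exposes ErrorData() alongside Error(). A hedged sketch of that extraction, with a fake error standing in for a real engine API response:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rpc"
)

// serverErrorDetail mirrors the -32000 branch above: data errors surface
// their payload through go-ethereum's rpc.DataError interface.
func serverErrorDetail(err error) string {
	errWithData, ok := err.(rpc.DataError)
	if !ok {
		return err.Error()
	}
	return fmt.Sprintf("%s (data: %v)", errWithData.Error(), errWithData.ErrorData())
}

// fakeDataErr is a stand-in implementing rpc.DataError for the demo.
type fakeDataErr struct {
	msg  string
	data interface{}
}

func (f *fakeDataErr) Error() string          { return f.msg }
func (f *fakeDataErr) ErrorData() interface{} { return f.data }

func main() {
	err := &fakeDataErr{msg: "Server error", data: map[string]string{"reason": "state pruned"}}
	fmt.Println(serverErrorDetail(err)) // Server error (data: map[reason:state pruned])
}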

View File

@@ -31,6 +31,42 @@ var (
Buckets: []float64{25, 50, 100, 200, 500, 1000, 2000, 4000},
},
)
errParseCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "execution_parse_error_count",
Help: "The number of errors that occurred while parsing execution payload",
})
errInvalidRequestCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "execution_invalid_request_count",
Help: "The number of errors that occurred due to invalid request",
})
errMethodNotFoundCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "execution_method_not_found_count",
Help: "The number of errors that occurred due to method not found",
})
errInvalidParamsCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "execution_invalid_params_count",
Help: "The number of errors that occurred due to invalid params",
})
errInternalCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "execution_internal_error_count",
Help: "The number of errors that occurred due to internal error",
})
errUnknownPayloadCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "execution_unknown_payload_count",
Help: "The number of errors that occurred due to unknown payload",
})
errInvalidForkchoiceStateCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "execution_invalid_forkchoice_state_count",
Help: "The number of errors that occurred due to invalid forkchoice state",
})
errInvalidPayloadAttributesCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "execution_invalid_payload_attributes_count",
Help: "The number of errors that occurred due to invalid payload attributes",
})
errServerErrorCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "execution_server_error_count",
Help: "The number of errors that occurred due to server error",
})
reconstructedExecutionPayloadCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "reconstructed_execution_payload_count",
Help: "Count the number of execution payloads that are reconstructed using JSON-RPC from payload headers",

View File

@@ -36,6 +36,14 @@ func WithHttpEndpointAndJWTSecret(endpointString string, secret []byte) Option {
}
}
// WithHeaders adds headers to the execution node JSON-RPC requests.
func WithHeaders(headers []string) Option {
return func(s *Service) error {
s.cfg.headers = headers
return nil
}
}
// WithDepositContractAddress for the deposit contract.
func WithDepositContractAddress(addr common.Address) Option {
return func(s *Service) error {

View File

@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"net/url"
"strings"
"time"
"github.com/ethereum/go-ethereum/ethclient"
@@ -37,7 +38,13 @@ func (s *Service) setupExecutionClientConnections(ctx context.Context, currEndpo
// Ensure we have the correct chain and deposit IDs.
if err := ensureCorrectExecutionChain(ctx, fetcher); err != nil {
client.Close()
return errors.Wrap(err, "could not make initial request to verify execution chain ID")
errStr := err.Error()
if strings.Contains(errStr, "401 Unauthorized") {
errStr = "could not verify execution chain ID as your connection is not authenticated. " +
"If connecting to your execution client via HTTP, you will need to set up JWT authentication. " +
"See our documentation here https://docs.prylabs.network/docs/execution-node/authentication"
}
return errors.Wrap(err, errStr)
}
s.updateConnectedETH1(true)
s.runError = nil
@@ -128,6 +135,16 @@ func (s *Service) newRPCClientWithAuth(ctx context.Context, endpoint network.End
}
client.SetHeader("Authorization", header)
}
for _, h := range s.cfg.headers {
if h != "" {
keyValue := strings.Split(h, "=")
if len(keyValue) < 2 {
log.Warnf("Incorrect HTTP header flag format. Skipping %v", keyValue[0])
continue
}
client.SetHeader(keyValue[0], strings.Join(keyValue[1:], "="))
}
}
return client, nil
}
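The split-and-rejoin in the loop above matters because header values can legitimately contain "=" (base64 padding, for example). A self-contained sketch of just that parsing behavior; the flag format and example headers are illustrative:

package main

import (
	"fmt"
	"strings"
)

// parseHeader mirrors the loop above: split on "=", skip malformed
// entries, and rejoin the remainder so values containing "=" survive.
func parseHeader(h string) (key, value string, ok bool) {
	keyValue := strings.Split(h, "=")
	if len(keyValue) < 2 {
		return "", "", false // malformed: no "=" present
	}
	return keyValue[0], strings.Join(keyValue[1:], "="), true
}

func main() {
	headers := []string{"X-Api-Key=abc123", "Authorization=Basic dXNlcjpwYXNz==", "bad-header"}
	for _, h := range headers {
		if k, v, ok := parseHeader(h); ok {
			fmt.Printf("set %s: %s\n", k, v)
		} else {
			fmt.Printf("skipping %s\n", h) // matches the Warnf in the diff
		}
	}
}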

View File

@@ -66,10 +66,6 @@ var (
logThreshold = 8
// period to log chainstart related information
logPeriod = 1 * time.Minute
// threshold of how old we will accept an eth1 node's head to be.
eth1Threshold = 20 * time.Minute
// error when eth1 node is too far behind.
errFarBehind = errors.Errorf("eth1 head is more than %s behind from current wall clock time", eth1Threshold.String())
)
// ChainStartFetcher retrieves information pertaining to the chain start event
@@ -128,6 +124,7 @@ type config struct {
eth1HeaderReqLimit uint64
beaconNodeStatsUpdater BeaconNodeStatsUpdater
currHttpEndpoint network.Endpoint
headers []string
finalizedStateAtStartup state.BeaconState
}
@@ -316,11 +313,6 @@ func (s *Service) updateBeaconNodeStats() {
s.cfg.beaconNodeStatsUpdater.Update(bs)
}
func (s *Service) updateCurrHttpEndpoint(endpoint network.Endpoint) {
s.cfg.currHttpEndpoint = endpoint
s.updateBeaconNodeStats()
}
func (s *Service) updateConnectedETH1(state bool) {
s.connectedETH1 = state
s.updateBeaconNodeStats()
@@ -608,11 +600,6 @@ func (s *Service) run(done <-chan struct{}) {
log.WithError(err).Debug("Could not fetch latest eth1 header")
continue
}
if eth1HeadIsBehind(head.Time) {
s.pollConnectionStatus(s.ctx)
log.WithError(errFarBehind).Debug("Could not get an up to date eth1 header")
continue
}
s.processBlockHeader(head)
s.handleETH1FollowDistance()
case <-chainstartTicker.C:
@@ -838,11 +825,3 @@ func dedupEndpoints(endpoints []string) []string {
}
return newEndpoints
}
// Checks if the provided timestamp is beyond the prescribed bound from
// the current wall clock time.
func eth1HeadIsBehind(timestamp uint64) bool {
timeout := prysmTime.Now().Add(-eth1Threshold)
// check that web3 client is syncing
return time.Unix(int64(timestamp), 0).Before(timeout) // lint:ignore uintcast -- timestamp will not exceed int64 in your lifetime.
}

View File

@@ -146,7 +146,7 @@ func TestStart_OK(t *testing.T) {
WithDepositContractAddress(testAcc.ContractAddr),
WithDatabase(beaconDB),
)
require.NoError(t, err, "unable to setup web3 ETH1.0 chain service")
require.NoError(t, err, "unable to setup execution service")
web3Service = setDefaultMocks(web3Service)
web3Service.rpcClient = &mockExecution.RPCClient{Backend: testAcc.Backend}
web3Service.depositContractCaller, err = contracts.NewDepositContractCaller(testAcc.ContractAddr, testAcc.Backend)
@@ -156,7 +156,7 @@ func TestStart_OK(t *testing.T) {
web3Service.Start()
if len(hook.Entries) > 0 {
msg := hook.LastEntry().Message
want := "Could not connect to ETH1.0 chain RPC client"
want := "Could not connect to execution endpoint"
if strings.Contains(want, msg) {
t.Errorf("incorrect log, expected %s, got %s", want, msg)
}
@@ -752,15 +752,6 @@ func TestService_ValidateDepositContainers(t *testing.T) {
}
}
func TestTimestampIsChecked(t *testing.T) {
timestamp := uint64(time.Now().Unix())
assert.Equal(t, false, eth1HeadIsBehind(timestamp))
// Give an older timestmap beyond threshold.
timestamp = uint64(time.Now().Add(-eth1Threshold).Add(-1 * time.Minute).Unix())
assert.Equal(t, true, eth1HeadIsBehind(timestamp))
}
func TestETH1Endpoints(t *testing.T) {
server, firstEndpoint, err := mockExecution.SetupRPCServer()
require.NoError(t, err)

View File

@@ -63,6 +63,7 @@ go_test(
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/v3:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",

View File

@@ -3,6 +3,7 @@ package doublylinkedtree
import (
"context"
"fmt"
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/blocks"
@@ -35,7 +36,6 @@ func New() *ForkChoice {
nodeByRoot: make(map[[fieldparams.RootLength]byte]*Node),
nodeByPayload: make(map[[fieldparams.RootLength]byte]*Node),
slashedIndices: make(map[types.ValidatorIndex]bool),
pruneThreshold: defaultPruneThreshold,
receivedBlocksLastEpoch: [fieldparams.SlotsPerEpoch]types.Slot{},
}
@@ -83,7 +83,8 @@ func (f *ForkChoice) Head(
jc := f.JustifiedCheckpoint()
fc := f.FinalizedCheckpoint()
if err := f.store.treeRootNode.updateBestDescendant(ctx, jc.Epoch, fc.Epoch); err != nil {
currentEpoch := slots.EpochsSinceGenesis(time.Unix(int64(f.store.genesisTime), 0))
if err := f.store.treeRootNode.updateBestDescendant(ctx, jc.Epoch, fc.Epoch, currentEpoch); err != nil {
return [32]byte{}, errors.Wrap(err, "could not update best descendant")
}
return f.store.head(ctx)

View File

@@ -208,7 +208,7 @@ func TestForkChoice_IsCanonicalReorg(t *testing.T) {
require.Equal(t, uint64(10), f.store.nodeByRoot[[32]byte{'1'}].weight)
require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'2'}].weight)
require.NoError(t, f.store.treeRootNode.updateBestDescendant(ctx, 1, 1))
require.NoError(t, f.store.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
require.DeepEqual(t, [32]byte{'3'}, f.store.treeRootNode.bestDescendant.root)
f.store.nodesLock.Unlock()

View File

@@ -5,20 +5,12 @@ import (
"context"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v3/config/features"
"github.com/prysmaticlabs/prysm/v3/config/params"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
v1 "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"
)
// depth returns the length of the path to the root of Fork Choice
func (n *Node) depth() uint64 {
ret := uint64(0)
for node := n.parent; node != nil; node = node.parent {
ret += 1
}
return ret
}
// applyWeightChanges recomputes the weight of the node passed as an argument and all of its descendants,
// using the current balance stored in each node. This function requires a lock
// in Store.nodesLock
@@ -43,7 +35,7 @@ func (n *Node) applyWeightChanges(ctx context.Context) error {
// updateBestDescendant updates the best descendant of this node and its
// children. This function assumes the caller has a lock on Store.nodesLock
func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finalizedEpoch types.Epoch) error {
func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finalizedEpoch, currentEpoch types.Epoch) error {
if ctx.Err() != nil {
return ctx.Err()
}
@@ -59,10 +51,10 @@ func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finaliz
if child == nil {
return errors.Wrap(ErrNilNode, "could not update best descendant")
}
if err := child.updateBestDescendant(ctx, justifiedEpoch, finalizedEpoch); err != nil {
if err := child.updateBestDescendant(ctx, justifiedEpoch, finalizedEpoch, currentEpoch); err != nil {
return err
}
childLeadsToViableHead := child.leadsToViableHead(justifiedEpoch, finalizedEpoch)
childLeadsToViableHead := child.leadsToViableHead(justifiedEpoch, finalizedEpoch, currentEpoch)
if childLeadsToViableHead && !hasViableDescendant {
// The child leads to a viable head, but the current
// parent's best child doesn't.
@@ -97,18 +89,24 @@ func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finaliz
// viableForHead returns true if the node is viable to head.
// Any node with different finalized or justified epoch than
// the ones in fork choice store should not be viable to head.
func (n *Node) viableForHead(justifiedEpoch, finalizedEpoch types.Epoch) bool {
func (n *Node) viableForHead(justifiedEpoch, finalizedEpoch, currentEpoch types.Epoch) bool {
justified := justifiedEpoch == n.justifiedEpoch || justifiedEpoch == 0
finalized := finalizedEpoch == n.finalizedEpoch || finalizedEpoch == 0
if features.Get().EnableDefensivePull && !justified && justifiedEpoch+1 == currentEpoch {
if n.unrealizedJustifiedEpoch+1 >= currentEpoch {
justified = true
finalized = true
}
}
return justified && finalized
}
func (n *Node) leadsToViableHead(justifiedEpoch, finalizedEpoch types.Epoch) bool {
func (n *Node) leadsToViableHead(justifiedEpoch, finalizedEpoch, currentEpoch types.Epoch) bool {
if n.bestDescendant == nil {
return n.viableForHead(justifiedEpoch, finalizedEpoch)
return n.viableForHead(justifiedEpoch, finalizedEpoch, currentEpoch)
}
return n.bestDescendant.viableForHead(justifiedEpoch, finalizedEpoch)
return n.bestDescendant.viableForHead(justifiedEpoch, finalizedEpoch, currentEpoch)
}
// setNodeAndParentValidated sets the current node and all the ancestors as validated (i.e. non-optimistic).
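To make the defensive-pull rule in viableForHead concrete: when the store justified at the epoch just before the current one, a node that has not yet realized that checkpoint is still considered viable if its own unrealized justified epoch is at most one epoch behind. A hedged, self-contained distillation of that predicate, with the feature flag assumed on and the finalized check elided for brevity:

package main

import "fmt"

// viable distills the EnableDefensivePull branch of viableForHead above:
// a node that has not realized the store's justified checkpoint is
// rescued when its unrealized justification is recent enough.
func viable(nodeJustified, nodeUnrealizedJustified, storeJustified, currentEpoch uint64) bool {
	justified := storeJustified == nodeJustified || storeJustified == 0
	if !justified && storeJustified+1 == currentEpoch && nodeUnrealizedJustified+1 >= currentEpoch {
		return true
	}
	return justified
}

func main() {
	// Late-justifying fork: the store justified epoch 1 and we are in epoch 2.
	// The node still records justified epoch 0 but unrealized epoch 1.
	fmt.Println(viable(0, 1, 1, 2)) // true: defensive pull keeps the tip viable
	fmt.Println(viable(0, 0, 1, 2)) // false: unrealized justification too old
}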

View File

@@ -114,7 +114,7 @@ func TestNode_UpdateBestDescendant_HigherWeightChild(t *testing.T) {
s := f.store
s.nodeByRoot[indexToHash(1)].weight = 100
s.nodeByRoot[indexToHash(2)].weight = 200
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1))
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
assert.Equal(t, 2, len(s.treeRootNode.children))
assert.Equal(t, s.treeRootNode.children[1], s.treeRootNode.bestDescendant)
@@ -134,31 +134,12 @@ func TestNode_UpdateBestDescendant_LowerWeightChild(t *testing.T) {
s := f.store
s.nodeByRoot[indexToHash(1)].weight = 200
s.nodeByRoot[indexToHash(2)].weight = 100
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1))
assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
assert.Equal(t, 2, len(s.treeRootNode.children))
assert.Equal(t, s.treeRootNode.children[0], s.treeRootNode.bestDescendant)
}
func TestNode_TestDepth(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// Input child is best descendant
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 3, indexToHash(3), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
s := f.store
require.Equal(t, s.nodeByRoot[indexToHash(2)].depth(), uint64(2))
require.Equal(t, s.nodeByRoot[indexToHash(3)].depth(), uint64(1))
}
func TestNode_ViableForHead(t *testing.T) {
tests := []struct {
n *Node
@@ -174,7 +155,7 @@ func TestNode_ViableForHead(t *testing.T) {
{&Node{finalizedEpoch: 3, justifiedEpoch: 4}, 4, 3, true},
}
for _, tc := range tests {
got := tc.n.viableForHead(tc.justifiedEpoch, tc.finalizedEpoch)
got := tc.n.viableForHead(tc.justifiedEpoch, tc.finalizedEpoch, 5)
assert.Equal(t, tc.want, got)
}
}
@@ -198,10 +179,10 @@ func TestNode_LeadsToViableHead(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.Equal(t, true, f.store.treeRootNode.leadsToViableHead(4, 3))
require.Equal(t, true, f.store.nodeByRoot[indexToHash(5)].leadsToViableHead(4, 3))
require.Equal(t, false, f.store.nodeByRoot[indexToHash(2)].leadsToViableHead(4, 3))
require.Equal(t, false, f.store.nodeByRoot[indexToHash(4)].leadsToViableHead(4, 3))
require.Equal(t, true, f.store.treeRootNode.leadsToViableHead(4, 3, 5))
require.Equal(t, true, f.store.nodeByRoot[indexToHash(5)].leadsToViableHead(4, 3, 5))
require.Equal(t, false, f.store.nodeByRoot[indexToHash(2)].leadsToViableHead(4, 3, 5))
require.Equal(t, false, f.store.nodeByRoot[indexToHash(4)].leadsToViableHead(4, 3, 5))
}
func TestNode_SetFullyValidated(t *testing.T) {

View File

@@ -70,5 +70,5 @@ func (f *ForkChoice) NewSlot(ctx context.Context, slot types.Slot) error {
if !features.Get().DisablePullTips {
f.updateUnrealizedCheckpoints()
}
return nil
return f.store.prune(ctx)
}
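With the prune threshold removed (next hunks), pruning now runs on every slot via NewSlot and relies on prune's own early return to stay cheap when finalization has not advanced. A hedged sketch of that per-slot driving pattern, with the forkchoice type reduced to a stand-in:

package main

import (
	"context"
	"fmt"
)

// forkchoicer is a stand-in for the ForkChoice type in the diff.
type forkchoicer struct{ pruneCalls int }

// NewSlot mirrors the new behavior: per-slot bookkeeping now ends with an
// unconditional prune instead of waiting for a depth threshold.
func (f *forkchoicer) NewSlot(ctx context.Context, slot uint64) error {
	// ...proposer boost reset and unrealized-checkpoint updates elided...
	return f.prune(ctx)
}

func (f *forkchoicer) prune(_ context.Context) error {
	// The real prune returns early when the finalized node is still the
	// tree root (parent == nil), so per-slot calls stay cheap.
	f.pruneCalls++
	return nil
}

func main() {
	f := &forkchoicer{}
	for slot := uint64(1); slot <= 32; slot++ { // one epoch of slot ticks
		if err := f.NewSlot(context.Background(), slot); err != nil {
			fmt.Println("new slot:", err)
		}
	}
	fmt.Println("prune invoked", f.pruneCalls, "times") // 32
}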

View File

@@ -12,10 +12,6 @@ import (
"go.opencensus.io/trace"
)
// This defines the minimal number of block nodes that can be in the tree
// before getting pruned upon new finalization.
const defaultPruneThreshold = 256
// applyProposerBoostScore applies the current proposer boost scores to the
// relevant nodes. This function requires a lock in Store.nodesLock.
func (s *Store) applyProposerBoostScore(newBalances []uint64) error {
@@ -59,11 +55,6 @@ func (s *Store) proposerBoost() [fieldparams.RootLength]byte {
return s.proposerBoostRoot
}
// PruneThreshold of fork choice store.
func (s *Store) PruneThreshold() uint64 {
return s.pruneThreshold
}
// head starts from justified root and then follows the best descendant links
// to find the best block for head. This function assumes a lock on s.nodesLock
func (s *Store) head(ctx context.Context) ([32]byte, error) {
@@ -95,8 +86,8 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
if bestDescendant == nil {
bestDescendant = justifiedNode
}
if !bestDescendant.viableForHead(s.justifiedCheckpoint.Epoch, s.finalizedCheckpoint.Epoch) {
currentEpoch := slots.EpochsSinceGenesis(time.Unix(int64(s.genesisTime), 0))
if !bestDescendant.viableForHead(s.justifiedCheckpoint.Epoch, s.finalizedCheckpoint.Epoch, currentEpoch) {
s.allTipsAreInvalid = true
return [32]byte{}, fmt.Errorf("head at slot %d with weight %d is not eligible, finalizedEpoch, justified Epoch %d, %d != %d, %d",
bestDescendant.slot, bestDescendant.weight/10e9, bestDescendant.finalizedEpoch, bestDescendant.justifiedEpoch, s.finalizedCheckpoint.Epoch, s.justifiedCheckpoint.Epoch)
@@ -175,7 +166,7 @@ func (s *Store) insert(ctx context.Context,
jEpoch := s.justifiedCheckpoint.Epoch
fEpoch := s.finalizedCheckpoint.Epoch
s.checkpointsLock.RUnlock()
if err := s.treeRootNode.updateBestDescendant(ctx, jEpoch, fEpoch); err != nil {
if err := s.treeRootNode.updateBestDescendant(ctx, jEpoch, fEpoch, slots.ToEpoch(currentSlot)); err != nil {
return n, err
}
}
@@ -215,8 +206,7 @@ func (s *Store) pruneFinalizedNodeByRootMap(ctx context.Context, node, finalized
return nil
}
// prune prunes the fork choice store with the new finalized root. The store is only pruned if the input
// root is different than the current store finalized root, and the number of the store has met prune threshold.
// prune prunes the fork choice store. It removes all nodes that compete with the finalized root.
// This function does not prune for invalid optimistically synced nodes, it deals only with pruning upon finalization
func (s *Store) prune(ctx context.Context) error {
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.Prune")
@@ -232,10 +222,8 @@ func (s *Store) prune(ctx context.Context) error {
if !ok || finalizedNode == nil {
return errUnknownFinalizedRoot
}
// The number of the nodes has not met the prune threshold.
// Pruning at small numbers incurs more cost than benefit.
if finalizedNode.depth() < s.pruneThreshold {
// return early if we haven't changed the finalized checkpoint
if finalizedNode.parent == nil {
return nil
}

View File

@@ -12,15 +12,6 @@ import (
"github.com/prysmaticlabs/prysm/v3/testing/require"
)
func TestStore_PruneThreshold(t *testing.T) {
s := &Store{
pruneThreshold: defaultPruneThreshold,
}
if got := s.PruneThreshold(); got != defaultPruneThreshold {
t.Errorf("PruneThreshold() = %v, want %v", got, defaultPruneThreshold)
}
}
func TestStore_JustifiedEpoch(t *testing.T) {
j := types.Epoch(100)
f := setup(j, j)
@@ -154,30 +145,6 @@ func TestStore_Insert(t *testing.T) {
assert.Equal(t, indexToHash(100), child.root, "Incorrect root")
}
func TestStore_Prune_LessThanThreshold(t *testing.T) {
// Define 100 nodes in store.
numOfNodes := uint64(100)
f := setup(0, 0)
ctx := context.Background()
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
for i := uint64(2); i < numOfNodes; i++ {
state, blkRoot, err = prepareForkchoiceState(ctx, types.Slot(i), indexToHash(i), indexToHash(i-1), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
}
s := f.store
s.pruneThreshold = 100
// Finalized root has depth 99 so everything before it should be pruned,
// but PruneThreshold is at 100 so nothing will be pruned.
s.finalizedCheckpoint.Root = indexToHash(99)
require.NoError(t, s.prune(context.Background()))
assert.Equal(t, 100, len(s.nodeByRoot), "Incorrect nodes count")
}
func TestStore_Prune_MoreThanThreshold(t *testing.T) {
// Define 100 nodes in store.
numOfNodes := uint64(100)
@@ -193,7 +160,6 @@ func TestStore_Prune_MoreThanThreshold(t *testing.T) {
}
s := f.store
s.pruneThreshold = 0
// Finalized root is at index 99 so everything before 99 should be pruned.
s.finalizedCheckpoint.Root = indexToHash(99)
@@ -216,7 +182,6 @@ func TestStore_Prune_MoreThanOnce(t *testing.T) {
}
s := f.store
s.pruneThreshold = 0
// Finalized root is at index 11 so everything before 11 should be pruned.
s.finalizedCheckpoint.Root = indexToHash(10)
@@ -229,6 +194,25 @@ func TestStore_Prune_MoreThanOnce(t *testing.T) {
assert.Equal(t, 80, len(s.nodeByRoot), "Incorrect nodes count")
}
func TestStore_Prune_ReturnEarly(t *testing.T) {
// Define 100 nodes in store.
numOfNodes := uint64(100)
f := setup(0, 0)
ctx := context.Background()
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
for i := uint64(2); i < numOfNodes; i++ {
state, blkRoot, err = prepareForkchoiceState(ctx, types.Slot(i), indexToHash(i), indexToHash(i-1), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
}
require.NoError(t, f.store.prune(ctx))
nodeCount := f.NodeCount()
require.NoError(t, f.store.prune(ctx))
require.Equal(t, nodeCount, f.NodeCount())
}
// This unit tests starts with a simple branch like this
//
// - 1
@@ -245,7 +229,6 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
state, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
f.store.pruneThreshold = 0
s := f.store
s.finalizedCheckpoint.Root = indexToHash(1)
@@ -330,7 +313,6 @@ func TestStore_PruneMapsNodes(t *testing.T) {
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
s := f.store
s.pruneThreshold = 0
s.finalizedCheckpoint.Root = indexToHash(1)
require.NoError(t, s.prune(context.Background()))
require.Equal(t, len(s.nodeByRoot), 1)

View File

@@ -24,7 +24,6 @@ type Store struct {
unrealizedFinalizedCheckpoint *forkchoicetypes.Checkpoint // best unrealized finalized checkpoint in store.
prevJustifiedCheckpoint *forkchoicetypes.Checkpoint // previous justified checkpoint in store.
finalizedCheckpoint *forkchoicetypes.Checkpoint // latest finalized epoch in store.
pruneThreshold uint64 // do not prune tree unless threshold is reached.
proposerBoostRoot [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.
previousProposerBoostRoot [fieldparams.RootLength]byte // previous block root that was boosted after being received in a timely manner.
previousProposerBoostScore uint64 // previous proposer boosted root score.

View File

@@ -5,6 +5,7 @@ import (
"testing"
forkchoicetypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v3/config/features"
"github.com/prysmaticlabs/prysm/v3/config/params"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/testing/require"
@@ -198,31 +199,36 @@ func TestStore_NoDeadLock(t *testing.T) {
// D justifies and comes late.
//
func TestStore_ForkNextEpoch(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnableDefensivePull: true,
})
defer resetCfg()
f := setup(0, 0)
ctx := context.Background()
// Epoch 1 blocks (D does not arrive)
state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
state, blkRoot, err := prepareForkchoiceState(ctx, 92, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 93, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 94, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
// Epoch 2 blocks
state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'c'}, [32]byte{'E'}, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 96, [32]byte{'e'}, [32]byte{'c'}, [32]byte{'E'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 97, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 106, [32]byte{'g'}, [32]byte{'f'}, [32]byte{'G'}, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 98, [32]byte{'g'}, [32]byte{'f'}, [32]byte{'G'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 107, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 99, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
@@ -234,16 +240,25 @@ func TestStore_ForkNextEpoch(t *testing.T) {
require.Equal(t, types.Epoch(0), f.JustifiedCheckpoint().Epoch)
// D arrives late, D is head
state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 95, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.NoError(t, f.store.setUnrealizedJustifiedEpoch([32]byte{'d'}, 1))
f.store.unrealizedJustifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 1}
require.NoError(t, f.store.setUnrealizedJustifiedEpoch([32]byte{'d'}, 2))
f.store.unrealizedJustifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 2}
f.updateUnrealizedCheckpoints()
headRoot, err = f.Head(ctx, []uint64{100})
require.NoError(t, err)
require.Equal(t, [32]byte{'d'}, headRoot)
require.Equal(t, types.Epoch(1), f.JustifiedCheckpoint().Epoch)
require.Equal(t, types.Epoch(2), f.JustifiedCheckpoint().Epoch)
require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'d'}].weight)
require.Equal(t, uint64(100), f.store.nodeByRoot[[32]byte{'h'}].weight)
// Set current epoch to 3 and H's unrealized checkpoint. Check that it becomes head.
driftGenesisTime(f, 99, 0)
require.NoError(t, f.store.setUnrealizedJustifiedEpoch([32]byte{'h'}, 2))
headRoot, err = f.Head(ctx, []uint64{100})
require.NoError(t, err)
require.Equal(t, [32]byte{'h'}, headRoot)
require.Equal(t, types.Epoch(2), f.JustifiedCheckpoint().Epoch)
require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'d'}].weight)
require.Equal(t, uint64(100), f.store.nodeByRoot[[32]byte{'h'}].weight)
}
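The test above calls a driftGenesisTime helper whose definition is not part of this diff. A self-contained sketch of what such a helper plausibly does, assuming it simply rewinds the store's genesis time so the wall clock lands on the requested slot (the names and the delay parameter are inferred from the call site):

package main

import (
	"fmt"
	"time"
)

const secondsPerSlot = 12 // mainnet SECONDS_PER_SLOT

type store struct{ genesisTime uint64 }

// driftGenesisTime rewinds genesisTime so that "now" corresponds to the
// given slot, letting a test pretend the chain has advanced that far.
func driftGenesisTime(s *store, slot, delay uint64) {
	s.genesisTime = uint64(time.Now().Unix()) - (slot*secondsPerSlot + delay)
}

func main() {
	s := &store{}
	driftGenesisTime(s, 99, 0)
	elapsedSlots := (uint64(time.Now().Unix()) - s.genesisTime) / secondsPerSlot
	fmt.Println(elapsedSlots) // 99, i.e. epoch 3 with 32 slots per epoch
}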


@@ -261,14 +261,6 @@ func TestVotes_CanFindHead(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 3")
// Verify pruning below the prune threshold does not affect head.
f.store.pruneThreshold = 1000
prevRoot := f.store.finalizedCheckpoint.Root
f.store.finalizedCheckpoint.Root = indexToHash(5)
require.NoError(t, f.store.prune(context.Background()))
assert.Equal(t, 11, len(f.store.nodeByRoot), "Incorrect nodes length after prune")
f.store.finalizedCheckpoint.Root = prevRoot
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 3")
@@ -289,13 +281,11 @@ func TestVotes_CanFindHead(t *testing.T) {
// 8
// / \
// 9 10
f.store.pruneThreshold = 1
f.store.finalizedCheckpoint.Root = indexToHash(5)
require.NoError(t, f.store.prune(context.Background()))
assert.Equal(t, 5, len(f.store.nodeByRoot), "Incorrect nodes length after prune")
// We artificially pruned the justified root.
f.store.justifiedCheckpoint.Root = indexToHash(5)
f.store.finalizedCheckpoint.Root = prevRoot
r, err = f.Head(context.Background(), balances)
require.NoError(t, err)


@@ -64,6 +64,7 @@ go_test(
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/v3:go_default_library",
"//config/features:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",


@@ -885,6 +885,15 @@ func (s *Store) viableForHead(node *Node) bool {
// It's also viable if we are in genesis epoch.
justified := s.justifiedCheckpoint.Epoch == node.justifiedEpoch || s.justifiedCheckpoint.Epoch == 0
finalized := s.finalizedCheckpoint.Epoch == node.finalizedEpoch || s.finalizedCheckpoint.Epoch == 0
if features.Get().EnableDefensivePull {
currentEpoch := slots.EpochsSinceGenesis(time.Unix(int64(s.genesisTime), 0))
if !justified && s.justifiedCheckpoint.Epoch+1 == currentEpoch {
if node.unrealizedJustifiedEpoch+1 >= currentEpoch {
justified = true
finalized = true
}
}
}
return justified && finalized
}
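As a minimal sketch of the justification rule added above, isolated from the rest of viableForHead and eliding the finalized-epoch handling: when the store justified a new epoch in the current epoch, a node that is one epoch behind stays viable if its unrealized justified epoch would catch up.

package main

import "fmt"

// viable mirrors only the defensive-pull branch shown in the hunk above.
func viable(storeJustified, nodeJustified, nodeUnrealizedJustified, currentEpoch uint64) bool {
	justified := storeJustified == nodeJustified || storeJustified == 0
	if !justified && storeJustified+1 == currentEpoch {
		// Accept nodes whose unrealized checkpoints would justify the
		// store's new justified epoch once they are realized.
		if nodeUnrealizedJustified+1 >= currentEpoch {
			justified = true
		}
	}
	return justified
}

func main() {
	// Store justified epoch 3, current epoch 4: a node realized at epoch 2
	// with unrealized justified epoch 3 stays viable; one stuck at 2 does not.
	fmt.Println(viable(3, 2, 3, 4)) // true
	fmt.Println(viable(3, 2, 2, 4)) // false
}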


@@ -7,6 +7,7 @@ import (
"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice"
forkchoicetypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v3/config/features"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
@@ -887,6 +888,43 @@ func TestStore_ViableForHead(t *testing.T) {
}
}
func TestStore_ViableForHead_DefensivePull(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnableDefensivePull: true,
})
defer resetCfg()
tests := []struct {
n *Node
justifiedEpoch types.Epoch
finalizedEpoch types.Epoch
currentEpoch types.Epoch
want bool
}{
{&Node{}, 0, 0, 0, true},
{&Node{}, 1, 0, 1, false},
{&Node{}, 0, 1, 1, false},
{&Node{finalizedEpoch: 1, justifiedEpoch: 1}, 1, 1, 1, true},
{&Node{finalizedEpoch: 1, justifiedEpoch: 1}, 2, 2, 2, false},
{&Node{finalizedEpoch: 3, justifiedEpoch: 4}, 4, 3, 3, true},
{&Node{unrealizedFinalizedEpoch: 3, unrealizedJustifiedEpoch: 4}, 3, 2, 4, true},
{&Node{unrealizedFinalizedEpoch: 2, unrealizedJustifiedEpoch: 3}, 3, 2, 4, true},
{&Node{unrealizedFinalizedEpoch: 1, unrealizedJustifiedEpoch: 2}, 3, 2, 4, false},
}
for _, tc := range tests {
jc := &forkchoicetypes.Checkpoint{Epoch: tc.justifiedEpoch}
fc := &forkchoicetypes.Checkpoint{Epoch: tc.finalizedEpoch}
currentTime := uint64(time.Now().Unix())
driftSeconds := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
s := &Store{
justifiedCheckpoint: jc,
finalizedCheckpoint: fc,
genesisTime: currentTime - driftSeconds*uint64(tc.currentEpoch),
}
assert.Equal(t, tc.want, s.viableForHead(tc.n))
}
}
func TestStore_HasParent(t *testing.T) {
tests := []struct {
m map[[32]byte]uint64


@@ -105,7 +105,6 @@ type BeaconNode struct {
slasherAttestationsFeed *event.Feed
finalizedStateAtStartUp state.BeaconState
serviceFlagOpts *serviceFlagOpts
blockchainFlagOpts []blockchain.Option
GenesisInitializer genesis.Initializer
CheckpointInitializer checkpoint.Initializer
}


@@ -1,20 +1,39 @@
package p2p
import (
"strings"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
var (
knownAgentVersions = []string{
"lighthouse",
"nimbus",
"prysm",
"teku",
"js-libp2p",
"rust-libp2p",
}
p2pPeerCount = promauto.NewGaugeVec(prometheus.GaugeOpts{
Name: "p2p_peer_count",
Help: "The number of peers in a given state.",
},
[]string{"state"})
totalPeerCount = promauto.NewGauge(prometheus.GaugeOpts{
Name: "libp2p_peers",
Help: "Tracks the total number of libp2p peers",
})
connectedPeersCount = promauto.NewGaugeVec(prometheus.GaugeOpts{
Name: "connected_libp2p_peers",
Help: "Tracks the total number of connected libp2p peers by agent string",
},
[]string{"agent"},
)
avgScoreConnectedClients = promauto.NewGaugeVec(prometheus.GaugeOpts{
Name: "connected_libp2p_peers_average_scores",
Help: "Tracks the overall p2p scores of connected libp2p peers by agent string",
},
[]string{"agent"},
)
repeatPeerConnections = promauto.NewCounter(prometheus.CounterOpts{
Name: "p2p_repeat_attempts",
Help: "The number of repeat attempts the connection handler is triggered for a peer.",
@@ -46,10 +65,60 @@ var (
)
func (s *Service) updateMetrics() {
totalPeerCount.Set(float64(len(s.peers.Connected())))
p2pPeerCount.WithLabelValues("Connected").Set(float64(len(s.peers.Connected())))
connectedPeers := s.peers.Connected()
p2pPeerCount.WithLabelValues("Connected").Set(float64(len(connectedPeers)))
p2pPeerCount.WithLabelValues("Disconnected").Set(float64(len(s.peers.Disconnected())))
p2pPeerCount.WithLabelValues("Connecting").Set(float64(len(s.peers.Connecting())))
p2pPeerCount.WithLabelValues("Disconnecting").Set(float64(len(s.peers.Disconnecting())))
p2pPeerCount.WithLabelValues("Bad").Set(float64(len(s.peers.Bad())))
store := s.Host().Peerstore()
numConnectedPeersByClient := make(map[string]float64)
peerScoresByClient := make(map[string][]float64)
for i := 0; i < len(connectedPeers); i++ {
p := connectedPeers[i]
pid, err := peer.Decode(p.String())
if err != nil {
log.WithError(err).Debug("Could not decode peer string")
continue
}
// Get the agent data.
rawAgent, err := store.Get(pid, "AgentVersion")
agent, ok := rawAgent.(string)
if err != nil || !ok {
agent = "unknown"
}
foundName := "unknown"
for _, knownAgent := range knownAgentVersions {
// If the agent string matches one of our known agents, we set
// the value to our own, sanitized string.
if strings.Contains(strings.ToLower(agent), knownAgent) {
foundName = knownAgent
}
}
numConnectedPeersByClient[foundName] += 1
// Get peer scoring data.
overallScore := s.peers.Scorers().Score(pid)
peerScoresByClient[foundName] = append(peerScoresByClient[foundName], overallScore)
}
for agent, total := range numConnectedPeersByClient {
connectedPeersCount.WithLabelValues(agent).Set(total)
}
for agent, scoringData := range peerScoresByClient {
avgScore := average(scoringData)
avgScoreConnectedClients.WithLabelValues(agent).Set(avgScore)
}
}
func average(xs []float64) float64 {
if len(xs) == 0 {
return 0
}
total := 0.0
for _, v := range xs {
total += v
}
return total / float64(len(xs))
}
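The metrics loop above reduces arbitrary agent-version strings to a small label set before using them as Prometheus label values, which keeps label cardinality bounded. A standalone sketch of that matching, with illustrative inputs:

package main

import (
	"fmt"
	"strings"
)

// normalizeAgent maps any agent string containing a known client name to
// that sanitized label, and everything else to "unknown".
func normalizeAgent(agent string, known []string) string {
	name := "unknown"
	for _, k := range known {
		if strings.Contains(strings.ToLower(agent), k) {
			name = k
		}
	}
	return name
}

func main() {
	known := []string{"lighthouse", "nimbus", "prysm", "teku"}
	fmt.Println(normalizeAgent("Prysm/v3.0.0/6f0eb475", known)) // prysm
	fmt.Println(normalizeAgent("erigon/v2.0", known))           // unknown
}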


@@ -44,5 +44,5 @@ func TestHeartbeatParameters(t *testing.T) {
func TestMiscParameters(t *testing.T) {
params.SetupTestConfigCleanup(t)
setPubSubParameters()
assert.Equal(t, randomSubD, pubsub.RandomSubD, "randomSubD")
assert.Equal(t, rSubD, 8, "rSubD")
}


@@ -8,8 +8,10 @@ import (
"github.com/libp2p/go-libp2p-core/peer"
pubsub "github.com/libp2p/go-libp2p-pubsub"
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v3/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v3/config/params"
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
pbrpc "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
)
@@ -32,7 +34,7 @@ const (
gossipSubHeartbeatInterval = 700 * time.Millisecond // frequency of heartbeat, milliseconds
// misc
randomSubD = 6 // random gossip target
rSubD = 8 // random gossip target
)
var errInvalidTopic = errors.New("invalid topic format")
@@ -128,6 +130,25 @@ func (s *Service) peerInspector(peerMap map[peer.ID]*pubsub.PeerScoreSnapshot) {
}
}
// Creates a list of pubsub options to configure our router with.
func (s *Service) pubsubOptions() []pubsub.Option {
psOpts := []pubsub.Option{
pubsub.WithMessageSignaturePolicy(pubsub.StrictNoSign),
pubsub.WithNoAuthor(),
pubsub.WithMessageIdFn(func(pmsg *pubsubpb.Message) string {
return MsgID(s.genesisValidatorsRoot, pmsg)
}),
pubsub.WithSubscriptionFilter(s),
pubsub.WithPeerOutboundQueueSize(pubsubQueueSize),
pubsub.WithMaxMessageSize(int(params.BeaconNetworkConfig().GossipMaxSizeBellatrix)),
pubsub.WithValidateQueueSize(pubsubQueueSize),
pubsub.WithPeerScore(peerScoringParams()),
pubsub.WithPeerScoreInspect(s.peerInspector, time.Minute),
pubsub.WithGossipSubParams(pubsubGossipParam()),
}
return psOpts
}
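For context on how a slice like the one returned above gets consumed: libp2p applies every pubsub.Option once at router construction. A hedged sketch, where NewGossipSub is the real libp2p constructor and the wrapper is illustrative:

package p2psketch

import (
	"context"

	"github.com/libp2p/go-libp2p-core/host"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
)

// newRouter applies the options, including WithGossipSubParams and
// WithPeerScore, at construction time; they cannot be changed afterwards.
func newRouter(ctx context.Context, h host.Host, opts []pubsub.Option) (*pubsub.PubSub, error) {
	return pubsub.NewGossipSub(ctx, h, opts...)
}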
// creates a custom gossipsub parameter set.
func pubsubGossipParam() pubsub.GossipSubParams {
gParams := pubsub.DefaultGossipSubParams()


@@ -18,7 +18,6 @@ import (
"github.com/libp2p/go-libp2p-core/peer"
"github.com/libp2p/go-libp2p-core/protocol"
pubsub "github.com/libp2p/go-libp2p-pubsub"
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
"github.com/libp2p/go-libp2p/p2p/protocol/identify"
"github.com/multiformats/go-multiaddr"
"github.com/pkg/errors"
@@ -139,19 +138,7 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
// due to libp2p's gossipsub implementation not taking into
// account previously added peers when creating the gossipsub
// object.
psOpts := []pubsub.Option{
pubsub.WithMessageSignaturePolicy(pubsub.StrictNoSign),
pubsub.WithNoAuthor(),
pubsub.WithMessageIdFn(func(pmsg *pubsubpb.Message) string {
return MsgID(s.genesisValidatorsRoot, pmsg)
}),
pubsub.WithSubscriptionFilter(s),
pubsub.WithPeerOutboundQueueSize(pubsubQueueSize),
pubsub.WithValidateQueueSize(pubsubQueueSize),
pubsub.WithPeerScore(peerScoringParams()),
pubsub.WithPeerScoreInspect(s.peerInspector, time.Minute),
pubsub.WithGossipSubParams(pubsubGossipParam()),
}
psOpts := s.pubsubOptions()
// Set the pubsub global parameters that we require.
setPubSubParameters()
// Reinitialize them in the event we are running a custom config.
@@ -245,9 +232,7 @@ func (s *Service) Start() {
})
async.RunEvery(s.ctx, 30*time.Minute, s.Peers().Prune)
async.RunEvery(s.ctx, params.BeaconNetworkConfig().RespTimeout, s.updateMetrics)
async.RunEvery(s.ctx, refreshRate, func() {
s.RefreshENR()
})
async.RunEvery(s.ctx, refreshRate, s.RefreshENR)
async.RunEvery(s.ctx, 1*time.Minute, func() {
log.WithFields(logrus.Fields{
"inbound": len(s.peers.InboundConnected()),


@@ -195,11 +195,11 @@ func (vs *Server) GetProposerDuties(ctx context.Context, req *ethpbv1.ProposerDu
// where `epoch` is described as `epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD <= current_epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD + 1`.
//
// Algorithm:
// - Get the last valid epoch. This is the last epoch of the next sync committee period.
// - Get the state for the requested epoch. If it's a future epoch from the current sync committee period
// or an epoch from the next sync committee period, then get the current state.
// - Get the state's current sync committee. If it's an epoch from the next sync committee period, then get the next sync committee.
// - Get duties.
func (vs *Server) GetSyncCommitteeDuties(ctx context.Context, req *ethpbv2.SyncCommitteeDutiesRequest) (*ethpbv2.SyncCommitteeDutiesResponse, error) {
ctx, span := trace.StartSpan(ctx, "validator.GetSyncCommitteeDuties")
defer span.End()
@@ -1035,19 +1035,6 @@ func v1ValidatorStatusToV1Alpha1(valStatus ethpbv1.ValidatorStatus) ethpbalpha.V
}
}
func (vs *Server) v1BeaconBlock(ctx context.Context, req *ethpbv1.ProduceBlockRequest) (*ethpbv1.BeaconBlock, error) {
v1alpha1req := &ethpbalpha.BlockRequest{
Slot: req.Slot,
RandaoReveal: req.RandaoReveal,
Graffiti: req.Graffiti,
}
v1alpha1resp, err := vs.V1Alpha1Server.GetBeaconBlock(ctx, v1alpha1req)
if err != nil {
return nil, err
}
return migration.V1Alpha1ToV1Block(v1alpha1resp.GetPhase0())
}
func syncCommitteeDutiesLastValidEpoch(currentEpoch types.Epoch) types.Epoch {
currentSyncPeriodIndex := currentEpoch / params.BeaconConfig().EpochsPerSyncCommitteePeriod
// Return the last epoch of the next sync committee.


@@ -53,6 +53,7 @@ go_library(
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/sync:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",


@@ -127,9 +127,8 @@ func (vs *Server) getPayloadHeaderFromBuilder(ctx context.Context, slot types.Sl
return nil, errors.New("builder returned nil bid")
}
v := bid.Message.Value
if new(big.Int).SetBytes(bytesutil.ReverseByteOrder(v)).String() == "0" {
v := new(big.Int).SetBytes(bytesutil.ReverseByteOrder(bid.Message.Value))
if v.String() == "0" {
return nil, errors.New("builder returned header with 0 bid amount")
}
@@ -159,7 +158,7 @@ func (vs *Server) getPayloadHeaderFromBuilder(ctx context.Context, slot types.Sl
}
log.WithFields(logrus.Fields{
"bid": bytesutil.BytesToUint64BigEndian(bid.Message.Value),
"value": v.String(),
"builderPubKey": fmt.Sprintf("%#x", bid.Message.Pubkey),
"blockHash": fmt.Sprintf("%#x", bid.Message.Header.BlockHash),
}).Info("Received header with bid")
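The bid value above arrives as a little-endian uint256 while big.Int's SetBytes expects big-endian input, hence the ReverseByteOrder call. A minimal standalone sketch of the same conversion using only the standard library:

package main

import (
	"fmt"
	"math/big"
)

// littleEndianToBig reverses the byte order and interprets the result as
// an unsigned big-endian integer.
func littleEndianToBig(le []byte) *big.Int {
	be := make([]byte, len(le))
	for i, b := range le {
		be[len(le)-1-i] = b
	}
	return new(big.Int).SetBytes(be)
}

func main() {
	// 1 ETH in wei (0x0de0b6b3a7640000), little-endian encoded.
	v := littleEndianToBig([]byte{0x00, 0x00, 0x64, 0xa7, 0xb3, 0xb6, 0xe0, 0x0d})
	fmt.Println(v.String()) // 1000000000000000000
}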


@@ -8,6 +8,7 @@ import (
fastssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v3/config/features"
"github.com/prysmaticlabs/prysm/v3/config/params"
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v3/crypto/hash"
@@ -106,6 +107,9 @@ func (vs *Server) canonicalEth1Data(
canonicalEth1Data = beaconState.Eth1Data()
eth1BlockHash = bytesutil.ToBytes32(beaconState.Eth1Data().BlockHash)
}
if features.Get().DisableStakinContractCheck && eth1BlockHash == [32]byte{} {
return canonicalEth1Data, new(big.Int).SetInt64(0), nil
}
_, canonicalEth1DataHeight, err := vs.Eth1BlockFetcher.BlockExists(ctx, eth1BlockHash)
if err != nil {
return nil, nil, errors.Wrap(err, "could not fetch eth1data height")


@@ -123,6 +123,9 @@ func (vs *Server) ValidatorIndex(ctx context.Context, req *ethpb.ValidatorIndexR
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not determine head state: %v", err)
}
if st == nil || st.IsNil() {
return nil, status.Errorf(codes.Internal, "head state is empty")
}
index, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(req.PublicKey))
if !ok {
return nil, status.Errorf(codes.NotFound, "Could not find validator index for public key %#x", req.PublicKey)


@@ -48,6 +48,18 @@ func TestValidatorIndex_OK(t *testing.T) {
assert.NoError(t, err, "Could not get validator index")
}
func TestValidatorIndex_StateEmpty(t *testing.T) {
Server := &Server{
HeadFetcher: &mockChain.ChainService{},
}
pubKey := pubKey(1)
req := &ethpb.ValidatorIndexRequest{
PublicKey: pubKey,
}
_, err := Server.ValidatorIndex(context.Background(), req)
assert.ErrorContains(t, "head state is empty", err)
}
func TestWaitForActivation_ContextClosed(t *testing.T) {
beaconState, err := v1.InitializeFromProto(&ethpb.BeaconState{
Slot: 0,


@@ -30,13 +30,14 @@ var errParticipation = status.Errorf(codes.Internal, "Failed to obtain epoch par
// ValidatorStatus returns the validator status of the current epoch.
// The status response can be one of the following:
// DEPOSITED - validator's deposit has been recognized by Ethereum 1, not yet recognized by Ethereum.
// PENDING - validator is in Ethereum's activation queue.
// ACTIVE - validator is active.
// EXITING - validator has initiated an exit request, or has dropped below the ejection balance and is being kicked out.
// EXITED - validator is no longer validating.
// SLASHING - validator has been kicked out due to meeting a slashing condition.
// UNKNOWN_STATUS - validator does not have a known status in the network.
func (vs *Server) ValidatorStatus(
ctx context.Context,
req *ethpb.ValidatorStatusRequest,
@@ -363,15 +364,6 @@ func (vs *Server) validatorStatus(
}
}
func (vs *Server) retrieveAfterEpochTransition(ctx context.Context, epoch types.Epoch) (state.BeaconState, error) {
endSlot, err := slots.EpochEnd(epoch)
if err != nil {
return nil, err
}
// replay to first slot of following epoch
return vs.ReplayerBuilder.ReplayerForSlot(endSlot).ReplayToSlot(ctx, endSlot+1)
}
func checkValidatorsAreRecent(headEpoch types.Epoch, req *ethpb.DoppelGangerRequest) (bool, *ethpb.DoppelGangerResponse) {
validatorsAreRecent := true
resp := &ethpb.DoppelGangerResponse{


@@ -239,7 +239,8 @@ func (s *State) latestAncestor(ctx context.Context, blockRoot [32]byte) (state.B
// Is the state the genesis state.
parentRoot := bytesutil.ToBytes32(b.Block().ParentRoot())
if parentRoot == params.BeaconConfig().ZeroHash {
return s.beaconDB.GenesisState(ctx)
s, err := s.beaconDB.GenesisState(ctx)
return s, errors.Wrap(err, "could not get genesis state")
}
// Return an error if slot hasn't been covered by checkpoint sync.
@@ -268,12 +269,13 @@ func (s *State) latestAncestor(ctx context.Context, blockRoot [32]byte) (state.B
// Does the state exist in the DB.
if s.beaconDB.HasState(ctx, parentRoot) {
return s.beaconDB.State(ctx, parentRoot)
s, err := s.beaconDB.State(ctx, parentRoot)
return s, errors.Wrap(err, "failed to retrieve state from db")
}
b, err = s.beaconDB.Block(ctx, parentRoot)
if err != nil {
return nil, err
return nil, errors.Wrap(err, "failed to retrieve block from db")
}
if b == nil || b.IsNil() {
return nil, errUnknownBlock


@@ -13,4 +13,16 @@ var (
Buckets: []float64{64, 256, 1024, 2048, 4096},
},
)
replayBlocksSummary = promauto.NewSummary(
prometheus.SummaryOpts{
Name: "replay_blocks_milliseconds",
Help: "Time it took to replay blocks",
},
)
replayToSlotSummary = promauto.NewSummary(
prometheus.SummaryOpts{
Name: "replay_to_slot_milliseconds",
Help: "Time it took to replay to slot",
},
)
)
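These summaries are fed by the same pattern ReplayBlocks and ReplayToSlot use below: record a start time, run the operation, observe the elapsed milliseconds. A minimal sketch with an illustrative metric name:

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var exampleSummary = promauto.NewSummary(prometheus.SummaryOpts{
	Name: "example_replay_milliseconds",
	Help: "Time it took to replay (illustrative)",
})

// timed reports the wall time of op to the summary in milliseconds.
func timed(op func()) {
	start := time.Now()
	op()
	exampleSummary.Observe(float64(time.Since(start).Milliseconds()))
}

func main() {
	timed(func() { time.Sleep(10 * time.Millisecond) })
}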


@@ -63,7 +63,6 @@ type chainer interface {
}
type stateReplayer struct {
s state.BeaconState
target types.Slot
method retrievalMethod
chainer chainer
@@ -120,6 +119,7 @@ func (rs *stateReplayer) ReplayBlocks(ctx context.Context) (state.BeaconState, e
log.WithFields(logrus.Fields{
"duration": duration,
}).Debug("Finished calling process_blocks on all blocks in ReplayBlocks")
replayBlocksSummary.Observe(float64(duration.Milliseconds()))
return s, nil
}
@@ -151,14 +151,14 @@ func (rs *stateReplayer) ReplayToSlot(ctx context.Context, replayTo types.Slot)
// err will be handled after the bookend log
s, err = ReplayProcessSlots(ctx, s, replayTo)
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("ReplayToSlot failed to seek to slot %d after applying blocks", replayTo))
}
duration := time.Since(start)
log.WithFields(logrus.Fields{
"duration": duration,
}).Debug("time spent in process_slots")
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("ReplayToSlot failed to seek to slot %d after applying blocks", replayTo))
}
replayToSlotSummary.Observe(float64(duration.Milliseconds()))
return s, nil
}


@@ -131,7 +131,7 @@ func (s *Service) processFetchedData(
// Use Batch Block Verify to process and verify batches directly.
if err := s.processBatchedBlocks(ctx, genesis, data.blocks, s.cfg.Chain.ReceiveBlockBatch); err != nil {
log.WithError(err).Warn("Batch is not processed")
log.WithError(err).Warn("Skip processing batched blocks")
}
}
@@ -260,7 +260,7 @@ func (s *Service) processBatchedBlocks(ctx context.Context, genesis time.Time,
headSlot := s.cfg.Chain.HeadSlot()
for headSlot >= firstBlock.Block().Slot() && s.isProcessedBlock(ctx, firstBlock, blkRoot) {
if len(blks) == 1 {
return errors.New("no good blocks in batch")
return fmt.Errorf("headSlot:%d, blockSlot:%d , root %#x:%w", headSlot, firstBlock.Block().Slot(), blkRoot, errBlockAlreadyProcessed)
}
blks = blks[1:]
firstBlock = blks[0]
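The new return value wraps a sentinel with %w, which is why the updated test below can match on the sentinel's message while errors.Is keeps working for callers. A minimal sketch; the sentinel's exact text is assumed from the test change that follows:

package main

import (
	"errors"
	"fmt"
)

var errBlockAlreadyProcessed = errors.New("block is already processed")

func main() {
	// %w preserves the wrapped error for errors.Is / errors.Unwrap.
	err := fmt.Errorf("headSlot:%d, blockSlot:%d: %w", 10, 9, errBlockAlreadyProcessed)
	fmt.Println(errors.Is(err, errBlockAlreadyProcessed)) // true
}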


@@ -457,7 +457,7 @@ func TestService_processBlockBatch(t *testing.T) {
ctx context.Context, blocks []interfaces.SignedBeaconBlock, blockRoots [][32]byte) error {
return nil
})
assert.ErrorContains(t, "no good blocks in batch", err)
assert.ErrorContains(t, "block is already processed", err)
var badBatch2 []interfaces.SignedBeaconBlock
for i, b := range batch2 {


@@ -89,6 +89,38 @@ var (
Buckets: []float64{250, 500, 1000, 1500, 2000, 4000, 8000, 16000},
},
)
// Attestation processing granular error tracking.
attBadBlockCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "gossip_attestation_bad_block_total",
Help: "Increased when a gossip attestation references a bad block",
})
attBadLmdConsistencyCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "gossip_attestation_bad_lmd_consistency_total",
Help: "Increased when a gossip attestation has bad LMD GHOST consistency",
})
attBadSelectionProofCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "gossip_attestation_bad_selection_proof_total",
Help: "Increased when a gossip attestation has a bad selection proof",
})
attBadSignatureBatchCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "gossip_attestation_bad_signature_batch_total",
Help: "Increased when a gossip attestation has a bad signature batch",
})
// Attestation and block gossip verification performance.
aggregateAttestationVerificationGossipSummary = promauto.NewSummary(
prometheus.SummaryOpts{
Name: "gossip_aggregate_attestation_verification_milliseconds",
Help: "Time to verify gossiped attestations",
},
)
blockVerificationGossipSummary = promauto.NewSummary(
prometheus.SummaryOpts{
Name: "gossip_block_verification_milliseconds",
Help: "Time to verify gossiped blocks",
},
)
)
func (s *Service) updateMetrics() {


@@ -20,6 +20,7 @@ import (
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v3/monitoring/tracing"
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
prysmTime "github.com/prysmaticlabs/prysm/v3/time"
"github.com/prysmaticlabs/prysm/v3/time/slots"
"go.opencensus.io/trace"
)
@@ -27,6 +28,7 @@ import (
// validateAggregateAndProof verifies the aggregated signature and the selection proof is valid before forwarding to the
// network and downstream services.
func (s *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
receivedTime := prysmTime.Now()
if pid == s.cfg.p2p.PeerID() {
return pubsub.ValidationAccept, nil
}
@@ -75,8 +77,11 @@ func (s *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, ms
// Attestation's slot is within ATTESTATION_PROPAGATION_SLOT_RANGE and early attestation
// processing tolerance.
if err := helpers.ValidateAttestationTime(m.Message.Aggregate.Data.Slot, s.cfg.chain.GenesisTime(),
earlyAttestationProcessingTolerance); err != nil {
if err := helpers.ValidateAttestationTime(
m.Message.Aggregate.Data.Slot,
s.cfg.chain.GenesisTime(),
earlyAttestationProcessingTolerance,
); err != nil {
tracing.AnnotateError(span, err)
return pubsub.ValidationIgnore, err
}
@@ -89,6 +94,7 @@ func (s *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, ms
if s.hasBadBlock(bytesutil.ToBytes32(m.Message.Aggregate.Data.BeaconBlockRoot)) ||
s.hasBadBlock(bytesutil.ToBytes32(m.Message.Aggregate.Data.Target.Root)) ||
s.hasBadBlock(bytesutil.ToBytes32(m.Message.Aggregate.Data.Source.Root)) {
attBadBlockCount.Inc()
return pubsub.ValidationReject, errors.New("bad block referenced in attestation data")
}
@@ -114,6 +120,8 @@ func (s *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, ms
msg.ValidatorData = m
aggregateAttestationVerificationGossipSummary.Observe(float64(prysmTime.Since(receivedTime).Milliseconds()))
return pubsub.ValidationAccept, nil
}
@@ -127,6 +135,7 @@ func (s *Service) validateAggregatedAtt(ctx context.Context, signed *ethpb.Signe
// but it's invalid in the spirit of the protocol. Here we choose safety over profit.
if err := s.cfg.chain.VerifyLmdFfgConsistency(ctx, signed.Message.Aggregate); err != nil {
tracing.AnnotateError(span, err)
attBadLmdConsistencyCount.Inc()
return pubsub.ValidationReject, err
}
@@ -168,6 +177,7 @@ func (s *Service) validateAggregatedAtt(ctx context.Context, signed *ethpb.Signe
if err != nil {
wrappedErr := errors.Wrapf(err, "Could not validate selection for validator %d", signed.Message.AggregatorIndex)
tracing.AnnotateError(span, wrappedErr)
attBadSelectionProofCount.Inc()
return pubsub.ValidationReject, wrappedErr
}


@@ -124,6 +124,7 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
if s.hasBadBlock(bytesutil.ToBytes32(att.Data.BeaconBlockRoot)) ||
s.hasBadBlock(bytesutil.ToBytes32(att.Data.Target.Root)) ||
s.hasBadBlock(bytesutil.ToBytes32(att.Data.Source.Root)) {
attBadBlockCount.Inc()
return pubsub.ValidationReject, errors.New("attestation data references bad block root")
}
@@ -141,6 +142,7 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
}
if err := s.cfg.chain.VerifyLmdFfgConsistency(ctx, att); err != nil {
tracing.AnnotateError(span, err)
attBadLmdConsistencyCount.Inc()
return pubsub.ValidationReject, err
}
@@ -222,6 +224,7 @@ func (s *Service) validateUnaggregatedAttWithState(ctx context.Context, a *eth.A
set, err := blocks.AttestationSignatureBatch(ctx, bs, []*eth.Attestation{a})
if err != nil {
tracing.AnnotateError(span, err)
attBadSignatureBatchCount.Inc()
return pubsub.ValidationReject, err
}
return s.validateWithBatchVerifier(ctx, "attestation", set)


@@ -204,6 +204,8 @@ func (s *Service) validateBeaconBlockPubSub(ctx context.Context, pid peer.ID, ms
"proposerIndex": blk.Block().ProposerIndex(),
"graffiti": string(blk.Block().Body().Graffiti()),
}).Debug("Received block")
blockVerificationGossipSummary.Observe(float64(prysmTime.Since(receivedTime).Milliseconds()))
return pubsub.ValidationAccept, nil
}
@@ -253,16 +255,17 @@ func (s *Service) validateBeaconBlock(ctx context.Context, blk interfaces.Signed
// validateBellatrixBeaconBlock validates the block for the Bellatrix fork.
// spec code:
// If the execution is enabled for the block -- i.e. is_execution_enabled(state, block.body) then validate the following:
// [REJECT] The block's execution payload timestamp is correct with respect to the slot --
// i.e. execution_payload.timestamp == compute_timestamp_at_slot(state, block.slot).
//
// If execution_payload verification of block's parent by an execution node is not complete:
// [REJECT] The block's parent (defined by block.parent_root) passes all validation (excluding execution
// node verification of the block.body.execution_payload).
// otherwise:
// [IGNORE] The block's parent (defined by block.parent_root) passes all validation (including execution
// node verification of the block.body.execution_payload).
func (s *Service) validateBellatrixBeaconBlock(ctx context.Context, parentState state.BeaconState, blk interfaces.BeaconBlock) error {
// Error if block and state are not the same version
if parentState.Version() != blk.Version() {


@@ -23,9 +23,11 @@ func FlagOptions(c *cli.Context) ([]execution.Option, error) {
if err != nil {
return nil, errors.Wrap(err, "could not read JWT secret file for authenticating execution API")
}
headers := strings.Split(c.String(flags.ExecutionEngineHeaders.Name), ",")
opts := []execution.Option{
execution.WithHttpEndpoint(endpoint),
execution.WithEth1HeaderRequestLimit(c.Uint64(flags.Eth1HeaderReqLimit.Name)),
execution.WithHeaders(headers),
}
if len(jwtSecret) > 0 {
opts = append(opts, execution.WithHttpEndpointAndJWTSecret(endpoint, jwtSecret))
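FlagOptions above only splits the raw flag value on commas and hands the pairs to execution.WithHeaders; the option's internals are not shown in this diff. A hedged standalone sketch of parsing the documented key1=value1,key2=value2 format into an http.Header:

package main

import (
	"fmt"
	"net/http"
	"strings"
)

// parseHeaderFlag turns "key1=value1,key2=value2" into an http.Header,
// skipping malformed entries.
func parseHeaderFlag(raw string) http.Header {
	h := make(http.Header)
	for _, pair := range strings.Split(raw, ",") {
		kv := strings.SplitN(pair, "=", 2)
		if len(kv) != 2 {
			continue
		}
		h.Set(strings.TrimSpace(kv[0]), strings.TrimSpace(kv[1]))
	}
	return h
}

func main() {
	fmt.Println(parseHeaderFlag("key1=value1,key2=value2"))
}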


@@ -32,6 +32,12 @@ var (
Usage: "An execution client http endpoint. Can contain auth header as well in the format",
Value: "http://localhost:8551",
}
// ExecutionEngineHeaders defines a list of HTTP headers to send with all execution client requests.
ExecutionEngineHeaders = &cli.StringFlag{
Name: "execution-headers",
Usage: "A comma separated list of key value pairs to pass as HTTP headers for all execution " +
"client calls. Example: --execution-headers=key1=value1,key2=value2",
}
// Deprecated: HTTPWeb3ProviderFlag is a deprecated flag and is an alias for the ExecutionEngineEndpoint flag.
HTTPWeb3ProviderFlag = &cli.StringFlag{
Name: "http-web3provider",


@@ -38,6 +38,7 @@ import (
var appFlags = []cli.Flag{
flags.DepositContractFlag,
flags.ExecutionEngineEndpoint,
flags.ExecutionEngineHeaders,
flags.HTTPWeb3ProviderFlag,
flags.ExecutionJWTSecretFlag,
flags.RPCHost,


@@ -107,6 +107,7 @@ var appHelpFlagGroups = []flagGroup{
flags.GRPCGatewayPort,
flags.GPRCGatewayCorsDomain,
flags.ExecutionEngineEndpoint,
flags.ExecutionEngineHeaders,
flags.HTTPWeb3ProviderFlag,
flags.ExecutionJWTSecretFlag,
flags.SetGCPercent,


@@ -11,9 +11,11 @@ go_library(
importpath = "github.com/prysmaticlabs/prysm/v3/cmd/prysmctl",
visibility = ["//visibility:private"],
deps = [
"//cmd/prysmctl/checkpoint:go_default_library",
"//cmd/prysmctl/checkpointsync:go_default_library",
"//cmd/prysmctl/deprecated:go_default_library",
"//cmd/prysmctl/p2p:go_default_library",
"//cmd/prysmctl/testnet:go_default_library",
"//cmd/prysmctl/weaksubjectivity:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",
],


@@ -3,11 +3,10 @@ load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"checkpoint.go",
"latest.go",
"save.go",
"cmd.go",
"download.go",
],
importpath = "github.com/prysmaticlabs/prysm/v3/cmd/prysmctl/checkpoint",
importpath = "github.com/prysmaticlabs/prysm/v3/cmd/prysmctl/checkpointsync",
visibility = ["//visibility:public"],
deps = [
"//api/client/beacon:go_default_library",


@@ -0,0 +1,14 @@
package checkpointsync
import "github.com/urfave/cli/v2"
var Commands = []*cli.Command{
{
Name: "checkpoint-sync",
Aliases: []string{"cpt-sync"},
Usage: "commands for managing checkpoint sync",
Subcommands: []*cli.Command{
downloadCmd,
},
},
}


@@ -1,4 +1,4 @@
package checkpoint
package checkpointsync
import (
"context"
@@ -10,37 +10,38 @@ import (
"github.com/urfave/cli/v2"
)
var saveFlags = struct {
var downloadFlags = struct {
BeaconNodeHost string
Timeout time.Duration
}{}
var saveCmd = &cli.Command{
Name: "save",
Usage: "Save the latest finalized header and the most recent block it integrates. To be used for checkpoint sync.",
Action: cliActionSave,
var downloadCmd = &cli.Command{
Name: "download",
Aliases: []string{"dl"},
Usage: "Download the latest finalized state and the most recent block it integrates. To be used for checkpoint sync.",
Action: cliActionDownload,
Flags: []cli.Flag{
&cli.StringFlag{
Name: "beacon-node-host",
Usage: "host:port for beacon node connection",
Destination: &saveFlags.BeaconNodeHost,
Destination: &downloadFlags.BeaconNodeHost,
Value: "localhost:3500",
},
&cli.DurationFlag{
Name: "http-timeout",
Usage: "timeout for http requests made to beacon-node-url (uses duration format, ex: 2m31s). default: 4m",
Destination: &saveFlags.Timeout,
Destination: &downloadFlags.Timeout,
Value: time.Minute * 4,
},
},
}
func cliActionSave(_ *cli.Context) error {
func cliActionDownload(_ *cli.Context) error {
ctx := context.Background()
f := saveFlags
f := downloadFlags
opts := []beacon.ClientOpt{beacon.WithTimeout(f.Timeout)}
client, err := beacon.NewClient(saveFlags.BeaconNodeHost, opts...)
client, err := beacon.NewClient(downloadFlags.BeaconNodeHost, opts...)
if err != nil {
return err
}


@@ -0,0 +1,12 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["cmd.go"],
importpath = "github.com/prysmaticlabs/prysm/v3/cmd/prysmctl/deprecated",
visibility = ["//visibility:public"],
deps = [
"//cmd/prysmctl/deprecated/checkpoint:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",
],
)


@@ -0,0 +1,13 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"checkpoint.go",
"latest.go",
"save.go",
],
importpath = "github.com/prysmaticlabs/prysm/v3/cmd/prysmctl/deprecated/checkpoint",
visibility = ["//visibility:public"],
deps = ["@com_github_urfave_cli_v2//:go_default_library"],
)


@@ -6,9 +6,9 @@ var Commands = []*cli.Command{
{
Name: "checkpoint",
Aliases: []string{"cpt"},
Usage: "commands for managing checkpoint syncing",
Usage: "deprecated",
Subcommands: []*cli.Command{
latestCmd,
checkpointCmd,
saveCmd,
},
},


@@ -0,0 +1,17 @@
package checkpoint
import (
"fmt"
"github.com/urfave/cli/v2"
)
var checkpointCmd = &cli.Command{
Name: "latest",
Usage: "deprecated - please use 'prysmctl weak-subjectivity checkpoint' instead!",
Action: cliDeprecatedLatest,
}
func cliDeprecatedLatest(_ *cli.Context) error {
return fmt.Errorf("This command has moved. Please use 'prysmctl weak-subjectivity checkpoint' instead!")
}


@@ -0,0 +1,17 @@
package checkpoint
import (
"fmt"
"github.com/urfave/cli/v2"
)
var saveCmd = &cli.Command{
Name: "save",
Usage: "deprecated - please use 'prysmctl checkpoint-sync download' instead!",
Action: cliActionDeprecatedSave,
}
func cliActionDeprecatedSave(_ *cli.Context) error {
return fmt.Errorf("This command has moved. Please use 'prysmctl checkpoint-sync download' instead!")
}


@@ -0,0 +1,12 @@
package deprecated
import (
"github.com/prysmaticlabs/prysm/v3/cmd/prysmctl/deprecated/checkpoint"
"github.com/urfave/cli/v2"
)
var Commands = []*cli.Command{}
func init() {
Commands = append(Commands, checkpoint.Commands...)
}


@@ -3,9 +3,11 @@ package main
import (
"os"
"github.com/prysmaticlabs/prysm/v3/cmd/prysmctl/checkpoint"
"github.com/prysmaticlabs/prysm/v3/cmd/prysmctl/checkpointsync"
"github.com/prysmaticlabs/prysm/v3/cmd/prysmctl/deprecated"
"github.com/prysmaticlabs/prysm/v3/cmd/prysmctl/p2p"
"github.com/prysmaticlabs/prysm/v3/cmd/prysmctl/testnet"
"github.com/prysmaticlabs/prysm/v3/cmd/prysmctl/weaksubjectivity"
log "github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
@@ -23,7 +25,12 @@ func main() {
}
func init() {
prysmctlCommands = append(prysmctlCommands, checkpoint.Commands...)
prysmctlCommands = append(prysmctlCommands, testnet.Commands...)
// contains the old checkpoint sync subcommands. these commands should display help/warn messages
// pointing to their new locations
prysmctlCommands = append(prysmctlCommands, deprecated.Commands...)
prysmctlCommands = append(prysmctlCommands, checkpointsync.Commands...)
prysmctlCommands = append(prysmctlCommands, p2p.Commands...)
prysmctlCommands = append(prysmctlCommands, testnet.Commands...)
prysmctlCommands = append(prysmctlCommands, weaksubjectivity.Commands...)
}


@@ -0,0 +1,15 @@
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"checkpoint.go",
"cmd.go",
],
importpath = "github.com/prysmaticlabs/prysm/v3/cmd/prysmctl/weaksubjectivity",
visibility = ["//visibility:public"],
deps = [
"//api/client/beacon:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",
],
)


@@ -1,4 +1,4 @@
package checkpoint
package weaksubjectivity
import (
"context"
@@ -9,37 +9,38 @@ import (
"github.com/urfave/cli/v2"
)
var latestFlags = struct {
var checkpointFlags = struct {
BeaconNodeHost string
Timeout time.Duration
}{}
var latestCmd = &cli.Command{
Name: "latest",
Usage: "Compute the latest weak subjectivity checkpoint (block_root:epoch) using trusted server data.",
Action: cliActionLatest,
var checkpointCmd = &cli.Command{
Name: "checkpoint",
Aliases: []string{"cpt"},
Usage: "Compute the latest weak subjectivity checkpoint (block_root:epoch) using trusted server data.",
Action: cliActionCheckpoint,
Flags: []cli.Flag{
&cli.StringFlag{
Name: "beacon-node-host",
Usage: "host:port for beacon node to query",
Destination: &latestFlags.BeaconNodeHost,
Destination: &checkpointFlags.BeaconNodeHost,
Value: "http://localhost:3500",
},
&cli.DurationFlag{
Name: "http-timeout",
Usage: "timeout for http requests made to beacon-node-url (uses duration format, ex: 2m31s). default: 2m",
Destination: &latestFlags.Timeout,
Destination: &checkpointFlags.Timeout,
Value: time.Minute * 2,
},
},
}
func cliActionLatest(_ *cli.Context) error {
func cliActionCheckpoint(_ *cli.Context) error {
ctx := context.Background()
f := latestFlags
f := checkpointFlags
opts := []beacon.ClientOpt{beacon.WithTimeout(f.Timeout)}
client, err := beacon.NewClient(latestFlags.BeaconNodeHost, opts...)
client, err := beacon.NewClient(checkpointFlags.BeaconNodeHost, opts...)
if err != nil {
return err
}


@@ -0,0 +1,14 @@
package weaksubjectivity
import "github.com/urfave/cli/v2"
var Commands = []*cli.Command{
{
Name: "weak-subjectivity",
Aliases: []string{"ws"},
Usage: "commands dealing with weak subjectivity",
Subcommands: []*cli.Command{
checkpointCmd,
},
},
}


@@ -61,13 +61,16 @@ type Flags struct {
EnableSlashingProtectionPruning bool
EnableNativeState bool // EnableNativeState defines whether the beacon state will be represented as a pure Go struct or a Go struct that wraps a proto struct.
DisablePullTips bool // DisablePullTips enables experimental disabling of boundary checks.
DisablePullTips bool // DisablePullTips disables experimental disabling of boundary checks.
EnableDefensivePull bool // EnableDefensivePull enables experimental back boundary checks.
EnableVectorizedHTR bool // EnableVectorizedHTR specifies whether the beacon state will use the optimized sha256 routines.
DisableForkchoiceDoublyLinkedTree bool // DisableForkChoiceDoublyLinkedTree specifies whether fork choice store will use a doubly linked tree.
EnableBatchGossipAggregation bool // EnableBatchGossipAggregation specifies whether to further aggregate our gossip batches before verifying them.
EnableOnlyBlindedBeaconBlocks bool // EnableOnlyBlindedBeaconBlocks enables only storing blinded beacon blocks in the DB post-Bellatrix fork.
EnableStartOptimistic bool // EnableStartOptimistic treats every block as optimistic at startup.
DisableStakinContractCheck bool // Disables check for deposit contract when proposing blocks
// KeystoreImportDebounceInterval specifies the time duration the validator waits to reload new keys if they have
// changed on disk. This feature is for advanced use cases only.
KeystoreImportDebounceInterval time.Duration
@@ -209,6 +212,14 @@ func ConfigureBeaconChain(ctx *cli.Context) error {
logEnabled(disablePullTips)
cfg.DisablePullTips = true
}
if ctx.Bool(enableDefensivePull.Name) {
logEnabled(enableDefensivePull)
cfg.EnableDefensivePull = true
}
if ctx.Bool(disableStakinContractCheck.Name) {
logEnabled(disableStakinContractCheck)
cfg.DisableStakinContractCheck = true
}
if ctx.Bool(disableVecHTR.Name) {
logEnabled(disableVecHTR)
} else {


@@ -90,6 +90,8 @@ var deprecatedFlags = []cli.Flag{
deprecatedFallbackProvider,
}
// deprecatedBeaconFlags contains flags that are still used by other components
// and therefore cannot be added to deprecatedFlags
var deprecatedBeaconFlags = []cli.Flag{
deprecatedBackupWebHookFlag,
}


@@ -83,6 +83,10 @@ var (
"a foolproof method to find duplicate instances in the network. Your validator will still be" +
" vulnerable if it is being run in unsafe configurations.",
}
disableStakinContractCheck = &cli.BoolFlag{
Name: "disable-staking-contract-check",
Usage: "Disables checking of staking contract deposits when proposing blocks, useful for devnets",
}
enableHistoricalSpaceRepresentation = &cli.BoolFlag{
Name: "enable-historical-state-representation",
Usage: "Enables the beacon chain to save historical states in a space efficient manner." +
@@ -97,6 +101,11 @@ var (
Name: "experimental-enable-boundary-checks",
Usage: "Experimental enable of boundary checks, useful for debugging, may cause bad votes.",
}
enableDefensivePull = &cli.BoolFlag{
Name: "enable-back-pull",
Usage: "Experimental enable of past boundary checks, useful for debugging, may cause bad votes.",
Hidden: true,
}
disableVecHTR = &cli.BoolFlag{
Name: "disable-vectorized-htr",
Usage: "Disables the new go sha256 library which utilizes optimized routines for merkle trees",
@@ -163,6 +172,7 @@ var BeaconChainFlags = append(deprecatedBeaconFlags, append(deprecatedFlags, []c
disableGossipBatchAggregation,
EnableOnlyBlindedBeaconBlocks,
enableStartupOptimistic,
enableDefensivePull,
}...)...)
// E2EBeaconChainFlags contains a list of the beacon chain feature flags to be tested in E2E.


@@ -2,7 +2,7 @@ package params
const (
altairE2EForkEpoch = 6
bellatrixE2EForkEpoch = 8 //nolint:deadcode
bellatrixE2EForkEpoch = 8
)
// E2ETestConfig retrieves the configurations made specifically for E2E testing.


@@ -24,8 +24,6 @@ var (
ErrNilObject = errors.New("received nil object")
// ErrNilSignedBeaconBlock is returned when a nil signed beacon block is received.
ErrNilSignedBeaconBlock = errors.New("signed beacon block can't be nil")
errNilBeaconBlock = errors.New("beacon block can't be nil")
errNilBeaconBlockBody = errors.New("beacon block body can't be nil")
)
// NewSignedBeaconBlock creates a signed beacon block from a protobuf signed beacon block.


@@ -17,12 +17,6 @@ func BeaconBlockIsNil(b interfaces.SignedBeaconBlock) error {
if b == nil || b.IsNil() {
return ErrNilSignedBeaconBlock
}
if b.Block().IsNil() {
return errNilBeaconBlock
}
if b.Block().Body().IsNil() {
return errNilBeaconBlockBody
}
return nil
}


@@ -1,5 +1,6 @@
base:
language: go
version: 1.18
build_tags:
- fuzz
- develop


@@ -9,7 +9,7 @@ import (
)
// DefaultRPCHTTPTimeout for HTTP requests via an RPC connection to an execution node.
const DefaultRPCHTTPTimeout = time.Second * 6
const DefaultRPCHTTPTimeout = time.Second * 30
// This creates a custom HTTP transport which we can attach to our HTTP client
// in order to inject JWT auth strings into our HTTP request headers. Authentication


@@ -33,7 +33,7 @@ func SinceGenesis(genesis time.Time) types.Slot {
return types.Slot(uint64(prysmTime.Since(genesis).Seconds()) / params.BeaconConfig().SecondsPerSlot)
}
// EpochsSinceGenesis returns the number of slots since
// EpochsSinceGenesis returns the number of epochs since
// the provided genesis time.
func EpochsSinceGenesis(genesis time.Time) types.Epoch {
return types.Epoch(SinceGenesis(genesis) / params.BeaconConfig().SlotsPerEpoch)
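As a worked example with mainnet parameters (SecondsPerSlot = 12, SlotsPerEpoch = 32): a genesis time 3,840 seconds in the past gives SinceGenesis = 3840 / 12 = 320 slots, so EpochsSinceGenesis = 320 / 32 = epoch 10.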


@@ -150,17 +150,19 @@ func (v *validator) ProposeBlock(ctx context.Context, slot types.Slot, pubKey [f
log.WithError(err).Error("Failed to get execution payload")
return
}
txs, err := p.Transactions()
if err != nil {
log.WithError(err).Error("Failed to get execution payload transactions")
return
}
log = log.WithFields(logrus.Fields{
"payloadHash": fmt.Sprintf("%#x", bytesutil.Trunc(p.BlockHash())),
"parentHash": fmt.Sprintf("%#x", bytesutil.Trunc(p.ParentHash())),
"blockNumber": p.BlockNumber,
"txCount": len(txs),
})
if !blk.IsBlinded() {
txs, err := p.Transactions()
if err != nil {
log.WithError(err).Error("Failed to get execution payload transactions")
return
}
log = log.WithField("txCount", len(txs))
}
if p.GasLimit() != 0 {
log = log.WithField("gasUtilized", float64(p.GasUsed())/float64(p.GasLimit()))
}
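For example, with a 30,000,000 gas limit and 15,000,000 gas used, gasUtilized is logged as 0.5.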


@@ -550,10 +550,23 @@ func testProposeBlock(t *testing.T, graffiti []byte) {
},
},
},
{
name: "bellatrix blind block",
block: &ethpb.GenericBeaconBlock{
Block: &ethpb.GenericBeaconBlock_BlindedBellatrix{
BlindedBellatrix: func() *ethpb.BlindedBeaconBlockBellatrix {
blk := util.NewBlindedBeaconBlockBellatrix()
blk.Block.Body.Graffiti = graffiti
return blk.Block
}(),
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
hook := logTest.NewGlobal()
validator, m, validatorKey, finish := setup(t)
defer finish()
pubKey := [fieldparams.BLSPubkeyLength]byte{}
@@ -594,6 +607,7 @@ func testProposeBlock(t *testing.T, graffiti []byte) {
validator.ProposeBlock(context.Background(), 1, pubKey)
assert.Equal(t, string(validator.graffiti), string(sentBlock.Block().Body().Graffiti()))
require.LogsContain(t, hook, "Submitted new block")
})
}
}


@@ -938,7 +938,7 @@ func (v *validator) logDuties(slot types.Slot, duties []*ethpb.DutiesResponse_Du
}
for i := types.Slot(0); i < params.BeaconConfig().SlotsPerEpoch; i++ {
startTime := slots.StartTime(v.genesisTime, slotOffset+i)
durationTillDuty := time.Until(startTime)
durationTillDuty := time.Until(startTime) + time.Second
if len(attesterKeys[i]) > 0 {
log.WithFields(logrus.Fields{