Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 05:47:59 -05:00)

Compare commits: 49 commits between bazel-rele... and v3.1.0
| SHA1 |
|---|
| 8627fe72e8 |
| 65bf3d0fa8 |
| a5da9aedd4 |
| e1ab034d25 |
| 84bc8f3d64 |
| c4deb84012 |
| 488e19e428 |
| bcaae1c440 |
| 587ba83aca |
| 091f16b26c |
| fb9626fdd7 |
| c638e114db |
| b1e08307ed |
| cac5d0f234 |
| 52d48b328f |
| 9729b2ec77 |
| 7aa3776aa6 |
| 760c71ef77 |
| 6c209db3ca |
| 0725905797 |
| 166f8a1eb6 |
| 85896e994e |
| 4a00b295ed |
| d2b39e9697 |
| 97dc86e742 |
| cff3b99918 |
| be9847f23c |
| 4796827d22 |
| 57b7e0b572 |
| b5039e9bd9 |
| f5d792299f |
| 9ce922304f |
| 3cbb4aace4 |
| c94095b609 |
| ae858bbd0a |
| 30cd158ae5 |
| 2db22adfe0 |
| 161a14d256 |
| 9dee22f7ab |
| 52271cf0ba |
| e1f56d403c |
| a2193ee014 |
| 762b3df491 |
| 2b3025828f |
| 436792fe38 |
| 1d07bffe11 |
| f086535c8a |
| 3a4c599a96 |
| 1c6cbc574e |
.github/workflows/go.yml (vendored, 1 change)
@@ -53,6 +53,7 @@ jobs:
         uses: golangci/golangci-lint-action@v3
         with:
+          version: v1.47.2
           args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number

   build:
     name: Build
@@ -13,7 +13,7 @@ linters:
   enable:
     - gofmt
     - goimports
-    - deadcode
+    - unused
     - errcheck
     - gosimple
     - gocognit
@@ -28,7 +28,7 @@ load("@com_grail_bazel_toolchain//toolchain:rules.bzl", "llvm_toolchain")

 llvm_toolchain(
     name = "llvm_toolchain",
-    llvm_version = "10.0.0",
+    llvm_version = "13.0.1",
 )

 load("@llvm_toolchain//:toolchains.bzl", "llvm_register_toolchains")
@@ -21,14 +21,13 @@ import (
 // OriginData represents the BeaconState and SignedBeaconBlock necessary to start an empty Beacon Node
 // using Checkpoint Sync.
 type OriginData struct {
-    wsd *WeakSubjectivityData
-    sb  []byte
-    bb  []byte
-    st  state.BeaconState
-    b   interfaces.SignedBeaconBlock
-    vu  *detect.VersionedUnmarshaler
-    br  [32]byte
-    sr  [32]byte
+    sb []byte
+    bb []byte
+    st state.BeaconState
+    b  interfaces.SignedBeaconBlock
+    vu *detect.VersionedUnmarshaler
+    br [32]byte
+    sr [32]byte
 }

 // SaveBlock saves the downloaded block to a unique file in the given path.
@@ -95,8 +95,6 @@ func WithTimeout(timeout time.Duration) ClientOpt {
 // Client provides a collection of helper methods for calling the Eth Beacon Node API endpoints.
 type Client struct {
     hc      *http.Client
-    host    string
-    scheme  string
     baseURL *url.URL
 }
@@ -274,9 +274,6 @@ func non200Err(response *http.Response) error {
     if err != nil {
         body = "(Unable to read response body.)"
     } else {
-        if jsonErr := json.Unmarshal(bodyBytes, &errMessage); jsonErr != nil {
-            return errors.Wrap(jsonErr, "unable to read response body")
-        }
         body = "response body:\n" + string(bodyBytes)
     }
     msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
@@ -285,13 +282,25 @@
         log.WithError(ErrNoContent).Debug(msg)
         return ErrNoContent
     case 400:
+        if jsonErr := json.Unmarshal(bodyBytes, &errMessage); jsonErr != nil {
+            return errors.Wrap(jsonErr, "unable to read response body")
+        }
         log.WithError(ErrBadRequest).Debug(msg)
         return errors.Wrap(ErrBadRequest, errMessage.Message)
     case 404:
+        if jsonErr := json.Unmarshal(bodyBytes, &errMessage); jsonErr != nil {
+            return errors.Wrap(jsonErr, "unable to read response body")
+        }
         log.WithError(ErrNotFound).Debug(msg)
         return errors.Wrap(ErrNotFound, errMessage.Message)
-    default:
+    case 500:
+        if jsonErr := json.Unmarshal(bodyBytes, &errMessage); jsonErr != nil {
+            return errors.Wrap(jsonErr, "unable to read response body")
+        }
         log.WithError(ErrNotOK).Debug(msg)
         return errors.Wrap(ErrNotOK, errMessage.Message)
+    default:
+        log.WithError(ErrNotOK).Debug(msg)
+        return errors.Wrap(ErrNotOK, fmt.Sprintf("unsupported error code: %d", response.StatusCode))
     }
 }
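With this change, non200Err decodes the JSON error body per recognized status code and wraps a distinct sentinel (ErrNoContent, ErrBadRequest, ErrNotFound, ErrNotOK), so callers can branch with errors.Is. A minimal sketch of consuming these sentinels around the client's GetHeader call; the fallback policy in the comments is illustrative, not Prysm's:

    header, err := c.GetHeader(ctx, slot, parentHash, pubkey)
    switch {
    case errors.Is(err, ErrNoContent):
        // 204: the relay has no header for this slot; fall back to local block building.
    case errors.Is(err, ErrBadRequest), errors.Is(err, ErrNotFound):
        // 400/404: request-side problem; the server's message is carried in the wrapped error.
    case err != nil:
        // Any other non-2xx status maps to ErrNotOK.
    default:
        _ = header // use the returned bid
    }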
@@ -144,6 +144,23 @@ func TestClient_GetHeader(t *testing.T) {
     _, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
     require.ErrorIs(t, err, ErrNotOK)

+    hc = &http.Client{
+        Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
+            require.Equal(t, expectedPath, r.URL.Path)
+            return &http.Response{
+                StatusCode: http.StatusNoContent,
+                Body:       io.NopCloser(bytes.NewBuffer([]byte("No header is available."))),
+                Request:    r.Clone(ctx),
+            }, nil
+        }),
+    }
+    c = &Client{
+        hc:      hc,
+        baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
+    }
+    _, err = c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
+    require.ErrorIs(t, err, ErrNoContent)
+
     hc = &http.Client{
         Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
             require.Equal(t, expectedPath, r.URL.Path)
@@ -89,7 +89,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
     }
     return payloadID, nil
 case execution.ErrInvalidPayloadStatus:
-    newPayloadInvalidNodeCount.Inc()
+    forkchoiceUpdatedInvalidNodeCount.Inc()
     headRoot := arg.headRoot
     if len(lastValidHash) == 0 {
         lastValidHash = defaultLatestValidHash
@@ -1155,6 +1155,18 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
     require.Equal(t, false, optimistic)
     require.DeepEqual(t, validCheckpoint.Root, cp.Root)
     require.Equal(t, validCheckpoint.Epoch, cp.Epoch)
+
+    // Checkpoint with a lower epoch
+    oldCp, err := service.cfg.BeaconDB.FinalizedCheckpoint(ctx)
+    require.NoError(t, err)
+    invalidCp := &ethpb.Checkpoint{
+        Epoch: oldCp.Epoch - 1,
+    }
+    // Nothing should happen as we no-op on an invalid checkpoint.
+    require.NoError(t, service.updateFinalized(ctx, invalidCp))
+    got, err := service.cfg.BeaconDB.FinalizedCheckpoint(ctx)
+    require.NoError(t, err)
+    require.DeepEqual(t, oldCp, got)
 }

 func TestService_removeInvalidBlockAndState(t *testing.T) {
@@ -17,6 +17,7 @@ import (
     "github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
     types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
     "github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
+    "github.com/prysmaticlabs/prysm/v3/math"
     ethpbv1 "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"
     "github.com/prysmaticlabs/prysm/v3/time/slots"
     "github.com/sirupsen/logrus"
@@ -100,7 +101,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock

     // A chain re-org occurred, so we fire an event notifying the rest of the services.
     if bytesutil.ToBytes32(headBlock.Block().ParentRoot()) != oldHeadRoot {
-        commonRoot, err := s.ForkChoicer().CommonAncestorRoot(ctx, oldHeadRoot, newHeadRoot)
+        commonRoot, forkSlot, err := s.ForkChoicer().CommonAncestor(ctx, oldHeadRoot, newHeadRoot)
         if err != nil {
             log.WithError(err).Error("Could not find common ancestor root")
             commonRoot = params.BeaconConfig().ZeroHash
@@ -111,8 +112,9 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
             "oldSlot":            fmt.Sprintf("%d", headSlot),
             "oldRoot":            fmt.Sprintf("%#x", oldHeadRoot),
             "commonAncestorRoot": fmt.Sprintf("%#x", commonRoot),
+            "distance":           headSlot + newHeadSlot - 2*forkSlot,
+            "depth":              math.Max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot)),
         }).Info("Chain reorg occurred")
-        absoluteSlotDifference := slots.AbsoluteValueSlotDifference(newHeadSlot, headSlot)
         isOptimistic, err := s.IsOptimistic(ctx)
         if err != nil {
             return errors.Wrap(err, "could not check if node is optimistically synced")
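The two new log fields are related: distance = headSlot + newHeadSlot - 2*forkSlot is algebraically (headSlot - forkSlot) + (newHeadSlot - forkSlot), i.e. the total number of slots on both branches back to the fork point, while depth is the longer of the two branch lengths. For example, with the old head at slot 3, the new head at slot 4, and the fork at slot 2, distance = 3 and depth = 2. A standalone sketch of the same arithmetic (math.Max here is Prysm's uint64 helper, as imported above):

    // reorgDistance is the sum of both branch lengths back to the fork point.
    func reorgDistance(oldHead, newHead, fork types.Slot) types.Slot {
        return oldHead + newHead - 2*fork // = (oldHead-fork) + (newHead-fork)
    }

    // reorgDepth is the longer of the two branch lengths.
    func reorgDepth(oldHead, newHead, fork types.Slot) uint64 {
        return math.Max(uint64(oldHead-fork), uint64(newHead-fork))
    }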
@@ -121,7 +123,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
             Type: statefeed.Reorg,
             Data: &ethpbv1.EventChainReorg{
                 Slot:         newHeadSlot,
-                Depth:        absoluteSlotDifference,
+                Depth:        math.Max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot)),
                 OldHeadBlock: oldHeadRoot[:],
                 NewHeadBlock: newHeadRoot[:],
                 OldHeadState: oldStateRoot,
@@ -342,7 +344,7 @@ func (s *Service) notifyNewHeadEvent(
 // This saves the attestations between `orphanedRoot` and the common ancestor root that is derived using `newHeadRoot`.
 // It also filters out the attestations that is one epoch older as a defense so invalid attestations don't flow into the attestation pool.
 func (s *Service) saveOrphanedAtts(ctx context.Context, orphanedRoot [32]byte, newHeadRoot [32]byte) error {
-    commonAncestorRoot, err := s.ForkChoicer().CommonAncestorRoot(ctx, newHeadRoot, orphanedRoot)
+    commonAncestorRoot, _, err := s.ForkChoicer().CommonAncestor(ctx, newHeadRoot, orphanedRoot)
     switch {
     // Exit early if there's no common ancestor and root doesn't exist, there would be nothing to save.
     case errors.Is(err, forkchoice.ErrUnknownCommonAncestor):
@@ -150,6 +150,8 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
     assert.DeepEqual(t, newHeadSignedBlock, pb, "Head did not change")
     assert.DeepSSZEqual(t, headState.CloneInnerState(), service.headState(ctx).CloneInnerState(), "Head did not change")
     require.LogsContain(t, hook, "Chain reorg occurred")
+    require.LogsContain(t, hook, "distance=1")
+    require.LogsContain(t, hook, "depth=1")
 }

 func TestCacheJustifiedStateBalances_CanCache(t *testing.T) {
@@ -8,6 +8,7 @@ import (
     "github.com/pkg/errors"
     "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/blocks"
     "github.com/prysmaticlabs/prysm/v3/config/params"
+    consensusBlocks "github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
     "github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
     "github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
     ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
@@ -51,10 +52,15 @@ func logStateTransitionData(b interfaces.BeaconBlock) error {
     }
     log = log.WithField("payloadHash", fmt.Sprintf("%#x", bytesutil.Trunc(p.BlockHash())))
     txs, err := p.Transactions()
-    if err != nil {
+    switch {
+    case errors.Is(err, consensusBlocks.ErrUnsupportedGetter):
+    case err != nil:
         return err
+    default:
+        log = log.WithField("txCount", len(txs))
+        txsPerSlotCount.Set(float64(len(txs)))
     }
-    log = log.WithField("txCount", len(txs))
+
     }
     log.Info("Finished applying state transition")
     return nil
@@ -158,10 +158,47 @@ var (
         Name: "forkchoice_updated_optimistic_node_count",
         Help: "Count the number of optimistic nodes after forkchoiceUpdated EE call",
     })
+    forkchoiceUpdatedInvalidNodeCount = promauto.NewCounter(prometheus.CounterOpts{
+        Name: "forkchoice_updated_invalid_node_count",
+        Help: "Count the number of invalid nodes after forkchoiceUpdated EE call",
+    })
+    txsPerSlotCount = promauto.NewGauge(prometheus.GaugeOpts{
+        Name: "txs_per_slot_count",
+        Help: "Count the number of txs per slot",
+    })
     missedPayloadIDFilledCount = promauto.NewCounter(prometheus.CounterOpts{
         Name: "missed_payload_id_filled_count",
         Help: "",
     })
+    onBlockProcessingTime = promauto.NewSummary(prometheus.SummaryOpts{
+        Name: "on_block_processing_milliseconds",
+        Help: "Total time in milliseconds to complete a call to onBlock()",
+    })
+    stateTransitionProcessingTime = promauto.NewSummary(prometheus.SummaryOpts{
+        Name: "state_transition_processing_milliseconds",
+        Help: "Total time to call a state transition in onBlock()",
+    })
+    processAttsElapsedTime = promauto.NewHistogram(
+        prometheus.HistogramOpts{
+            Name:    "process_attestations_milliseconds",
+            Help:    "Captures latency for process attestations (forkchoice) in milliseconds",
+            Buckets: []float64{1, 5, 20, 100, 500, 1000},
+        },
+    )
+    newAttHeadElapsedTime = promauto.NewHistogram(
+        prometheus.HistogramOpts{
+            Name:    "new_att_head_milliseconds",
+            Help:    "Captures latency for new attestation head in milliseconds",
+            Buckets: []float64{1, 5, 20, 100, 500, 1000},
+        },
+    )
+    newBlockHeadElapsedTime = promauto.NewHistogram(
+        prometheus.HistogramOpts{
+            Name:    "new_block_head_milliseconds",
+            Help:    "Captures latency for new block head in milliseconds",
+            Buckets: []float64{1, 5, 20, 100, 500, 1000},
+        },
+    )
 )

 // reportSlotMetrics reports slot related metrics.
@@ -98,6 +98,7 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
     if err := consensusblocks.BeaconBlockIsNil(signed); err != nil {
         return invalidBlock{error: err}
     }
+    startTime := time.Now()
     b := signed.Block()

     preState, err := s.getBlockPreState(ctx, b)
@@ -115,10 +116,13 @@
     if err != nil {
         return err
     }
+    stateTransitionStartTime := time.Now()
     postState, err := transition.ExecuteStateTransition(ctx, preState, signed)
     if err != nil {
         return invalidBlock{error: err}
     }
+    stateTransitionProcessingTime.Observe(float64(time.Since(stateTransitionStartTime).Milliseconds()))
+
     postStateVersion, postStateHeader, err := getStateVersionAndPayload(postState)
     if err != nil {
         return err
@@ -182,10 +186,14 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
         msg := fmt.Sprintf("could not read balances for state w/ justified checkpoint %#x", justified.Root)
         return errors.Wrap(err, msg)
     }
+
+    start := time.Now()
     headRoot, err := s.cfg.ForkChoiceStore.Head(ctx, balances)
     if err != nil {
         log.WithError(err).Warn("Could not update head")
     }
+    newBlockHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
+
     if err := s.notifyEngineIfChangedHead(ctx, headRoot); err != nil {
         return err
     }
@@ -262,7 +270,11 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo

     }
     defer reportAttestationInclusion(b)
-    return s.handleEpochBoundary(ctx, postState)
+    if err := s.handleEpochBoundary(ctx, postState); err != nil {
+        return err
+    }
+    onBlockProcessingTime.Observe(float64(time.Since(startTime).Milliseconds()))
+    return nil
 }

 func getStateVersionAndPayload(st state.BeaconState) (int, *enginev1.ExecutionPayloadHeader, error) {
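The onBlock instrumentation above follows one pattern throughout: capture time.Now() at the start of a region and Observe the elapsed milliseconds on a promauto metric at the end. A self-contained sketch of that pattern; the metric name here is hypothetical, the real ones are on_block_processing_milliseconds and friends from the metrics hunk:

    package metricsdemo

    import (
        "time"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promauto"
    )

    // exampleProcessingTime is a hypothetical metric used only for illustration.
    var exampleProcessingTime = promauto.NewSummary(prometheus.SummaryOpts{
        Name: "example_processing_milliseconds",
        Help: "Total time in milliseconds to complete the example step",
    })

    // timedStep runs step and records its wall-clock duration in milliseconds.
    func timedStep(step func()) {
        start := time.Now()
        step()
        exampleProcessingTime.Observe(float64(time.Since(start).Milliseconds()))
    }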
@@ -135,11 +135,21 @@ func (s *Service) verifyBlkFinalizedSlot(b interfaces.BeaconBlock) error {
 }

 // updateFinalized saves the init sync blocks, finalized checkpoint, migrates
-// to cold old states and saves the last validated checkpoint to DB
+// to cold old states and saves the last validated checkpoint to DB. It returns
+// early if the new checkpoint is older than the one on db.
 func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) error {
     ctx, span := trace.StartSpan(ctx, "blockChain.updateFinalized")
     defer span.End()

+    // return early if new checkpoint is not newer than the one in DB
+    currentFinalized, err := s.cfg.BeaconDB.FinalizedCheckpoint(ctx)
+    if err != nil {
+        return err
+    }
+    if cp.Epoch <= currentFinalized.Epoch {
+        return nil
+    }
+
     // Blocks need to be saved so that we can retrieve finalized block from
     // DB when migrating states.
     if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
@@ -267,7 +277,7 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
     if len(pendingNodes) == 1 {
         return nil
     }
-    if root != s.ensureRootNotZeros(finalized.Root) {
+    if root != s.ensureRootNotZeros(finalized.Root) && !s.ForkChoicer().HasNode(root) {
         return errNotDescendantOfFinalized
     }
     return s.cfg.ForkChoiceStore.InsertOptimisticChain(ctx, pendingNodes)
@@ -147,17 +147,22 @@ func (s *Service) UpdateHead(ctx context.Context) error {
     s.processAttestationsLock.Lock()
     defer s.processAttestationsLock.Unlock()

+    start := time.Now()
     s.processAttestations(ctx)
+    processAttsElapsedTime.Observe(float64(time.Since(start).Milliseconds()))

     justified := s.ForkChoicer().JustifiedCheckpoint()
     balances, err := s.justifiedBalances.get(ctx, justified.Root)
     if err != nil {
         return err
     }
+    start = time.Now()
     newHeadRoot, err := s.cfg.ForkChoiceStore.Head(ctx, balances)
     if err != nil {
         log.WithError(err).Warn("Resolving fork due to new attestation")
     }
+    newAttHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))

     s.headLock.RLock()
     if s.headRoot() != newHeadRoot {
         log.WithFields(logrus.Fields{
@@ -30,8 +30,7 @@ type WeakSubjectivityVerifier struct {
 // NewWeakSubjectivityVerifier validates a checkpoint, and if valid, uses it to initialize a weak subjectivity verifier.
 func NewWeakSubjectivityVerifier(wsc *ethpb.Checkpoint, db weakSubjectivityDB) (*WeakSubjectivityVerifier, error) {
     if wsc == nil || len(wsc.Root) == 0 || wsc.Epoch == 0 {
-        log.Info("--weak-subjectivity-checkpoint not provided. Prysm recommends providing a weak subjectivity checkpoint " +
-            "for nodes synced from genesis, or manual verification of block and state roots for checkpoint sync nodes.")
+        log.Debug("--weak-subjectivity-checkpoint not provided")
         return &WeakSubjectivityVerifier{
             enabled: false,
         }, nil
@@ -2,7 +2,6 @@ package builder

 import (
     "context"
-    "fmt"
     "time"

     "github.com/pkg/errors"
@@ -66,12 +65,12 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {

     // Is the builder up?
     if err := s.c.Status(ctx); err != nil {
-        return nil, fmt.Errorf("could not connect to builder: %v", err)
+        log.WithError(err).Error("Failed to check builder status")
+    } else {
+        log.WithField("endpoint", c.NodeURL()).Info("Builder has been configured")
+        log.Warn("Outsourcing block construction to external builders adds non-trivial delay to block propagation time. " +
+            "Builder-constructed blocks or fallback blocks may get orphaned. Use at your own risk!")
     }
-
-    log.WithField("endpoint", c.NodeURL()).Info("Builder has been configured")
-    log.Warn("Outsourcing block construction to external builders adds non-trivial delay to block propagation time. " +
-        "Builder-constructed blocks or fallback blocks may get orphaned. Use at your own risk!")
     return s, nil
 }
@@ -55,7 +55,7 @@ func (dc *DepositCache) PendingDeposits(ctx context.Context, untilBlk *big.Int)

     depositCntrs := dc.PendingContainers(ctx, untilBlk)

-    var deposits []*ethpb.Deposit
+    deposits := make([]*ethpb.Deposit, 0, len(depositCntrs))
     for _, dep := range depositCntrs {
         deposits = append(deposits, dep.Deposit)
     }
@@ -71,7 +71,7 @@ func (dc *DepositCache) PendingContainers(ctx context.Context, untilBlk *big.Int
     dc.depositsLock.RLock()
     defer dc.depositsLock.RUnlock()

-    var depositCntrs []*ethpb.DepositContainer
+    depositCntrs := make([]*ethpb.DepositContainer, 0, len(dc.pendingDeposits))
     for _, ctnr := range dc.pendingDeposits {
         if untilBlk == nil || untilBlk.Uint64() >= ctnr.Eth1BlockHeight {
             depositCntrs = append(depositCntrs, ctnr)
@@ -139,7 +139,7 @@ func (dc *DepositCache) PrunePendingDeposits(ctx context.Context, merkleTreeInde
     dc.depositsLock.Lock()
     defer dc.depositsLock.Unlock()

-    var cleanDeposits []*ethpb.DepositContainer
+    cleanDeposits := make([]*ethpb.DepositContainer, 0, len(dc.pendingDeposits))
     for _, dp := range dc.pendingDeposits {
         if dp.Index >= merkleTreeIndex {
             cleanDeposits = append(cleanDeposits, dp)
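All three deposit-cache hunks replace a nil slice declaration with make([]T, 0, cap): the element count is known up front, so the appends fill a single preallocated backing array instead of growing and re-copying it. The idiom in isolation (variable names illustrative):

    // Before: var out []*ethpb.Deposit — append may reallocate several times as it grows.
    // After: capacity is known from the source slice, so append never reallocates.
    out := make([]*ethpb.Deposit, 0, len(containers))
    for _, c := range containers {
        out = append(out, c.Deposit)
    }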
@@ -7,6 +7,7 @@ go_library(
         "beacon_committee.go",
         "block.go",
         "genesis.go",
+        "metrics.go",
         "randao.go",
         "rewards_penalties.go",
         "shuffle.go",
@@ -51,10 +51,11 @@ func ValidateSlotTargetEpoch(data *ethpb.AttestationData) error {
 // committee count as an argument allows cheaper computation at run time.
 //
 // Spec pseudocode definition:
-// def is_aggregator(state: BeaconState, slot: Slot, index: CommitteeIndex, slot_signature: BLSSignature) -> bool:
-//    committee = get_beacon_committee(state, slot, index)
-//    modulo = max(1, len(committee) // TARGET_AGGREGATORS_PER_COMMITTEE)
-//    return bytes_to_uint64(hash(slot_signature)[0:8]) % modulo == 0
+//
+//  def is_aggregator(state: BeaconState, slot: Slot, index: CommitteeIndex, slot_signature: BLSSignature) -> bool:
+//      committee = get_beacon_committee(state, slot, index)
+//      modulo = max(1, len(committee) // TARGET_AGGREGATORS_PER_COMMITTEE)
+//      return bytes_to_uint64(hash(slot_signature)[0:8]) % modulo == 0
 func IsAggregator(committeeCount uint64, slotSig []byte) (bool, error) {
     modulo := uint64(1)
     if committeeCount/params.BeaconConfig().TargetAggregatorsPerCommittee > 1 {
@@ -68,9 +69,10 @@ func IsAggregator(committeeCount uint64, slotSig []byte) (bool, error) {
 // AggregateSignature returns the aggregated signature of the input attestations.
 //
 // Spec pseudocode definition:
-// def get_aggregate_signature(attestations: Sequence[Attestation]) -> BLSSignature:
-//    signatures = [attestation.signature for attestation in attestations]
-//    return bls.Aggregate(signatures)
+//
+//  def get_aggregate_signature(attestations: Sequence[Attestation]) -> BLSSignature:
+//      signatures = [attestation.signature for attestation in attestations]
+//      return bls.Aggregate(signatures)
 func AggregateSignature(attestations []*ethpb.Attestation) (bls.Signature, error) {
     sigs := make([]bls.Signature, len(attestations))
     var err error
@@ -95,14 +97,15 @@ func IsAggregated(attestation *ethpb.Attestation) bool {
 //
 // Spec pseudocode definition:
 // def compute_subnet_for_attestation(committees_per_slot: uint64, slot: Slot, committee_index: CommitteeIndex) -> uint64:
-//    """
-//    Compute the correct subnet for an attestation for Phase 0.
-//    Note, this mimics expected future behavior where attestations will be mapped to their shard subnet.
-//    """
-//    slots_since_epoch_start = uint64(slot % SLOTS_PER_EPOCH)
-//    committees_since_epoch_start = committees_per_slot * slots_since_epoch_start
-//
-//    return uint64((committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT)
+//
+//      """
+//      Compute the correct subnet for an attestation for Phase 0.
+//      Note, this mimics expected future behavior where attestations will be mapped to their shard subnet.
+//      """
+//      slots_since_epoch_start = uint64(slot % SLOTS_PER_EPOCH)
+//      committees_since_epoch_start = committees_per_slot * slots_since_epoch_start
+//
+//      return uint64((committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT)
 func ComputeSubnetForAttestation(activeValCount uint64, att *ethpb.Attestation) uint64 {
     return ComputeSubnetFromCommitteeAndSlot(activeValCount, att.Data.CommitteeIndex, att.Data.Slot)
 }
@@ -112,14 +115,15 @@ func ComputeSubnetForAttestation(activeValCount uint64, att *ethpb.Attestation)
 //
 // Spec pseudocode definition:
 // def compute_subnet_for_attestation(committees_per_slot: uint64, slot: Slot, committee_index: CommitteeIndex) -> uint64:
-//    """
-//    Compute the correct subnet for an attestation for Phase 0.
-//    Note, this mimics expected future behavior where attestations will be mapped to their shard subnet.
-//    """
-//    slots_since_epoch_start = uint64(slot % SLOTS_PER_EPOCH)
-//    committees_since_epoch_start = committees_per_slot * slots_since_epoch_start
-//
-//    return uint64((committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT)
+//
+//      """
+//      Compute the correct subnet for an attestation for Phase 0.
+//      Note, this mimics expected future behavior where attestations will be mapped to their shard subnet.
+//      """
+//      slots_since_epoch_start = uint64(slot % SLOTS_PER_EPOCH)
+//      committees_since_epoch_start = committees_per_slot * slots_since_epoch_start
+//
+//      return uint64((committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT)
 func ComputeSubnetFromCommitteeAndSlot(activeValCount uint64, comIdx types.CommitteeIndex, attSlot types.Slot) uint64 {
     slotSinceStart := slots.SinceEpochStarts(attSlot)
     comCount := SlotCommitteeCount(activeValCount)
@@ -133,13 +137,15 @@ func ComputeSubnetFromCommitteeAndSlot(activeValCount uint64, comIdx types.Commi
 // slots.
 //
 // Example:
-//    ATTESTATION_PROPAGATION_SLOT_RANGE = 5
-//    clockDisparity = 24 seconds
-//    current_slot = 100
-//    invalid_attestation_slot = 92
-//    invalid_attestation_slot = 103
-//    valid_attestation_slot = 98
-//    valid_attestation_slot = 101
+//
+//  ATTESTATION_PROPAGATION_SLOT_RANGE = 5
+//  clockDisparity = 24 seconds
+//  current_slot = 100
+//  invalid_attestation_slot = 92
+//  invalid_attestation_slot = 103
+//  valid_attestation_slot = 98
+//  valid_attestation_slot = 101
+//
 //
 // In the attestation must be within the range of 95 to 102 in the example above.
 func ValidateAttestationTime(attSlot types.Slot, genesisTime time.Time, clockDisparity time.Duration) error {
     if err := slots.ValidateClock(attSlot, uint64(genesisTime.Unix())); err != nil {
@@ -170,13 +176,19 @@ func ValidateAttestationTime(attSlot types.Slot, genesisTime time.Time, clockDis
     lowerBounds := lowerTime.Add(-clockDisparity)

     // Verify attestation slot within the time range.
-    if attTime.Before(lowerBounds) || attTime.After(upperBounds) {
-        return fmt.Errorf(
-            "attestation slot %d not within attestation propagation range of %d to %d (current slot)",
-            attSlot,
-            lowerBoundsSlot,
-            currentSlot,
-        )
-    }
+    attError := fmt.Errorf(
+        "attestation slot %d not within attestation propagation range of %d to %d (current slot)",
+        attSlot,
+        lowerBoundsSlot,
+        currentSlot,
+    )
+    if attTime.Before(lowerBounds) {
+        attReceivedTooEarlyCount.Inc()
+        return attError
+    }
+    if attTime.After(upperBounds) {
+        attReceivedTooLateCount.Inc()
+        return attError
+    }
     return nil
 }
@@ -16,7 +16,7 @@ func UpdateGenesisEth1Data(state state.BeaconState, deposits []*ethpb.Deposit, e
     return nil, errors.New("no eth1data provided for genesis state")
     }

-    var leaves [][]byte
+    leaves := make([][]byte, 0, len(deposits))
     for _, deposit := range deposits {
         if deposit == nil || deposit.Data == nil {
             return nil, fmt.Errorf("nil deposit or deposit with nil data cannot be processed: %v", deposit)
beacon-chain/core/helpers/metrics.go (new file, 17 lines)

@@ -0,0 +1,17 @@
+package helpers
+
+import (
+    "github.com/prometheus/client_golang/prometheus"
+    "github.com/prometheus/client_golang/prometheus/promauto"
+)
+
+var (
+    attReceivedTooEarlyCount = promauto.NewCounter(prometheus.CounterOpts{
+        Name: "attestation_too_early_total",
+        Help: "Increased when an attestation is considered too early",
+    })
+    attReceivedTooLateCount = promauto.NewCounter(prometheus.CounterOpts{
+        Name: "attestation_too_late_total",
+        Help: "Increased when an attestation is considered too late",
+    })
+)
@@ -57,6 +57,7 @@ go_library(
         "//monitoring/tracing:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
         "//runtime/version:go_default_library",
+        "//time:go_default_library",
         "//time/slots:go_default_library",
         "@com_github_dgraph_io_ristretto//:go_default_library",
         "@com_github_ethereum_go_ethereum//common:go_default_library",
@@ -55,6 +55,14 @@ var (
         Name: "validator_entry_cache_delete_total",
         Help: "The total number of cache deletes on the validator entry cache.",
     })
+    stateReadingTime = promauto.NewSummary(prometheus.SummaryOpts{
+        Name: "db_beacon_state_reading_milliseconds",
+        Help: "Milliseconds it takes to read a beacon state from the DB",
+    })
+    stateSavingTime = promauto.NewSummary(prometheus.SummaryOpts{
+        Name: "db_beacon_state_saving_milliseconds",
+        Help: "Milliseconds it takes to save a beacon state to the DB",
+    })
 )

 // BlockCacheSize specifies 1000 slots worth of blocks cached, which
@@ -20,6 +20,7 @@ import (
     "github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
     "github.com/prysmaticlabs/prysm/v3/monitoring/tracing"
     ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
+    "github.com/prysmaticlabs/prysm/v3/time"
     "github.com/prysmaticlabs/prysm/v3/time/slots"
     bolt "go.etcd.io/bbolt"
     "go.opencensus.io/trace"
@@ -30,6 +31,7 @@ import (
 func (s *Store) State(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error) {
     ctx, span := trace.StartSpan(ctx, "BeaconDB.State")
     defer span.End()
+    startTime := time.Now()
     enc, err := s.stateBytes(ctx, blockRoot)
     if err != nil {
         return nil, err
@@ -44,7 +46,12 @@ func (s *Store) State(ctx context.Context, blockRoot [32]byte) (state.BeaconStat
         return nil, valErr
     }

-    return s.unmarshalState(ctx, enc, valEntries)
+    st, err := s.unmarshalState(ctx, enc, valEntries)
+    if err != nil {
+        return nil, err
+    }
+    stateReadingTime.Observe(float64(time.Since(startTime).Milliseconds()))
+    return st, err
 }

 // StateOrError is just like State(), except it only returns a non-error response
@@ -127,6 +134,7 @@ func (s *Store) SaveStates(ctx context.Context, states []state.ReadOnlyBeaconSta
     if states == nil {
         return errors.New("nil state")
     }
+    startTime := time.Now()
     multipleEncs := make([][]byte, len(states))
     for i, st := range states {
         stateBytes, err := marshalState(ctx, st)
@@ -136,7 +144,7 @@ func (s *Store) SaveStates(ctx context.Context, states []state.ReadOnlyBeaconSta
         multipleEncs[i] = stateBytes
     }

-    return s.db.Update(func(tx *bolt.Tx) error {
+    if err := s.db.Update(func(tx *bolt.Tx) error {
         bucket := tx.Bucket(stateBucket)
         for i, rt := range blockRoots {
             indicesByBucket := createStateIndicesFromStateSlot(ctx, states[i].Slot())
@@ -148,7 +156,11 @@ func (s *Store) SaveStates(ctx context.Context, states []state.ReadOnlyBeaconSta
             }
         }
         return nil
-    })
+    }); err != nil {
+        return err
+    }
+    stateSavingTime.Observe(float64(time.Since(startTime).Milliseconds()))
+    return nil
 }

 type withValidators interface {
@@ -760,8 +772,10 @@ func createStateIndicesFromStateSlot(ctx context.Context, slot types.Slot) map[s
 // Only following states would be kept:
 // 1.) state_slot % archived_interval == 0. (e.g. archived_interval=2048, states with slot 2048, 4096... etc)
 // 2.) archived_interval - archived_interval/3 < state_slot % archived_interval
-//    (e.g. archived_interval=2048, states with slots after 1365).
-//    This is to tolerate skip slots. Not every state lays on the boundary.
+//
+//  (e.g. archived_interval=2048, states with slots after 1365).
+//  This is to tolerate skip slots. Not every state lays on the boundary.
+//
 // 3.) state with current finalized root
 // 4.) unfinalized States
 func (s *Store) CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint types.Slot) error {
@@ -24,12 +24,18 @@ var (
     configMismatchLog = "Configuration mismatch between your execution client and Prysm. " +
         "Please check your execution client and restart it with the proper configuration. If this is not done, " +
         "your node will not be able to complete the proof-of-stake transition"
+    needsEnginePortLog = "Could not check execution client configuration. " +
+        "You are probably connecting to your execution client on the wrong port. For the Ethereum " +
+        "merge, you will need to connect to your " +
+        "execution client on port 8551 rather than 8545. This is known as the 'engine API' port and needs to be " +
+        "authenticated if connecting via HTTP. See our documentation on how to set up this up here " +
+        "https://docs.prylabs.network/docs/execution-node/authentication"
 )

 // Checks the transition configuration between Prysm and the connected execution node to ensure
 // there are no differences in terminal block difficulty and block hash.
 // If there are any discrepancies, we must log errors to ensure users can resolve
-//the problem and be ready for the merge transition.
+// the problem and be ready for the merge transition.
 func (s *Service) checkTransitionConfiguration(
     ctx context.Context, blockNotifications chan *feed.Event,
 ) {
@@ -48,10 +54,14 @@ func (s *Service) checkTransitionConfiguration(
     }
     err := s.ExchangeTransitionConfiguration(ctx, cfg)
     if err != nil {
-        if errors.Is(err, ErrConfigMismatch) {
+        switch {
+        case errors.Is(err, ErrConfigMismatch):
             log.WithError(err).Fatal(configMismatchLog)
+        case errors.Is(err, ErrMethodNotFound):
+            log.WithError(err).Error(needsEnginePortLog)
+        default:
+            log.WithError(err).Error("Could not check configuration values between execution and consensus client")
         }
-        log.WithError(err).Error("Could not check configuration values between execution and consensus client")
     }

     // We poll the execution client to see if the transition configuration has changed.
@@ -115,6 +125,9 @@ func (s *Service) handleExchangeConfigurationError(err error) {
         s.runError = err
         log.WithError(err).Error(configMismatchLog)
         return
+    } else if errors.Is(err, ErrMethodNotFound) {
+        log.WithError(err).Error(needsEnginePortLog)
+        return
     }
     log.WithError(err).Error("Could not check configuration values between execution and consensus client")
 }
@@ -537,22 +537,31 @@ func handleRPCError(err error) error {
     }
     switch e.ErrorCode() {
     case -32700:
+        errParseCount.Inc()
         return ErrParse
     case -32600:
+        errInvalidRequestCount.Inc()
         return ErrInvalidRequest
     case -32601:
+        errMethodNotFoundCount.Inc()
         return ErrMethodNotFound
     case -32602:
+        errInvalidParamsCount.Inc()
         return ErrInvalidParams
     case -32603:
+        errInternalCount.Inc()
         return ErrInternal
     case -38001:
+        errUnknownPayloadCount.Inc()
         return ErrUnknownPayload
     case -38002:
+        errInvalidForkchoiceStateCount.Inc()
         return ErrInvalidForkchoiceState
     case -38003:
+        errInvalidPayloadAttributesCount.Inc()
         return ErrInvalidPayloadAttributes
     case -32000:
+        errServerErrorCount.Inc()
         // Only -32000 status codes are data errors in the RPC specification.
         errWithData, ok := err.(rpc.DataError)
         if !ok {
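Each engine-API error code now increments a dedicated Prometheus counter before returning its sentinel error. A hypothetical table-driven equivalent, shown only to make the code-to-error pairing explicit; the real code keeps the explicit switch because -32000 needs the extra rpc.DataError inspection:

    // engineErrs is an illustrative sketch, not Prysm's implementation.
    var engineErrs = map[int]struct {
        counter prometheus.Counter
        err     error
    }{
        -32700: {errParseCount, ErrParse},
        -32600: {errInvalidRequestCount, ErrInvalidRequest},
        -32601: {errMethodNotFoundCount, ErrMethodNotFound},
        // ...the remaining codes follow the same shape
    }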
@@ -403,6 +403,10 @@ func (s *Service) processBlockInBatch(ctx context.Context, currentBlockNum uint6
         }
     }

+    s.latestEth1DataLock.RLock()
+    lastReqBlock := s.latestEth1Data.LastRequestedBlock
+    s.latestEth1DataLock.RUnlock()
+
     for _, filterLog := range logs {
         if filterLog.BlockNumber > currentBlockNum {
             if err := s.checkHeaderRange(ctx, currentBlockNum, filterLog.BlockNumber-1, headersMap, requestHeaders); err != nil {
@@ -415,6 +419,13 @@ func (s *Service) processBlockInBatch(ctx context.Context, currentBlockNum uint6
             currentBlockNum = filterLog.BlockNumber
         }
         if err := s.ProcessLog(ctx, filterLog); err != nil {
+            // In the event the execution client gives us a garbled/bad log
+            // we reset the last requested block to the previous valid block range. This
+            // prevents the beacon from advancing processing of logs to another range
+            // in the event of an execution client failure.
+            s.latestEth1DataLock.Lock()
+            s.latestEth1Data.LastRequestedBlock = lastReqBlock
+            s.latestEth1DataLock.Unlock()
             return 0, 0, err
         }
     }
@@ -31,6 +31,42 @@ var (
             Buckets: []float64{25, 50, 100, 200, 500, 1000, 2000, 4000},
         },
     )
+    errParseCount = promauto.NewCounter(prometheus.CounterOpts{
+        Name: "execution_parse_error_count",
+        Help: "The number of errors that occurred while parsing execution payload",
+    })
+    errInvalidRequestCount = promauto.NewCounter(prometheus.CounterOpts{
+        Name: "execution_invalid_request_count",
+        Help: "The number of errors that occurred due to invalid request",
+    })
+    errMethodNotFoundCount = promauto.NewCounter(prometheus.CounterOpts{
+        Name: "execution_method_not_found_count",
+        Help: "The number of errors that occurred due to method not found",
+    })
+    errInvalidParamsCount = promauto.NewCounter(prometheus.CounterOpts{
+        Name: "execution_invalid_params_count",
+        Help: "The number of errors that occurred due to invalid params",
+    })
+    errInternalCount = promauto.NewCounter(prometheus.CounterOpts{
+        Name: "execution_internal_error_count",
+        Help: "The number of errors that occurred due to internal error",
+    })
+    errUnknownPayloadCount = promauto.NewCounter(prometheus.CounterOpts{
+        Name: "execution_unknown_payload_count",
+        Help: "The number of errors that occurred due to unknown payload",
+    })
+    errInvalidForkchoiceStateCount = promauto.NewCounter(prometheus.CounterOpts{
+        Name: "execution_invalid_forkchoice_state_count",
+        Help: "The number of errors that occurred due to invalid forkchoice state",
+    })
+    errInvalidPayloadAttributesCount = promauto.NewCounter(prometheus.CounterOpts{
+        Name: "execution_invalid_payload_attributes_count",
+        Help: "The number of errors that occurred due to invalid payload attributes",
+    })
+    errServerErrorCount = promauto.NewCounter(prometheus.CounterOpts{
+        Name: "execution_server_error_count",
+        Help: "The number of errors that occurred due to server error",
+    })
     reconstructedExecutionPayloadCount = promauto.NewCounter(prometheus.CounterOpts{
         Name: "reconstructed_execution_payload_count",
         Help: "Count the number of execution payloads that are reconstructed using JSON-RPC from payload headers",
@@ -36,6 +36,14 @@ func WithHttpEndpointAndJWTSecret(endpointString string, secret []byte) Option {
     }
 }

+// WithHeaders adds headers to the execution node JSON-RPC requests.
+func WithHeaders(headers []string) Option {
+    return func(s *Service) error {
+        s.cfg.headers = headers
+        return nil
+    }
+}
+
 // WithDepositContractAddress for the deposit contract.
 func WithDepositContractAddress(addr common.Address) Option {
     return func(s *Service) error {
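A sketch of how the new option might be used when constructing the execution service, assuming the constructor follows the functional-options form used elsewhere in this diff (the NewService name and jwtSecret variable are assumptions). Header strings are "Key=Value"; as the connection hunk further down shows, everything after the first '=' is rejoined, so values may themselves contain '=':

    // Illustrative wiring, not taken verbatim from the repository.
    svc, err := NewService(ctx,
        WithHttpEndpointAndJWTSecret("http://localhost:8551", jwtSecret),
        WithHeaders([]string{"X-Custom=abc", "X-Token=a=b"}),
    )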
@@ -4,6 +4,7 @@ import (
     "context"
     "fmt"
     "net/url"
+    "strings"
     "time"

     "github.com/ethereum/go-ethereum/ethclient"
@@ -37,7 +38,13 @@ func (s *Service) setupExecutionClientConnections(ctx context.Context, currEndpo
     // Ensure we have the correct chain and deposit IDs.
     if err := ensureCorrectExecutionChain(ctx, fetcher); err != nil {
         client.Close()
-        return errors.Wrap(err, "could not make initial request to verify execution chain ID")
+        errStr := err.Error()
+        if strings.Contains(errStr, "401 Unauthorized") {
+            errStr = "could not verify execution chain ID as your connection is not authenticated. " +
+                "If connecting to your execution client via HTTP, you will need to set up JWT authentication. " +
+                "See our documentation here https://docs.prylabs.network/docs/execution-node/authentication"
+        }
+        return errors.Wrap(err, errStr)
     }
     s.updateConnectedETH1(true)
     s.runError = nil
@@ -113,7 +120,7 @@ func (s *Service) newRPCClientWithAuth(ctx context.Context, endpoint network.End
         if err != nil {
             return nil, err
         }
-    case "":
+    case "", "ipc":
         client, err = gethRPC.DialIPC(ctx, endpoint.Url)
         if err != nil {
             return nil, err
@@ -128,6 +135,16 @@ func (s *Service) newRPCClientWithAuth(ctx context.Context, endpoint network.End
         }
         client.SetHeader("Authorization", header)
     }
+    for _, h := range s.cfg.headers {
+        if h != "" {
+            keyValue := strings.Split(h, "=")
+            if len(keyValue) < 2 {
+                log.Warnf("Incorrect HTTP header flag format. Skipping %v", keyValue[0])
+                continue
+            }
+            client.SetHeader(keyValue[0], strings.Join(keyValue[1:], "="))
+        }
+    }
     return client, nil
 }
@@ -66,10 +66,6 @@ var (
     logThreshold = 8
     // period to log chainstart related information
     logPeriod = 1 * time.Minute
-    // threshold of how old we will accept an eth1 node's head to be.
-    eth1Threshold = 20 * time.Minute
-    // error when eth1 node is too far behind.
-    errFarBehind = errors.Errorf("eth1 head is more than %s behind from current wall clock time", eth1Threshold.String())
 )

 // ChainStartFetcher retrieves information pertaining to the chain start event
@@ -128,6 +124,7 @@ type config struct {
     eth1HeaderReqLimit      uint64
     beaconNodeStatsUpdater  BeaconNodeStatsUpdater
     currHttpEndpoint        network.Endpoint
+    headers                 []string
     finalizedStateAtStartup state.BeaconState
 }
@@ -316,11 +313,6 @@ func (s *Service) updateBeaconNodeStats() {
     s.cfg.beaconNodeStatsUpdater.Update(bs)
 }

-func (s *Service) updateCurrHttpEndpoint(endpoint network.Endpoint) {
-    s.cfg.currHttpEndpoint = endpoint
-    s.updateBeaconNodeStats()
-}
-
 func (s *Service) updateConnectedETH1(state bool) {
     s.connectedETH1 = state
     s.updateBeaconNodeStats()
@@ -608,11 +600,6 @@ func (s *Service) run(done <-chan struct{}) {
                 log.WithError(err).Debug("Could not fetch latest eth1 header")
                 continue
             }
-            if eth1HeadIsBehind(head.Time) {
-                s.pollConnectionStatus(s.ctx)
-                log.WithError(errFarBehind).Debug("Could not get an up to date eth1 header")
-                continue
-            }
             s.processBlockHeader(head)
             s.handleETH1FollowDistance()
         case <-chainstartTicker.C:
@@ -838,11 +825,3 @@ func dedupEndpoints(endpoints []string) []string {
     }
     return newEndpoints
 }
-
-// Checks if the provided timestamp is beyond the prescribed bound from
-// the current wall clock time.
-func eth1HeadIsBehind(timestamp uint64) bool {
-    timeout := prysmTime.Now().Add(-eth1Threshold)
-    // check that web3 client is syncing
-    return time.Unix(int64(timestamp), 0).Before(timeout) // lint:ignore uintcast -- timestamp will not exceed int64 in your lifetime.
-}
@@ -146,7 +146,7 @@ func TestStart_OK(t *testing.T) {
         WithDepositContractAddress(testAcc.ContractAddr),
         WithDatabase(beaconDB),
     )
-    require.NoError(t, err, "unable to setup web3 ETH1.0 chain service")
+    require.NoError(t, err, "unable to setup execution service")
     web3Service = setDefaultMocks(web3Service)
     web3Service.rpcClient = &mockExecution.RPCClient{Backend: testAcc.Backend}
     web3Service.depositContractCaller, err = contracts.NewDepositContractCaller(testAcc.ContractAddr, testAcc.Backend)
@@ -156,7 +156,7 @@ func TestStart_OK(t *testing.T) {
     web3Service.Start()
     if len(hook.Entries) > 0 {
         msg := hook.LastEntry().Message
-        want := "Could not connect to ETH1.0 chain RPC client"
+        want := "Could not connect to execution endpoint"
         if strings.Contains(want, msg) {
             t.Errorf("incorrect log, expected %s, got %s", want, msg)
         }
@@ -752,15 +752,6 @@ func TestService_ValidateDepositContainers(t *testing.T) {
     }
 }

-func TestTimestampIsChecked(t *testing.T) {
-    timestamp := uint64(time.Now().Unix())
-    assert.Equal(t, false, eth1HeadIsBehind(timestamp))
-
-    // Give an older timestmap beyond threshold.
-    timestamp = uint64(time.Now().Add(-eth1Threshold).Add(-1 * time.Minute).Unix())
-    assert.Equal(t, true, eth1HeadIsBehind(timestamp))
-}
-
 func TestETH1Endpoints(t *testing.T) {
     server, firstEndpoint, err := mockExecution.SetupRPCServer()
     require.NoError(t, err)
@@ -18,6 +18,7 @@ go_library(
         "//beacon-chain/state:go_default_library",
         "//config/fieldparams:go_default_library",
         "//consensus-types/primitives:go_default_library",
+        "//proto/eth/v1:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
     ],
 )
@@ -31,6 +31,7 @@ go_library(
         "//config/params:go_default_library",
         "//consensus-types/primitives:go_default_library",
         "//encoding/bytesutil:go_default_library",
+        "//proto/eth/v1:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
         "//runtime/version:go_default_library",
         "//time/slots:go_default_library",
@@ -62,12 +63,14 @@ go_test(
         "//beacon-chain/forkchoice/types:go_default_library",
         "//beacon-chain/state:go_default_library",
         "//beacon-chain/state/v3:go_default_library",
+        "//config/features:go_default_library",
         "//config/params:go_default_library",
         "//consensus-types/blocks:go_default_library",
         "//consensus-types/primitives:go_default_library",
         "//crypto/hash:go_default_library",
         "//encoding/bytesutil:go_default_library",
         "//proto/engine/v1:go_default_library",
+        "//proto/eth/v1:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
         "//testing/assert:go_default_library",
         "//testing/require:go_default_library",
@@ -3,6 +3,7 @@ package doublylinkedtree

 import (
     "context"
     "fmt"
+    "time"

     "github.com/pkg/errors"
     "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/blocks"
@@ -14,6 +15,7 @@ import (
     "github.com/prysmaticlabs/prysm/v3/config/params"
     types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
     "github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
+    v1 "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"
     ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
     "github.com/prysmaticlabs/prysm/v3/runtime/version"
     "github.com/prysmaticlabs/prysm/v3/time/slots"
@@ -82,7 +84,8 @@ func (f *ForkChoice) Head(

     jc := f.JustifiedCheckpoint()
     fc := f.FinalizedCheckpoint()
-    if err := f.store.treeRootNode.updateBestDescendant(ctx, jc.Epoch, fc.Epoch); err != nil {
+    currentEpoch := slots.EpochsSinceGenesis(time.Unix(int64(f.store.genesisTime), 0))
+    if err := f.store.treeRootNode.updateBestDescendant(ctx, jc.Epoch, fc.Epoch, currentEpoch); err != nil {
         return [32]byte{}, errors.Wrap(err, "could not update best descendant")
     }
     return f.store.head(ctx)
@@ -490,30 +493,31 @@ func (f *ForkChoice) UpdateFinalizedCheckpoint(fc *forkchoicetypes.Checkpoint) e
 }

 // CommonAncestorRoot returns the common ancestor root between the two block roots r1 and r2.
-func (f *ForkChoice) CommonAncestorRoot(ctx context.Context, r1 [32]byte, r2 [32]byte) ([32]byte, error) {
+func (f *ForkChoice) CommonAncestor(ctx context.Context, r1 [32]byte, r2 [32]byte) ([32]byte, types.Slot, error) {
     ctx, span := trace.StartSpan(ctx, "doublelinkedtree.CommonAncestorRoot")
     defer span.End()

-    // Do nothing if the input roots are the same.
-    if r1 == r2 {
-        return r1, nil
-    }
-
     f.store.nodesLock.RLock()
     defer f.store.nodesLock.RUnlock()

     n1, ok := f.store.nodeByRoot[r1]
     if !ok || n1 == nil {
-        return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
+        return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
     }

+    // Do nothing if the input roots are the same.
+    if r1 == r2 {
+        return r1, n1.slot, nil
+    }
+
     n2, ok := f.store.nodeByRoot[r2]
     if !ok || n2 == nil {
-        return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
+        return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
     }

     for {
         if ctx.Err() != nil {
-            return [32]byte{}, ctx.Err()
+            return [32]byte{}, 0, ctx.Err()
         }
         if n1.slot > n2.slot {
             n1 = n1.parent
@@ -521,17 +525,17 @@ func (f *ForkChoice) CommonAncestorRoot(ctx context.Context, r1 [32]byte, r2 [32
             // This should not happen at runtime as the finalized
             // node has to be a common ancestor
             if n1 == nil {
-                return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
+                return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
             }
         } else {
             n2 = n2.parent
             // Reaches the end of the tree and unable to find common ancestor.
             if n2 == nil {
-                return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
+                return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
             }
         }
         if n1 == n2 {
-            return n1.root, nil
+            return n1.root, n1.slot, nil
         }
     }
 }
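The renamed CommonAncestor returns the fork point's slot alongside its root, which is exactly what saveHead consumes to compute reorg depth and distance. A usage sketch (variable names illustrative; math.Max is Prysm's uint64 helper):

    root, forkSlot, err := f.CommonAncestor(ctx, oldHeadRoot, newHeadRoot)
    if err != nil {
        // forkchoice.ErrUnknownCommonAncestor: one of the roots is not in the store.
        return err
    }
    depth := math.Max(uint64(oldHeadSlot-forkSlot), uint64(newHeadSlot-forkSlot))
    _, _ = root, depth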
@@ -612,3 +616,52 @@ func (f *ForkChoice) JustifiedPayloadBlockHash() [32]byte {
     }
     return node.payloadHash
 }
+
+// ForkChoiceDump returns a full dump of forkhoice.
+func (f *ForkChoice) ForkChoiceDump(ctx context.Context) (*v1.ForkChoiceResponse, error) {
+    jc := &v1.Checkpoint{
+        Epoch: f.store.justifiedCheckpoint.Epoch,
+        Root:  f.store.justifiedCheckpoint.Root[:],
+    }
+    bjc := &v1.Checkpoint{
+        Epoch: f.store.bestJustifiedCheckpoint.Epoch,
+        Root:  f.store.bestJustifiedCheckpoint.Root[:],
+    }
+    ujc := &v1.Checkpoint{
+        Epoch: f.store.unrealizedJustifiedCheckpoint.Epoch,
+        Root:  f.store.unrealizedJustifiedCheckpoint.Root[:],
+    }
+    fc := &v1.Checkpoint{
+        Epoch: f.store.finalizedCheckpoint.Epoch,
+        Root:  f.store.finalizedCheckpoint.Root[:],
+    }
+    ufc := &v1.Checkpoint{
+        Epoch: f.store.unrealizedFinalizedCheckpoint.Epoch,
+        Root:  f.store.unrealizedFinalizedCheckpoint.Root[:],
+    }
+    nodes := make([]*v1.ForkChoiceNode, 0, f.NodeCount())
+    var err error
+    if f.store.treeRootNode != nil {
+        nodes, err = f.store.treeRootNode.nodeTreeDump(ctx, nodes)
+        if err != nil {
+            return nil, err
+        }
+    }
+    var headRoot [32]byte
+    if f.store.headNode != nil {
+        headRoot = f.store.headNode.root
+    }
+    resp := &v1.ForkChoiceResponse{
+        JustifiedCheckpoint:           jc,
+        BestJustifiedCheckpoint:       bjc,
+        UnrealizedJustifiedCheckpoint: ujc,
+        FinalizedCheckpoint:           fc,
+        UnrealizedFinalizedCheckpoint: ufc,
+        ProposerBoostRoot:             f.store.proposerBoostRoot[:],
+        PreviousProposerBoostRoot:     f.store.previousProposerBoostRoot[:],
+        HeadRoot:                      headRoot[:],
+        ForkchoiceNodes:               nodes,
+    }
+    return resp, nil
+
+}
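A brief sketch of calling the new dump, e.g. from a debug handler (the logging is illustrative):

    dump, err := f.ForkChoiceDump(ctx)
    if err != nil {
        return err
    }
    log.WithField("nodes", len(dump.ForkchoiceNodes)).Info("Fork choice snapshot")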
@@ -208,7 +208,7 @@ func TestForkChoice_IsCanonicalReorg(t *testing.T) {
     require.Equal(t, uint64(10), f.store.nodeByRoot[[32]byte{'1'}].weight)
     require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'2'}].weight)

-    require.NoError(t, f.store.treeRootNode.updateBestDescendant(ctx, 1, 1))
+    require.NoError(t, f.store.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))
     require.DeepEqual(t, [32]byte{'3'}, f.store.treeRootNode.bestDescendant.root)
     f.store.nodesLock.Unlock()
@@ -408,73 +408,85 @@ func TestStore_CommonAncestor(t *testing.T) {
         r1       [32]byte
         r2       [32]byte
         wantRoot [32]byte
+        wantSlot types.Slot
     }{
         {
             name:     "Common ancestor between c and b is a",
             r1:       [32]byte{'c'},
             r2:       [32]byte{'b'},
             wantRoot: [32]byte{'a'},
+            wantSlot: 0,
         },
         {
             name:     "Common ancestor between c and d is a",
             r1:       [32]byte{'c'},
             r2:       [32]byte{'d'},
             wantRoot: [32]byte{'a'},
+            wantSlot: 0,
         },
         {
             name:     "Common ancestor between c and e is a",
             r1:       [32]byte{'c'},
             r2:       [32]byte{'e'},
             wantRoot: [32]byte{'a'},
+            wantSlot: 0,
         },
         {
             name:     "Common ancestor between g and f is c",
             r1:       [32]byte{'g'},
             r2:       [32]byte{'f'},
             wantRoot: [32]byte{'c'},
+            wantSlot: 2,
         },
         {
             name:     "Common ancestor between f and h is c",
             r1:       [32]byte{'f'},
             r2:       [32]byte{'h'},
             wantRoot: [32]byte{'c'},
+            wantSlot: 2,
         },
         {
             name:     "Common ancestor between g and h is c",
             r1:       [32]byte{'g'},
             r2:       [32]byte{'h'},
             wantRoot: [32]byte{'c'},
+            wantSlot: 2,
         },
         {
             name:     "Common ancestor between b and h is a",
             r1:       [32]byte{'b'},
             r2:       [32]byte{'h'},
             wantRoot: [32]byte{'a'},
+            wantSlot: 0,
         },
         {
             name:     "Common ancestor between e and h is a",
             r1:       [32]byte{'e'},
             r2:       [32]byte{'h'},
             wantRoot: [32]byte{'a'},
+            wantSlot: 0,
         },
         {
             name:     "Common ancestor between e and h is a",
             r1:       [32]byte{'j'},
             r2:       [32]byte{'g'},
             wantRoot: [32]byte{'c'},
+            wantSlot: 2,
         },
     }
     for _, tc := range tests {
         t.Run(tc.name, func(t *testing.T) {
-            gotRoot, err := f.CommonAncestorRoot(ctx, tc.r1, tc.r2)
+            gotRoot, gotSlot, err := f.CommonAncestor(ctx, tc.r1, tc.r2)
             require.NoError(t, err)
             require.Equal(t, tc.wantRoot, gotRoot)
+            require.Equal(t, tc.wantSlot, gotSlot)
         })
     }
@@ -497,46 +509,53 @@ func TestStore_CommonAncestor(t *testing.T) {
|
||||
r1 [32]byte
|
||||
r2 [32]byte
|
||||
wantRoot [32]byte
|
||||
wantSlot types.Slot
|
||||
}{
|
||||
{
|
||||
name: "Common ancestor between a and b is a",
|
||||
r1: [32]byte{'a'},
|
||||
r2: [32]byte{'b'},
|
||||
wantRoot: [32]byte{'a'},
|
||||
wantSlot: 0,
|
||||
},
|
||||
{
|
||||
name: "Common ancestor between b and d is b",
|
||||
r1: [32]byte{'d'},
|
||||
r2: [32]byte{'b'},
|
||||
wantRoot: [32]byte{'b'},
|
||||
wantSlot: 1,
|
||||
},
|
||||
{
|
||||
name: "Common ancestor between d and a is a",
|
||||
r1: [32]byte{'d'},
|
||||
r2: [32]byte{'a'},
|
||||
wantRoot: [32]byte{'a'},
|
||||
wantSlot: 0,
|
||||
},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
gotRoot, err := f.CommonAncestorRoot(ctx, tc.r1, tc.r2)
|
||||
gotRoot, gotSlot, err := f.CommonAncestor(ctx, tc.r1, tc.r2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.wantRoot, gotRoot)
|
||||
require.Equal(t, tc.wantSlot, gotSlot)
|
||||
})
|
||||
}
|
||||
|
||||
// Equal inputs should return the same root.
|
||||
r, err := f.CommonAncestorRoot(ctx, [32]byte{'b'}, [32]byte{'b'})
|
||||
r, s, err := f.CommonAncestor(ctx, [32]byte{'b'}, [32]byte{'b'})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [32]byte{'b'}, r)
|
||||
require.Equal(t, types.Slot(1), s)
|
||||
// Requesting finalized root (last node) should return the same root.
|
||||
r, err = f.CommonAncestorRoot(ctx, [32]byte{'a'}, [32]byte{'a'})
|
||||
r, s, err = f.CommonAncestor(ctx, [32]byte{'a'}, [32]byte{'a'})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [32]byte{'a'}, r)
|
||||
require.Equal(t, types.Slot(0), s)
|
||||
// Requesting unknown root
|
||||
_, err = f.CommonAncestorRoot(ctx, [32]byte{'a'}, [32]byte{'z'})
|
||||
_, _, err = f.CommonAncestor(ctx, [32]byte{'a'}, [32]byte{'z'})
|
||||
require.ErrorIs(t, err, forkchoice.ErrUnknownCommonAncestor)
|
||||
_, err = f.CommonAncestorRoot(ctx, [32]byte{'z'}, [32]byte{'a'})
|
||||
_, _, err = f.CommonAncestor(ctx, [32]byte{'z'}, [32]byte{'a'})
|
||||
require.ErrorIs(t, err, forkchoice.ErrUnknownCommonAncestor)
|
||||
n := &Node{
|
||||
slot: 100,
|
||||
@@ -550,7 +569,7 @@ func TestStore_CommonAncestor(t *testing.T) {
|
||||
|
||||
f.store.nodeByRoot[[32]byte{'y'}] = n
|
||||
// broken link
|
||||
_, err = f.CommonAncestorRoot(ctx, [32]byte{'y'}, [32]byte{'a'})
|
||||
_, _, err = f.CommonAncestor(ctx, [32]byte{'y'}, [32]byte{'a'})
|
||||
require.ErrorIs(t, err, forkchoice.ErrUnknownCommonAncestor)
|
||||
}
|
||||
|
||||
|
||||
@@ -5,8 +5,10 @@ import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/features"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/params"
|
||||
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
|
||||
v1 "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"
|
||||
)
|
||||
|
||||
// depth returns the length of the path to the root of Fork Choice
|
||||
@@ -42,7 +44,7 @@ func (n *Node) applyWeightChanges(ctx context.Context) error {
|
||||
|
||||
// updateBestDescendant updates the best descendant of this node and its
|
||||
// children. This function assumes the caller has a lock on Store.nodesLock
|
||||
func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finalizedEpoch types.Epoch) error {
|
||||
func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finalizedEpoch, currentEpoch types.Epoch) error {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
@@ -58,10 +60,10 @@ func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finaliz
|
||||
if child == nil {
|
||||
return errors.Wrap(ErrNilNode, "could not update best descendant")
|
||||
}
|
||||
if err := child.updateBestDescendant(ctx, justifiedEpoch, finalizedEpoch); err != nil {
|
||||
if err := child.updateBestDescendant(ctx, justifiedEpoch, finalizedEpoch, currentEpoch); err != nil {
|
||||
return err
|
||||
}
|
||||
childLeadsToViableHead := child.leadsToViableHead(justifiedEpoch, finalizedEpoch)
|
||||
childLeadsToViableHead := child.leadsToViableHead(justifiedEpoch, finalizedEpoch, currentEpoch)
|
||||
if childLeadsToViableHead && !hasViableDescendant {
|
||||
// The child leads to a viable head, but the current
|
||||
// parent's best child doesn't.
|
||||
@@ -96,18 +98,24 @@ func (n *Node) updateBestDescendant(ctx context.Context, justifiedEpoch, finaliz
|
||||
// viableForHead returns true if the node is viable to head.
|
||||
// Any node with different finalized or justified epoch than
|
||||
// the ones in fork choice store should not be viable to head.
|
||||
func (n *Node) viableForHead(justifiedEpoch, finalizedEpoch types.Epoch) bool {
|
||||
func (n *Node) viableForHead(justifiedEpoch, finalizedEpoch, currentEpoch types.Epoch) bool {
|
||||
justified := justifiedEpoch == n.justifiedEpoch || justifiedEpoch == 0
|
||||
finalized := finalizedEpoch == n.finalizedEpoch || finalizedEpoch == 0
|
||||
if features.Get().EnableDefensivePull && !justified && justifiedEpoch+1 == currentEpoch {
|
||||
if n.unrealizedJustifiedEpoch+1 >= currentEpoch {
|
||||
justified = true
|
||||
finalized = true
|
||||
}
|
||||
}
|
||||
|
||||
return justified && finalized
|
||||
}
|
||||
|
||||
func (n *Node) leadsToViableHead(justifiedEpoch, finalizedEpoch types.Epoch) bool {
|
||||
func (n *Node) leadsToViableHead(justifiedEpoch, finalizedEpoch, currentEpoch types.Epoch) bool {
|
||||
if n.bestDescendant == nil {
|
||||
return n.viableForHead(justifiedEpoch, finalizedEpoch)
|
||||
return n.viableForHead(justifiedEpoch, finalizedEpoch, currentEpoch)
|
||||
}
|
||||
return n.bestDescendant.viableForHead(justifiedEpoch, finalizedEpoch)
|
||||
return n.bestDescendant.viableForHead(justifiedEpoch, finalizedEpoch, currentEpoch)
|
||||
}
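The defensive-pull branch added to viableForHead is the core of this change: a node whose realized justified epoch lags the store is still head-viable when justification advanced only in the previous epoch and the node's unrealized justified epoch is within one epoch of now. A standalone restatement of that predicate (illustrative names, not part of the diff; the real code also flips the finalized flag in the same branch, the sketch tracks only justification):

	package sketch

	// viableUnderDefensivePull restates the viability check above as a pure
	// function over epoch numbers.
	func viableUnderDefensivePull(nodeJustified, nodeUnrealizedJustified, storeJustified, currentEpoch uint64) bool {
		justified := storeJustified == nodeJustified || storeJustified == 0
		if !justified && storeJustified+1 == currentEpoch {
			// Defensive pull: trust the unrealized checkpoint if it is at
			// most one epoch behind the wall clock.
			justified = nodeUnrealizedJustified+1 >= currentEpoch
		}
		return justified
	}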
// setNodeAndParentValidated sets the current node and all the ancestors as validated (i.e. non-optimistic).

@@ -126,3 +134,38 @@ func (n *Node) setNodeAndParentValidated(ctx context.Context) error {
	}
	return n.parent.setNodeAndParentValidated(ctx)
}

// nodeTreeDump appends to the given list all the nodes descending from this one
func (n *Node) nodeTreeDump(ctx context.Context, nodes []*v1.ForkChoiceNode) ([]*v1.ForkChoiceNode, error) {
	if ctx.Err() != nil {
		return nil, ctx.Err()
	}
	var parentRoot [32]byte
	if n.parent != nil {
		parentRoot = n.parent.root
	}
	thisNode := &v1.ForkChoiceNode{
		Slot:                     n.slot,
		Root:                     n.root[:],
		ParentRoot:               parentRoot[:],
		JustifiedEpoch:           n.justifiedEpoch,
		FinalizedEpoch:           n.finalizedEpoch,
		UnrealizedJustifiedEpoch: n.unrealizedJustifiedEpoch,
		UnrealizedFinalizedEpoch: n.unrealizedFinalizedEpoch,
		Balance:                  n.balance,
		Weight:                   n.weight,
		ExecutionOptimistic:      n.optimistic,
		ExecutionPayload:         n.payloadHash[:],
		Timestamp:                n.timestamp,
	}

	nodes = append(nodes, thisNode)
	var err error
	for _, child := range n.children {
		nodes, err = child.nodeTreeDump(ctx, nodes)
		if err != nil {
			return nil, err
		}
	}
	return nodes, nil
}
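nodeTreeDump is a pre-order depth-first walk, so a parent is always appended before its children; the TestNode_SetFullyValidated hunk below relies on that ordering when it compares the dumped slice against storeNodes by index. The traversal pattern in isolation (a generic sketch, not the package's types):

	package sketch

	type treeNode struct {
		children []*treeNode
	}

	// preorder appends the node itself, then each subtree in child order,
	// mirroring the recursion in nodeTreeDump above.
	func preorder(n *treeNode, out []*treeNode) []*treeNode {
		out = append(out, n) // parent first ...
		for _, c := range n.children {
			out = preorder(c, out) // ... then each child's subtree
		}
		return out
	}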
@@ -6,6 +6,7 @@ import (

	"github.com/prysmaticlabs/prysm/v3/config/params"
	types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
	v1 "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"
	"github.com/prysmaticlabs/prysm/v3/testing/assert"
	"github.com/prysmaticlabs/prysm/v3/testing/require"
)

@@ -113,7 +114,7 @@ func TestNode_UpdateBestDescendant_HigherWeightChild(t *testing.T) {
	s := f.store
	s.nodeByRoot[indexToHash(1)].weight = 100
	s.nodeByRoot[indexToHash(2)].weight = 200
	assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1))
	assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))

	assert.Equal(t, 2, len(s.treeRootNode.children))
	assert.Equal(t, s.treeRootNode.children[1], s.treeRootNode.bestDescendant)

@@ -133,7 +134,7 @@ func TestNode_UpdateBestDescendant_LowerWeightChild(t *testing.T) {
	s := f.store
	s.nodeByRoot[indexToHash(1)].weight = 200
	s.nodeByRoot[indexToHash(2)].weight = 100
	assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1))
	assert.NoError(t, s.treeRootNode.updateBestDescendant(ctx, 1, 1, 1))

	assert.Equal(t, 2, len(s.treeRootNode.children))
	assert.Equal(t, s.treeRootNode.children[0], s.treeRootNode.bestDescendant)

@@ -173,7 +174,7 @@ func TestNode_ViableForHead(t *testing.T) {
		{&Node{finalizedEpoch: 3, justifiedEpoch: 4}, 4, 3, true},
	}
	for _, tc := range tests {
		got := tc.n.viableForHead(tc.justifiedEpoch, tc.finalizedEpoch)
		got := tc.n.viableForHead(tc.justifiedEpoch, tc.finalizedEpoch, 5)
		assert.Equal(t, tc.want, got)
	}
}

@@ -197,15 +198,17 @@ func TestNode_LeadsToViableHead(t *testing.T) {
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, state, blkRoot))

	require.Equal(t, true, f.store.treeRootNode.leadsToViableHead(4, 3))
	require.Equal(t, true, f.store.nodeByRoot[indexToHash(5)].leadsToViableHead(4, 3))
	require.Equal(t, false, f.store.nodeByRoot[indexToHash(2)].leadsToViableHead(4, 3))
	require.Equal(t, false, f.store.nodeByRoot[indexToHash(4)].leadsToViableHead(4, 3))
	require.Equal(t, true, f.store.treeRootNode.leadsToViableHead(4, 3, 5))
	require.Equal(t, true, f.store.nodeByRoot[indexToHash(5)].leadsToViableHead(4, 3, 5))
	require.Equal(t, false, f.store.nodeByRoot[indexToHash(2)].leadsToViableHead(4, 3, 5))
	require.Equal(t, false, f.store.nodeByRoot[indexToHash(4)].leadsToViableHead(4, 3, 5))
}

func TestNode_SetFullyValidated(t *testing.T) {
	f := setup(1, 1)
	ctx := context.Background()
	storeNodes := make([]*Node, 6)
	storeNodes[0] = f.store.treeRootNode
	// insert blocks in the fork pattern (optimistic status in parentheses)
	//
	// 0 (false) -- 1 (false) -- 2 (false) -- 3 (true) -- 4 (true)

@@ -215,20 +218,25 @@ func TestNode_SetFullyValidated(t *testing.T) {
	state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
	storeNodes[1] = f.store.nodeByRoot[blkRoot]
	require.NoError(t, f.SetOptimisticToValid(ctx, params.BeaconConfig().ZeroHash))
	state, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
	storeNodes[2] = f.store.nodeByRoot[blkRoot]
	require.NoError(t, f.SetOptimisticToValid(ctx, indexToHash(1)))
	state, blkRoot, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
	storeNodes[3] = f.store.nodeByRoot[blkRoot]
	state, blkRoot, err = prepareForkchoiceState(ctx, 4, indexToHash(4), indexToHash(3), params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
	storeNodes[4] = f.store.nodeByRoot[blkRoot]
	state, blkRoot, err = prepareForkchoiceState(ctx, 5, indexToHash(5), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
	storeNodes[5] = f.store.nodeByRoot[blkRoot]

	opt, err := f.IsOptimistic(indexToHash(5))
	require.NoError(t, err)

@@ -253,4 +261,22 @@ func TestNode_SetFullyValidated(t *testing.T) {
	opt, err = f.IsOptimistic(indexToHash(3))
	require.NoError(t, err)
	require.Equal(t, false, opt)

	respNodes := make([]*v1.ForkChoiceNode, 0)
	respNodes, err = f.store.treeRootNode.nodeTreeDump(ctx, respNodes)
	require.NoError(t, err)
	require.Equal(t, len(respNodes), f.NodeCount())

	for i, respNode := range respNodes {
		require.Equal(t, storeNodes[i].slot, respNode.Slot)
		require.DeepEqual(t, storeNodes[i].root[:], respNode.Root)
		require.Equal(t, storeNodes[i].balance, respNode.Balance)
		require.Equal(t, storeNodes[i].weight, respNode.Weight)
		require.Equal(t, storeNodes[i].optimistic, respNode.ExecutionOptimistic)
		require.Equal(t, storeNodes[i].justifiedEpoch, respNode.JustifiedEpoch)
		require.Equal(t, storeNodes[i].unrealizedJustifiedEpoch, respNode.UnrealizedJustifiedEpoch)
		require.Equal(t, storeNodes[i].finalizedEpoch, respNode.FinalizedEpoch)
		require.Equal(t, storeNodes[i].unrealizedFinalizedEpoch, respNode.UnrealizedFinalizedEpoch)
		require.Equal(t, storeNodes[i].timestamp, respNode.Timestamp)
	}
}

@@ -95,8 +95,8 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
	if bestDescendant == nil {
		bestDescendant = justifiedNode
	}

	if !bestDescendant.viableForHead(s.justifiedCheckpoint.Epoch, s.finalizedCheckpoint.Epoch) {
	currentEpoch := slots.EpochsSinceGenesis(time.Unix(int64(s.genesisTime), 0))
	if !bestDescendant.viableForHead(s.justifiedCheckpoint.Epoch, s.finalizedCheckpoint.Epoch, currentEpoch) {
		s.allTipsAreInvalid = true
		return [32]byte{}, fmt.Errorf("head at slot %d with weight %d is not eligible, finalizedEpoch, justified Epoch %d, %d != %d, %d",
			bestDescendant.slot, bestDescendant.weight/10e9, bestDescendant.finalizedEpoch, bestDescendant.justifiedEpoch, s.finalizedCheckpoint.Epoch, s.justifiedCheckpoint.Epoch)

@@ -142,6 +142,7 @@ func (s *Store) insert(ctx context.Context,
		unrealizedFinalizedEpoch: finalizedEpoch,
		optimistic:               true,
		payloadHash:              payloadHash,
		timestamp:                uint64(time.Now().Unix()),
	}

	s.nodeByPayload[payloadHash] = n

@@ -174,7 +175,7 @@ func (s *Store) insert(ctx context.Context,
		jEpoch := s.justifiedCheckpoint.Epoch
		fEpoch := s.finalizedCheckpoint.Epoch
		s.checkpointsLock.RUnlock()
		if err := s.treeRootNode.updateBestDescendant(ctx, jEpoch, fEpoch); err != nil {
		if err := s.treeRootNode.updateBestDescendant(ctx, jEpoch, fEpoch, slots.ToEpoch(currentSlot)); err != nil {
			return n, err
		}
	}

@@ -59,6 +59,7 @@ type Node struct {
	weight         uint64 // weight of this node: the total balance including children
	bestDescendant *Node  // bestDescendant node of this node.
	optimistic     bool   // true while the block has not yet been fully validated
	timestamp      uint64 // The timestamp when the node was inserted.
}

// Vote defines an individual validator's vote.

@@ -5,6 +5,7 @@ import (
	"testing"

	forkchoicetypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/types"
	"github.com/prysmaticlabs/prysm/v3/config/features"
	"github.com/prysmaticlabs/prysm/v3/config/params"
	types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v3/testing/require"

@@ -198,31 +199,36 @@ func TestStore_NoDeadLock(t *testing.T) {
// D justifies and comes late.
//
func TestStore_ForkNextEpoch(t *testing.T) {
	resetCfg := features.InitWithReset(&features.Flags{
		EnableDefensivePull: true,
	})
	defer resetCfg()

	f := setup(0, 0)
	ctx := context.Background()

	// Epoch 1 blocks (D does not arrive)
	state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
	state, blkRoot, err := prepareForkchoiceState(ctx, 92, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
	state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 0, 0)
	state, blkRoot, err = prepareForkchoiceState(ctx, 93, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 0, 0)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
	state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 0, 0)
	state, blkRoot, err = prepareForkchoiceState(ctx, 94, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 0, 0)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, state, blkRoot))

	// Epoch 2 blocks
	state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'c'}, [32]byte{'E'}, 0, 0)
	state, blkRoot, err = prepareForkchoiceState(ctx, 96, [32]byte{'e'}, [32]byte{'c'}, [32]byte{'E'}, 0, 0)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
	state, blkRoot, err = prepareForkchoiceState(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 0, 0)
	state, blkRoot, err = prepareForkchoiceState(ctx, 97, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 0, 0)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
	state, blkRoot, err = prepareForkchoiceState(ctx, 106, [32]byte{'g'}, [32]byte{'f'}, [32]byte{'G'}, 0, 0)
	state, blkRoot, err = prepareForkchoiceState(ctx, 98, [32]byte{'g'}, [32]byte{'f'}, [32]byte{'G'}, 0, 0)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
	state, blkRoot, err = prepareForkchoiceState(ctx, 107, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 0, 0)
	state, blkRoot, err = prepareForkchoiceState(ctx, 99, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 0, 0)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, state, blkRoot))

@@ -234,16 +240,25 @@ func TestStore_ForkNextEpoch(t *testing.T) {
	require.Equal(t, types.Epoch(0), f.JustifiedCheckpoint().Epoch)

	// D arrives late, D is head
	state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 0, 0)
	state, blkRoot, err = prepareForkchoiceState(ctx, 95, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 0, 0)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
	require.NoError(t, f.store.setUnrealizedJustifiedEpoch([32]byte{'d'}, 1))
	f.store.unrealizedJustifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 1}
	require.NoError(t, f.store.setUnrealizedJustifiedEpoch([32]byte{'d'}, 2))
	f.store.unrealizedJustifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 2}
	f.updateUnrealizedCheckpoints()
	headRoot, err = f.Head(ctx, []uint64{100})
	require.NoError(t, err)
	require.Equal(t, [32]byte{'d'}, headRoot)
	require.Equal(t, types.Epoch(1), f.JustifiedCheckpoint().Epoch)
	require.Equal(t, types.Epoch(2), f.JustifiedCheckpoint().Epoch)
	require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'d'}].weight)
	require.Equal(t, uint64(100), f.store.nodeByRoot[[32]byte{'h'}].weight)
	// Set current epoch to 3 and H's unrealized checkpoint, then check that H is head
	driftGenesisTime(f, 99, 0)
	require.NoError(t, f.store.setUnrealizedJustifiedEpoch([32]byte{'h'}, 2))
	headRoot, err = f.Head(ctx, []uint64{100})
	require.NoError(t, err)
	require.Equal(t, [32]byte{'h'}, headRoot)
	require.Equal(t, types.Epoch(2), f.JustifiedCheckpoint().Epoch)
	require.Equal(t, uint64(0), f.store.nodeByRoot[[32]byte{'d'}].weight)
	require.Equal(t, uint64(100), f.store.nodeByRoot[[32]byte{'h'}].weight)
}

@@ -7,6 +7,7 @@ import (
	"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
	fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
	types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
	v1 "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"
)

// ForkChoicer represents the full fork choice interface composed of all the sub-interfaces.

@@ -51,7 +52,7 @@ type Getter interface {
	ProposerBoost() [fieldparams.RootLength]byte
	HasParent(root [32]byte) bool
	AncestorRoot(ctx context.Context, root [32]byte, slot types.Slot) ([32]byte, error)
	CommonAncestorRoot(ctx context.Context, root1 [32]byte, root2 [32]byte) ([32]byte, error)
	CommonAncestor(ctx context.Context, root1 [32]byte, root2 [32]byte) ([32]byte, types.Slot, error)
	IsCanonical(root [32]byte) bool
	FinalizedCheckpoint() *forkchoicetypes.Checkpoint
	FinalizedPayloadBlockHash() [32]byte

@@ -62,6 +63,7 @@ type Getter interface {
	NodeCount() int
	HighestReceivedBlockSlot() types.Slot
	ReceivedBlocksLastEpoch() (uint64, error)
	ForkChoiceDump(context.Context) (*v1.ForkChoiceResponse, error)
}

// Setter allows setting fork choice information

@@ -32,6 +32,7 @@ go_library(
	"//consensus-types/primitives:go_default_library",
	"//encoding/bytesutil:go_default_library",
	"//math:go_default_library",
	"//proto/eth/v1:go_default_library",
	"//proto/prysm/v1alpha1:go_default_library",
	"//runtime/version:go_default_library",
	"//time/slots:go_default_library",

@@ -63,6 +64,7 @@ go_test(
	"//beacon-chain/forkchoice/types:go_default_library",
	"//beacon-chain/state:go_default_library",
	"//beacon-chain/state/v3:go_default_library",
	"//config/features:go_default_library",
	"//config/params:go_default_library",
	"//consensus-types/blocks:go_default_library",
	"//consensus-types/primitives:go_default_library",

@@ -17,6 +17,7 @@ import (
	types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
	pmath "github.com/prysmaticlabs/prysm/v3/math"
	v1 "github.com/prysmaticlabs/prysm/v3/proto/eth/v1"
	ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v3/runtime/version"
	"github.com/prysmaticlabs/prysm/v3/time/slots"

@@ -281,49 +282,51 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot types
}

// CommonAncestor returns the common ancestor root and slot of the two block roots r1 and r2.
func (f *ForkChoice) CommonAncestorRoot(ctx context.Context, r1 [32]byte, r2 [32]byte) ([32]byte, error) {
func (f *ForkChoice) CommonAncestor(ctx context.Context, r1 [32]byte, r2 [32]byte) ([32]byte, types.Slot, error) {
	ctx, span := trace.StartSpan(ctx, "protoArray.CommonAncestorRoot")
	defer span.End()

	// Do nothing if the two input roots are the same.
	if r1 == r2 {
		return r1, nil
	}
	f.store.nodesLock.RLock()
	defer f.store.nodesLock.RUnlock()

	i1, ok := f.store.nodesIndices[r1]
	if !ok || i1 >= uint64(len(f.store.nodes)) {
		return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
		return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
	}

	// Do nothing if the two input roots are the same.
	if r1 == r2 {
		n1 := f.store.nodes[i1]
		return r1, n1.slot, nil
	}

	i2, ok := f.store.nodesIndices[r2]
	if !ok || i2 >= uint64(len(f.store.nodes)) {
		return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
		return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
	}

	for {
		if ctx.Err() != nil {
			return [32]byte{}, ctx.Err()
			return [32]byte{}, 0, ctx.Err()
		}
		if i1 > i2 {
			n1 := f.store.nodes[i1]
			i1 = n1.parent
			// Reached the end of the tree without finding a common ancestor.
			if i1 >= uint64(len(f.store.nodes)) {
				return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
				return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
			}
		} else {
			n2 := f.store.nodes[i2]
			i2 = n2.parent
			// Reached the end of the tree without finding a common ancestor.
			if i2 >= uint64(len(f.store.nodes)) {
				return [32]byte{}, forkchoice.ErrUnknownCommonAncestor
				return [32]byte{}, 0, forkchoice.ErrUnknownCommonAncestor
			}
		}
		if i1 == i2 {
			n1 := f.store.nodes[i1]
			return n1.root, nil
			return n1.root, n1.slot, nil
		}
	}
}
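The loop above leans on a protoarray invariant: nodes are appended in insertion order, so every node's parent index is strictly smaller than its own. Repeatedly replacing the larger index with its parent therefore either meets the other pointer at the lowest common ancestor or walks off the array. The same walk on a bare parent-index slice (an illustrative sketch; the out-of-range sentinel stands in for protoarray's non-existent-node marker):

	package sketch

	// commonAncestorIdx finds the lowest common ancestor of i1 and i2 in a
	// tree encoded as parent indices, assuming parent[i] < i for every
	// non-root node and out-of-range values for "no parent".
	func commonAncestorIdx(parent []uint64, i1, i2 uint64) (uint64, bool) {
		limit := uint64(len(parent))
		for i1 != i2 {
			if i1 > i2 {
				i1 = parent[i1]
			} else {
				i2 = parent[i2]
			}
			if i1 >= limit || i2 >= limit {
				return 0, false // ran past the root: no common ancestor
			}
		}
		return i1, true
	}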
@@ -882,6 +885,15 @@ func (s *Store) viableForHead(node *Node) bool {
	// It's also viable if we are in genesis epoch.
	justified := s.justifiedCheckpoint.Epoch == node.justifiedEpoch || s.justifiedCheckpoint.Epoch == 0
	finalized := s.finalizedCheckpoint.Epoch == node.finalizedEpoch || s.finalizedCheckpoint.Epoch == 0
	if features.Get().EnableDefensivePull {
		currentEpoch := slots.EpochsSinceGenesis(time.Unix(int64(s.genesisTime), 0))
		if !justified && s.justifiedCheckpoint.Epoch+1 == currentEpoch {
			if node.unrealizedJustifiedEpoch+1 >= currentEpoch {
				justified = true
				finalized = true
			}
		}
	}

	return justified && finalized
}

@@ -1079,3 +1091,7 @@ func (f *ForkChoice) ReceivedBlocksLastEpoch() (uint64, error) {
	}
	return count, nil
}

func (*ForkChoice) ForkChoiceDump(_ context.Context) (*v1.ForkChoiceResponse, error) {
	return nil, errors.New("ForkChoiceDump is not supported by protoarray")
}

@@ -7,6 +7,7 @@ import (

	"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice"
	forkchoicetypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/types"
	"github.com/prysmaticlabs/prysm/v3/config/features"
	"github.com/prysmaticlabs/prysm/v3/config/params"
	"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
	types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"

@@ -677,73 +678,85 @@ func TestStore_CommonAncestor(t *testing.T) {
		r1       [32]byte
		r2       [32]byte
		wantRoot [32]byte
		wantSlot types.Slot
	}{
		{
			name:     "Common ancestor between c and b is a",
			r1:       [32]byte{'c'},
			r2:       [32]byte{'b'},
			wantRoot: [32]byte{'a'},
			wantSlot: 0,
		},
		{
			name:     "Common ancestor between c and d is a",
			r1:       [32]byte{'c'},
			r2:       [32]byte{'d'},
			wantRoot: [32]byte{'a'},
			wantSlot: 0,
		},
		{
			name:     "Common ancestor between c and e is a",
			r1:       [32]byte{'c'},
			r2:       [32]byte{'e'},
			wantRoot: [32]byte{'a'},
			wantSlot: 0,
		},
		{
			name:     "Common ancestor between g and f is c",
			r1:       [32]byte{'g'},
			r2:       [32]byte{'f'},
			wantRoot: [32]byte{'c'},
			wantSlot: 2,
		},
		{
			name:     "Common ancestor between f and h is c",
			r1:       [32]byte{'f'},
			r2:       [32]byte{'h'},
			wantRoot: [32]byte{'c'},
			wantSlot: 2,
		},
		{
			name:     "Common ancestor between g and h is c",
			r1:       [32]byte{'g'},
			r2:       [32]byte{'h'},
			wantRoot: [32]byte{'c'},
			wantSlot: 2,
		},
		{
			name:     "Common ancestor between b and h is a",
			r1:       [32]byte{'b'},
			r2:       [32]byte{'h'},
			wantRoot: [32]byte{'a'},
			wantSlot: 0,
		},
		{
			name:     "Common ancestor between e and h is a",
			r1:       [32]byte{'e'},
			r2:       [32]byte{'h'},
			wantRoot: [32]byte{'a'},
			wantSlot: 0,
		},
		{
			name:     "Common ancestor between i and f is c",
			r1:       [32]byte{'i'},
			r2:       [32]byte{'f'},
			wantRoot: [32]byte{'c'},
			wantSlot: 2,
		},
		{
			name:     "Common ancestor between j and g is c",
			r1:       [32]byte{'j'},
			r2:       [32]byte{'g'},
			wantRoot: [32]byte{'c'},
			wantSlot: 2,
		},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			gotRoot, err := f.CommonAncestorRoot(ctx, tc.r1, tc.r2)
			gotRoot, gotSlot, err := f.CommonAncestor(ctx, tc.r1, tc.r2)
			require.NoError(t, err)
			require.Equal(t, tc.wantRoot, gotRoot)
			require.Equal(t, tc.wantSlot, gotSlot)
		})
	}

@@ -766,52 +779,59 @@ func TestStore_CommonAncestor(t *testing.T) {
		r1       [32]byte
		r2       [32]byte
		wantRoot [32]byte
		wantSlot types.Slot
	}{
		{
			name:     "Common ancestor between a and b is a",
			r1:       [32]byte{'a'},
			r2:       [32]byte{'b'},
			wantRoot: [32]byte{'a'},
			wantSlot: 0,
		},
		{
			name:     "Common ancestor between b and d is b",
			r1:       [32]byte{'d'},
			r2:       [32]byte{'b'},
			wantRoot: [32]byte{'b'},
			wantSlot: 1,
		},
		{
			name:     "Common ancestor between d and a is a",
			r1:       [32]byte{'d'},
			r2:       [32]byte{'a'},
			wantRoot: [32]byte{'a'},
			wantSlot: 0,
		},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			gotRoot, err := f.CommonAncestorRoot(ctx, tc.r1, tc.r2)
			gotRoot, gotSlot, err := f.CommonAncestor(ctx, tc.r1, tc.r2)
			require.NoError(t, err)
			require.Equal(t, tc.wantRoot, gotRoot)
			require.Equal(t, tc.wantSlot, gotSlot)
		})
	}

	// Equal inputs should return the same root.
	r, err := f.CommonAncestorRoot(ctx, [32]byte{'b'}, [32]byte{'b'})
	r, s, err := f.CommonAncestor(ctx, [32]byte{'b'}, [32]byte{'b'})
	require.NoError(t, err)
	require.Equal(t, [32]byte{'b'}, r)
	require.Equal(t, types.Slot(1), s)
	// Requesting finalized root (last node) should return the same root.
	r, err = f.CommonAncestorRoot(ctx, [32]byte{'a'}, [32]byte{'a'})
	r, s, err = f.CommonAncestor(ctx, [32]byte{'a'}, [32]byte{'a'})
	require.NoError(t, err)
	require.Equal(t, [32]byte{'a'}, r)
	require.Equal(t, types.Slot(0), s)
	// Requesting unknown root
	_, err = f.CommonAncestorRoot(ctx, [32]byte{'a'}, [32]byte{'z'})
	_, _, err = f.CommonAncestor(ctx, [32]byte{'a'}, [32]byte{'z'})
	require.ErrorIs(t, err, forkchoice.ErrUnknownCommonAncestor)
	_, err = f.CommonAncestorRoot(ctx, [32]byte{'z'}, [32]byte{'a'})
	_, _, err = f.CommonAncestor(ctx, [32]byte{'z'}, [32]byte{'a'})
	require.ErrorIs(t, err, forkchoice.ErrUnknownCommonAncestor)
	state, blkRoot, err = prepareForkchoiceState(ctx, 100, [32]byte{'y'}, [32]byte{'z'}, [32]byte{}, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
	// broken link
	_, err = f.CommonAncestorRoot(ctx, [32]byte{'y'}, [32]byte{'a'})
	_, _, err = f.CommonAncestor(ctx, [32]byte{'y'}, [32]byte{'a'})
	require.ErrorIs(t, err, forkchoice.ErrUnknownCommonAncestor)
}

@@ -868,6 +888,43 @@ func TestStore_ViableForHead(t *testing.T) {
	}
}

func TestStore_ViableForHead_DefensivePull(t *testing.T) {
	resetCfg := features.InitWithReset(&features.Flags{
		EnableDefensivePull: true,
	})
	defer resetCfg()

	tests := []struct {
		n              *Node
		justifiedEpoch types.Epoch
		finalizedEpoch types.Epoch
		currentEpoch   types.Epoch
		want           bool
	}{
		{&Node{}, 0, 0, 0, true},
		{&Node{}, 1, 0, 1, false},
		{&Node{}, 0, 1, 1, false},
		{&Node{finalizedEpoch: 1, justifiedEpoch: 1}, 1, 1, 1, true},
		{&Node{finalizedEpoch: 1, justifiedEpoch: 1}, 2, 2, 2, false},
		{&Node{finalizedEpoch: 3, justifiedEpoch: 4}, 4, 3, 3, true},
		{&Node{unrealizedFinalizedEpoch: 3, unrealizedJustifiedEpoch: 4}, 3, 2, 4, true},
		{&Node{unrealizedFinalizedEpoch: 2, unrealizedJustifiedEpoch: 3}, 3, 2, 4, true},
		{&Node{unrealizedFinalizedEpoch: 1, unrealizedJustifiedEpoch: 2}, 3, 2, 4, false},
	}
	for _, tc := range tests {
		jc := &forkchoicetypes.Checkpoint{Epoch: tc.justifiedEpoch}
		fc := &forkchoicetypes.Checkpoint{Epoch: tc.finalizedEpoch}
		currentTime := uint64(time.Now().Unix())
		driftSeconds := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
		s := &Store{
			justifiedCheckpoint: jc,
			finalizedCheckpoint: fc,
			genesisTime:         currentTime - driftSeconds*uint64(tc.currentEpoch),
		}
		assert.Equal(t, tc.want, s.viableForHead(tc.n))
	}
}
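The genesisTime arithmetic in this test is how the suite fakes a wall-clock epoch: back-dating genesis by currentEpoch whole epochs makes slots.EpochsSinceGenesis report exactly that epoch. Spelled out in a self-contained sketch (mainnet-style constants assumed, 32 slots per epoch and 12 seconds per slot):

	package sketch

	import "time"

	// fakeGenesis returns a genesis timestamp that makes "epochs since
	// genesis" equal to wantEpoch under the assumed constants.
	func fakeGenesis(wantEpoch uint64) uint64 {
		const secondsPerEpoch = 32 * 12 // 384s per epoch
		return uint64(time.Now().Unix()) - wantEpoch*secondsPerEpoch
	}
	// e.g. slots.EpochsSinceGenesis(time.Unix(int64(fakeGenesis(4)), 0))
	// would evaluate to epoch 4, matching tc.currentEpoch above.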
func TestStore_HasParent(t *testing.T) {
	tests := []struct {
		m map[[32]byte]uint64

@@ -105,7 +105,6 @@ type BeaconNode struct {
	slasherAttestationsFeed *event.Feed
	finalizedStateAtStartUp state.BeaconState
	serviceFlagOpts         *serviceFlagOpts
	blockchainFlagOpts      []blockchain.Option
	GenesisInitializer      genesis.Initializer
	CheckpointInitializer   checkpoint.Initializer
}

@@ -1,20 +1,39 @@
package p2p

import (
	"strings"

	"github.com/libp2p/go-libp2p-core/peer"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var (
	knownAgentVersions = []string{
		"lighthouse",
		"nimbus",
		"prysm",
		"teku",
		"js-libp2p",
		"rust-libp2p",
	}
	p2pPeerCount = promauto.NewGaugeVec(prometheus.GaugeOpts{
		Name: "p2p_peer_count",
		Help: "The number of peers in a given state.",
	},
		[]string{"state"})
	totalPeerCount = promauto.NewGauge(prometheus.GaugeOpts{
		Name: "libp2p_peers",
		Help: "Tracks the total number of libp2p peers",
	})
	connectedPeersCount = promauto.NewGaugeVec(prometheus.GaugeOpts{
		Name: "connected_libp2p_peers",
		Help: "Tracks the total number of connected libp2p peers by agent string",
	},
		[]string{"agent"},
	)
	avgScoreConnectedClients = promauto.NewGaugeVec(prometheus.GaugeOpts{
		Name: "connected_libp2p_peers_average_scores",
		Help: "Tracks the overall p2p scores of connected libp2p peers by agent string",
	},
		[]string{"agent"},
	)
	repeatPeerConnections = promauto.NewCounter(prometheus.CounterOpts{
		Name: "p2p_repeat_attempts",
		Help: "The number of repeat attempts the connection handler is triggered for a peer.",

@@ -46,10 +65,60 @@ var (
)

func (s *Service) updateMetrics() {
	totalPeerCount.Set(float64(len(s.peers.Connected())))
	p2pPeerCount.WithLabelValues("Connected").Set(float64(len(s.peers.Connected())))
	connectedPeers := s.peers.Connected()
	p2pPeerCount.WithLabelValues("Connected").Set(float64(len(connectedPeers)))
	p2pPeerCount.WithLabelValues("Disconnected").Set(float64(len(s.peers.Disconnected())))
	p2pPeerCount.WithLabelValues("Connecting").Set(float64(len(s.peers.Connecting())))
	p2pPeerCount.WithLabelValues("Disconnecting").Set(float64(len(s.peers.Disconnecting())))
	p2pPeerCount.WithLabelValues("Bad").Set(float64(len(s.peers.Bad())))

	store := s.Host().Peerstore()
	numConnectedPeersByClient := make(map[string]float64)
	peerScoresByClient := make(map[string][]float64)
	for i := 0; i < len(connectedPeers); i++ {
		p := connectedPeers[i]
		pid, err := peer.Decode(p.String())
		if err != nil {
			log.WithError(err).Debug("Could not decode peer string")
			continue
		}

		// Get the agent data.
		rawAgent, err := store.Get(pid, "AgentVersion")
		agent, ok := rawAgent.(string)
		if err != nil || !ok {
			agent = "unknown"
		}
		foundName := "unknown"
		for _, knownAgent := range knownAgentVersions {
			// If the agent string matches one of our known agents, we set
			// the value to our own, sanitized string.
			if strings.Contains(strings.ToLower(agent), knownAgent) {
				foundName = knownAgent
			}
		}
		numConnectedPeersByClient[foundName] += 1

		// Get peer scoring data.
		overallScore := s.peers.Scorers().Score(pid)
		peerScoresByClient[foundName] = append(peerScoresByClient[foundName], overallScore)
	}
	for agent, total := range numConnectedPeersByClient {
		connectedPeersCount.WithLabelValues(agent).Set(total)
	}
	for agent, scoringData := range peerScoresByClient {
		avgScore := average(scoringData)
		avgScoreConnectedClients.WithLabelValues(agent).Set(avgScore)
	}
}

func average(xs []float64) float64 {
	if len(xs) == 0 {
		return 0
	}
	total := 0.0
	for _, v := range xs {
		total += v
	}
	return total / float64(len(xs))
}
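For a sense of the labels this produces, here is a minimal, self-contained sketch of the same agent classification (the agent strings in main are illustrative, not taken from a live peerstore):

	package main

	import (
		"fmt"
		"strings"
	)

	var knownAgentVersions = []string{"lighthouse", "nimbus", "prysm", "teku", "js-libp2p", "rust-libp2p"}

	// classify maps a raw libp2p agent string to a sanitized label,
	// mirroring the loop in updateMetrics above.
	func classify(agent string) string {
		for _, known := range knownAgentVersions {
			if strings.Contains(strings.ToLower(agent), known) {
				return known
			}
		}
		return "unknown"
	}

	func main() {
		fmt.Println(classify("Prysm/v3.1.0/abc123")) // prysm
		fmt.Println(classify("erigon/v2"))           // unknown
	}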
@@ -245,9 +245,7 @@ func (s *Service) Start() {
	})
	async.RunEvery(s.ctx, 30*time.Minute, s.Peers().Prune)
	async.RunEvery(s.ctx, params.BeaconNetworkConfig().RespTimeout, s.updateMetrics)
	async.RunEvery(s.ctx, refreshRate, func() {
		s.RefreshENR()
	})
	async.RunEvery(s.ctx, refreshRate, s.RefreshENR)
	async.RunEvery(s.ctx, 1*time.Minute, func() {
		log.WithFields(logrus.Fields{
			"inbound": len(s.peers.InboundConnected()),

@@ -50,6 +50,7 @@ func (_ *BeaconEndpointFactory) Paths() []string {
		"/eth/v2/debug/beacon/states/{state_id}",
		"/eth/v1/debug/beacon/heads",
		"/eth/v2/debug/beacon/heads",
		"/eth/v1/debug/beacon/forkchoice",
		"/eth/v1/config/fork_schedule",
		"/eth/v1/config/deposit_contract",
		"/eth/v1/config/spec",

@@ -185,6 +186,8 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
		endpoint.GetResponse = &forkChoiceHeadsResponseJson{}
	case "/eth/v2/debug/beacon/heads":
		endpoint.GetResponse = &v2ForkChoiceHeadsResponseJson{}
	case "/eth/v1/debug/beacon/forkchoice":
		endpoint.GetResponse = &forkchoiceResponse{}
	case "/eth/v1/config/fork_schedule":
		endpoint.GetResponse = &forkScheduleResponseJson{}
	case "/eth/v1/config/deposit_contract":

@@ -277,6 +277,18 @@ type submitContributionAndProofsRequestJson struct {
	Data []*signedContributionAndProofJson `json:"data"`
}

type forkchoiceResponse struct {
	JustifiedCheckpoint           *checkpointJson       `json:"justified_checkpoint"`
	FinalizedCheckpoint           *checkpointJson       `json:"finalized_checkpoint"`
	BestJustifiedCheckpoint       *checkpointJson       `json:"best_justified_checkpoint"`
	UnrealizedJustifiedCheckpoint *checkpointJson       `json:"unrealized_justified_checkpoint"`
	UnrealizedFinalizedCheckpoint *checkpointJson       `json:"unrealized_finalized_checkpoint"`
	ProposerBoostRoot             string                `json:"proposer_boost_root" hex:"true"`
	PreviousProposerBoostRoot     string                `json:"previous_proposer_boost_root" hex:"true"`
	HeadRoot                      string                `json:"head_root" hex:"true"`
	ForkChoiceNodes               []*forkChoiceNodeJson `json:"forkchoice_nodes"`
}
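For reference, the JSON this struct shapes would look roughly like the following hand-written illustration (values are made up and roots abbreviated; this was not captured from a running node):

	package sketch

	// sampleForkchoiceResponse illustrates a GET /eth/v1/debug/beacon/forkchoice body.
	const sampleForkchoiceResponse = `{
	  "justified_checkpoint": {"epoch": "3", "root": "0xb1..."},
	  "finalized_checkpoint": {"epoch": "2", "root": "0xa0..."},
	  "best_justified_checkpoint": {"epoch": "3", "root": "0xb1..."},
	  "unrealized_justified_checkpoint": {"epoch": "3", "root": "0xb1..."},
	  "unrealized_finalized_checkpoint": {"epoch": "2", "root": "0xa0..."},
	  "proposer_boost_root": "0xc2...",
	  "previous_proposer_boost_root": "0x00...",
	  "head_root": "0xc2...",
	  "forkchoice_nodes": [{"slot": "96", "root": "0xc2...", "execution_optimistic": false}]
	}`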
//----------------
// Reusable types.
//----------------

@@ -789,6 +801,20 @@ type signedValidatorRegistrationsRequestJson struct {
	Registrations []*signedValidatorRegistrationJson `json:"registrations"`
}

type forkChoiceNodeJson struct {
	Slot                     string `json:"slot"`
	Root                     string `json:"root" hex:"true"`
	ParentRoot               string `json:"parent_root" hex:"true"`
	JustifiedEpoch           string `json:"justified_epoch"`
	FinalizedEpoch           string `json:"finalized_epoch"`
	UnrealizedJustifiedEpoch string `json:"unrealized_justified_epoch"`
	UnrealizedFinalizedEpoch string `json:"unrealized_finalized_epoch"`
	Balance                  string `json:"balance"`
	Weight                   string `json:"weight"`
	ExecutionOptimistic      bool   `json:"execution_optimistic"`
	ExecutionPayload         string `json:"execution_payload" hex:"true"`
}

//----------------
// SSZ
// ---------------

@@ -31,6 +31,8 @@ go_test(
	deps = [
		"//beacon-chain/blockchain/testing:go_default_library",
		"//beacon-chain/db/testing:go_default_library",
		"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
		"//beacon-chain/forkchoice/types:go_default_library",
		"//beacon-chain/rpc/testutil:go_default_library",
		"//consensus-types/primitives:go_default_library",
		"//encoding/bytesutil:go_default_library",

@@ -39,6 +41,7 @@ go_test(
		"//testing/assert:go_default_library",
		"//testing/require:go_default_library",
		"//testing/util:go_default_library",
		"@io_bazel_rules_go//proto/wkt:empty_go_proto",
		"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
	],
)

@@ -140,3 +140,8 @@ func (ds *Server) ListForkChoiceHeadsV2(ctx context.Context, _ *emptypb.Empty) (

	return resp, nil
}

// GetForkChoice returns a dump of the fork choice store.
func (ds *Server) GetForkChoice(ctx context.Context, _ *emptypb.Empty) (*ethpbv1.ForkChoiceResponse, error) {
	return ds.ForkFetcher.ForkChoicer().ForkChoiceDump(ctx)
}

@@ -4,8 +4,11 @@ import (
	"context"
	"testing"

	"github.com/golang/protobuf/ptypes/empty"
	blockchainmock "github.com/prysmaticlabs/prysm/v3/beacon-chain/blockchain/testing"
	dbTest "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
	doublylinkedtree "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/doubly-linked-tree"
	forkchoicetypes "github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/types"
	"github.com/prysmaticlabs/prysm/v3/beacon-chain/rpc/testutil"
	types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"

@@ -237,3 +240,18 @@ func TestListForkChoiceHeadsV2(t *testing.T) {
		}
	})
}

func TestServer_GetForkChoice(t *testing.T) {
	store := doublylinkedtree.New()
	fRoot := [32]byte{'a'}
	jRoot := [32]byte{'b'}
	fc := &forkchoicetypes.Checkpoint{Epoch: 2, Root: fRoot}
	jc := &forkchoicetypes.Checkpoint{Epoch: 3, Root: jRoot}
	require.NoError(t, store.UpdateFinalizedCheckpoint(fc))
	require.NoError(t, store.UpdateJustifiedCheckpoint(jc))
	bs := &Server{ForkFetcher: &blockchainmock.ChainService{ForkChoiceStore: store}}
	res, err := bs.GetForkChoice(context.Background(), &empty.Empty{})
	require.NoError(t, err)
	require.Equal(t, types.Epoch(3), res.JustifiedCheckpoint.Epoch, "Did not get wanted justified epoch")
	require.Equal(t, types.Epoch(2), res.FinalizedCheckpoint.Epoch, "Did not get wanted finalized epoch")
}

@@ -16,4 +16,5 @@ type Server struct {
	HeadFetcher           blockchain.HeadFetcher
	StateFetcher          statefetcher.Fetcher
	OptimisticModeFetcher blockchain.OptimisticModeFetcher
	ForkFetcher           blockchain.ForkFetcher
}

@@ -54,11 +54,13 @@ go_test(
	"//beacon-chain/builder/testing:go_default_library",
	"//beacon-chain/cache:go_default_library",
	"//beacon-chain/core/altair:go_default_library",
	"//beacon-chain/core/helpers:go_default_library",
	"//beacon-chain/core/signing:go_default_library",
	"//beacon-chain/core/time:go_default_library",
	"//beacon-chain/core/transition:go_default_library",
	"//beacon-chain/db/testing:go_default_library",
	"//beacon-chain/execution/testing:go_default_library",
	"//beacon-chain/forkchoice/protoarray:go_default_library",
	"//beacon-chain/operations/attestations:go_default_library",
	"//beacon-chain/operations/attestations/mock:go_default_library",
	"//beacon-chain/operations/slashings:go_default_library",

@@ -73,6 +75,7 @@ go_test(
	"//beacon-chain/sync/initial-sync/testing:go_default_library",
	"//config/fieldparams:go_default_library",
	"//config/params:go_default_library",
	"//consensus-types/blocks:go_default_library",
	"//consensus-types/primitives:go_default_library",
	"//crypto/bls:go_default_library",
	"//encoding/bytesutil:go_default_library",

@@ -195,11 +195,11 @@ func (vs *Server) GetProposerDuties(ctx context.Context, req *ethpbv1.ProposerDu
// where `epoch` is described as `epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD <= current_epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD + 1`.
//
// Algorithm:
//   - Get the last valid epoch. This is the last epoch of the next sync committee period.
//   - Get the state for the requested epoch. If it's a future epoch from the current sync committee period
//     or an epoch from the next sync committee period, then get the current state.
//   - Get the state's current sync committee. If it's an epoch from the next sync committee period, then get the next sync committee.
//   - Get duties.
func (vs *Server) GetSyncCommitteeDuties(ctx context.Context, req *ethpbv2.SyncCommitteeDutiesRequest) (*ethpbv2.SyncCommitteeDutiesResponse, error) {
	ctx, span := trace.StartSpan(ctx, "validator.GetSyncCommitteeDuties")
	defer span.End()

@@ -404,7 +404,11 @@ func (vs *Server) ProduceBlockV2SSZ(ctx context.Context, req *ethpbv1.ProduceBlo
// ProduceBlindedBlock requests the beacon node to produce a valid unsigned blinded beacon block,
// which can then be signed by a proposer and submitted.
//
// Pre-Bellatrix, this endpoint will return a regular block.
// Under the following conditions, this endpoint will return an error:
//   - The node is syncing or in optimistic mode (after Bellatrix).
//   - The builder is not configured (after Bellatrix).
//   - The relayer circuit breaker is activated (after Bellatrix).
//   - The relayer responded with an error (after Bellatrix).
func (vs *Server) ProduceBlindedBlock(ctx context.Context, req *ethpbv1.ProduceBlockRequest) (*ethpbv2.ProduceBlindedBlockResponse, error) {
	ctx, span := trace.StartSpan(ctx, "validator.ProduceBlindedBlock")
	defer span.End()

@@ -413,57 +417,76 @@ func (vs *Server) ProduceBlindedBlock(ctx context.Context, req *ethpbv1.ProduceB
		// We simply return the error because it's already a gRPC error.
		return nil, err
	}

	v1alpha1req := &ethpbalpha.BlockRequest{
		Slot:         req.Slot,
		RandaoReveal: req.RandaoReveal,
		Graffiti:     req.Graffiti,
	}
	v1alpha1resp, err := vs.V1Alpha1Server.GetBeaconBlock(ctx, v1alpha1req)

	// Before Bellatrix, return a normal block.
	if req.Slot < types.Slot(params.BeaconConfig().BellatrixForkEpoch)*params.BeaconConfig().SlotsPerEpoch {
		v1alpha1resp, err := vs.V1Alpha1Server.GetBeaconBlock(ctx, v1alpha1req)
		if err != nil {
			// We simply return err because it's already of a gRPC error type.
			return nil, err
		}
		phase0Block, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Phase0)
		if ok {
			block, err := migration.V1Alpha1ToV1Block(phase0Block.Phase0)
			if err != nil {
				return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
			}
			return &ethpbv2.ProduceBlindedBlockResponse{
				Version: ethpbv2.Version_PHASE0,
				Data: &ethpbv2.BlindedBeaconBlockContainer{
					Block: &ethpbv2.BlindedBeaconBlockContainer_Phase0Block{Phase0Block: block},
				},
			}, nil
		}
		altairBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Altair)
		if ok {
			block, err := migration.V1Alpha1BeaconBlockAltairToV2(altairBlock.Altair)
			if err != nil {
				return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
			}
			return &ethpbv2.ProduceBlindedBlockResponse{
				Version: ethpbv2.Version_ALTAIR,
				Data: &ethpbv2.BlindedBeaconBlockContainer{
					Block: &ethpbv2.BlindedBeaconBlockContainer_AltairBlock{AltairBlock: block},
				},
			}, nil
		}
	}

	// After Bellatrix, return a blinded block.
	optimistic, err := vs.OptimisticModeFetcher.IsOptimistic(ctx)
	if err != nil {
		// We simply return err because it's already of a gRPC error type.
		return nil, err
		return nil, status.Errorf(codes.Internal, "Could not determine if the node is an optimistic node: %v", err)
	}
	phase0Block, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Phase0)
	if ok {
		block, err := migration.V1Alpha1ToV1Block(phase0Block.Phase0)
		if err != nil {
			return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
		}
		return &ethpbv2.ProduceBlindedBlockResponse{
			Version: ethpbv2.Version_PHASE0,
			Data: &ethpbv2.BlindedBeaconBlockContainer{
				Block: &ethpbv2.BlindedBeaconBlockContainer_Phase0Block{Phase0Block: block},
			},
		}, nil
	if optimistic {
		return nil, status.Errorf(codes.Unavailable, "The node is currently optimistic and cannot serve validators")
	}
	altairBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Altair)
	if ok {
		block, err := migration.V1Alpha1BeaconBlockAltairToV2(altairBlock.Altair)
		if err != nil {
			return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
		}
		return &ethpbv2.ProduceBlindedBlockResponse{
			Version: ethpbv2.Version_ALTAIR,
			Data: &ethpbv2.BlindedBeaconBlockContainer{
				Block: &ethpbv2.BlindedBeaconBlockContainer_AltairBlock{AltairBlock: block},
			},
		}, nil
	altairBlk, err := vs.V1Alpha1Server.BuildAltairBeaconBlock(ctx, v1alpha1req)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
	}
	bellatrixBlock, ok := v1alpha1resp.Block.(*ethpbalpha.GenericBeaconBlock_Bellatrix)
	if ok {
		block, err := migration.V1Alpha1BeaconBlockBellatrixToV2Blinded(bellatrixBlock.Bellatrix)
		if err != nil {
			return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
		}
		return &ethpbv2.ProduceBlindedBlockResponse{
			Version: ethpbv2.Version_BELLATRIX,
			Data: &ethpbv2.BlindedBeaconBlockContainer{
				Block: &ethpbv2.BlindedBeaconBlockContainer_BellatrixBlock{BellatrixBlock: block},
			},
		}, nil
	ok, b, err := vs.V1Alpha1Server.GetAndBuildBlindBlock(ctx, altairBlk)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "Could not prepare blind beacon block: %v", err)
	}
	return nil, status.Error(codes.InvalidArgument, "Unsupported block type")
	if !ok {
		return nil, status.Error(codes.Unavailable, "Builder is not available due to misconfiguration or the circuit breaker")
	}
	blk, err := migration.V1Alpha1BeaconBlockBlindedBellatrixToV2Blinded(b.GetBlindedBellatrix())
	if err != nil {
		return nil, status.Errorf(codes.Internal, "Could not prepare beacon block: %v", err)
	}
	return &ethpbv2.ProduceBlindedBlockResponse{
		Version: ethpbv2.Version_BELLATRIX,
		Data: &ethpbv2.BlindedBeaconBlockContainer{
			Block: &ethpbv2.BlindedBeaconBlockContainer_BellatrixBlock{BellatrixBlock: blk},
		},
	}, nil
}
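The fork cutover in this handler hinges on a single slot comparison. A minimal restatement of that check (illustrative parameters, not the package's actual types):

	package sketch

	// preBellatrix reports whether a slot falls before the Bellatrix fork,
	// mirroring the req.Slot comparison in ProduceBlindedBlock above.
	func preBellatrix(slot, bellatrixForkEpoch, slotsPerEpoch uint64) bool {
		return slot < bellatrixForkEpoch*slotsPerEpoch
	}
	// e.g. with bellatrixForkEpoch=1 and slotsPerEpoch=32, slot 31 still
	// yields a regular block while slot 32 takes the builder path.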
// ProduceBlindedBlockSSZ requests the beacon node to produce a valid unsigned blinded beacon block,

@@ -1012,19 +1035,6 @@ func v1ValidatorStatusToV1Alpha1(valStatus ethpbv1.ValidatorStatus) ethpbalpha.V
	}
}

func (vs *Server) v1BeaconBlock(ctx context.Context, req *ethpbv1.ProduceBlockRequest) (*ethpbv1.BeaconBlock, error) {
	v1alpha1req := &ethpbalpha.BlockRequest{
		Slot:         req.Slot,
		RandaoReveal: req.RandaoReveal,
		Graffiti:     req.Graffiti,
	}
	v1alpha1resp, err := vs.V1Alpha1Server.GetBeaconBlock(ctx, v1alpha1req)
	if err != nil {
		return nil, err
	}
	return migration.V1Alpha1ToV1Block(v1alpha1resp.GetPhase0())
}

func syncCommitteeDutiesLastValidEpoch(currentEpoch types.Epoch) types.Epoch {
	currentSyncPeriodIndex := currentEpoch / params.BeaconConfig().EpochsPerSyncCommitteePeriod
	// Return the last epoch of the next sync committee.
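The hunk cuts off before the return statement, so the exact expression is not shown here. Given the comment, the value would plausibly be the last epoch of the period after the current one; a hedged reconstruction, labeled as an assumption rather than the diff's code:

	package sketch

	// lastValidEpoch is an assumed reconstruction: the last epoch of the
	// *next* sync committee period, which ends just before period index+2.
	func lastValidEpoch(currentSyncPeriodIndex, epochsPerPeriod uint64) uint64 {
		return (currentSyncPeriodIndex+2)*epochsPerPeriod - 1
	}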
|
||||
|
||||
@@ -13,11 +13,13 @@ import (
|
||||
builderTest "github.com/prysmaticlabs/prysm/v3/beacon-chain/builder/testing"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/signing"
|
||||
coreTime "github.com/prysmaticlabs/prysm/v3/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/transition"
|
||||
dbutil "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
|
||||
mockExecution "github.com/prysmaticlabs/prysm/v3/beacon-chain/execution/testing"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/forkchoice/protoarray"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/attestations/mock"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/operations/slashings"
|
||||
@@ -32,6 +34,7 @@ import (
|
||||
mockSync "github.com/prysmaticlabs/prysm/v3/beacon-chain/sync/initial-sync/testing"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v3/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v3/consensus-types/blocks"
|
||||
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v3/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
|
||||
@@ -1842,7 +1845,7 @@ func TestProduceBlindedBlock(t *testing.T) {
|
||||
assert.DeepEqual(t, aggregatedSig, blk.Body.SyncAggregate.SyncCommitteeSignature)
|
||||
})
|
||||
|
||||
t.Run("Bellatrix", func(t *testing.T) {
|
||||
t.Run("Can get blind block from builder service", func(t *testing.T) {
|
||||
db := dbutil.SetupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
@@ -1850,6 +1853,8 @@ func TestProduceBlindedBlock(t *testing.T) {
|
||||
bc := params.BeaconConfig().Copy()
|
||||
bc.AltairForkEpoch = types.Epoch(0)
|
||||
bc.BellatrixForkEpoch = types.Epoch(1)
|
||||
bc.MaxBuilderConsecutiveMissedSlots = params.BeaconConfig().SlotsPerEpoch + 1
|
||||
bc.MaxBuilderEpochMissedSlots = params.BeaconConfig().SlotsPerEpoch
|
||||
params.OverrideBeaconConfig(bc)
|
||||
|
||||
beaconState, privKeys := util.DeterministicGenesisStateBellatrix(t, params.BeaconConfig().SyncCommitteeSize)
|
||||
@@ -1870,14 +1875,56 @@ func TestProduceBlindedBlock(t *testing.T) {
|
||||
require.NoError(t, db.SaveState(ctx, beaconState, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, db.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
|
||||
v1Alpha1Server := &v1alpha1validator.Server{
|
||||
ExecutionEngineCaller: &mockExecution.EngineClient{
|
||||
ExecutionBlock: &enginev1.ExecutionBlock{
|
||||
TotalDifficulty: "0x1",
|
||||
},
|
||||
fb := util.HydrateSignedBeaconBlockBellatrix(ðpbalpha.SignedBeaconBlockBellatrix{})
|
||||
fb.Block.Body.ExecutionPayload.GasLimit = 123
|
||||
wfb, err := blocks.NewSignedBeaconBlock(fb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveBlock(ctx, wfb), "Could not save block")
|
||||
r, err := wfb.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
ti := time.Unix(0, 0)
|
||||
ts, err := slots.ToTime(uint64(ti.Unix()), 33)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconState.SetGenesisTime(uint64(ti.Unix())))
|
||||
random, err := helpers.RandaoMix(beaconState, coreTime.CurrentEpoch(beaconState))
|
||||
require.NoError(t, err)
|
||||
bid := ðpbalpha.BuilderBid{
|
||||
Header: &enginev1.ExecutionPayloadHeader{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: random,
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
TransactionsRoot: make([]byte, fieldparams.RootLength),
|
||||
BlockNumber: 1,
|
||||
Timestamp: uint64(ts.Unix()),
|
||||
},
|
||||
TimeFetcher: &mockChain.ChainService{},
|
||||
HeadFetcher: &mockChain.ChainService{State: beaconState, Root: parentRoot[:]},
|
||||
Pubkey: sk.PublicKey().Marshal(),
|
||||
Value: bytesutil.PadTo([]byte{1, 2, 3}, 32),
|
||||
}
|
||||
d := params.BeaconConfig().DomainApplicationBuilder
|
||||
domain, err := signing.ComputeDomain(d, nil, nil)
|
||||
require.NoError(t, err)
|
||||
sr, err := signing.ComputeSigningRoot(bid, domain)
|
||||
require.NoError(t, err)
|
||||
sBid := ðpbalpha.SignedBuilderBid{
|
||||
Message: bid,
|
||||
Signature: sk.Sign(sr[:]).Marshal(),
|
||||
}
v1Alpha1Server := &v1alpha1validator.Server{
|
||||
BeaconDB: db,
|
||||
ForkFetcher: &mockChain.ChainService{ForkChoiceStore: protoarray.New()},
|
||||
TimeFetcher: &mockChain.ChainService{
|
||||
Genesis: ti,
|
||||
},
|
||||
HeadFetcher: &mockChain.ChainService{State: beaconState, Root: parentRoot[:], Block: wfb},
|
||||
OptimisticModeFetcher: &mockChain.ChainService{},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BlockReceiver: &mockChain.ChainService{},
|
||||
@@ -1892,6 +1939,15 @@ func TestProduceBlindedBlock(t *testing.T) {
|
||||
StateGen: stategen.New(db),
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
ProposerSlotIndexCache: cache.NewProposerPayloadIDsCache(),
|
||||
BlockBuilder: &builderTest.MockBuilderService{
|
||||
HasConfigured: true,
|
||||
Bid: sBid,
|
||||
},
|
||||
FinalizationFetcher: &mockChain.ChainService{
|
||||
FinalizedCheckPoint: ðpbalpha.Checkpoint{
|
||||
Root: r[:],
|
||||
},
|
||||
},
|
||||
}
proposerSlashings := make([]*ethpbalpha.ProposerSlashing, params.BeaconConfig().MaxProposerSlashings)
|
||||
@@ -1949,8 +2005,10 @@ func TestProduceBlindedBlock(t *testing.T) {
|
||||
require.NoError(t, v1Alpha1Server.SyncCommitteePool.SaveSyncCommitteeContribution(contribution))
v1Server := &Server{
|
||||
V1Alpha1Server: v1Alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
V1Alpha1Server: v1Alpha1Server,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
TimeFetcher: &mockChain.ChainService{},
|
||||
OptimisticModeFetcher: &mockChain.ChainService{},
|
||||
}
|
||||
randaoReveal, err := util.RandaoReveal(beaconState, 1, privKeys)
|
||||
require.NoError(t, err)
@@ -53,6 +53,7 @@ go_library(
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//beacon-chain/sync:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
@@ -64,6 +65,7 @@ go_library(
|
||||
"//crypto/hash:go_default_library",
|
||||
"//crypto/rand:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//encoding/ssz:go_default_library",
|
||||
"//monitoring/tracing:go_default_library",
|
||||
"//network/forks:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
@@ -15,8 +15,8 @@ import (
|
||||
"go.opencensus.io/trace"
|
||||
)
func (vs *Server) buildAltairBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (*ethpb.BeaconBlockAltair, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "ProposerServer.buildAltairBeaconBlock")
|
||||
func (vs *Server) BuildAltairBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (*ethpb.BeaconBlockAltair, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "ProposerServer.BuildAltairBeaconBlock")
|
||||
defer span.End()
|
||||
blkData, err := vs.buildPhase0BlockData(ctx, req)
|
||||
if err != nil {
|
||||
@@ -55,7 +55,7 @@ func (vs *Server) buildAltairBeaconBlock(ctx context.Context, req *ethpb.BlockRe
|
||||
func (vs *Server) getAltairBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (*ethpb.BeaconBlockAltair, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "ProposerServer.getAltairBeaconBlock")
|
||||
defer span.End()
|
||||
blk, err := vs.buildAltairBeaconBlock(ctx, req)
|
||||
blk, err := vs.BuildAltairBeaconBlock(ctx, req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not build block data: %v", err)
|
||||
}
@@ -4,6 +4,7 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"time"
"github.com/pkg/errors"
|
||||
@@ -19,6 +20,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v3/consensus-types/interfaces"
|
||||
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v3/encoding/ssz"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v3/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v3/runtime/version"
|
||||
@@ -37,14 +39,14 @@ var builderGetPayloadMissCount = promauto.NewCounter(prometheus.CounterOpts{
|
||||
const blockBuilderTimeout = 1 * time.Second
func (vs *Server) getBellatrixBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (*ethpb.GenericBeaconBlock, error) {
|
||||
altairBlk, err := vs.buildAltairBeaconBlock(ctx, req)
|
||||
altairBlk, err := vs.BuildAltairBeaconBlock(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
registered, err := vs.validatorRegistered(ctx, altairBlk.ProposerIndex)
|
||||
if registered && err == nil {
|
||||
builderReady, b, err := vs.getAndBuildBlindBlock(ctx, altairBlk)
|
||||
builderReady, b, err := vs.GetAndBuildBlindBlock(ctx, altairBlk)
|
||||
if err != nil {
|
||||
// In the event of an error, the node should fall back to default execution engine for building block.
|
||||
log.WithError(err).Error("Failed to build a block from external builder, falling " +
|
||||
@@ -108,6 +110,7 @@ func (vs *Server) getPayloadHeaderFromBuilder(ctx context.Context, slot types.Sl
|
||||
if blocks.IsPreBellatrixVersion(b.Version()) {
|
||||
return nil, nil
|
||||
}
h, err := b.Block().Body().Execution()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -120,6 +123,24 @@ func (vs *Server) getPayloadHeaderFromBuilder(ctx context.Context, slot types.Sl
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if bid == nil || bid.Message == nil {
|
||||
return nil, errors.New("builder returned nil bid")
|
||||
}
v := new(big.Int).SetBytes(bytesutil.ReverseByteOrder(bid.Message.Value))
|
||||
if v.String() == "0" {
|
||||
return nil, errors.New("builder returned header with 0 bid amount")
|
||||
}
emptyRoot, err := ssz.TransactionsRoot([][]byte{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
if bytesutil.ToBytes32(bid.Message.Header.TransactionsRoot) == emptyRoot {
|
||||
return nil, errors.New("builder returned header with an empty tx root")
|
||||
}
if !bytes.Equal(bid.Message.Header.ParentHash, h.BlockHash()) {
|
||||
return nil, fmt.Errorf("incorrect parent hash %#x != %#x", bid.Message.Header.ParentHash, h.BlockHash())
|
||||
}
|
||||
@@ -137,7 +158,7 @@ func (vs *Server) getPayloadHeaderFromBuilder(ctx context.Context, slot types.Sl
|
||||
}
log.WithFields(logrus.Fields{
|
||||
"bid": bytesutil.BytesToUint64BigEndian(bid.Message.Value),
|
||||
"value": v.String(),
|
||||
"builderPubKey": fmt.Sprintf("%#x", bid.Message.Pubkey),
|
||||
"blockHash": fmt.Sprintf("%#x", bid.Message.Header.BlockHash),
|
||||
}).Info("Received header with bid")
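The checks added above reject degenerate builder responses before the bid is logged: a nil bid, a zero bid value, an empty transactions root, and a parent hash that does not extend the current head. A self-contained sketch of the same validation order, with plain byte slices standing in for the protobuf bid fields (all names here are illustrative):

import (
	"bytes"
	"errors"
	"fmt"
	"math/big"
)

// validateBidSketch mirrors the checks above. bidValue is little-endian,
// as in the builder bid message, so it is reversed before interpretation.
func validateBidSketch(bidValue, txRoot, emptyTxRoot, parentHash, headHash []byte) error {
	v := new(big.Int).SetBytes(reverseBytes(bidValue))
	if v.Sign() == 0 {
		return errors.New("builder returned header with 0 bid amount")
	}
	if bytes.Equal(txRoot, emptyTxRoot) {
		return errors.New("builder returned header with an empty tx root")
	}
	if !bytes.Equal(parentHash, headHash) {
		return fmt.Errorf("incorrect parent hash %#x != %#x", parentHash, headHash)
	}
	return nil
}

// reverseBytes converts between little- and big-endian byte order.
func reverseBytes(b []byte) []byte {
	out := make([]byte, len(b))
	for i := range b {
		out[i] = b[len(b)-1-i]
	}
	return out
}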
@@ -357,10 +378,10 @@ func (vs *Server) circuitBreakBuilder(s types.Slot) (bool, error) {
|
||||
return false, nil
|
||||
}
// Get and build blind block from builder network. Returns a boolean status, built block and error.
|
||||
// GetAndBuildBlindBlock builds blind block from builder network. Returns a boolean status, built block and error.
|
||||
// If the status is false, it means the builder header block is disallowed.
|
||||
// This routine is time limited by `blockBuilderTimeout`.
|
||||
func (vs *Server) getAndBuildBlindBlock(ctx context.Context, b *ethpb.BeaconBlockAltair) (bool, *ethpb.GenericBeaconBlock, error) {
|
||||
func (vs *Server) GetAndBuildBlindBlock(ctx context.Context, b *ethpb.BeaconBlockAltair) (bool, *ethpb.GenericBeaconBlock, error) {
|
||||
// No op. Builder is not defined. User did not specify a user URL. We should use local EE.
|
||||
if vs.BlockBuilder == nil || !vs.BlockBuilder.Configured() {
|
||||
return false, nil, nil
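With getAndBuildBlindBlock exported as GetAndBuildBlindBlock, the fallback pattern from getBellatrixBeaconBlock above is available to callers outside the package. A hedged sketch of that calling pattern, with vs, ctx, and altairBlk in scope as in the surrounding code:

	ready, blindBlk, err := vs.GetAndBuildBlindBlock(ctx, altairBlk)
	if err != nil {
		// Builder failures are non-fatal: log and fall back to the local execution engine.
		log.WithError(err).Error("Failed to build a block from external builder, falling back to local execution client")
	} else if ready {
		return blindBlk, nil
	}
	// Otherwise continue with the local building path.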
@@ -97,6 +97,40 @@ func TestServer_buildHeaderBlock(t *testing.T) {
|
||||
}
func TestServer_getPayloadHeader(t *testing.T) {
|
||||
emptyRoot, err := ssz.TransactionsRoot([][]byte{})
|
||||
require.NoError(t, err)
|
||||
ti, err := slots.ToTime(uint64(time.Now().Unix()), 0)
|
||||
require.NoError(t, err)
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
bid := ðpb.BuilderBid{
|
||||
Header: &v1.ExecutionPayloadHeader{
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: make([]byte, fieldparams.RootLength),
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
TransactionsRoot: bytesutil.PadTo([]byte{1}, fieldparams.RootLength),
|
||||
ParentHash: params.BeaconConfig().ZeroHash[:],
|
||||
Timestamp: uint64(ti.Unix()),
|
||||
},
|
||||
Pubkey: sk.PublicKey().Marshal(),
|
||||
Value: bytesutil.PadTo([]byte{1, 2, 3}, 32),
|
||||
}
|
||||
d := params.BeaconConfig().DomainApplicationBuilder
|
||||
domain, err := signing.ComputeDomain(d, nil, nil)
|
||||
require.NoError(t, err)
|
||||
sr, err := signing.ComputeSigningRoot(bid, domain)
|
||||
require.NoError(t, err)
|
||||
sBid := ðpb.SignedBuilderBid{
|
||||
Message: bid,
|
||||
Signature: sk.Sign(sr[:]).Marshal(),
|
||||
}
require.NoError(t, err)
|
||||
tests := []struct {
|
||||
name string
|
||||
head interfaces.SignedBeaconBlock
|
||||
@@ -131,7 +165,7 @@ func TestServer_getPayloadHeader(t *testing.T) {
|
||||
err: "can't get header",
|
||||
},
|
||||
{
|
||||
name: "get header correct",
|
||||
name: "0 bid",
|
||||
mock: &builderTest.MockBuilderService{
|
||||
Bid: ðpb.SignedBuilderBid{
|
||||
Message: ðpb.BuilderBid{
|
||||
@@ -140,7 +174,6 @@ func TestServer_getPayloadHeader(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
ErrGetHeader: errors.New("can't get header"),
|
||||
},
|
||||
fetcher: &blockchainTest.ChainService{
|
||||
Block: func() interfaces.SignedBeaconBlock {
|
||||
@@ -149,18 +182,55 @@ func TestServer_getPayloadHeader(t *testing.T) {
|
||||
return wb
|
||||
}(),
|
||||
},
|
||||
returnedHeader: &v1.ExecutionPayloadHeader{
|
||||
BlockNumber: 123,
|
||||
err: "builder returned header with 0 bid amount",
|
||||
},
|
||||
{
|
||||
name: "invalid tx root",
|
||||
mock: &builderTest.MockBuilderService{
|
||||
Bid: ðpb.SignedBuilderBid{
|
||||
Message: ðpb.BuilderBid{
|
||||
Value: []byte{1},
|
||||
Header: &v1.ExecutionPayloadHeader{
|
||||
BlockNumber: 123,
|
||||
TransactionsRoot: emptyRoot[:],
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
fetcher: &blockchainTest.ChainService{
|
||||
Block: func() interfaces.SignedBeaconBlock {
|
||||
wb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockBellatrix())
|
||||
require.NoError(t, err)
|
||||
return wb
|
||||
}(),
|
||||
},
|
||||
err: "builder returned header with an empty tx root",
|
||||
},
|
||||
{
|
||||
name: "can get header",
|
||||
mock: &builderTest.MockBuilderService{
|
||||
Bid: sBid,
|
||||
},
|
||||
fetcher: &blockchainTest.ChainService{
|
||||
Block: func() interfaces.SignedBeaconBlock {
|
||||
wb, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockBellatrix())
|
||||
require.NoError(t, err)
|
||||
return wb
|
||||
}(),
|
||||
},
|
||||
returnedHeader: bid.Header,
|
||||
},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
vs := &Server{BlockBuilder: tc.mock, HeadFetcher: tc.fetcher}
|
||||
vs := &Server{BlockBuilder: tc.mock, HeadFetcher: tc.fetcher, TimeFetcher: &blockchainTest.ChainService{
|
||||
Genesis: time.Now(),
|
||||
}}
|
||||
h, err := vs.getPayloadHeaderFromBuilder(context.Background(), 0, 0)
|
||||
if err != nil {
|
||||
if tc.err != "" {
|
||||
require.ErrorContains(t, tc.err, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, tc.returnedHeader, h)
|
||||
}
|
||||
})
|
||||
@@ -350,20 +420,20 @@ func TestServer_getAndBuildHeaderBlock(t *testing.T) {
|
||||
vs := &Server{}
|
||||
|
||||
// Nil builder
|
||||
ready, _, err := vs.getAndBuildBlindBlock(ctx, nil)
|
||||
ready, _, err := vs.GetAndBuildBlindBlock(ctx, nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, ready)
|
||||
|
||||
// Not configured
|
||||
vs.BlockBuilder = &builderTest.MockBuilderService{}
|
||||
ready, _, err = vs.getAndBuildBlindBlock(ctx, nil)
|
||||
ready, _, err = vs.GetAndBuildBlindBlock(ctx, nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, ready)
|
||||
|
||||
// Block is not ready
|
||||
vs.BlockBuilder = &builderTest.MockBuilderService{HasConfigured: true}
|
||||
vs.FinalizationFetcher = &blockchainTest.ChainService{FinalizedCheckPoint: ðpb.Checkpoint{}}
|
||||
ready, _, err = vs.getAndBuildBlindBlock(ctx, nil)
|
||||
ready, _, err = vs.GetAndBuildBlindBlock(ctx, nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, ready)
|
||||
|
||||
@@ -380,7 +450,7 @@ func TestServer_getAndBuildHeaderBlock(t *testing.T) {
|
||||
vs.HeadFetcher = &blockchainTest.ChainService{Block: wb1}
|
||||
vs.BlockBuilder = &builderTest.MockBuilderService{HasConfigured: true, ErrGetHeader: errors.New("could not get payload")}
|
||||
vs.ForkFetcher = &blockchainTest.ChainService{ForkChoiceStore: protoarray.New()}
|
||||
ready, _, err = vs.getAndBuildBlindBlock(ctx, ðpb.BeaconBlockAltair{})
|
||||
ready, _, err = vs.GetAndBuildBlindBlock(ctx, ðpb.BeaconBlockAltair{})
|
||||
require.ErrorContains(t, "could not get payload", err)
|
||||
require.Equal(t, false, ready)
|
||||
|
||||
@@ -456,7 +526,7 @@ func TestServer_getAndBuildHeaderBlock(t *testing.T) {
|
||||
vs.BlockBuilder = &builderTest.MockBuilderService{HasConfigured: true, Bid: sBid}
|
||||
vs.TimeFetcher = &blockchainTest.ChainService{Genesis: time.Now()}
|
||||
vs.ForkFetcher = &blockchainTest.ChainService{ForkChoiceStore: protoarray.New()}
|
||||
ready, builtBlk, err := vs.getAndBuildBlindBlock(ctx, altairBlk.Block)
|
||||
ready, builtBlk, err := vs.GetAndBuildBlindBlock(ctx, altairBlk.Block)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, ready)
|
||||
require.DeepEqual(t, h, builtBlk.GetBlindedBellatrix().Body.ExecutionPayloadHeader)
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
fastssz "github.com/prysmaticlabs/fastssz"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/features"
|
||||
"github.com/prysmaticlabs/prysm/v3/config/params"
|
||||
types "github.com/prysmaticlabs/prysm/v3/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v3/crypto/hash"
|
||||
@@ -106,6 +107,9 @@ func (vs *Server) canonicalEth1Data(
|
||||
canonicalEth1Data = beaconState.Eth1Data()
|
||||
eth1BlockHash = bytesutil.ToBytes32(beaconState.Eth1Data().BlockHash)
|
||||
}
|
||||
if features.Get().DisableStakinContractCheck && eth1BlockHash == [32]byte{} {
|
||||
return canonicalEth1Data, new(big.Int).SetInt64(0), nil
|
||||
}
|
||||
_, canonicalEth1DataHeight, err := vs.Eth1BlockFetcher.BlockExists(ctx, eth1BlockHash)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not fetch eth1data height")
|
||||
|
||||
@@ -123,6 +123,9 @@ func (vs *Server) ValidatorIndex(ctx context.Context, req *ethpb.ValidatorIndexR
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not determine head state: %v", err)
|
||||
}
|
||||
if st == nil || st.IsNil() {
|
||||
return nil, status.Errorf(codes.Internal, "head state is empty")
|
||||
}
|
||||
index, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(req.PublicKey))
|
||||
if !ok {
|
||||
return nil, status.Errorf(codes.NotFound, "Could not find validator index for public key %#x", req.PublicKey)
|
||||
|
||||
@@ -48,6 +48,18 @@ func TestValidatorIndex_OK(t *testing.T) {
|
||||
assert.NoError(t, err, "Could not get validator index")
|
||||
}
func TestValidatorIndex_StateEmpty(t *testing.T) {
|
||||
Server := &Server{
|
||||
HeadFetcher: &mockChain.ChainService{},
|
||||
}
|
||||
pubKey := pubKey(1)
|
||||
req := ðpb.ValidatorIndexRequest{
|
||||
PublicKey: pubKey,
|
||||
}
|
||||
_, err := Server.ValidatorIndex(context.Background(), req)
|
||||
assert.ErrorContains(t, "head state is empty", err)
|
||||
}
func TestWaitForActivation_ContextClosed(t *testing.T) {
|
||||
beaconState, err := v1.InitializeFromProto(ðpb.BeaconState{
|
||||
Slot: 0,
|
||||
|
||||
@@ -30,13 +30,14 @@ var errParticipation = status.Errorf(codes.Internal, "Failed to obtain epoch par
// ValidatorStatus returns the validator status of the current epoch.
|
||||
// The status response can be one of the following:
|
||||
// DEPOSITED - validator's deposit has been recognized by Ethereum 1, not yet recognized by Ethereum.
|
||||
// PENDING - validator is in Ethereum's activation queue.
|
||||
// ACTIVE - validator is active.
|
||||
// EXITING - validator has initiated an exit request, or has dropped below the ejection balance and is being kicked out.
|
||||
// EXITED - validator is no longer validating.
|
||||
// SLASHING - validator has been kicked out due to meeting a slashing condition.
|
||||
// UNKNOWN_STATUS - validator does not have a known status in the network.
|
||||
//
|
||||
// DEPOSITED - validator's deposit has been recognized by Ethereum 1, not yet recognized by Ethereum.
|
||||
// PENDING - validator is in Ethereum's activation queue.
|
||||
// ACTIVE - validator is active.
|
||||
// EXITING - validator has initiated an exit request, or has dropped below the ejection balance and is being kicked out.
|
||||
// EXITED - validator is no longer validating.
|
||||
// SLASHING - validator has been kicked out due to meeting a slashing condition.
|
||||
// UNKNOWN_STATUS - validator does not have a known status in the network.
|
||||
func (vs *Server) ValidatorStatus(
|
||||
ctx context.Context,
|
||||
req *ethpb.ValidatorStatusRequest,
|
||||
@@ -363,15 +364,6 @@ func (vs *Server) validatorStatus(
|
||||
}
|
||||
}
func (vs *Server) retrieveAfterEpochTransition(ctx context.Context, epoch types.Epoch) (state.BeaconState, error) {
|
||||
endSlot, err := slots.EpochEnd(epoch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// replay to first slot of following epoch
|
||||
return vs.ReplayerBuilder.ReplayerForSlot(endSlot).ReplayToSlot(ctx, endSlot+1)
|
||||
}
func checkValidatorsAreRecent(headEpoch types.Epoch, req *ethpb.DoppelGangerRequest) (bool, *ethpb.DoppelGangerResponse) {
|
||||
validatorsAreRecent := true
|
||||
resp := ðpb.DoppelGangerResponse{
|
||||
|
||||
@@ -351,6 +351,7 @@ func (s *Service) Start() {
|
||||
ReplayerBuilder: ch,
|
||||
},
|
||||
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
|
||||
ForkFetcher: s.cfg.ForkFetcher,
|
||||
}
|
||||
ethpbv1alpha1.RegisterDebugServer(s.grpcServer, debugServer)
|
||||
ethpbservice.RegisterBeaconDebugServer(s.grpcServer, debugServerV1)
|
||||
|
||||
@@ -239,7 +239,8 @@ func (s *State) latestAncestor(ctx context.Context, blockRoot [32]byte) (state.B
|
||||
// Is the state the genesis state.
|
||||
parentRoot := bytesutil.ToBytes32(b.Block().ParentRoot())
|
||||
if parentRoot == params.BeaconConfig().ZeroHash {
|
||||
return s.beaconDB.GenesisState(ctx)
|
||||
s, err := s.beaconDB.GenesisState(ctx)
|
||||
return s, errors.Wrap(err, "could not get genesis state")
|
||||
}
|
||||
|
||||
// Return an error if slot hasn't been covered by checkpoint sync.
|
||||
@@ -268,12 +269,13 @@ func (s *State) latestAncestor(ctx context.Context, blockRoot [32]byte) (state.B
|
||||
|
||||
// Does the state exist in the DB.
|
||||
if s.beaconDB.HasState(ctx, parentRoot) {
|
||||
return s.beaconDB.State(ctx, parentRoot)
|
||||
s, err := s.beaconDB.State(ctx, parentRoot)
|
||||
return s, errors.Wrap(err, "failed to retrieve state from db")
|
||||
}
|
||||
|
||||
b, err = s.beaconDB.Block(ctx, parentRoot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, errors.Wrap(err, "failed to retrieve block from db")
|
||||
}
|
||||
if b == nil || b.IsNil() {
|
||||
return nil, errUnknownBlock
|
||||
|
||||
@@ -13,4 +13,16 @@ var (
|
||||
Buckets: []float64{64, 256, 1024, 2048, 4096},
|
||||
},
|
||||
)
|
||||
replayBlocksSummary = promauto.NewSummary(
|
||||
prometheus.SummaryOpts{
|
||||
Name: "replay_blocks_milliseconds",
|
||||
Help: "Time it took to replay blocks",
|
||||
},
|
||||
)
|
||||
replayToSlotSummary = promauto.NewSummary(
|
||||
prometheus.SummaryOpts{
|
||||
Name: "replay_to_slot_milliseconds",
|
||||
Help: "Time it took to replay to slot",
|
||||
},
|
||||
)
|
||||
)
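Both summaries are fed from the replay paths below; the recording pattern is to time the work and observe milliseconds (a sketch, assuming a summary and the time package in scope):

	start := time.Now()
	// ... replay blocks ...
	duration := time.Since(start)
	replayBlocksSummary.Observe(float64(duration.Milliseconds()))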
@@ -63,7 +63,6 @@ type chainer interface {
|
||||
}
|
||||
|
||||
type stateReplayer struct {
|
||||
s state.BeaconState
|
||||
target types.Slot
|
||||
method retrievalMethod
|
||||
chainer chainer
|
||||
@@ -120,6 +119,7 @@ func (rs *stateReplayer) ReplayBlocks(ctx context.Context) (state.BeaconState, e
|
||||
log.WithFields(logrus.Fields{
|
||||
"duration": duration,
|
||||
}).Debug("Finished calling process_blocks on all blocks in ReplayBlocks")
|
||||
replayBlocksSummary.Observe(float64(duration.Milliseconds()))
|
||||
return s, nil
|
||||
}
@@ -151,14 +151,14 @@ func (rs *stateReplayer) ReplayToSlot(ctx context.Context, replayTo types.Slot)
|
||||
|
||||
// err will be handled after the bookend log
|
||||
s, err = ReplayProcessSlots(ctx, s, replayTo)
|
||||
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, fmt.Sprintf("ReplayToSlot failed to seek to slot %d after applying blocks", replayTo))
|
||||
}
|
||||
duration := time.Since(start)
|
||||
log.WithFields(logrus.Fields{
|
||||
"duration": duration,
|
||||
}).Debug("time spent in process_slots")
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, fmt.Sprintf("ReplayToSlot failed to seek to slot %d after applying blocks", replayTo))
|
||||
}
|
||||
replayToSlotSummary.Observe(float64(duration.Milliseconds()))
return s, nil
|
||||
}
@@ -131,7 +131,7 @@ func (s *Service) processFetchedData(
|
||||
|
||||
// Use Batch Block Verify to process and verify batches directly.
|
||||
if err := s.processBatchedBlocks(ctx, genesis, data.blocks, s.cfg.Chain.ReceiveBlockBatch); err != nil {
|
||||
log.WithError(err).Warn("Batch is not processed")
|
||||
log.WithError(err).Warn("Skip processing batched blocks")
|
||||
}
|
||||
}
@@ -260,7 +260,7 @@ func (s *Service) processBatchedBlocks(ctx context.Context, genesis time.Time,
|
||||
headSlot := s.cfg.Chain.HeadSlot()
|
||||
for headSlot >= firstBlock.Block().Slot() && s.isProcessedBlock(ctx, firstBlock, blkRoot) {
|
||||
if len(blks) == 1 {
|
||||
return errors.New("no good blocks in batch")
|
||||
return fmt.Errorf("headSlot:%d, blockSlot:%d , root %#x:%w", headSlot, firstBlock.Block().Slot(), blkRoot, errBlockAlreadyProcessed)
|
||||
}
|
||||
blks = blks[1:]
|
||||
firstBlock = blks[0]
|
|
||||
ctx context.Context, blocks []interfaces.SignedBeaconBlock, blockRoots [][32]byte) error {
|
||||
return nil
|
||||
})
|
||||
assert.ErrorContains(t, "no good blocks in batch", err)
|
||||
assert.ErrorContains(t, "block is already processed", err)
var badBatch2 []interfaces.SignedBeaconBlock
|
||||
for i, b := range batch2 {
@@ -89,6 +89,38 @@ var (
|
||||
Buckets: []float64{250, 500, 1000, 1500, 2000, 4000, 8000, 16000},
|
||||
},
|
||||
)
// Attestation processing granular error tracking.
|
||||
attBadBlockCount = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "gossip_attestation_bad_block_total",
|
||||
Help: "Increased when a gossip attestation references a bad block",
|
||||
})
|
||||
attBadLmdConsistencyCount = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "gossip_attestation_bad_lmd_consistency_total",
|
||||
Help: "Increased when a gossip attestation has bad LMD GHOST consistency",
|
||||
})
|
||||
attBadSelectionProofCount = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "gossip_attestation_bad_selection_proof_total",
|
||||
Help: "Increased when a gossip attestation has a bad selection proof",
|
||||
})
|
||||
attBadSignatureBatchCount = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "gossip_attestation_bad_signature_batch_total",
|
||||
Help: "Increased when a gossip attestation has a bad signature batch",
|
||||
})
// Attestation and block gossip verification performance.
|
||||
aggregateAttestationVerificationGossipSummary = promauto.NewSummary(
|
||||
prometheus.SummaryOpts{
|
||||
Name: "gossip_aggregate_attestation_verification_milliseconds",
|
||||
Help: "Time to verify gossiped attestations",
|
||||
},
|
||||
)
|
||||
blockVerificationGossipSummary = promauto.NewSummary(
|
||||
prometheus.SummaryOpts{
|
||||
Name: "gossip_block_verification_milliseconds",
|
||||
Help: "Time to verify gossiped blocks",
|
||||
},
|
||||
)
|
||||
)
func (s *Service) updateMetrics() {
@@ -20,6 +20,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v3/monitoring/tracing"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
prysmTime "github.com/prysmaticlabs/prysm/v3/time"
|
||||
"github.com/prysmaticlabs/prysm/v3/time/slots"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
@@ -27,6 +28,7 @@ import (
|
||||
// validateAggregateAndProof verifies the aggregated signature and the selection proof is valid before forwarding to the
|
||||
// network and downstream services.
|
||||
func (s *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
|
||||
receivedTime := prysmTime.Now()
|
||||
if pid == s.cfg.p2p.PeerID() {
|
||||
return pubsub.ValidationAccept, nil
|
||||
}
|
||||
@@ -40,16 +42,6 @@ func (s *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, ms
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
|
||||
// We should not attempt to process this message if the node is running in optimistic mode.
|
||||
// We just ignore in p2p so that the peer is not penalized.
|
||||
optimistic, err := s.cfg.chain.IsOptimistic(ctx)
|
||||
if err != nil {
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
if optimistic {
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
|
||||
raw, err := s.decodePubsubMessage(msg)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
@@ -85,8 +77,11 @@ func (s *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, ms
|
||||
|
||||
// Attestation's slot is within ATTESTATION_PROPAGATION_SLOT_RANGE and early attestation
|
||||
// processing tolerance.
|
||||
if err := helpers.ValidateAttestationTime(m.Message.Aggregate.Data.Slot, s.cfg.chain.GenesisTime(),
|
||||
earlyAttestationProcessingTolerance); err != nil {
|
||||
if err := helpers.ValidateAttestationTime(
|
||||
m.Message.Aggregate.Data.Slot,
|
||||
s.cfg.chain.GenesisTime(),
|
||||
earlyAttestationProcessingTolerance,
|
||||
); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return pubsub.ValidationIgnore, err
|
||||
}
|
||||
@@ -99,6 +94,7 @@ func (s *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, ms
|
||||
if s.hasBadBlock(bytesutil.ToBytes32(m.Message.Aggregate.Data.BeaconBlockRoot)) ||
|
||||
s.hasBadBlock(bytesutil.ToBytes32(m.Message.Aggregate.Data.Target.Root)) ||
|
||||
s.hasBadBlock(bytesutil.ToBytes32(m.Message.Aggregate.Data.Source.Root)) {
|
||||
attBadBlockCount.Inc()
|
||||
return pubsub.ValidationReject, errors.New("bad block referenced in attestation data")
|
||||
}
|
||||
|
||||
@@ -124,6 +120,8 @@ func (s *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, ms
|
||||
|
||||
msg.ValidatorData = m
|
||||
|
||||
aggregateAttestationVerificationGossipSummary.Observe(float64(prysmTime.Since(receivedTime).Milliseconds()))
|
||||
|
||||
return pubsub.ValidationAccept, nil
|
||||
}
|
||||
|
||||
@@ -137,6 +135,7 @@ func (s *Service) validateAggregatedAtt(ctx context.Context, signed *ethpb.Signe
|
||||
// but it's invalid in the spirit of the protocol. Here we choose safety over profit.
|
||||
if err := s.cfg.chain.VerifyLmdFfgConsistency(ctx, signed.Message.Aggregate); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
attBadLmdConsistencyCount.Inc()
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
|
||||
@@ -178,6 +177,7 @@ func (s *Service) validateAggregatedAtt(ctx context.Context, signed *ethpb.Signe
|
||||
if err != nil {
|
||||
wrappedErr := errors.Wrapf(err, "Could not validate selection for validator %d", signed.Message.AggregatorIndex)
|
||||
tracing.AnnotateError(span, wrappedErr)
|
||||
attBadSelectionProofCount.Inc()
|
||||
return pubsub.ValidationReject, wrappedErr
|
||||
}
|
||||
|
||||
|
||||
@@ -362,6 +362,7 @@ func TestValidateAggregateAndProof_CanValidate(t *testing.T) {
|
||||
beaconDB: db,
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
chain: &mock.ChainService{Genesis: time.Now().Add(-oneEpoch()),
|
||||
Optimistic: true,
|
||||
DB: db,
|
||||
State: beaconState,
|
||||
ValidAttestation: true,
|
||||
@@ -697,35 +698,3 @@ func TestValidateAggregateAndProof_RejectWhenAttEpochDoesntEqualTargetEpoch(t *t
|
||||
assert.NotNil(t, err)
|
||||
assert.Equal(t, pubsub.ValidationReject, res)
|
||||
}
|
||||
|
||||
func TestValidateAggregateAndProof_Optimistic(t *testing.T) {
|
||||
p := p2ptest.NewTestP2P(t)
|
||||
ctx := context.Background()
|
||||
|
||||
exit, s := setupValidExit(t)
|
||||
|
||||
r := &Service{
|
||||
cfg: &config{
|
||||
p2p: p,
|
||||
chain: &mock.ChainService{
|
||||
State: s,
|
||||
Optimistic: true,
|
||||
},
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
},
|
||||
}
|
||||
buf := new(bytes.Buffer)
|
||||
_, err := p.Encoding().EncodeGossip(buf, exit)
|
||||
require.NoError(t, err)
|
||||
topic := p2p.GossipTypeMapping[reflect.TypeOf(exit)]
|
||||
m := &pubsub.Message{
|
||||
Message: &pubsubpb.Message{
|
||||
Data: buf.Bytes(),
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
res, err := r.validateAggregateAndProof(ctx, "", m)
|
||||
assert.NoError(t, err)
|
||||
valid := res == pubsub.ValidationIgnore
|
||||
assert.Equal(t, true, valid, "Validation should have ignored the message")
|
||||
}
|
||||
|
||||
@@ -26,16 +26,6 @@ func (s *Service) validateAttesterSlashing(ctx context.Context, pid peer.ID, msg
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
|
||||
// We should not attempt to process this message if the node is running in optimistic mode.
|
||||
// We just ignore in p2p so that the peer is not penalized.
|
||||
optimistic, err := s.cfg.chain.IsOptimistic(ctx)
|
||||
if err != nil {
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
if optimistic {
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
|
||||
ctx, span := trace.StartSpan(ctx, "sync.validateAttesterSlashing")
|
||||
defer span.End()
|
||||
|
||||
|
||||
@@ -293,34 +293,3 @@ func TestSeenAttesterSlashingIndices(t *testing.T) {
|
||||
assert.Equal(t, tc.seen, r.hasSeenAttesterSlashingIndices(tc.checkIndices1, tc.checkIndices2))
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateAttesterSlashing_Optimistic(t *testing.T) {
|
||||
p := p2ptest.NewTestP2P(t)
|
||||
ctx := context.Background()
|
||||
|
||||
slashing, s := setupValidAttesterSlashing(t)
|
||||
|
||||
r := &Service{
|
||||
cfg: &config{
|
||||
p2p: p,
|
||||
chain: &mock.ChainService{State: s, Optimistic: true},
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
},
|
||||
}
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
_, err := p.Encoding().EncodeGossip(buf, slashing)
|
||||
require.NoError(t, err)
|
||||
|
||||
topic := p2p.GossipTypeMapping[reflect.TypeOf(slashing)]
|
||||
msg := &pubsub.Message{
|
||||
Message: &pubsubpb.Message{
|
||||
Data: buf.Bytes(),
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
res, err := r.validateAttesterSlashing(ctx, "foobar", msg)
|
||||
assert.NoError(t, err)
|
||||
valid := res == pubsub.ValidationIgnore
|
||||
assert.Equal(t, true, valid, "Should have ignore this message")
|
||||
}
|
||||
|
||||
@@ -41,16 +41,6 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
|
||||
// We should not attempt to process this message if the node is running in optimistic mode.
|
||||
// We just ignore in p2p so that the peer is not penalized.
|
||||
optimistic, err := s.cfg.chain.IsOptimistic(ctx)
|
||||
if err != nil {
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
if optimistic {
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
|
||||
ctx, span := trace.StartSpan(ctx, "sync.validateCommitteeIndexBeaconAttestation")
|
||||
defer span.End()
|
||||
|
||||
@@ -134,6 +124,7 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
|
||||
if s.hasBadBlock(bytesutil.ToBytes32(att.Data.BeaconBlockRoot)) ||
|
||||
s.hasBadBlock(bytesutil.ToBytes32(att.Data.Target.Root)) ||
|
||||
s.hasBadBlock(bytesutil.ToBytes32(att.Data.Source.Root)) {
|
||||
attBadBlockCount.Inc()
|
||||
return pubsub.ValidationReject, errors.New("attestation data references bad block root")
|
||||
}
|
||||
|
||||
@@ -151,6 +142,7 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
|
||||
}
|
||||
if err := s.cfg.chain.VerifyLmdFfgConsistency(ctx, att); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
attBadLmdConsistencyCount.Inc()
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
|
||||
@@ -232,6 +224,7 @@ func (s *Service) validateUnaggregatedAttWithState(ctx context.Context, a *eth.A
|
||||
set, err := blocks.AttestationSignatureBatch(ctx, bs, []*eth.Attestation{a})
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
attBadSignatureBatchCount.Inc()
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
return s.validateWithBatchVerifier(ctx, "attestation", set)
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -15,7 +14,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/core/signing"
|
||||
dbtest "github.com/prysmaticlabs/prysm/v3/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p"
|
||||
p2ptest "github.com/prysmaticlabs/prysm/v3/beacon-chain/p2p/testing"
|
||||
mockSync "github.com/prysmaticlabs/prysm/v3/beacon-chain/sync/initial-sync/testing"
|
||||
lruwrpr "github.com/prysmaticlabs/prysm/v3/cache/lru"
|
||||
@@ -23,7 +21,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v3/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v3/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v3/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v3/testing/util"
|
||||
)
|
||||
@@ -38,6 +35,7 @@ func TestService_validateCommitteeIndexBeaconAttestation(t *testing.T) {
|
||||
ValidatorsRoot: [32]byte{'A'},
|
||||
ValidAttestation: true,
|
||||
DB: db,
|
||||
Optimistic: true,
|
||||
}
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
@@ -306,37 +304,6 @@ func TestService_validateCommitteeIndexBeaconAttestation(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestServiceValidateCommitteeIndexBeaconAttestation_Optimistic(t *testing.T) {
|
||||
p := p2ptest.NewTestP2P(t)
|
||||
ctx := context.Background()
|
||||
|
||||
slashing, s := setupValidAttesterSlashing(t)
|
||||
|
||||
r := &Service{
|
||||
cfg: &config{
|
||||
p2p: p,
|
||||
chain: &mockChain.ChainService{State: s, Optimistic: true},
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
},
|
||||
}
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
_, err := p.Encoding().EncodeGossip(buf, slashing)
|
||||
require.NoError(t, err)
|
||||
|
||||
topic := p2p.GossipTypeMapping[reflect.TypeOf(slashing)]
|
||||
msg := &pubsub.Message{
|
||||
Message: &pubsubpb.Message{
|
||||
Data: buf.Bytes(),
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
res, err := r.validateCommitteeIndexBeaconAttestation(ctx, "foobar", msg)
|
||||
assert.NoError(t, err)
|
||||
valid := res == pubsub.ValidationIgnore
|
||||
assert.Equal(t, true, valid, "Should have ignore this message")
|
||||
}
|
||||
|
||||
func TestService_setSeenCommitteeIndicesSlot(t *testing.T) {
|
||||
chainService := &mockChain.ChainService{
|
||||
Genesis: time.Now(),
|
||||
|
||||
@@ -204,6 +204,8 @@ func (s *Service) validateBeaconBlockPubSub(ctx context.Context, pid peer.ID, ms
|
||||
"proposerIndex": blk.Block().ProposerIndex(),
|
||||
"graffiti": string(blk.Block().Body().Graffiti()),
|
||||
}).Debug("Received block")
|
||||
|
||||
blockVerificationGossipSummary.Observe(float64(prysmTime.Since(receivedTime).Milliseconds()))
|
||||
return pubsub.ValidationAccept, nil
|
||||
}
|
||||
|
||||
@@ -253,16 +255,17 @@ func (s *Service) validateBeaconBlock(ctx context.Context, blk interfaces.Signed
|
||||
|
||||
// validateBellatrixBeaconBlock validates the block for the Bellatrix fork.
|
||||
// spec code:
|
||||
// If the execution is enabled for the block -- i.e. is_execution_enabled(state, block.body) then validate the following:
|
||||
// [REJECT] The block's execution payload timestamp is correct with respect to the slot --
|
||||
// i.e. execution_payload.timestamp == compute_timestamp_at_slot(state, block.slot).
|
||||
//
|
||||
// If execution_payload verification of block's parent by an execution node is not complete:
|
||||
// [REJECT] The block's parent (defined by block.parent_root) passes all validation (excluding execution
|
||||
// node verification of the block.body.execution_payload).
|
||||
// otherwise:
|
||||
// [IGNORE] The block's parent (defined by block.parent_root) passes all validation (including execution
|
||||
// node verification of the block.body.execution_payload).
|
||||
// If the execution is enabled for the block -- i.e. is_execution_enabled(state, block.body) then validate the following:
|
||||
// [REJECT] The block's execution payload timestamp is correct with respect to the slot --
|
||||
// i.e. execution_payload.timestamp == compute_timestamp_at_slot(state, block.slot).
|
||||
//
|
||||
// If execution_payload verification of block's parent by an execution node is not complete:
|
||||
// [REJECT] The block's parent (defined by block.parent_root) passes all validation (excluding execution
|
||||
// node verification of the block.body.execution_payload).
|
||||
// otherwise:
|
||||
// [IGNORE] The block's parent (defined by block.parent_root) passes all validation (including execution
|
||||
// node verification of the block.body.execution_payload).
|
||||
func (s *Service) validateBellatrixBeaconBlock(ctx context.Context, parentState state.BeaconState, blk interfaces.BeaconBlock) error {
|
||||
// Error if block and state are not the same version
|
||||
if parentState.Version() != blk.Version() {
|
||||
|
||||
@@ -26,16 +26,6 @@ func (s *Service) validateProposerSlashing(ctx context.Context, pid peer.ID, msg
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
|
||||
// We should not attempt to process this message if the node is running in optimistic mode.
|
||||
// We just ignore in p2p so that the peer is not penalized.
|
||||
optimistic, err := s.cfg.chain.IsOptimistic(ctx)
|
||||
if err != nil {
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
if optimistic {
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
|
||||
ctx, span := trace.StartSpan(ctx, "sync.validateProposerSlashing")
|
||||
defer span.End()
|
||||
|
||||
|
||||
@@ -209,33 +209,3 @@ func TestValidateProposerSlashing_Syncing(t *testing.T) {
|
||||
valid := res == pubsub.ValidationAccept
|
||||
assert.Equal(t, false, valid, "Did not fail validation")
|
||||
}
|
||||
|
||||
func TestValidateProposerSlashing_Optimistic(t *testing.T) {
|
||||
p := p2ptest.NewTestP2P(t)
|
||||
ctx := context.Background()
|
||||
|
||||
slashing, s := setupValidProposerSlashing(t)
|
||||
|
||||
r := &Service{
|
||||
cfg: &config{
|
||||
p2p: p,
|
||||
chain: &mock.ChainService{State: s, Optimistic: true},
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
},
|
||||
}
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
_, err := p.Encoding().EncodeGossip(buf, slashing)
|
||||
require.NoError(t, err)
|
||||
topic := p2p.GossipTypeMapping[reflect.TypeOf(slashing)]
|
||||
m := &pubsub.Message{
|
||||
Message: &pubsubpb.Message{
|
||||
Data: buf.Bytes(),
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
res, err := r.validateProposerSlashing(ctx, "", m)
|
||||
assert.NoError(t, err)
|
||||
valid := res == pubsub.ValidationIgnore
|
||||
assert.Equal(t, true, valid, "Did not ignore the message")
|
||||
}
|
||||
|
||||
@@ -57,16 +57,6 @@ func (s *Service) validateSyncCommitteeMessage(
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
|
||||
// We should not attempt to process this message if the node is running in optimistic mode.
|
||||
// We just ignore in p2p so that the peer is not penalized.
|
||||
optimistic, err := s.cfg.chain.IsOptimistic(ctx)
|
||||
if err != nil {
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
if optimistic {
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
|
||||
if msg.Topic == nil {
|
||||
return pubsub.ValidationReject, errInvalidTopic
|
||||
}
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
@@ -562,34 +561,3 @@ func Test_ignoreEmptyCommittee(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateSyncCommitteeMessage_Optimistic(t *testing.T) {
|
||||
p := mockp2p.NewTestP2P(t)
|
||||
ctx := context.Background()
|
||||
|
||||
slashing, s := setupValidAttesterSlashing(t)
|
||||
|
||||
r := &Service{
|
||||
cfg: &config{
|
||||
p2p: p,
|
||||
chain: &mockChain.ChainService{State: s, Optimistic: true},
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
},
|
||||
}
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
_, err := p.Encoding().EncodeGossip(buf, slashing)
|
||||
require.NoError(t, err)
|
||||
|
||||
topic := p2p.GossipTypeMapping[reflect.TypeOf(slashing)]
|
||||
msg := &pubsub.Message{
|
||||
Message: &pubsubpb.Message{
|
||||
Data: buf.Bytes(),
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
res, err := r.validateCommitteeIndexBeaconAttestation(ctx, "foobar", msg)
|
||||
assert.NoError(t, err)
|
||||
valid := res == pubsub.ValidationIgnore
|
||||
assert.Equal(t, true, valid, "Should have ignore this message")
|
||||
}
@@ -52,16 +52,6 @@ func (s *Service) validateSyncContributionAndProof(ctx context.Context, pid peer
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
|
||||
// We should not attempt to process this message if the node is running in optimistic mode.
|
||||
// We just ignore in p2p so that the peer is not penalized.
|
||||
optimistic, err := s.cfg.chain.IsOptimistic(ctx)
|
||||
if err != nil {
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
if optimistic {
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
|
||||
m, err := s.readSyncContributionMessage(msg)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
|
||||
@@ -1,10 +1,8 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -1029,37 +1027,6 @@ func TestValidateSyncContributionAndProof(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateSyncContributionAndProof_Optimistic(t *testing.T) {
|
||||
p := mockp2p.NewTestP2P(t)
|
||||
ctx := context.Background()
|
||||
|
||||
slashing, s := setupValidAttesterSlashing(t)
|
||||
|
||||
r := &Service{
|
||||
cfg: &config{
|
||||
p2p: p,
|
||||
chain: &mockChain.ChainService{State: s, Optimistic: true},
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
},
|
||||
}
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
_, err := p.Encoding().EncodeGossip(buf, slashing)
|
||||
require.NoError(t, err)
|
||||
|
||||
topic := p2p.GossipTypeMapping[reflect.TypeOf(slashing)]
|
||||
msg := &pubsub.Message{
|
||||
Message: &pubsubpb.Message{
|
||||
Data: buf.Bytes(),
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
res, err := r.validateCommitteeIndexBeaconAttestation(ctx, "foobar", msg)
|
||||
assert.NoError(t, err)
|
||||
valid := res == pubsub.ValidationIgnore
|
||||
assert.Equal(t, true, valid, "Should have ignore this message")
|
||||
}
|
||||
|
||||
func fillUpBlocksAndState(ctx context.Context, t *testing.T, beaconDB db.Database) ([32]byte, []bls.SecretKey) {
|
||||
gs, keys := util.DeterministicGenesisStateAltair(t, 64)
|
||||
sCom, err := altair.NextSyncCommittee(ctx, gs)
|
||||
|
||||
@@ -29,16 +29,6 @@ func (s *Service) validateVoluntaryExit(ctx context.Context, pid peer.ID, msg *p
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
|
||||
// We should not attempt to process this message if the node is running in optimistic mode.
|
||||
// We just ignore in p2p so that the peer is not penalized.
|
||||
optimistic, err := s.cfg.chain.IsOptimistic(ctx)
|
||||
if err != nil {
|
||||
return pubsub.ValidationReject, err
|
||||
}
|
||||
if optimistic {
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
|
||||
ctx, span := trace.StartSpan(ctx, "sync.validateVoluntaryExit")
|
||||
defer span.End()
|
||||
|
||||
|
||||
@@ -195,35 +195,3 @@ func TestValidateVoluntaryExit_ValidExit_Syncing(t *testing.T) {
|
||||
valid := res == pubsub.ValidationAccept
|
||||
assert.Equal(t, false, valid, "Validation should have failed")
|
||||
}
|
||||
|
||||
func TestValidateVoluntaryExit_Optimistic(t *testing.T) {
|
||||
p := p2ptest.NewTestP2P(t)
|
||||
ctx := context.Background()
|
||||
|
||||
exit, s := setupValidExit(t)
|
||||
|
||||
r := &Service{
|
||||
cfg: &config{
|
||||
p2p: p,
|
||||
chain: &mock.ChainService{
|
||||
State: s,
|
||||
Optimistic: true,
|
||||
},
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
},
|
||||
}
|
||||
buf := new(bytes.Buffer)
|
||||
_, err := p.Encoding().EncodeGossip(buf, exit)
|
||||
require.NoError(t, err)
|
||||
topic := p2p.GossipTypeMapping[reflect.TypeOf(exit)]
|
||||
m := &pubsub.Message{
|
||||
Message: &pubsubpb.Message{
|
||||
Data: buf.Bytes(),
|
||||
Topic: &topic,
|
||||
},
|
||||
}
|
||||
res, err := r.validateVoluntaryExit(ctx, "", m)
|
||||
assert.NoError(t, err)
|
||||
valid := res == pubsub.ValidationIgnore
|
||||
assert.Equal(t, true, valid, "Validation should have ignored the message")
|
||||
}
|
||||
|
||||
@@ -23,9 +23,11 @@ func FlagOptions(c *cli.Context) ([]execution.Option, error) {
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not read JWT secret file for authenticating execution API")
|
||||
}
|
||||
headers := strings.Split(c.String(flags.ExecutionEngineHeaders.Name), ",")
|
||||
opts := []execution.Option{
|
||||
execution.WithHttpEndpoint(endpoint),
|
||||
execution.WithEth1HeaderRequestLimit(c.Uint64(flags.Eth1HeaderReqLimit.Name)),
|
||||
execution.WithHeaders(headers),
|
||||
}
|
||||
if len(jwtSecret) > 0 {
|
||||
opts = append(opts, execution.WithHttpEndpointAndJWTSecret(endpoint, jwtSecret))
|
||||
|
||||
@@ -32,6 +32,12 @@ var (
|
||||
Usage: "An execution client http endpoint. Can contain auth header as well in the format",
|
||||
Value: "http://localhost:8551",
|
||||
}
|
||||
// ExecutionEngineHeaders defines a list of HTTP headers to send with all execution client requests.
|
||||
ExecutionEngineHeaders = &cli.StringFlag{
|
||||
Name: "execution-headers",
|
||||
Usage: "A comma separated list of key value pairs to pass as HTTP headers for all execution " +
|
||||
"client calls. Example: --execution-headers=key1=value1,key2=value2",
|
||||
}
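FlagOptions above only splits the flag value on commas before passing the headers along; a minimal sketch of the next step, splitting each entry into a key/value pair per the key=value form in the usage string (the helper name is hypothetical and assumes the strings package is imported):

// parseHeadersSketch turns "key1=value1,key2=value2" into a map.
func parseHeadersSketch(raw string) map[string]string {
	out := make(map[string]string)
	for _, kv := range strings.Split(raw, ",") {
		parts := strings.SplitN(kv, "=", 2)
		if len(parts) == 2 {
			out[parts[0]] = parts[1]
		}
	}
	return out
}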
// Deprecated: HTTPWeb3ProviderFlag is a deprecated flag and is an alias for the ExecutionEngineEndpoint flag.
|
||||
HTTPWeb3ProviderFlag = &cli.StringFlag{
|
||||
Name: "http-web3provider",
@@ -38,6 +38,7 @@ import (
|
||||
var appFlags = []cli.Flag{
|
||||
flags.DepositContractFlag,
|
||||
flags.ExecutionEngineEndpoint,
|
||||
flags.ExecutionEngineHeaders,
|
||||
flags.HTTPWeb3ProviderFlag,
|
||||
flags.ExecutionJWTSecretFlag,
|
||||
flags.RPCHost,
|
||||
@@ -188,6 +189,9 @@ func main() {
|
||||
if err := cmd.ExpandSingleEndpointIfFile(ctx, flags.ExecutionEngineEndpoint); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := cmd.ExpandSingleEndpointIfFile(ctx, flags.HTTPWeb3ProviderFlag); err != nil {
|
||||
return err
|
||||
}
|
||||
if ctx.IsSet(flags.SetGCPercent.Name) {
|
||||
runtimeDebug.SetGCPercent(ctx.Int(flags.SetGCPercent.Name))
|
||||
}
|
||||
|
||||
@@ -107,6 +107,7 @@ var appHelpFlagGroups = []flagGroup{
|
||||
flags.GRPCGatewayPort,
|
||||
flags.GPRCGatewayCorsDomain,
|
||||
flags.ExecutionEngineEndpoint,
|
||||
flags.ExecutionEngineHeaders,
|
||||
flags.HTTPWeb3ProviderFlag,
|
||||
flags.ExecutionJWTSecretFlag,
|
||||
flags.SetGCPercent,
@@ -4,6 +4,7 @@ import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
|
||||
@@ -73,12 +74,17 @@ func EnterPassword(confirmPassword bool, pr PasswordReader) (string, error) {
|
||||
return passphrase, nil
|
||||
}
// ExpandSingleEndpointIfFile expands the path for --http-web3provider if specified as a file.
|
||||
// ExpandSingleEndpointIfFile expands the path for --execution-provider if specified as a file.
|
||||
func ExpandSingleEndpointIfFile(ctx *cli.Context, flag *cli.StringFlag) error {
|
||||
// Return early if no flag value is set.
|
||||
if !ctx.IsSet(flag.Name) {
|
||||
return nil
|
||||
}
|
||||
// Return early for non-unix operating systems, as there is
|
||||
// no shell path expansion for ipc endpoints on windows.
|
||||
if runtime.GOOS == "windows" {
|
||||
return nil
|
||||
}
|
||||
web3endpoint := ctx.String(flag.Name)
|
||||
switch {
|
||||
case strings.HasPrefix(web3endpoint, "http://"):
@@ -11,9 +11,11 @@ go_library(
|
||||
importpath = "github.com/prysmaticlabs/prysm/v3/cmd/prysmctl",
|
||||
visibility = ["//visibility:private"],
|
||||
deps = [
|
||||
"//cmd/prysmctl/checkpoint:go_default_library",
|
||||
"//cmd/prysmctl/checkpointsync:go_default_library",
|
||||
"//cmd/prysmctl/deprecated:go_default_library",
|
||||
"//cmd/prysmctl/p2p:go_default_library",
|
||||
"//cmd/prysmctl/testnet:go_default_library",
|
||||
"//cmd/prysmctl/weaksubjectivity:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_urfave_cli_v2//:go_default_library",
|
||||
],
@@ -3,11 +3,10 @@ load("@prysm//tools/go:def.bzl", "go_library")
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"checkpoint.go",
|
||||
"latest.go",
|
||||
"save.go",
|
||||
"cmd.go",
|
||||
"download.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v3/cmd/prysmctl/checkpoint",
|
||||
importpath = "github.com/prysmaticlabs/prysm/v3/cmd/prysmctl/checkpointsync",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//api/client/beacon:go_default_library",
|
||||
cmd/prysmctl/checkpointsync/cmd.go (new file, 14 lines)
@@ -0,0 +1,14 @@
package checkpointsync

import "github.com/urfave/cli/v2"

var Commands = []*cli.Command{
	{
		Name:        "checkpoint-sync",
		Aliases:     []string{"cpt-sync"},
		Usage:       "commands for managing checkpoint sync",
		Subcommands: []*cli.Command{
			downloadCmd,
		},
	},
}
@@ -1,4 +1,4 @@
|
||||
package checkpoint
|
||||
package checkpointsync
|
|
||||
"context"
|
||||
@@ -10,37 +10,38 @@ import (
|
||||
"github.com/urfave/cli/v2"
|
||||
)
var saveFlags = struct {
|
||||
var downloadFlags = struct {
|
||||
BeaconNodeHost string
|
||||
Timeout time.Duration
|
||||
}{}
|
|
||||
Name: "save",
|
||||
Usage: "Save the latest finalized header and the most recent block it integrates. To be used for checkpoint sync.",
|
||||
Action: cliActionSave,
|
||||
var downloadCmd = &cli.Command{
|
||||
Name: "download",
|
||||
Aliases: []string{"dl"},
|
||||
Usage: "Download the latest finalized state and the most recent block it integrates. To be used for checkpoint sync.",
|
||||
Action: cliActionDownload,
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "beacon-node-host",
|
||||
Usage: "host:port for beacon node connection",
|
||||
Destination: &saveFlags.BeaconNodeHost,
|
||||
Destination: &downloadFlags.BeaconNodeHost,
|
||||
Value: "localhost:3500",
|
||||
},
|
||||
&cli.DurationFlag{
|
||||
Name: "http-timeout",
|
||||
Usage: "timeout for http requests made to beacon-node-url (uses duration format, ex: 2m31s). default: 4m",
|
||||
Destination: &saveFlags.Timeout,
|
||||
Destination: &downloadFlags.Timeout,
|
||||
Value: time.Minute * 4,
|
||||
},
|
||||
},
|
||||
}
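Given the command name, alias, and flags defined above, a representative invocation looks like this (the prysmctl binary name is taken from the import path):

prysmctl checkpoint-sync download --beacon-node-host=localhost:3500 --http-timeout=2m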
func cliActionSave(_ *cli.Context) error {
|
||||
func cliActionDownload(_ *cli.Context) error {
|
||||
ctx := context.Background()
|
||||
f := saveFlags
|
||||
f := downloadFlags
opts := []beacon.ClientOpt{beacon.WithTimeout(f.Timeout)}
|
||||
client, err := beacon.NewClient(saveFlags.BeaconNodeHost, opts...)
|
||||
client, err := beacon.NewClient(downloadFlags.BeaconNodeHost, opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cmd/prysmctl/deprecated/BUILD.bazel (new file, 12 lines)
@@ -0,0 +1,12 @@
load("@prysm//tools/go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["cmd.go"],
    importpath = "github.com/prysmaticlabs/prysm/v3/cmd/prysmctl/deprecated",
    visibility = ["//visibility:public"],
    deps = [
        "//cmd/prysmctl/deprecated/checkpoint:go_default_library",
        "@com_github_urfave_cli_v2//:go_default_library",
    ],
)
cmd/prysmctl/deprecated/checkpoint/BUILD.bazel (new file, 13 lines)
@@ -0,0 +1,13 @@
load("@prysm//tools/go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = [
        "checkpoint.go",
        "latest.go",
        "save.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/v3/cmd/prysmctl/deprecated/checkpoint",
    visibility = ["//visibility:public"],
    deps = ["@com_github_urfave_cli_v2//:go_default_library"],
)
Some files were not shown because too many files have changed in this diff