Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 22:07:59 -05:00)
Compare commits
21 Commits
expStateHa...debugging-
| Author | SHA1 | Date |
|---|---|---|
| | 146b4bc386 | |
| | 97fe7d3898 | |
| | 809cc049f0 | |
| | de52ebb70a | |
| | f7224413de | |
| | 277a3dd83f | |
| | 20c790bc76 | |
| | d36cefd16f | |
| | 14b4bb2ff9 | |
| | 964695b7a7 | |
| | 18d6b8a1f4 | |
| | 8fd2ea6955 | |
| | d6af5a3fe1 | |
| | 24001d3679 | |
| | 51935959d6 | |
| | 2edc281b84 | |
| | c468f650bf | |
| | 2c81dd95ec | |
| | 95d2a70bcb | |
| | 6e16bb5cd6 | |
| | 902d0295c1 | |
CHANGELOG.md (22 changes)
@@ -16,8 +16,6 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Ve
- Added SubmitPoolAttesterSlashingV2 endpoint.
- Added SubmitAggregateAndProofsRequestV2 endpoint.
- Updated the `beacon-chain/monitor` package to Electra. [PR](https://github.com/prysmaticlabs/prysm/pull/14562)
- Added ListAttestationsV2 endpoint.
- Add ability to roll back the node's internal state during processing.

### Changed

@@ -30,14 +28,6 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Ve
- Return false from HasBlock if the block is being synced.
- Clean up forkchoice on failed insertions.
- Use read-only validator for core processing to avoid unnecessary copying.
- Use ROBlock across the block processing pipeline.
- Added missing Eth-Consensus-Version headers to GetBlockAttestationsV2 and GetAttesterSlashingsV2 endpoints.
- Updated pgo profile for beacon chain with holesky data. This improves the profile-guided optimizations in the go compiler.
- Use read-only state when computing the active validator list.
- Simplified `ExitedValidatorIndices`.
- Simplified `EjectedValidatorIndices`.
- `engine_newPayloadV4` and `engine_getPayloadV4` changed due to new execution request serialization decisions. [PR](https://github.com/prysmaticlabs/prysm/pull/14580)

### Deprecated

@@ -46,22 +36,11 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Ve
### Removed

- Removed finalized validator index cache, no longer needed.
- Removed validator queue position log on key reload and wait for activation.

### Fixed

- Fixed mesh size by appending `gParams.Dhi = gossipSubDhi`
- Fix skipping partial withdrawals count.
- Wait for the async StreamEvent writer to exit before leaving the http handler, avoiding race condition panics. [pr](https://github.com/prysmaticlabs/prysm/pull/14557)
- Certain deb files were returning a 404, which made building new docker images without an existing cache impossible. This has been fixed with updates to rules_oci and bazel-lib.
- Fixed an issue where the length check between block body KZG commitments and the existing cache from the database was incompatible.
- Fix `--backfill-oldest-slot` handling - this flag was totally broken, the code would always backfill to the default slot. [pr](https://github.com/prysmaticlabs/prysm/pull/14584)
- Fix keymanager API to return a corrected error format for malformed tokens.
- Fix keymanager API so that get keys returns an empty response instead of a 500 error when using an unsupported keystore.
- Small log improvements, removing some redundant or duplicate logs.
- EIP7521 - Fixes withdrawal bug by accounting for pending partial withdrawals and deducting already withdrawn amounts from the sweep balance. [PR](https://github.com/prysmaticlabs/prysm/pull/14578)

### Security

@@ -115,7 +94,6 @@ Updating to this release is recommended at your convenience.
- fastssz version bump (better error messages).
- SSE implementation that sheds stuck clients. [pr](https://github.com/prysmaticlabs/prysm/pull/14413)
- Added GetPoolAttesterSlashingsV2 endpoint.
- Use engine API get-blobs for block subscriber to reduce block import latency and potentially reduce bandwidth.

### Changed

@@ -101,9 +101,9 @@ http_archive(

http_archive(
    name = "aspect_bazel_lib",
    sha256 = "a272d79bb0ac6b6965aa199b1f84333413452e87f043b53eca7f347a23a478e8",
    strip_prefix = "bazel-lib-2.9.3",
    url = "https://github.com/bazel-contrib/bazel-lib/releases/download/v2.9.3/bazel-lib-v2.9.3.tar.gz",
    sha256 = "f5ea76682b209cc0bd90d0f5a3b26d2f7a6a2885f0c5f615e72913f4805dbb0d",
    strip_prefix = "bazel-lib-2.5.0",
    url = "https://github.com/aspect-build/bazel-lib/releases/download/v2.5.0/bazel-lib-v2.5.0.tar.gz",
)

load("@aspect_bazel_lib//lib:repositories.bzl", "aspect_bazel_lib_dependencies", "aspect_bazel_lib_register_toolchains")

@@ -165,7 +165,7 @@ load("@rules_oci//oci:pull.bzl", "oci_pull")
oci_pull(
    name = "linux_debian11_multiarch_base", # Debian bullseye
    digest = "sha256:b82f113425c5b5c714151aaacd8039bc141821cdcd3c65202d42bdf9c43ae60b", # 2023-12-12
    image = "gcr.io/prysmaticlabs/distroless/cc-debian11",
    image = "gcr.io/distroless/cc-debian11",
    platforms = [
        "linux/amd64",
        "linux/arm64/v8",

@@ -21,8 +21,7 @@ type GetCommitteesResponse struct {
}

type ListAttestationsResponse struct {
    Version string `json:"version,omitempty"`
    Data json.RawMessage `json:"data"`
    Data []*Attestation `json:"data"`
}

type SubmitAttestationsRequest struct {

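One side of this hunk types `data` as `json.RawMessage`, deferring its decode so a single response type can carry fork-dependent attestation shapes; the other pins it to a concrete `[]*Attestation`. A minimal sketch of the deferred-decoding pattern, assuming hypothetical `phase0Att`/`electraAtt` shapes (not Prysm's actual types or handler):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Hypothetical mirror of the raw-message variant above; not Prysm's handler.
type listAttestationsResponse struct {
	Version string          `json:"version,omitempty"`
	Data    json.RawMessage `json:"data"`
}

// Minimal fork-dependent shapes, purely for illustration.
type phase0Att struct {
	AggregationBits string `json:"aggregation_bits"`
}

type electraAtt struct {
	AggregationBits string `json:"aggregation_bits"`
	CommitteeBits   string `json:"committee_bits"`
}

func decodeAttestations(body []byte) (any, error) {
	var resp listAttestationsResponse
	if err := json.Unmarshal(body, &resp); err != nil {
		return nil, err
	}
	// Deferring the Data decode is what lets one response struct serve
	// every fork; the typed []*Attestation variant fixes the shape instead.
	switch resp.Version {
	case "electra":
		var atts []electraAtt
		if err := json.Unmarshal(resp.Data, &atts); err != nil {
			return nil, err
		}
		return atts, nil
	default:
		var atts []phase0Att
		if err := json.Unmarshal(resp.Data, &atts); err != nil {
			return nil, err
		}
		return atts, nil
	}
}

func main() {
	body := []byte(`{"version":"electra","data":[{"aggregation_bits":"0x01","committee_bits":"0x02"}]}`)
	atts, err := decodeAttestations(body)
	fmt.Println(atts, err)
}
```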
@@ -12,7 +12,6 @@ import (
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
    fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/forkchoice"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -45,7 +44,7 @@ type ForkchoiceFetcher interface {
    UpdateHead(context.Context, primitives.Slot)
    HighestReceivedBlockSlot() primitives.Slot
    ReceivedBlocksLastEpoch() (uint64, error)
    InsertNode(context.Context, state.BeaconState, consensus_blocks.ROBlock) error
    InsertNode(context.Context, state.BeaconState, [32]byte) error
    ForkChoiceDump(context.Context) (*forkchoice.Dump, error)
    NewSlot(context.Context, primitives.Slot) error
    ProposerBoost() [32]byte
@@ -243,7 +242,7 @@ func (s *Service) HeadValidatorsIndices(ctx context.Context, epoch primitives.Ep
    if !s.hasHeadState() {
        return []primitives.ValidatorIndex{}, nil
    }
    return helpers.ActiveValidatorIndices(ctx, s.headStateReadOnly(ctx), epoch)
    return helpers.ActiveValidatorIndices(ctx, s.headState(ctx), epoch)
}

// HeadGenesisValidatorsRoot returns genesis validators root of the head state.

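The interface change above swaps an `ROBlock` parameter for a bare `[32]byte` root. The value of an `ROBlock`-style type is that it pairs a block with its root, computed once, so the two can never drift apart at a call site. A toy illustration of that pairing (simplified stand-ins, not Prysm's types; sha256 stands in for the SSZ hash tree root):

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// roBlock is a hypothetical, simplified stand-in for Prysm's ROBlock: it pairs
// an immutable block with its root, computed exactly once, so consumers can
// never see a root that disagrees with the block it came from.
type block struct{ slot uint64 }

type roBlock struct {
	blk  block
	root [32]byte
}

func newROBlock(b block) roBlock {
	// sha256 over the slot stands in for the real SSZ hash tree root.
	return roBlock{blk: b, root: sha256.Sum256([]byte{byte(b.slot)})}
}

func (r roBlock) Root() [32]byte { return r.root }
func (r roBlock) Block() block   { return r.blk }

func main() {
	rb := newROBlock(block{slot: 42})
	root := rb.Root()
	fmt.Printf("slot %d root %x\n", rb.Block().slot, root[:4])
}
```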
@@ -4,7 +4,6 @@ import (
    "context"

    "github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
    consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/forkchoice"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)
@@ -45,10 +44,10 @@ func (s *Service) ReceivedBlocksLastEpoch() (uint64, error) {
}

// InsertNode is a wrapper for node insertion which is self locked
func (s *Service) InsertNode(ctx context.Context, st state.BeaconState, block consensus_blocks.ROBlock) error {
func (s *Service) InsertNode(ctx context.Context, st state.BeaconState, root [32]byte) error {
    s.cfg.ForkChoiceStore.Lock()
    defer s.cfg.ForkChoiceStore.Unlock()
    return s.cfg.ForkChoiceStore.InsertNode(ctx, st, block)
    return s.cfg.ForkChoiceStore.InsertNode(ctx, st, root)
}

// ForkChoiceDump returns the corresponding value from forkchoice

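Both variants of the wrapper above keep the same shape: the service owns the store's lock, so callers never touch the store unlocked. A self-contained sketch of that self-locking pattern with a plain `sync.Mutex` (toy types, not Prysm's):

```go
package main

import (
	"fmt"
	"sync"
)

// store is a stand-in for the forkchoice store protected by the wrapper.
type store struct {
	nodes map[[32]byte]struct{}
}

// lockedStore mirrors Service.InsertNode above: the wrapper owns the mutex,
// so every access goes through a method that locks and unlocks correctly.
type lockedStore struct {
	mu sync.Mutex
	s  store
}

func (l *lockedStore) InsertNode(root [32]byte) {
	l.mu.Lock()
	// defer keeps the unlock paired with the lock on every return path.
	defer l.mu.Unlock()
	l.s.nodes[root] = struct{}{}
}

func main() {
	l := &lockedStore{s: store{nodes: make(map[[32]byte]struct{})}}
	l.InsertNode([32]byte{'a'})
	fmt.Println(len(l.s.nodes))
}
```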
@@ -13,7 +13,6 @@ import (
    fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
    consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
    enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
@@ -39,7 +38,7 @@ func prepareForkchoiceState(
    payloadHash [32]byte,
    justified *ethpb.Checkpoint,
    finalized *ethpb.Checkpoint,
) (state.BeaconState, consensus_blocks.ROBlock, error) {
) (state.BeaconState, [32]byte, error) {
    blockHeader := &ethpb.BeaconBlockHeader{
        ParentRoot: parentRoot[:],
    }
@@ -60,26 +59,7 @@ func prepareForkchoiceState(

    base.BlockRoots[0] = append(base.BlockRoots[0], blockRoot[:]...)
    st, err := state_native.InitializeFromProtoBellatrix(base)
    if err != nil {
        return nil, consensus_blocks.ROBlock{}, err
    }
    blk := &ethpb.SignedBeaconBlockBellatrix{
        Block: &ethpb.BeaconBlockBellatrix{
            Slot: slot,
            ParentRoot: parentRoot[:],
            Body: &ethpb.BeaconBlockBodyBellatrix{
                ExecutionPayload: &enginev1.ExecutionPayload{
                    BlockHash: payloadHash[:],
                },
            },
        },
    }
    signed, err := blocks.NewSignedBeaconBlock(blk)
    if err != nil {
        return nil, consensus_blocks.ROBlock{}, err
    }
    roblock, err := consensus_blocks.NewROBlockWithRoot(signed, blockRoot)
    return st, roblock, err
    return st, blockRoot, err
}

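The two variants of this test helper differ only in what they hand back: one wraps the signed block with its precomputed root via `NewROBlockWithRoot`, the other returns just the root. A toy sketch of the two fixture contracts (illustrative names; sha256 stands in for the hash tree root):

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// Toy versions of the two fixture contracts seen in prepareForkchoiceState.
type testBlock struct{ slot uint64 }

// Wrapped variant: return the block together with its precomputed root, so
// the test can insert both into the store without re-hashing.
func makeBlockWithRoot(slot uint64) (testBlock, [32]byte) {
	b := testBlock{slot: slot}
	return b, sha256.Sum256([]byte{byte(slot)})
}

// Root-only variant: the store is keyed purely by root, so the fixture can
// drop the block entirely.
func makeBlockRoot(slot uint64) [32]byte {
	return sha256.Sum256([]byte{byte(slot)})
}

func main() {
	_, r1 := makeBlockWithRoot(1)
	r2 := makeBlockRoot(1)
	fmt.Println(r1 == r2) // true: both contracts agree on the root
}
```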
func TestHeadRoot_Nil(t *testing.T) {
@@ -142,9 +122,9 @@ func TestUnrealizedJustifiedBlockHash(t *testing.T) {
    service := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
    ojc := &ethpb.Checkpoint{Root: []byte{'j'}}
    ofc := &ethpb.Checkpoint{Root: []byte{'f'}}
    st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
    st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
    require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
    service.cfg.ForkChoiceStore.SetBalancesByRooter(func(_ context.Context, _ [32]byte) ([]uint64, error) { return []uint64{}, nil })
    require.NoError(t, service.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{Epoch: 6, Root: [32]byte{'j'}}))

@@ -336,24 +316,24 @@ func TestService_ChainHeads(t *testing.T) {
    c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
    ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
    st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
    st, roblock, err = prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
    st, blkRoot, err = prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
    st, roblock, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
    st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
    st, roblock, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
    st, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
    st, roblock, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
    st, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
    st, roblock, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
    st, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))

    roots, slots := c.ChainHeads()
    require.Equal(t, 3, len(roots))
@@ -433,12 +413,12 @@ func TestService_IsOptimistic(t *testing.T) {
    ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
    st, roblock, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
    st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
    st, roblock, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
    st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))

    opt, err := c.IsOptimistic(ctx)
    require.NoError(t, err)
@@ -469,12 +449,12 @@ func TestService_IsOptimisticForRoot(t *testing.T) {
    c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
    ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    st, roblock, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
    st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
    st, roblock, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
    st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))

    opt, err := c.IsOptimisticForRoot(ctx, [32]byte{'a'})
    require.NoError(t, err)

@@ -1135,14 +1135,9 @@ func TestComputePayloadAttribute(t *testing.T) {
    // Cache hit, advance state, no fee recipient
    slot := primitives.Slot(1)
    service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
    blk := util.NewBeaconBlockBellatrix()
    signed, err := consensusblocks.NewSignedBeaconBlock(blk)
    require.NoError(t, err)
    roblock, err := consensusblocks.NewROBlockWithRoot(signed, [32]byte{'a'})
    require.NoError(t, err)
    cfg := &postBlockProcessConfig{
        ctx: ctx,
        roblock: roblock,
        ctx: ctx,
        blockRoot: [32]byte{'a'},
    }
    fcu := &fcuConfig{
        headState: st,

@@ -20,16 +20,17 @@ func Verify(sidecars ...blocks.ROBlob) error {
    cmts := make([]GoKZG.KZGCommitment, len(sidecars))
    proofs := make([]GoKZG.KZGProof, len(sidecars))
    for i, sidecar := range sidecars {
        blobs[i] = bytesToBlob(sidecar.Blob)
        blobs[i] = *bytesToBlob(sidecar.Blob)
        cmts[i] = bytesToCommitment(sidecar.KzgCommitment)
        proofs[i] = bytesToKZGProof(sidecar.KzgProof)
    }
    return kzgContext.VerifyBlobKZGProofBatch(blobs, cmts, proofs)
}

func bytesToBlob(blob []byte) (ret GoKZG.Blob) {
func bytesToBlob(blob []byte) *GoKZG.Blob {
    var ret GoKZG.Blob
    copy(ret[:], blob)
    return
    return &ret
}

func bytesToCommitment(commitment []byte) (ret GoKZG.KZGCommitment) {

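`GoKZG.Blob` is a large fixed-size byte array, so returning `*GoKZG.Blob` and passing blobs by pointer (as this hunk and the ones below do) avoids copying the whole array at every call boundary. A toy comparison; the 131072-byte size is illustrative of a 4096-field-element blob, not a claim about the library's exact layout:

```go
package main

import "fmt"

// blob stands in for a large fixed-size array like GoKZG.Blob.
type blob [131072]byte

// Passing by value copies the full 128 KiB array on every call.
func firstByteByValue(b blob) byte { return b[0] }

// Passing by pointer copies only the pointer; b[0] auto-dereferences.
func firstByteByPointer(b *blob) byte { return b[0] }

func main() {
	var b blob
	b[0] = 7
	fmt.Println(firstByteByValue(b), firstByteByPointer(&b))
}
```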
@@ -47,11 +47,11 @@ func GetRandBlob(seed int64) GoKZG.Blob {
}

func GenerateCommitmentAndProof(blob GoKZG.Blob) (GoKZG.KZGCommitment, GoKZG.KZGProof, error) {
    commitment, err := kzgContext.BlobToKZGCommitment(blob, 0)
    commitment, err := kzgContext.BlobToKZGCommitment(&blob, 0)
    if err != nil {
        return GoKZG.KZGCommitment{}, GoKZG.KZGProof{}, err
    }
    proof, err := kzgContext.ComputeBlobKZGProof(blob, commitment, 0)
    proof, err := kzgContext.ComputeBlobKZGProof(&blob, commitment, 0)
    if err != nil {
        return GoKZG.KZGCommitment{}, GoKZG.KZGProof{}, err
    }
@@ -68,7 +68,7 @@ func TestBytesToAny(t *testing.T) {
    blob := GoKZG.Blob{0x01, 0x02}
    commitment := GoKZG.KZGCommitment{0x01, 0x02}
    proof := GoKZG.KZGProof{0x01, 0x02}
    require.DeepEqual(t, blob, bytesToBlob(bytes))
    require.DeepEqual(t, blob, *bytesToBlob(bytes))
    require.DeepEqual(t, commitment, bytesToCommitment(bytes))
    require.DeepEqual(t, proof, bytesToKZGProof(bytes))
}

@@ -32,18 +32,18 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
    util.SaveBlock(t, ctx, beaconDB, blkWithoutState)

    cp := &ethpb.Checkpoint{}
    st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
    st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
    require.NoError(t, err)
    require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
    require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))

    blkWithStateBadAtt := util.NewBeaconBlock()
    blkWithStateBadAtt.Block.Slot = 1
    r, err := blkWithStateBadAtt.Block.HashTreeRoot()
    require.NoError(t, err)
    cp = &ethpb.Checkpoint{Root: r[:]}
    st, roblock, err = prepareForkchoiceState(ctx, blkWithStateBadAtt.Block.Slot, r, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
    st, blkRoot, err = prepareForkchoiceState(ctx, blkWithStateBadAtt.Block.Slot, r, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
    require.NoError(t, err)
    require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
    require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
    util.SaveBlock(t, ctx, beaconDB, blkWithStateBadAtt)
    BlkWithStateBadAttRoot, err := blkWithStateBadAtt.Block.HashTreeRoot()
    require.NoError(t, err)
@@ -139,9 +139,9 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
    require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
    ojc := &ethpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
    ofc := &ethpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
    state, roblock, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
    state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, roblock))
    require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
    require.NoError(t, service.OnAttestation(ctx, att[0], 0))
}

@@ -318,9 +318,10 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
    require.NoError(t, err)
    checkpoint := &ethpb.Checkpoint{Epoch: epoch, Root: r1[:]}
    require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(checkpoint.Root)))
    st, roblock, err := prepareForkchoiceState(ctx, blk.Block.Slot, r1, [32]byte{}, params.BeaconConfig().ZeroHash, checkpoint, checkpoint)
    st, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r1, [32]byte{}, params.BeaconConfig().ZeroHash, checkpoint, checkpoint)
    require.NoError(t, err)
    require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
    require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
    require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, r1))
    returned, err := service.getAttPreState(ctx, checkpoint)
    require.NoError(t, err)
    assert.Equal(t, params.BeaconConfig().SlotsPerEpoch.Mul(uint64(checkpoint.Epoch)), returned.Slot(), "Incorrectly returned base state")
@@ -336,9 +337,10 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
    require.NoError(t, err)
    newCheckpoint := &ethpb.Checkpoint{Epoch: epoch, Root: r2[:]}
    require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(newCheckpoint.Root)))
    st, roblock, err = prepareForkchoiceState(ctx, blk.Block.Slot, r2, r1, params.BeaconConfig().ZeroHash, newCheckpoint, newCheckpoint)
    st, blkRoot, err = prepareForkchoiceState(ctx, blk.Block.Slot, r2, r1, params.BeaconConfig().ZeroHash, newCheckpoint, newCheckpoint)
    require.NoError(t, err)
    require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
    require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
    require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, r2))
    returned, err = service.getAttPreState(ctx, newCheckpoint)
    require.NoError(t, err)
    s, err := slots.EpochStart(newCheckpoint.Epoch)

@@ -46,7 +46,8 @@ var initialSyncBlockCacheSize = uint64(2 * params.BeaconConfig().SlotsPerEpoch)
// process the beacon block after validating the state transition function
type postBlockProcessConfig struct {
    ctx context.Context
    roblock consensusblocks.ROBlock
    signed interfaces.ReadOnlySignedBeaconBlock
    blockRoot [32]byte
    headRoot [32]byte
    postState state.BeaconState
    isValidPayload bool
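Because `postBlockProcessConfig` is initialized positionally throughout the tests later in this compare, swapping the single `roblock` field for the `signed`/`blockRoot` pair forces every literal to change. A small sketch of why named-field literals are more refactor-proof (toy type, not Prysm's):

```go
package main

import "fmt"

// cfg mirrors the shape change above with simplified fields.
type cfg struct {
	name    string
	root    [32]byte
	isValid bool
}

func main() {
	// Positional literals break (or silently misassign) whenever fields are
	// added, removed, or reordered, which is why every call site below changed:
	a := cfg{"blk", [32]byte{'a'}, true}

	// Named-field literals survive reordering and make intent explicit:
	b := cfg{name: "blk", root: [32]byte{'a'}, isValid: true}

	fmt.Println(a == b) // true: same value, sturdier syntax
}
```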
@@ -60,7 +61,7 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
    ctx, span := trace.StartSpan(cfg.ctx, "blockChain.onBlock")
    defer span.End()
    cfg.ctx = ctx
    if err := consensusblocks.BeaconBlockIsNil(cfg.roblock); err != nil {
    if err := consensusblocks.BeaconBlockIsNil(cfg.signed); err != nil {
        return invalidBlock{error: err}
    }
    startTime := time.Now()
@@ -72,20 +73,19 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
    defer s.sendLightClientFeeds(cfg)
    defer s.sendStateFeedOnBlock(cfg)
    defer reportProcessingTime(startTime)
    defer reportAttestationInclusion(cfg.roblock.Block())
    defer reportAttestationInclusion(cfg.signed.Block())

    err := s.cfg.ForkChoiceStore.InsertNode(ctx, cfg.postState, cfg.roblock)
    err := s.cfg.ForkChoiceStore.InsertNode(ctx, cfg.postState, cfg.blockRoot)
    if err != nil {
        s.rollbackBlock(ctx, cfg.roblock.Root())
        return errors.Wrapf(err, "could not insert block %d to fork choice store", cfg.roblock.Block().Slot())
        return errors.Wrapf(err, "could not insert block %d to fork choice store", cfg.signed.Block().Slot())
    }
    if err := s.handleBlockAttestations(ctx, cfg.roblock.Block(), cfg.postState); err != nil {
    if err := s.handleBlockAttestations(ctx, cfg.signed.Block(), cfg.postState); err != nil {
        return errors.Wrap(err, "could not handle block's attestations")
    }

    s.InsertSlashingsToForkChoiceStore(ctx, cfg.roblock.Block().Body().AttesterSlashings())
    s.InsertSlashingsToForkChoiceStore(ctx, cfg.signed.Block().Body().AttesterSlashings())
    if cfg.isValidPayload {
        if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, cfg.roblock.Root()); err != nil {
        if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, cfg.blockRoot); err != nil {
            return errors.Wrap(err, "could not set optimistic block to valid")
        }
    }
@@ -95,8 +95,8 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
        log.WithError(err).Warn("Could not update head")
    }
    newBlockHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
    if cfg.headRoot != cfg.roblock.Root() {
        s.logNonCanonicalBlockReceived(cfg.roblock.Root(), cfg.headRoot)
    if cfg.headRoot != cfg.blockRoot {
        s.logNonCanonicalBlockReceived(cfg.blockRoot, cfg.headRoot)
        return nil
    }
    if err := s.getFCUArgs(cfg, fcuArgs); err != nil {
@@ -154,7 +154,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
    }

    // Fill in missing blocks
    if err := s.fillInForkChoiceMissingBlocks(ctx, blks[0], preState.CurrentJustifiedCheckpoint(), preState.FinalizedCheckpoint()); err != nil {
    if err := s.fillInForkChoiceMissingBlocks(ctx, blks[0].Block(), preState.CurrentJustifiedCheckpoint(), preState.FinalizedCheckpoint()); err != nil {
        return errors.Wrap(err, "could not fill in missing blocks to forkchoice")
    }

@@ -234,7 +234,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
    if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), b); err != nil {
        return errors.Wrapf(err, "could not validate blob data availability at slot %d", b.Block().Slot())
    }
    args := &forkchoicetypes.BlockAndCheckpoints{Block: b,
    args := &forkchoicetypes.BlockAndCheckpoints{Block: b.Block(),
        JustifiedCheckpoint: jCheckpoints[i],
        FinalizedCheckpoint: fCheckpoints[i]}
    pendingNodes[len(blks)-i-1] = args
@@ -279,7 +279,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
        return errors.Wrap(err, "could not insert batch to forkchoice")
    }
    // Insert the last block to forkchoice
    if err := s.cfg.ForkChoiceStore.InsertNode(ctx, preState, lastB); err != nil {
    if err := s.cfg.ForkChoiceStore.InsertNode(ctx, preState, lastBR); err != nil {
        return errors.Wrap(err, "could not insert last block in batch to forkchoice")
    }
    // Set their optimistic status
@@ -688,15 +688,3 @@ func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, bl
    }
    return err
}

// In the event of an issue processing a block we rollback changes done to the db and our caches
// to always ensure that the node's internal state is consistent.
func (s *Service) rollbackBlock(ctx context.Context, blockRoot [32]byte) {
    log.Warnf("Rolling back insertion of block with root %#x due to processing error", blockRoot)
    if err := s.cfg.StateGen.DeleteStateFromCaches(ctx, blockRoot); err != nil {
        log.WithError(err).Errorf("Could not delete state from caches with block root %#x", blockRoot)
    }
    if err := s.cfg.BeaconDB.DeleteBlock(ctx, blockRoot); err != nil {
        log.WithError(err).Errorf("Could not delete block with block root %#x", blockRoot)
    }
}

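`postBlockProcess` registers several defers before touching forkchoice, and Go runs deferred calls in LIFO order on every return path, including the early error returns above. A minimal demonstration of that ordering:

```go
package main

import "fmt"

// Deferred calls run last-in-first-out on return, so with the registration
// order used in postBlockProcess, the attestation-inclusion report fires
// first and the light client feeds fire last.
func process() {
	defer fmt.Println("sendLightClientFeeds")       // registered first, runs last
	defer fmt.Println("sendStateFeedOnBlock")
	defer fmt.Println("reportProcessingTime")
	defer fmt.Println("reportAttestationInclusion") // registered last, runs first
	fmt.Println("insert into forkchoice, handle attestations")
}

func main() {
	process()
}
```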
@@ -18,7 +18,6 @@ import (
    "github.com/prysmaticlabs/prysm/v5/config/features"
    field_params "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
@@ -43,7 +42,7 @@ func (s *Service) getFCUArgs(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) er
    if !s.inRegularSync() {
        return nil
    }
    slot := cfg.roblock.Block().Slot()
    slot := cfg.signed.Block().Slot()
    if slots.WithinVotingWindow(uint64(s.genesisTime.Unix()), slot) {
        return nil
    }
@@ -51,9 +50,9 @@
}

func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
    if cfg.roblock.Root() == cfg.headRoot {
    if cfg.blockRoot == cfg.headRoot {
        fcuArgs.headState = cfg.postState
        fcuArgs.headBlock = cfg.roblock
        fcuArgs.headBlock = cfg.signed
        fcuArgs.headRoot = cfg.headRoot
        fcuArgs.proposingSlot = s.CurrentSlot() + 1
        return nil
@@ -97,7 +96,7 @@ func (s *Service) fcuArgsNonCanonicalBlock(cfg *postBlockProcessConfig, fcuArgs

// sendStateFeedOnBlock sends an event that a new block has been synced
func (s *Service) sendStateFeedOnBlock(cfg *postBlockProcessConfig) {
    optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(cfg.roblock.Root())
    optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(cfg.blockRoot)
    if err != nil {
        log.WithError(err).Debug("Could not check if block is optimistic")
        optimistic = true
@@ -106,9 +105,9 @@
    s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
        Type: statefeed.BlockProcessed,
        Data: &statefeed.BlockProcessedData{
            Slot: cfg.roblock.Block().Slot(),
            BlockRoot: cfg.roblock.Root(),
            SignedBlock: cfg.roblock,
            Slot: cfg.signed.Block().Slot(),
            BlockRoot: cfg.blockRoot,
            SignedBlock: cfg.signed,
            Verified: true,
            Optimistic: optimistic,
        },
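Consumers receive the `BlockProcessed` event built above through the state feed. A minimal sketch of the notifier idea, using a plain buffered channel in place of Prysm's `feed` package (names are illustrative):

```go
package main

import "fmt"

// blockProcessedData is a simplified stand-in for the event payload above.
type blockProcessedData struct {
	Slot       uint64
	BlockRoot  [32]byte
	Optimistic bool
}

func main() {
	events := make(chan blockProcessedData, 1)

	// Producer side: conceptually what sendStateFeedOnBlock does.
	events <- blockProcessedData{Slot: 7, BlockRoot: [32]byte{'b'}, Optimistic: true}
	close(events)

	// Consumer side: a subscriber draining the feed.
	for ev := range events {
		fmt.Printf("processed slot %d root %x optimistic=%v\n",
			ev.Slot, ev.BlockRoot[:2], ev.Optimistic)
	}
}
```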
@@ -118,7 +117,7 @@
// sendLightClientFeeds sends the light client feeds when feature flag is enabled.
func (s *Service) sendLightClientFeeds(cfg *postBlockProcessConfig) {
    if features.Get().EnableLightClient {
        if _, err := s.sendLightClientOptimisticUpdate(cfg.ctx, cfg.roblock, cfg.postState); err != nil {
        if _, err := s.sendLightClientOptimisticUpdate(cfg.ctx, cfg.signed, cfg.postState); err != nil {
            log.WithError(err).Error("Failed to send light client optimistic update")
        }

@@ -126,7 +125,7 @@ func (s *Service) sendLightClientFeeds(cfg *postBlockProcessConfig) {
        finalized := s.ForkChoicer().FinalizedCheckpoint()

        // LightClientFinalityUpdate needs super majority
        s.tryPublishLightClientFinalityUpdate(cfg.ctx, cfg.roblock, finalized, cfg.postState)
        s.tryPublishLightClientFinalityUpdate(cfg.ctx, cfg.signed, finalized, cfg.postState)
    }
}

@@ -253,21 +252,20 @@ func (s *Service) sendLightClientOptimisticUpdate(ctx context.Context, signed in
// before sending FCU to the engine.
func (s *Service) updateCachesPostBlockProcessing(cfg *postBlockProcessConfig) error {
    slot := cfg.postState.Slot()
    root := cfg.roblock.Root()
    if err := transition.UpdateNextSlotCache(cfg.ctx, root[:], cfg.postState); err != nil {
    if err := transition.UpdateNextSlotCache(cfg.ctx, cfg.blockRoot[:], cfg.postState); err != nil {
        return errors.Wrap(err, "could not update next slot state cache")
    }
    if !slots.IsEpochEnd(slot) {
        return nil
    }
    return s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, root[:])
    return s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, cfg.blockRoot[:])
}

// handleSecondFCUCall handles a second call to FCU when syncing a new block.
// This is useful when proposing in the next block and we want to defer the
// computation of the next slot shuffling.
func (s *Service) handleSecondFCUCall(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
    if (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) && cfg.headRoot == cfg.roblock.Root() {
    if (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) && cfg.headRoot == cfg.blockRoot {
        go s.sendFCUWithAttributes(cfg, fcuArgs)
    }
}
@@ -283,7 +281,7 @@ func reportProcessingTime(startTime time.Time) {
// called on blocks that arrive after the attestation voting window, or in a
// background routine after syncing early blocks.
func (s *Service) computePayloadAttributes(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
    if cfg.roblock.Root() == cfg.headRoot {
    if cfg.blockRoot == cfg.headRoot {
        if err := s.updateCachesPostBlockProcessing(cfg); err != nil {
            return err
        }
@@ -440,7 +438,7 @@ func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot primitives.

// This retrieves missing blocks from DB (ie. the blocks that couldn't be received over sync) and inserts them to fork choice store.
// This is useful for block tree visualizer and additional vote accounting.
func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock,
func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfaces.ReadOnlyBeaconBlock,
    fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
    pendingNodes := make([]*forkchoicetypes.BlockAndCheckpoints, 0)

@@ -450,15 +448,10 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed inte
    if err != nil {
        return err
    }
    // The first block can have a bogus root since the block is not inserted in forkchoice
    roblock, err := consensus_blocks.NewROBlockWithRoot(signed, [32]byte{})
    if err != nil {
        return err
    }
    pendingNodes = append(pendingNodes, &forkchoicetypes.BlockAndCheckpoints{Block: roblock,
    pendingNodes = append(pendingNodes, &forkchoicetypes.BlockAndCheckpoints{Block: blk,
        JustifiedCheckpoint: jCheckpoint, FinalizedCheckpoint: fCheckpoint})
    // As long as parent node is not in fork choice store, and parent node is in DB.
    root := roblock.Block().ParentRoot()
    root := blk.ParentRoot()
    for !s.cfg.ForkChoiceStore.HasNode(root) && s.cfg.BeaconDB.HasBlock(ctx, root) {
        b, err := s.getBlock(ctx, root)
        if err != nil {
@@ -467,12 +460,8 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed inte
        if b.Block().Slot() <= fSlot {
            break
        }
        roblock, err := consensus_blocks.NewROBlockWithRoot(b, root)
        if err != nil {
            return err
        }
        root = b.Block().ParentRoot()
        args := &forkchoicetypes.BlockAndCheckpoints{Block: roblock,
        args := &forkchoicetypes.BlockAndCheckpoints{Block: b.Block(),
            JustifiedCheckpoint: jCheckpoint,
            FinalizedCheckpoint: fCheckpoint}
        pendingNodes = append(pendingNodes, args)

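Both variants of `fillInForkChoiceMissingBlocks` walk parent roots backwards until they reach a node forkchoice already knows or a block missing from the DB, queueing pending nodes as they go. A toy version of that ancestor walk, with maps standing in for the store and database (an approximation, not the real traversal's exact checks):

```go
package main

import "fmt"

// collectMissing follows parent links from start until the current root is
// already known to forkchoice or its parent is missing from the "DB".
func collectMissing(start [32]byte, parent map[[32]byte][32]byte, inForkchoice map[[32]byte]bool) [][32]byte {
	var pending [][32]byte
	root := start
	for {
		p, inDB := parent[root]
		if inForkchoice[root] || !inDB {
			break
		}
		pending = append(pending, root)
		root = p
	}
	return pending
}

func main() {
	a, b, c := [32]byte{'a'}, [32]byte{'b'}, [32]byte{'c'}
	parent := map[[32]byte][32]byte{c: b, b: a}
	known := map[[32]byte]bool{a: true} // a is already in forkchoice
	fmt.Println(len(collectMissing(c, parent, known))) // 2: c and b are pending
}
```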
@@ -145,8 +145,9 @@ func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
    require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
    fcp2 := &forkchoicetypes.Checkpoint{Epoch: 0, Root: r0}
    require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(fcp2))

    err = service.fillInForkChoiceMissingBlocks(
        context.Background(), wsb, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
        context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
    require.NoError(t, err)

    // 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
@@ -189,7 +190,7 @@ func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
    require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(fcp2))

    err = service.fillInForkChoiceMissingBlocks(
        context.Background(), wsb, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
        context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
    require.NoError(t, err)

    // 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
@@ -245,7 +246,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
    // Set finalized epoch to 2.
    require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 2, Root: r64}))
    err = service.fillInForkChoiceMissingBlocks(
        context.Background(), wsb, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
        context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
    require.NoError(t, err)

    // There should be 1 node: block 65
@@ -278,7 +279,7 @@ func TestFillForkChoiceMissingBlocks_FinalizedSibling(t *testing.T) {
    require.NoError(t, err)

    err = service.fillInForkChoiceMissingBlocks(
        context.Background(), wsb, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
        context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
    require.Equal(t, ErrNotDescendantOfFinalized.Error(), err.Error())
}

@@ -565,9 +566,7 @@ func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
    postState, err := service.validateStateTransition(ctx, preState, wsb)
    require.NoError(t, err)
    require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
    roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
    require.NoError(t, err)
    require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
    require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, r, [32]byte{}, postState, true}))
    require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
    _, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
    require.NoError(t, err)
@@ -615,9 +614,7 @@ func TestOnBlock_CanFinalize(t *testing.T) {
    postState, err := service.validateStateTransition(ctx, preState, wsb)
    require.NoError(t, err)
    require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
    roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
    require.NoError(t, err)
    require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
    require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, r, [32]byte{}, postState, true}))
    require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
    _, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
    require.NoError(t, err)
@@ -643,9 +640,7 @@ func TestOnBlock_CanFinalize(t *testing.T) {

func TestOnBlock_NilBlock(t *testing.T) {
    service, tr := minimalTestService(t)
    signed := &consensusblocks.SignedBeaconBlock{}
    roblock := consensusblocks.ROBlock{ReadOnlySignedBeaconBlock: signed}
    err := service.postBlockProcess(&postBlockProcessConfig{tr.ctx, roblock, [32]byte{}, nil, true})
    err := service.postBlockProcess(&postBlockProcessConfig{tr.ctx, nil, [32]byte{}, [32]byte{}, nil, true})
    require.Equal(t, true, IsInvalidBlock(err))
}

@@ -693,9 +688,7 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
    postState, err := service.validateStateTransition(ctx, preState, wsb)
    require.NoError(t, err)
    require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
    roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
    require.NoError(t, err)
    require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
    require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, r, [32]byte{}, postState, false}))
    testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
    require.NoError(t, err)
}
@@ -1121,9 +1114,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
    postState, err := service.validateStateTransition(ctx, preState, wsb1)
    require.NoError(t, err)
    lock.Lock()
    roblock, err := consensusblocks.NewROBlockWithRoot(wsb1, r1)
    require.NoError(t, err)
    require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
    require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb1, r1, [32]byte{}, postState, true}))
    lock.Unlock()
    wg.Done()
}()
@@ -1133,9 +1124,7 @@
    postState, err := service.validateStateTransition(ctx, preState, wsb2)
    require.NoError(t, err)
    lock.Lock()
    roblock, err := consensusblocks.NewROBlockWithRoot(wsb2, r2)
    require.NoError(t, err)
    require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
    require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb2, r2, [32]byte{}, postState, true}))
    lock.Unlock()
    wg.Done()
}()
@@ -1145,9 +1134,7 @@
    postState, err := service.validateStateTransition(ctx, preState, wsb3)
    require.NoError(t, err)
    lock.Lock()
    roblock, err := consensusblocks.NewROBlockWithRoot(wsb3, r3)
    require.NoError(t, err)
    require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
    require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb3, r3, [32]byte{}, postState, true}))
    lock.Unlock()
    wg.Done()
}()
@@ -1157,9 +1144,7 @@
    postState, err := service.validateStateTransition(ctx, preState, wsb4)
    require.NoError(t, err)
    lock.Lock()
    roblock, err := consensusblocks.NewROBlockWithRoot(wsb4, r4)
    require.NoError(t, err)
    require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
    require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb4, r4, [32]byte{}, postState, true}))
    lock.Unlock()
    wg.Done()
}()
@@ -1234,9 +1219,7 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
    postState, err := service.validateStateTransition(ctx, preState, wsb)
    require.NoError(t, err)
    require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
    roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
    require.NoError(t, err)
    require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
    require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
}

for i := 6; i < 12; i++ {
@@ -1254,9 +1237,7 @@
    postState, err := service.validateStateTransition(ctx, preState, wsb)
    require.NoError(t, err)
    require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
    roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
    require.NoError(t, err)
    err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
    err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
    require.NoError(t, err)
}

@@ -1275,9 +1256,7 @@
    postState, err := service.validateStateTransition(ctx, preState, wsb)
    require.NoError(t, err)
    require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
    roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
    require.NoError(t, err)
    err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
    err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
    require.NoError(t, err)
}
// Check that we haven't justified the second epoch yet
@@ -1299,9 +1278,7 @@
    postState, err := service.validateStateTransition(ctx, preState, wsb)
    require.NoError(t, err)
    require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
    roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
    require.NoError(t, err)
    err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
    err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, firstInvalidRoot, [32]byte{}, postState, false})
    require.NoError(t, err)
    jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
    require.Equal(t, primitives.Epoch(2), jc.Epoch)
@@ -1329,9 +1306,7 @@
    postState, err = service.validateStateTransition(ctx, preState, wsb)
    require.NoError(t, err)
    require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
    roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
    require.NoError(t, err)
    err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
    err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
    require.ErrorContains(t, "received an INVALID payload from execution engine", err)
    // Check that forkchoice's head is the last invalid block imported. The
    // store's headroot is the previous head (since the invalid block did
@@ -1360,9 +1335,7 @@
    postState, err = service.validateStateTransition(ctx, preState, wsb)
    require.NoError(t, err)
    require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
    roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
    require.NoError(t, err)
    err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
    err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, true})
    require.NoError(t, err)
    // Check the newly imported block is head, it justified the right
    // checkpoint and the node is no longer optimistic
@@ -1424,9 +1397,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
    postState, err := service.validateStateTransition(ctx, preState, wsb)
    require.NoError(t, err)
    require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
    roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
    require.NoError(t, err)
    require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
    require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
}

for i := 6; i < 12; i++ {
@@ -1444,9 +1415,7 @@
    postState, err := service.validateStateTransition(ctx, preState, wsb)
    require.NoError(t, err)
    require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
    roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
    require.NoError(t, err)
    err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
    err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
    require.NoError(t, err)
}

@@ -1466,9 +1435,7 @@
    postState, err := service.validateStateTransition(ctx, preState, wsb)
    require.NoError(t, err)
    require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
    roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
    require.NoError(t, err)
    err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
    err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
    require.NoError(t, err)
}
// Check that we haven't justified the second epoch yet
@@ -1490,9 +1457,7 @@
    postState, err := service.validateStateTransition(ctx, preState, wsb)
    require.NoError(t, err)
    require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
    roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
    require.NoError(t, err)
    err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
    err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, firstInvalidRoot, [32]byte{}, postState, false})
    require.NoError(t, err)
    jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
    require.Equal(t, primitives.Epoch(2), jc.Epoch)
@@ -1548,9 +1513,7 @@
    postState, err = service.validateStateTransition(ctx, preState, wsb)
    require.NoError(t, err)
    require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
    roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
    require.NoError(t, err)
    err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
    err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, true})
    require.NoError(t, err)
    // Check the newly imported block is head, it justified the right
    // checkpoint and the node is no longer optimistic
@@ -1615,9 +1578,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
    postState, err := service.validateStateTransition(ctx, preState, wsb)
    require.NoError(t, err)
    require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
    roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
    require.NoError(t, err)
    require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
    require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
}

for i := 6; i < 12; i++ {
@@ -1636,9 +1597,7 @@
    postState, err := service.validateStateTransition(ctx, preState, wsb)
    require.NoError(t, err)
    require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
    roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
    require.NoError(t, err)
    err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
    err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
    require.NoError(t, err)
}

@@ -1657,9 +1616,7 @@
    postState, err := service.validateStateTransition(ctx, preState, wsb)
    require.NoError(t, err)
    require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
    roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
    require.NoError(t, err)
    err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
    err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, lastValidRoot, [32]byte{}, postState, false})
    require.NoError(t, err)
    // save the post state and the payload Hash of this block since it will
    // be the LVH
@@ -1686,9 +1643,7 @@
    postState, err := service.validateStateTransition(ctx, preState, wsb)
    require.NoError(t, err)
    require.NoError(t, service.savePostStateInfo(ctx, invalidRoots[i-13], wsb, postState))
    roblock, err := consensusblocks.NewROBlockWithRoot(wsb, invalidRoots[i-13])
    require.NoError(t, err)
    err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
    err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, invalidRoots[i-13], [32]byte{}, postState, false})
    require.NoError(t, err)
}
// Check that we have justified the second epoch
@@ -1753,9 +1708,7 @@
    postState, err = service.validateStateTransition(ctx, preState, wsb)
    require.NoError(t, err)
    require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
    roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
    require.NoError(t, err)
    require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
    require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, true}))
    // Check that the head is still INVALID and the node is still optimistic
    require.Equal(t, invalidHeadRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
    optimistic, err = service.IsOptimistic(ctx)
@@ -1778,9 +1731,7 @@
    postState, err := service.validateStateTransition(ctx, preState, wsb)
    require.NoError(t, err)
    require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
    roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
    require.NoError(t, err)
    err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
    err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, true})
    require.NoError(t, err)
    st, err = service.cfg.StateGen.StateByRoot(ctx, root)
    require.NoError(t, err)
@@ -1806,9 +1757,7 @@
    postState, err = service.validateStateTransition(ctx, preState, wsb)
    require.NoError(t, err)
    require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
    roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
    require.NoError(t, err)
    err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
    err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, true})
    require.NoError(t, err)
    require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
    sjc = service.CurrentJustifiedCheckpt()
@@ -1864,9 +1813,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
    postState, err := service.validateStateTransition(ctx, preState, wsb)
    require.NoError(t, err)
    require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
    roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
    require.NoError(t, err)
    require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
    require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
}

for i := 6; i < 12; i++ {
@@ -1884,9 +1831,7 @@
    postState, err := service.validateStateTransition(ctx, preState, wsb)
    require.NoError(t, err)
    require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
    roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
    require.NoError(t, err)
    err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
    err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
    require.NoError(t, err)
}

@@ -1905,9 +1850,7 @@
    postState, err := service.validateStateTransition(ctx, preState, wsb)
    require.NoError(t, err)
    require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
    roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
    require.NoError(t, err)
    err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
    err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, lastValidRoot, [32]byte{}, postState, false})
    require.NoError(t, err)
    // save the post state and the payload Hash of this block since it will
    // be the LVH
@@ -1936,9 +1879,7 @@
    postState, err := service.validateStateTransition(ctx, preState, wsb)
    require.NoError(t, err)
    require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
    roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
    require.NoError(t, err)
    require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
    require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
    require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
|
||||
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
|
||||
require.NoError(t, err)
|
||||
@@ -2054,9 +1995,7 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
|
||||
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -2120,9 +2059,7 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
|
||||
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -2272,11 +2209,11 @@ func Test_getFCUArgs(t *testing.T) {
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, [32]byte{'a'})
require.NoError(t, err)

cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roblock,
signed: wsb,
blockRoot: [32]byte{'a'},
postState: st,
isValidPayload: true,
}
@@ -2286,64 +2223,11 @@ func Test_getFCUArgs(t *testing.T) {
require.ErrorContains(t, "block does not exist", err)

// canonical branch
cfg.headRoot = cfg.roblock.Root()
cfg.headRoot = cfg.blockRoot
fcuArgs = &fcuConfig{}
err = s.getFCUArgs(cfg, fcuArgs)
require.NoError(t, err)
require.Equal(t, cfg.roblock.Root(), fcuArgs.headRoot)
}

func TestRollbackBlock(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx

st, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := st.HashTreeRoot(ctx)
require.NoError(t, err, "Could not hash genesis state")

require.NoError(t, service.saveGenesisData(ctx, st))

genesis := blocks.NewGenesisBlock(stateRoot[:])
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
parentRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err, "Could not get signing root")
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")

st, err = service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))

require.Equal(t, true, service.cfg.BeaconDB.HasBlock(ctx, root))
hasState, err := service.cfg.StateGen.HasState(ctx, root)
require.NoError(t, err)
require.Equal(t, true, hasState)

// Set invalid parent root to trigger forkchoice error.
wsb.SetParentRoot([]byte("bad"))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)

// Rollback block insertion into db and caches.
require.ErrorContains(t, fmt.Sprintf("could not insert block %d to fork choice store", roblock.Block().Slot()), service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))

// The block should no longer exist.
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root))
hasState, err = service.cfg.StateGen.HasState(ctx, root)
require.NoError(t, err)
require.Equal(t, false, hasState)
require.Equal(t, cfg.blockRoot, fcuArgs.headRoot)
}

func fakeCommitments(n int) [][]byte {
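Editor's note: the recurring change across the hunks above swaps postBlockProcess's argument bundle between two shapes. A minimal sketch of both, with placeholder types standing in for Prysm's real interfaces (an assumption for illustration, not the actual definitions):

```go
package sketch

import "context"

// Placeholder types (assumptions) standing in for Prysm's
// interfaces.ReadOnlySignedBeaconBlock, blocks.ROBlock and state.BeaconState.
type (
	SignedBeaconBlock interface{}
	ROBlock           interface{ Root() [32]byte }
	BeaconState       interface{}
)

// One side of the compare: the block travels with its precomputed root
// as a single ROBlock value.
type postBlockProcessConfigWithROBlock struct {
	ctx            context.Context
	roblock        ROBlock
	headRoot       [32]byte
	postState      BeaconState
	isValidPayload bool
}

// The other side: the signed block and its root are passed as separate fields.
type postBlockProcessConfigWithRoot struct {
	ctx            context.Context
	signed         SignedBeaconBlock
	blockRoot      [32]byte
	headRoot       [32]byte
	postState      BeaconState
	isValidPayload bool
}
```

Bundling the root into an ROBlock lets downstream consumers reuse it without rehashing; keeping signed and blockRoot separate keeps both pieces explicit at every call site.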
@@ -10,7 +10,6 @@ import (
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -47,7 +46,7 @@ func TestVerifyLMDFFGConsistent(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, r32))

state, r33, err := prepareForkchoiceState(ctx, 33, [32]byte{'b'}, r32.Root(), params.BeaconConfig().ZeroHash, fc, fc)
state, r33, err := prepareForkchoiceState(ctx, 33, [32]byte{'b'}, r32, params.BeaconConfig().ZeroHash, fc, fc)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, r33))

@@ -55,12 +54,10 @@ func TestVerifyLMDFFGConsistent(t *testing.T) {
a := util.NewAttestation()
a.Data.Target.Epoch = 1
a.Data.Target.Root = []byte{'c'}
r33Root := r33.Root()
a.Data.BeaconBlockRoot = r33Root[:]
a.Data.BeaconBlockRoot = r33[:]
require.ErrorContains(t, wanted, service.VerifyLmdFfgConsistency(context.Background(), a))

r32Root := r32.Root()
a.Data.Target.Root = r32Root[:]
a.Data.Target.Root = r32[:]
err = service.VerifyLmdFfgConsistency(context.Background(), a)
require.NoError(t, err, "Could not verify LMD and FFG votes to be consistent")
}
@@ -119,9 +116,7 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
roblock, err := consensus_blocks.NewROBlockWithRoot(wsb, tRoot)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, tRoot, [32]byte{}, postState, false}))
copied, err = service.cfg.StateGen.StateByRoot(ctx, tRoot)
require.NoError(t, err)
require.Equal(t, 2, fcs.NodeCount())
@@ -181,9 +176,7 @@ func TestService_UpdateHead_NoAtts(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
roblock, err := consensus_blocks.NewROBlockWithRoot(wsb, tRoot)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, tRoot, [32]byte{}, postState, false}))
require.Equal(t, 2, fcs.NodeCount())
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
require.Equal(t, tRoot, service.head.root)

@@ -17,7 +17,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
@@ -101,13 +100,10 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
if err := s.savePostStateInfo(ctx, blockRoot, blockCopy, postState); err != nil {
return errors.Wrap(err, "could not save post state info")
}
roblock, err := consensus_blocks.NewROBlockWithRoot(blockCopy, blockRoot)
if err != nil {
return err
}
args := &postBlockProcessConfig{
ctx: ctx,
roblock: roblock,
signed: blockCopy,
blockRoot: blockRoot,
postState: postState,
isValidPayload: isValidPayload,
}

@@ -36,7 +36,6 @@ import (
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
@@ -304,15 +303,7 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint state")
}
finalizedBlock, err := s.cfg.BeaconDB.Block(s.ctx, fRoot)
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint block")
}
roblock, err := consensus_blocks.NewROBlockWithRoot(finalizedBlock, fRoot)
if err != nil {
return err
}
if err := s.cfg.ForkChoiceStore.InsertNode(s.ctx, st, roblock); err != nil {
if err := s.cfg.ForkChoiceStore.InsertNode(s.ctx, st, fRoot); err != nil {
return errors.Wrap(err, "could not insert finalized block to forkchoice")
}
if !features.Get().EnableStartOptimistic {
@@ -524,11 +515,7 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon

s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
gb, err := consensus_blocks.NewROBlockWithRoot(genesisBlk, genesisBlkRoot)
if err != nil {
return err
}
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, genesisState, gb); err != nil {
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, genesisState, genesisBlkRoot); err != nil {
log.WithError(err).Fatal("Could not process genesis block for fork choice")
}
s.cfg.ForkChoiceStore.SetOriginRoot(genesisBlkRoot)

@@ -376,15 +376,11 @@ func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
cfg: &config{ForkChoiceStore: doublylinkedtree.New(), BeaconDB: beaconDB},
}
b := util.NewBeaconBlock()
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
beaconState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, roblock))
require.NoError(t, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, r))

assert.Equal(t, false, s.hasBlock(ctx, [32]byte{}), "Should not have block")
assert.Equal(t, true, s.hasBlock(ctx, r), "Should have block")
@@ -457,11 +453,7 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
require.NoError(b, err)
beaconState, err := util.NewBeaconState()
require.NoError(b, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(b, err)
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(b, err)
require.NoError(b, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, roblock))
require.NoError(b, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, r))

b.ResetTimer()
for i := 0; i < b.N; i++ {

@@ -567,7 +567,7 @@ func prepareForkchoiceState(
payloadHash [32]byte,
justified *ethpb.Checkpoint,
finalized *ethpb.Checkpoint,
) (state.BeaconState, blocks.ROBlock, error) {
) (state.BeaconState, [32]byte, error) {
blockHeader := &ethpb.BeaconBlockHeader{
ParentRoot: parentRoot[:],
}
@@ -588,26 +588,7 @@ func prepareForkchoiceState(

base.BlockRoots[0] = append(base.BlockRoots[0], blockRoot[:]...)
st, err := state_native.InitializeFromProtoBellatrix(base)
if err != nil {
return nil, blocks.ROBlock{}, err
}
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Slot: slot,
ParentRoot: parentRoot[:],
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &enginev1.ExecutionPayload{
BlockHash: payloadHash[:],
},
},
},
}
signed, err := blocks.NewSignedBeaconBlock(blk)
if err != nil {
return nil, blocks.ROBlock{}, err
}
roblock, err := blocks.NewROBlockWithRoot(signed, blockRoot)
return st, roblock, err
return st, blockRoot, err
}

// CachedHeadRoot mocks the same method in the chain service
@@ -650,9 +631,9 @@ func (s *ChainService) HighestReceivedBlockSlot() primitives.Slot {
}

// InsertNode mocks the same method in the chain service
func (s *ChainService) InsertNode(ctx context.Context, st state.BeaconState, block blocks.ROBlock) error {
func (s *ChainService) InsertNode(ctx context.Context, st state.BeaconState, root [32]byte) error {
if s.ForkChoiceStore != nil {
return s.ForkChoiceStore.InsertNode(ctx, st, block)
return s.ForkChoiceStore.InsertNode(ctx, st, root)
}
return nil
}

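Editor's note: the mock above mirrors the forkchoice API change, where InsertNode receives either a blocks.ROBlock or a bare 32-byte root. A toy sketch of the ROBlock idea, with simplified types and sha256 standing in for the real hash tree root (both assumptions):

```go
package sketch

import "crypto/sha256"

// signedBlock is a simplified stand-in for a signed beacon block.
type signedBlock struct{ body []byte }

// roBlock pairs a block with its cached root, mirroring blocks.ROBlock.
type roBlock struct {
	blk  signedBlock
	root [32]byte
}

// newROBlockWithRoot mirrors blocks.NewROBlockWithRoot: the caller supplies a
// root it already computed, so forkchoice, caches and DB keys never rehash.
func newROBlockWithRoot(b signedBlock, root [32]byte) roBlock {
	return roBlock{blk: b, root: root}
}

func (r roBlock) Root() [32]byte { return r.root }

// Without an ROBlock, every consumer that needs the root recomputes it.
func rootOf(b signedBlock) [32]byte { return sha256.Sum256(b.body) }
```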
@@ -120,36 +120,35 @@ func ValidateBLSToExecutionChange(st state.ReadOnlyBeaconState, signed *ethpb.Si
//
// Spec pseudocode definition:
//
// def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None:
// def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None:
// expected_withdrawals, partial_withdrawals_count = get_expected_withdrawals(state) # [Modified in Electra:EIP7251]
//
// expected_withdrawals, processed_partial_withdrawals_count = get_expected_withdrawals(state) # [Modified in Electra:EIP7251]
// assert len(payload.withdrawals) == len(expected_withdrawals)
//
// assert len(payload.withdrawals) == len(expected_withdrawals)
// for expected_withdrawal, withdrawal in zip(expected_withdrawals, payload.withdrawals):
// assert withdrawal == expected_withdrawal
// decrease_balance(state, withdrawal.validator_index, withdrawal.amount)
//
// for expected_withdrawal, withdrawal in zip(expected_withdrawals, payload.withdrawals):
// assert withdrawal == expected_withdrawal
// decrease_balance(state, withdrawal.validator_index, withdrawal.amount)
// # Update pending partial withdrawals [New in Electra:EIP7251]
// state.pending_partial_withdrawals = state.pending_partial_withdrawals[partial_withdrawals_count:]
//
// # Update pending partial withdrawals [New in Electra:EIP7251]
// state.pending_partial_withdrawals = state.pending_partial_withdrawals[processed_partial_withdrawals_count:]
// # Update the next withdrawal index if this block contained withdrawals
// if len(expected_withdrawals) != 0:
// latest_withdrawal = expected_withdrawals[-1]
// state.next_withdrawal_index = WithdrawalIndex(latest_withdrawal.index + 1)
//
// # Update the next withdrawal index if this block contained withdrawals
// if len(expected_withdrawals) != 0:
// latest_withdrawal = expected_withdrawals[-1]
// state.next_withdrawal_index = WithdrawalIndex(latest_withdrawal.index + 1)
//
// # Update the next validator index to start the next withdrawal sweep
// if len(expected_withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
// # Next sweep starts after the latest withdrawal's validator index
// next_validator_index = ValidatorIndex((expected_withdrawals[-1].validator_index + 1) % len(state.validators))
// state.next_withdrawal_validator_index = next_validator_index
// else:
// # Advance sweep by the max length of the sweep if there was not a full set of withdrawals
// next_index = state.next_withdrawal_validator_index + MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
// next_validator_index = ValidatorIndex(next_index % len(state.validators))
// state.next_withdrawal_validator_index = next_validator_index
// # Update the next validator index to start the next withdrawal sweep
// if len(expected_withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
// # Next sweep starts after the latest withdrawal's validator index
// next_validator_index = ValidatorIndex((expected_withdrawals[-1].validator_index + 1) % len(state.validators))
// state.next_withdrawal_validator_index = next_validator_index
// else:
// # Advance sweep by the max length of the sweep if there was not a full set of withdrawals
// next_index = state.next_withdrawal_validator_index + MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
// next_validator_index = ValidatorIndex(next_index % len(state.validators))
// state.next_withdrawal_validator_index = next_validator_index
func ProcessWithdrawals(st state.BeaconState, executionData interfaces.ExecutionData) (state.BeaconState, error) {
expectedWithdrawals, processedPartialWithdrawalsCount, err := st.ExpectedWithdrawals()
expectedWithdrawals, partialWithdrawalsCount, err := st.ExpectedWithdrawals()
if err != nil {
return nil, errors.Wrap(err, "could not get expected withdrawals")
}
@@ -193,7 +192,7 @@ func ProcessWithdrawals(st state.BeaconState, executionData interfaces.Execution
}

if st.Version() >= version.Electra {
if err := st.DequeuePartialWithdrawals(processedPartialWithdrawalsCount); err != nil {
if err := st.DequeuePartialWithdrawals(partialWithdrawalsCount); err != nil {
return nil, fmt.Errorf("unable to dequeue partial withdrawals from state: %w", err)
}
}

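Editor's note: the sweep-index arithmetic in the pseudocode quoted above is easy to misread, so here is a hedged Go translation; the constants are the mainnet spec values, assumed here for illustration:

```go
package sketch

// Mainnet spec values (assumed for this sketch).
const (
	maxWithdrawalsPerPayload         = 16
	maxValidatorsPerWithdrawalsSweep = 16384
)

type withdrawal struct {
	index          uint64
	validatorIndex uint64
}

// nextSweepState mirrors the tail of process_withdrawals: advance the global
// withdrawal index past this block's last withdrawal, then position the
// validator sweep cursor for the next block.
func nextSweepState(expected []withdrawal, nextWithdrawalIndex, sweepCursor, numValidators uint64) (uint64, uint64) {
	if len(expected) != 0 {
		nextWithdrawalIndex = expected[len(expected)-1].index + 1
	}
	if uint64(len(expected)) == maxWithdrawalsPerPayload {
		// Full payload: the next sweep starts right after the last withdrawn validator.
		sweepCursor = (expected[len(expected)-1].validatorIndex + 1) % numValidators
	} else {
		// Partial payload: advance the cursor by the maximum sweep length.
		sweepCursor = (sweepCursor + maxValidatorsPerWithdrawalsSweep) % numValidators
	}
	return nextWithdrawalIndex, sweepCursor
}
```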
@@ -241,35 +241,76 @@ func SlashedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validat
return slashed
}

// ExitedValidatorIndices returns the indices of validators who exited during the specified epoch.
//
// A validator is considered to have exited during an epoch if their ExitEpoch equals the epoch and
// excludes validators that have been ejected.
// This function simplifies the exit determination by directly checking the validator's ExitEpoch,
// avoiding the complexities and potential inaccuracies of calculating withdrawable epochs.
func ExitedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validator) ([]primitives.ValidatorIndex, error) {
// ExitedValidatorIndices determines the indices exited during the current epoch.
func ExitedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validator, activeValidatorCount uint64) ([]primitives.ValidatorIndex, error) {
exited := make([]primitives.ValidatorIndex, 0)
exitEpochs := make([]primitives.Epoch, 0)
for i := 0; i < len(validators); i++ {
val := validators[i]
if val.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
exitEpochs = append(exitEpochs, val.ExitEpoch)
}
}
exitQueueEpoch := primitives.Epoch(0)
for _, i := range exitEpochs {
if exitQueueEpoch < i {
exitQueueEpoch = i
}
}

// We use the exit queue churn to determine if we have passed a churn limit.
exitQueueChurn := uint64(0)
for _, val := range validators {
if val.ExitEpoch == exitQueueEpoch {
exitQueueChurn++
}
}
churn := helpers.ValidatorExitChurnLimit(activeValidatorCount)
if churn < exitQueueChurn {
exitQueueEpoch++
}
withdrawableEpoch := exitQueueEpoch + params.BeaconConfig().MinValidatorWithdrawabilityDelay
for i, val := range validators {
if val.ExitEpoch == epoch && val.EffectiveBalance > params.BeaconConfig().EjectionBalance {
if val.ExitEpoch == epoch && val.WithdrawableEpoch == withdrawableEpoch &&
val.EffectiveBalance > params.BeaconConfig().EjectionBalance {
exited = append(exited, primitives.ValidatorIndex(i))
}
}
return exited, nil
}

// EjectedValidatorIndices returns the indices of validators who were ejected during the specified epoch.
//
// A validator is considered ejected during an epoch if:
// - Their ExitEpoch equals the epoch.
// - Their EffectiveBalance is less than or equal to the EjectionBalance threshold.
//
// This function simplifies the ejection determination by directly checking the validator's ExitEpoch
// and EffectiveBalance, avoiding the complexities and potential inaccuracies of calculating
// withdrawable epochs.
func EjectedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validator) ([]primitives.ValidatorIndex, error) {
// EjectedValidatorIndices determines the indices ejected during the given epoch.
func EjectedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validator, activeValidatorCount uint64) ([]primitives.ValidatorIndex, error) {
ejected := make([]primitives.ValidatorIndex, 0)
exitEpochs := make([]primitives.Epoch, 0)
for i := 0; i < len(validators); i++ {
val := validators[i]
if val.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
exitEpochs = append(exitEpochs, val.ExitEpoch)
}
}
exitQueueEpoch := primitives.Epoch(0)
for _, i := range exitEpochs {
if exitQueueEpoch < i {
exitQueueEpoch = i
}
}

// We use the exit queue churn to determine if we have passed a churn limit.
exitQueueChurn := uint64(0)
for _, val := range validators {
if val.ExitEpoch == exitQueueEpoch {
exitQueueChurn++
}
}
churn := helpers.ValidatorExitChurnLimit(activeValidatorCount)
if churn < exitQueueChurn {
exitQueueEpoch++
}
withdrawableEpoch := exitQueueEpoch + params.BeaconConfig().MinValidatorWithdrawabilityDelay
for i, val := range validators {
if val.ExitEpoch == epoch && val.EffectiveBalance <= params.BeaconConfig().EjectionBalance {
if val.ExitEpoch == epoch && val.WithdrawableEpoch == withdrawableEpoch &&
val.EffectiveBalance <= params.BeaconConfig().EjectionBalance {
ejected = append(ejected, primitives.ValidatorIndex(i))
}
}

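Editor's note: both functions above derive the same exit-queue epoch before filtering by effective balance. A compact sketch of that shared logic, with a plain uint64 standing in for primitives.Epoch and the churn limit passed in directly (both assumptions):

```go
package sketch

type validator struct {
	ExitEpoch uint64
}

const farFutureEpoch = ^uint64(0)

// exitQueueEpoch finds the highest exit epoch in use, counts how many
// validators share it, and bumps the epoch once the per-epoch churn limit
// is exhausted, mirroring the loops in the functions above.
func exitQueueEpoch(vals []validator, churnLimit uint64) uint64 {
	queueEpoch := uint64(0)
	for _, v := range vals {
		if v.ExitEpoch != farFutureEpoch && v.ExitEpoch > queueEpoch {
			queueEpoch = v.ExitEpoch
		}
	}
	churn := uint64(0)
	for _, v := range vals {
		if v.ExitEpoch == queueEpoch {
			churn++
		}
	}
	if churn > churnLimit {
		queueEpoch++ // the queue spills over into the next epoch
	}
	return queueEpoch
}
```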
@@ -389,16 +389,19 @@ func TestExitedValidatorIndices(t *testing.T) {
state: &ethpb.BeaconState{
Validators: []*ethpb.Validator{
{
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 0,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 0,
WithdrawableEpoch: params.BeaconConfig().MinValidatorWithdrawabilityDelay,
},
{
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 10,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 0,
WithdrawableEpoch: 10,
},
{
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 0,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 0,
WithdrawableEpoch: params.BeaconConfig().MinValidatorWithdrawabilityDelay,
},
},
},
@@ -430,7 +433,11 @@ func TestExitedValidatorIndices(t *testing.T) {
},
}
for _, tt := range tests {
exitedIndices, err := validators.ExitedValidatorIndices(0, tt.state.Validators)
s, err := state_native.InitializeFromProtoPhase0(tt.state)
require.NoError(t, err)
activeCount, err := helpers.ActiveValidatorCount(context.Background(), s, time.PrevEpoch(s))
require.NoError(t, err)
exitedIndices, err := validators.ExitedValidatorIndices(0, tt.state.Validators, activeCount)
require.NoError(t, err)
assert.DeepEqual(t, tt.wanted, exitedIndices)
}

@@ -139,7 +139,6 @@ func (s *Store) SaveStates(ctx context.Context, states []state.ReadOnlyBeaconSta
}
multipleEncs[i] = stateBytes
}
log.Infof("Total serialization: %s", time.Since(startTime).String())

if err := s.db.Update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(stateBucket)

@@ -37,7 +37,6 @@ go_library(
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/verification:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
@@ -106,11 +105,8 @@ go_test(
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/execution/testing:go_default_library",
"//beacon-chain/execution/types:go_default_library",
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/verification:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",

@@ -216,16 +216,19 @@ func TestService_BlockNumberByTimestamp(t *testing.T) {
require.NoError(t, err)
web3Service = setDefaultMocks(web3Service)
web3Service.rpcClient = &mockExecution.RPCClient{Backend: testAcc.Backend}
wantedTime := time.Now().Unix() + 100*10

for i := 0; i < 200; i++ {
testAcc.Backend.Commit()
// Throw away error in case adjustment wasn't successful.
err = testAcc.Backend.AdjustTime(10 * time.Second)
_ = err
}
ctx := context.Background()
hd, err := testAcc.Backend.HeaderByNumber(ctx, nil)
require.NoError(t, err)
web3Service.latestEth1Data.BlockTime = hd.Time
web3Service.latestEth1Data.BlockHeight = hd.Number.Uint64()
blk, err := web3Service.BlockByTimestamp(ctx, 1000 /* time */)
blk, err := web3Service.BlockByTimestamp(ctx, uint64(wantedTime) /* time */)
require.NoError(t, err)
if blk.Number.Cmp(big.NewInt(0)) == 0 {
t.Error("Returned a block with zero number, expected to be non zero")

@@ -14,7 +14,6 @@ import (
"github.com/holiman/uint256"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
@@ -24,7 +23,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
pb "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
@@ -81,8 +79,6 @@ const (
GetPayloadBodiesByRangeV1 = "engine_getPayloadBodiesByRangeV1"
// ExchangeCapabilities request string for JSON-RPC.
ExchangeCapabilities = "engine_exchangeCapabilities"
// GetBlobsV1 request string for JSON-RPC.
GetBlobsV1 = "engine_getBlobsV1"
// Defines the seconds before timing out engine endpoints with non-block execution semantics.
defaultEngineTimeout = time.Second
)
@@ -97,15 +93,16 @@ type ForkchoiceUpdatedResponse struct {
ValidationError string `json:"validationError"`
}

// Reconstructor defines a service responsible for reconstructing full beacon chain objects by utilizing the execution API and making requests through the execution client.
type Reconstructor interface {
// PayloadReconstructor defines a service that can reconstruct a full beacon
// block with an execution payload from a signed beacon block and a connection
// to an execution client's engine API.
type PayloadReconstructor interface {
ReconstructFullBlock(
ctx context.Context, blindedBlock interfaces.ReadOnlySignedBeaconBlock,
) (interfaces.SignedBeaconBlock, error)
ReconstructFullBellatrixBlockBatch(
ctx context.Context, blindedBlocks []interfaces.ReadOnlySignedBeaconBlock,
) ([]interfaces.SignedBeaconBlock, error)
ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, indices []bool) ([]blocks.VerifiedROBlob, error)
}

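Editor's note: the interface hunk above trades the wider Reconstructor for the narrower PayloadReconstructor; blob-sidecar reconstruction is what separates the two. A hedged sketch of why a consumer might prefer the narrow shape (placeholder block types are assumptions):

```go
package sketch

import "context"

// Placeholder stand-ins (assumptions) for Prysm's block interfaces.
type (
	ReadOnlySignedBeaconBlock interface{}
	SignedBeaconBlock         interface{}
)

// The narrower interface: full-block reconstruction only.
type payloadReconstructor interface {
	ReconstructFullBlock(ctx context.Context, blinded ReadOnlySignedBeaconBlock) (SignedBeaconBlock, error)
	ReconstructFullBellatrixBlockBatch(ctx context.Context, blinded []ReadOnlySignedBeaconBlock) ([]SignedBeaconBlock, error)
}

// Consumers that only unblind blocks can depend on the narrow interface, so
// adding blob-sidecar reconstruction elsewhere does not ripple through them.
func unblind(ctx context.Context, r payloadReconstructor, b ReadOnlySignedBeaconBlock) (SignedBeaconBlock, error) {
	return r.ReconstructFullBlock(ctx, b)
}
```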
// EngineCaller defines a client that can interact with an Ethereum
@@ -497,20 +494,6 @@ func (s *Service) HeaderByNumber(ctx context.Context, number *big.Int) (*types.H
return hdr, err
}

// GetBlobs returns the blob and proof from the execution engine for the given versioned hashes.
func (s *Service) GetBlobs(ctx context.Context, versionedHashes []common.Hash) ([]*pb.BlobAndProof, error) {
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetBlobs")
defer span.End()
// If the execution engine does not support `GetBlobsV1`, return early to prevent encountering an error later.
if !s.capabilityCache.has(GetBlobsV1) {
return nil, nil
}

result := make([]*pb.BlobAndProof, len(versionedHashes))
err := s.rpcClient.CallContext(ctx, &result, GetBlobsV1, versionedHashes)
return result, handleRPCError(err)
}

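Editor's note: a hedged usage sketch for the GetBlobs method above, assuming this file's imports and helpers (common.Hash, pb.BlobAndProof, primitives.ConvertKzgCommitmentToVersionedHash); fetchMissing is a hypothetical caller, not part of the diff:

```go
// fetchMissing converts each KZG commitment to its versioned hash and asks
// the execution client for the matching blobs. GetBlobs returns (nil, nil)
// when the engine does not advertise engine_getBlobsV1, and nil entries for
// blobs the EL mempool no longer holds.
func fetchMissing(ctx context.Context, s *Service, commitments [][]byte) ([]*pb.BlobAndProof, error) {
	hashes := make([]common.Hash, 0, len(commitments))
	for _, c := range commitments {
		hashes = append(hashes, primitives.ConvertKzgCommitmentToVersionedHash(c))
	}
	return s.GetBlobs(ctx, hashes)
}
```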
// ReconstructFullBlock takes in a blinded beacon block and reconstructs
// a beacon block with a full execution payload via the engine API.
func (s *Service) ReconstructFullBlock(
@@ -539,109 +522,6 @@ func (s *Service) ReconstructFullBellatrixBlockBatch(
return unb, nil
}

// ReconstructBlobSidecars reconstructs the verified blob sidecars for a given beacon block.
// It retrieves the KZG commitments from the block body, fetches the associated blobs and proofs,
// and constructs the corresponding verified read-only blob sidecars.
//
// The 'exists' argument is a boolean list (must be the same length as body.BlobKzgCommitments), where each element corresponds to whether a
// particular blob sidecar already exists. If exists[i] is true, the blob for the i-th KZG commitment
// has already been retrieved and does not need to be fetched again from the execution layer (EL).
//
// For example:
// - len(block.Body().BlobKzgCommitments()) == 6
// - If exists = [true, false, true, false, true, false], the function will fetch the blobs
// associated with indices 1, 3, and 5 (since those are marked as non-existent).
// - If exists = [false ... x 6], the function will attempt to fetch all blobs.
//
// Only the blobs that do not already exist (where exists[i] is false) are fetched using the KZG commitments from block body.
func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, exists []bool) ([]blocks.VerifiedROBlob, error) {
blockBody := block.Block().Body()
kzgCommitments, err := blockBody.BlobKzgCommitments()
if err != nil {
return nil, errors.Wrap(err, "could not get blob KZG commitments")
}
if len(kzgCommitments) > len(exists) {
return nil, fmt.Errorf("length of KZG commitments (%d) is greater than length of exists (%d)", len(kzgCommitments), len(exists))
}

// Collect KZG hashes for non-existing blobs
var kzgHashes []common.Hash
for i, commitment := range kzgCommitments {
if !exists[i] {
kzgHashes = append(kzgHashes, primitives.ConvertKzgCommitmentToVersionedHash(commitment))
}
}
if len(kzgHashes) == 0 {
return nil, nil
}

// Fetch blobs from EL
blobs, err := s.GetBlobs(ctx, kzgHashes)
if err != nil {
return nil, errors.Wrap(err, "could not get blobs")
}
if len(blobs) == 0 {
return nil, nil
}

header, err := block.Header()
if err != nil {
return nil, errors.Wrap(err, "could not get header")
}

// Reconstruct verified blob sidecars
var verifiedBlobs []blocks.VerifiedROBlob
for i, blobIndex := 0, 0; i < len(kzgCommitments); i++ {
if exists[i] {
continue
}

if blobIndex >= len(blobs) || blobs[blobIndex] == nil {
blobIndex++
continue
}
blob := blobs[blobIndex]
blobIndex++

proof, err := blocks.MerkleProofKZGCommitment(blockBody, i)
if err != nil {
log.WithError(err).WithField("index", i).Error("failed to get Merkle proof for KZG commitment")
continue
}
sidecar := &ethpb.BlobSidecar{
Index: uint64(i),
Blob: blob.Blob,
KzgCommitment: kzgCommitments[i],
KzgProof: blob.KzgProof,
SignedBlockHeader: header,
CommitmentInclusionProof: proof,
}

roBlob, err := blocks.NewROBlobWithRoot(sidecar, blockRoot)
if err != nil {
log.WithError(err).WithField("index", i).Error("failed to create RO blob with root")
continue
}

// Verify the sidecar KZG proof
v := s.blobVerifier(roBlob, verification.ELMemPoolRequirements)
if err := v.SidecarKzgProofVerified(); err != nil {
log.WithError(err).WithField("index", i).Error("failed to verify KZG proof for sidecar")
continue
}

verifiedBlob, err := v.VerifiedROBlob()
if err != nil {
log.WithError(err).WithField("index", i).Error("failed to verify RO blob")
continue
}

verifiedBlobs = append(verifiedBlobs, verifiedBlob)
}

return verifiedBlobs, nil
}

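Editor's note: a hedged sketch of the 'exists' mask contract documented above, assuming this file's imports; reconstructMissing and the has callback are hypothetical illustrations, not part of the diff:

```go
// reconstructMissing marks which commitments already have sidecars on disk,
// so only the false slots are fetched from the EL and verified.
func reconstructMissing(ctx context.Context, s *Service, blk interfaces.ReadOnlySignedBeaconBlock, root [32]byte, has func(uint64) bool) ([]blocks.VerifiedROBlob, error) {
	commitments, err := blk.Block().Body().BlobKzgCommitments()
	if err != nil {
		return nil, err
	}
	exists := make([]bool, len(commitments)) // must match the commitment count
	for i := range exists {
		exists[i] = has(uint64(i))
	}
	return s.ReconstructBlobSidecars(ctx, blk, root, exists)
}
```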
func fullPayloadFromPayloadBody(
header interfaces.ExecutionData, body *pb.ExecutionPayloadBody, bVersion int,
) (interfaces.ExecutionData, error) {

@@ -2,7 +2,6 @@ package execution

import (
"context"
"crypto/rand"
"encoding/json"
"fmt"
"io"
@@ -21,7 +20,6 @@ import (
"github.com/holiman/uint256"
"github.com/pkg/errors"
mocks "github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
@@ -39,9 +37,9 @@ import (
)

var (
_ = Reconstructor(&Service{})
_ = PayloadReconstructor(&Service{})
_ = EngineCaller(&Service{})
_ = Reconstructor(&Service{})
_ = PayloadReconstructor(&Service{})
_ = EngineCaller(&mocks.EngineClient{})
)

@@ -1769,9 +1767,7 @@ func fixturesStruct() *payloadFixtures {
Proofs: []hexutil.Bytes{[]byte("proof1"), []byte("proof2")},
Blobs: []hexutil.Bytes{{'a'}, {'b'}},
},
ExecutionRequests: []hexutil.Bytes{append([]byte{pb.DepositRequestType}, depositRequestBytes...),
append([]byte{pb.WithdrawalRequestType}, withdrawalRequestBytes...),
append([]byte{pb.ConsolidationRequestType}, consolidationRequestBytes...)},
ExecutionRequests: []hexutil.Bytes{depositRequestBytes, withdrawalRequestBytes, consolidationRequestBytes},
}
parent := bytesutil.PadTo([]byte("parentHash"), fieldparams.RootLength)
sha3Uncles := bytesutil.PadTo([]byte("sha3Uncles"), fieldparams.RootLength)
@@ -2394,122 +2390,3 @@ func Test_ExchangeCapabilities(t *testing.T) {
}
})
}

func TestReconstructBlobSidecars(t *testing.T) {
client := &Service{capabilityCache: &capabilityCache{}}
b := util.NewBeaconBlockDeneb()
kzgCommitments := createRandomKzgCommitments(t, 6)

b.Block.Body.BlobKzgCommitments = kzgCommitments
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
sb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)

ctx := context.Background()
t.Run("all seen", func(t *testing.T) {
exists := []bool{true, true, true, true, true, true}
verifiedBlobs, err := client.ReconstructBlobSidecars(ctx, sb, r, exists)
require.NoError(t, err)
require.Equal(t, 0, len(verifiedBlobs))
})

t.Run("get-blobs end point is not supported", func(t *testing.T) {
exists := []bool{true, true, true, true, true, false}
verifiedBlobs, err := client.ReconstructBlobSidecars(ctx, sb, r, exists)
require.NoError(t, err)
require.Equal(t, 0, len(verifiedBlobs))
})

client.capabilityCache = &capabilityCache{capabilities: map[string]interface{}{GetBlobsV1: nil}}

t.Run("recovered 6 missing blobs", func(t *testing.T) {
srv := createBlobServer(t, 6)
defer srv.Close()

rpcClient, client := setupRpcClient(t, srv.URL, client)
defer rpcClient.Close()

exists := [6]bool{}
verifiedBlobs, err := client.ReconstructBlobSidecars(ctx, sb, r, exists[:])
require.NoError(t, err)
require.Equal(t, 6, len(verifiedBlobs))
})

t.Run("recovered 3 missing blobs", func(t *testing.T) {
srv := createBlobServer(t, 3)
defer srv.Close()

rpcClient, client := setupRpcClient(t, srv.URL, client)
defer rpcClient.Close()

exists := []bool{true, false, true, false, true, false}
verifiedBlobs, err := client.ReconstructBlobSidecars(ctx, sb, r, exists)
require.NoError(t, err)
require.Equal(t, 3, len(verifiedBlobs))
})

t.Run("kzg is longer than exist", func(t *testing.T) {
srv := createBlobServer(t, 3)
defer srv.Close()

rpcClient, client := setupRpcClient(t, srv.URL, client)
defer rpcClient.Close()

exists := []bool{true, false, true, false, true}
_, err := client.ReconstructBlobSidecars(ctx, sb, r, exists)
require.ErrorContains(t, "length of KZG commitments (6) is greater than length of exists (5)", err)
})
}

func createRandomKzgCommitments(t *testing.T, num int) [][]byte {
kzgCommitments := make([][]byte, num)
for i := range kzgCommitments {
kzgCommitments[i] = make([]byte, 48)
_, err := rand.Read(kzgCommitments[i])
require.NoError(t, err)
}
return kzgCommitments
}

func createBlobServer(t *testing.T, numBlobs int) *httptest.Server {
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
require.NoError(t, r.Body.Close())
}()

blobs := make([]pb.BlobAndProofJson, numBlobs)
for i := range blobs {
blobs[i] = pb.BlobAndProofJson{Blob: []byte(fmt.Sprintf("blob%d", i+1)), KzgProof: []byte(fmt.Sprintf("proof%d", i+1))}
}

respJSON := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": blobs,
}
require.NoError(t, json.NewEncoder(w).Encode(respJSON))
}))
}

func setupRpcClient(t *testing.T, url string, client *Service) (*rpc.Client, *Service) {
rpcClient, err := rpc.DialHTTP(url)
require.NoError(t, err)

client.rpcClient = rpcClient
client.capabilityCache = &capabilityCache{capabilities: map[string]interface{}{GetBlobsV1: nil}}
client.blobVerifier = testNewBlobVerifier()

return rpcClient, client
}

func testNewBlobVerifier() verification.NewBlobVerifier {
return func(b blocks.ROBlob, reqs []verification.Requirement) verification.BlobVerifier {
return &verification.MockBlobVerifier{
CbVerifiedROBlob: func() (blocks.VerifiedROBlob, error) {
return blocks.VerifiedROBlob{}, nil
},
}
}
}

@@ -405,8 +405,10 @@ func TestProcessETH2GenesisLog_CorrectNumOfDeposits(t *testing.T) {
web3Service.rpcClient = &mockExecution.RPCClient{Backend: testAcc.Backend}
web3Service.httpLogger = testAcc.Backend
web3Service.latestEth1Data.LastRequestedBlock = 0
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().Number.Uint64()
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time
hdr, err := testAcc.Backend.Client.HeaderByNumber(context.Background(), nil)
require.NoError(t, err)
web3Service.latestEth1Data.BlockHeight = hdr.Number.Uint64()
web3Service.latestEth1Data.BlockTime = hdr.Time
bConfig := params.MinimalSpecConfig().Copy()
bConfig.MinGenesisTime = 0
bConfig.SecondsPerETH1Block = 10
@@ -415,7 +417,7 @@ func TestProcessETH2GenesisLog_CorrectNumOfDeposits(t *testing.T) {
nConfig.ContractDeploymentBlock = 0
params.OverrideBeaconNetworkConfig(nConfig)

testAcc.Backend.Commit()
assert.NoError(t, testAcc.Backend.AdjustTime(10*time.Second))

totalNumOfDeposits := depositsReqForChainStart + 30

@@ -438,14 +440,22 @@ func TestProcessETH2GenesisLog_CorrectNumOfDeposits(t *testing.T) {
// 5
if (i+1)%8 == depositOffset {
testAcc.Backend.Commit()
// Throw away error in case adjustment wasn't successful.
err = testAcc.Backend.AdjustTime(10 * time.Second)
_ = err
}
}
// Forward the chain to account for the follow distance
for i := uint64(0); i < params.BeaconConfig().Eth1FollowDistance; i++ {
testAcc.Backend.Commit()
// Throw away error in case adjustment wasn't successful.
err = testAcc.Backend.AdjustTime(10 * time.Second)
_ = err
}
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().Number.Uint64()
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time
hdr, err = testAcc.Backend.Client.HeaderByNumber(context.Background(), nil)
require.NoError(t, err)
web3Service.latestEth1Data.BlockHeight = hdr.Number.Uint64()
web3Service.latestEth1Data.BlockTime = hdr.Time

// Set up our subscriber now to listen for the chain started event.
stateChannel := make(chan *feed.Event, 1)
@@ -502,8 +512,10 @@ func TestProcessETH2GenesisLog_LargePeriodOfNoLogs(t *testing.T) {
web3Service.rpcClient = &mockExecution.RPCClient{Backend: testAcc.Backend}
web3Service.httpLogger = testAcc.Backend
web3Service.latestEth1Data.LastRequestedBlock = 0
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().Number.Uint64()
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time
hdr, err := testAcc.Backend.Client.HeaderByNumber(context.Background(), nil)
require.NoError(t, err)
web3Service.latestEth1Data.BlockHeight = hdr.Number.Uint64()
web3Service.latestEth1Data.BlockTime = hdr.Time
bConfig := params.MinimalSpecConfig().Copy()
bConfig.SecondsPerETH1Block = 10
params.OverrideBeaconConfig(bConfig)
@@ -540,14 +552,18 @@ func TestProcessETH2GenesisLog_LargePeriodOfNoLogs(t *testing.T) {
for i := uint64(0); i < 1500; i++ {
testAcc.Backend.Commit()
}
wantedGenesisTime := testAcc.Backend.Blockchain().CurrentBlock().Time
hdr, err = testAcc.Backend.Client.HeaderByNumber(context.Background(), nil)
require.NoError(t, err)
wantedGenesisTime := hdr.Time

// Forward the chain to account for the follow distance
for i := uint64(0); i < params.BeaconConfig().Eth1FollowDistance; i++ {
testAcc.Backend.Commit()
}
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().Number.Uint64()
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time
hdr, err = testAcc.Backend.Client.HeaderByNumber(context.Background(), nil)
require.NoError(t, err)
web3Service.latestEth1Data.BlockHeight = hdr.Number.Uint64()
web3Service.latestEth1Data.BlockTime = hdr.Time

// Set the genesis time 500 blocks ahead of the last
// deposit log.

@@ -7,7 +7,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
"github.com/prysmaticlabs/prysm/v5/network"
"github.com/prysmaticlabs/prysm/v5/network/authorization"
)
@@ -116,11 +115,3 @@ func WithJwtId(jwtId string) Option {
return nil
}
}

// WithVerifierWaiter gives the sync package direct access to the verifier waiter.
func WithVerifierWaiter(v *verification.InitializerWaiter) Option {
return func(s *Service) error {
s.verifierWaiter = v
return nil
}
}

@@ -78,13 +78,6 @@ func (s *Service) pollConnectionStatus(ctx context.Context) {
currClient.Close()
}
log.WithField("endpoint", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url)).Info("Connected to new endpoint")

c, err := s.ExchangeCapabilities(ctx)
if err != nil {
errorLogger(err, "Could not exchange capabilities with execution client")
}
s.capabilityCache.save(c)

return
case <-s.ctx.Done():
log.Debug("Received cancelled context,closing existing powchain service")

@@ -29,9 +29,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/container/trie"
contracts "github.com/prysmaticlabs/prysm/v5/contracts/deposit"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
@@ -157,9 +155,6 @@ type Service struct {
lastReceivedMerkleIndex int64 // Keeps track of the last received index to prevent log spam.
runError error
preGenesisState state.BeaconState
verifierWaiter *verification.InitializerWaiter
blobVerifier verification.NewBlobVerifier
capabilityCache *capabilityCache
}

// NewService sets up a new instance with an ethclient when given a web3 endpoint as a string in the config.
@@ -197,7 +192,6 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
lastReceivedMerkleIndex: -1,
preGenesisState: genState,
eth1HeadTicker: time.NewTicker(time.Duration(params.BeaconConfig().SecondsPerETH1Block) * time.Second),
capabilityCache: &capabilityCache{},
}

for _, opt := range opts {
@@ -235,13 +229,6 @@ func (s *Service) Start() {
}
}

v, err := s.verifierWaiter.WaitForInitializer(s.ctx)
if err != nil {
log.WithError(err).Error("Could not get verification initializer")
return
}
s.blobVerifier = newBlobVerifierFromInitializer(v)

s.isRunning = true

// Poll the execution client connection and fallback if errors occur.
@@ -899,39 +886,3 @@ func (s *Service) migrateOldDepositTree(eth1DataInDB *ethpb.ETH1ChainData) error
func (s *Service) removeStartupState() {
s.cfg.finalizedStateAtStartup = nil
}

func newBlobVerifierFromInitializer(ini *verification.Initializer) verification.NewBlobVerifier {
return func(b blocks.ROBlob, reqs []verification.Requirement) verification.BlobVerifier {
return ini.NewBlobVerifier(b, reqs)
}
}

type capabilityCache struct {
capabilities map[string]interface{}
capabilitiesLock sync.RWMutex
}

func (c *capabilityCache) save(cs []string) {
c.capabilitiesLock.Lock()
defer c.capabilitiesLock.Unlock()

if c.capabilities == nil {
c.capabilities = make(map[string]interface{})
}

for _, capability := range cs {
c.capabilities[capability] = struct{}{}
}
}

func (c *capabilityCache) has(capability string) bool {
c.capabilitiesLock.RLock()
defer c.capabilitiesLock.RUnlock()

if c.capabilities == nil {
return false
}

_, ok := c.capabilities[capability]
return ok
}

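Editor's note: a hedged usage sketch for capabilityCache, tying it to the ExchangeCapabilities call in the pollConnectionStatus hunk earlier; supportsGetBlobs is a hypothetical helper, not part of the diff:

```go
// supportsGetBlobs records the capabilities advertised by the execution
// client and reports whether engine_getBlobsV1 is among them.
func supportsGetBlobs(c *capabilityCache, exchanged []string) bool {
	c.save(exchanged)        // e.g. the slice returned by ExchangeCapabilities
	return c.has(GetBlobsV1) // false until the EL advertises the endpoint
}
```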
@@ -19,11 +19,8 @@ import (
	dbutil "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
	mockExecution "github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/testing"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/types"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice"
	doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/container/trie"
	contracts "github.com/prysmaticlabs/prysm/v5/contracts/deposit"
@@ -95,16 +92,10 @@ func TestStart_OK(t *testing.T) {
	t.Cleanup(func() {
		server.Stop()
	})
	c := startup.NewClockSynchronizer()
	require.NoError(t, c.SetClock(startup.NewClock(time.Unix(0, 0), [32]byte{})))
	waiter := verification.NewInitializerWaiter(
		c, forkchoice.NewROForkChoice(nil), nil)

	web3Service, err := NewService(context.Background(),
		WithHttpEndpoint(endpoint),
		WithDepositContractAddress(testAcc.ContractAddr),
		WithDatabase(beaconDB),
		WithVerifierWaiter(waiter),
	)
	require.NoError(t, err, "unable to setup execution service")
	web3Service = setDefaultMocks(web3Service)
@@ -189,7 +180,9 @@ func TestService_Eth1Synced(t *testing.T) {
	web3Service.depositContractCaller, err = contracts.NewDepositContractCaller(testAcc.ContractAddr, testAcc.Backend)
	require.NoError(t, err)

	currTime := testAcc.Backend.Blockchain().CurrentHeader().Time
	hdr, err := testAcc.Backend.Client.HeaderByNumber(context.Background(), nil)
	require.NoError(t, err)
	currTime := hdr.Time
	now := time.Now()
	assert.NoError(t, testAcc.Backend.AdjustTime(now.Sub(time.Unix(int64(currTime), 0))))
	testAcc.Backend.Commit()
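The test now reads the chain tip through the RPC client instead of reaching into the backend's internal blockchain object. With go-ethereum's ethclient, passing a nil block number to HeaderByNumber requests the latest header. A minimal sketch against a local node; the endpoint is a placeholder:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	// http://localhost:8545 is a placeholder endpoint for a local execution node.
	client, err := ethclient.Dial("http://localhost:8545")
	if err != nil {
		log.Fatal(err)
	}
	// A nil block number asks for the latest header.
	hdr, err := client.HeaderByNumber(context.Background(), nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("height:", hdr.Number.Uint64(), "timestamp:", hdr.Time)
}
```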
@@ -220,14 +213,20 @@ func TestFollowBlock_OK(t *testing.T) {

	web3Service = setDefaultMocks(web3Service)
	web3Service.rpcClient = &mockExecution.RPCClient{Backend: testAcc.Backend}
	baseHeight := testAcc.Backend.Blockchain().CurrentBlock().Number.Uint64()
	hdr, err := testAcc.Backend.Client.HeaderByNumber(context.Background(), nil)
	require.NoError(t, err)
	baseHeight := hdr.Number.Uint64()
	// process follow_distance blocks
	for i := 0; i < int(params.BeaconConfig().Eth1FollowDistance); i++ {
		testAcc.Backend.Commit()
		// Throw away error in case adjustment wasn't successful.
		err = testAcc.Backend.AdjustTime(10 * time.Second)
		_ = err
	}
	hdr, err = testAcc.Backend.Client.HeaderByNumber(context.Background(), nil)
	require.NoError(t, err)
	// set current height
	web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().Number.Uint64()
	web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time
	web3Service.latestEth1Data.BlockHeight = hdr.Number.Uint64()
	web3Service.latestEth1Data.BlockTime = hdr.Time

	h, err := web3Service.followedBlockHeight(context.Background())
	require.NoError(t, err)
@@ -236,11 +235,15 @@
	expectedHeight := numToForward + baseHeight
	// forward 2 blocks
	for i := uint64(0); i < numToForward; i++ {
		testAcc.Backend.Commit()
		// Throw away error in case adjustment wasn't successful.
		err = testAcc.Backend.AdjustTime(10 * time.Second)
		_ = err
	}
	hdr, err = testAcc.Backend.Client.HeaderByNumber(context.Background(), nil)
	require.NoError(t, err)
	// set current height
	web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentBlock().Number.Uint64()
	web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentBlock().Time
	web3Service.latestEth1Data.BlockHeight = hdr.Number.Uint64()
	web3Service.latestEth1Data.BlockTime = hdr.Time

	h, err = web3Service.followedBlockHeight(context.Background())
	require.NoError(t, err)
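The assertion `expectedHeight := numToForward + baseHeight` works because followedBlockHeight trails the current eth1 head by the configured follow distance, so committing N extra blocks moves the followed height by exactly N. A sketch of the clamped arithmetic behind that idea; followedHeight here is a hypothetical helper, not the Prysm function (the real implementation tracks block time as well as height):

```go
package main

import "fmt"

// followedHeight is a hypothetical helper showing the follow-distance rule the
// test exercises: trail the current eth1 head by followDistance blocks,
// clamping at genesis so the subtraction never underflows.
func followedHeight(currentHeight, followDistance uint64) uint64 {
	if currentHeight < followDistance {
		return 0
	}
	return currentHeight - followDistance
}

func main() {
	fmt.Println(followedHeight(2048, 2048)) // 0
	fmt.Println(followedHeight(2050, 2048)) // 2
}
```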
@@ -485,15 +488,20 @@ func TestNewService_EarliestVotingBlock(t *testing.T) {
	for i := 0; i < numToForward; i++ {
		testAcc.Backend.Commit()
	}
	currTime := testAcc.Backend.Blockchain().CurrentHeader().Time
	hdr, err := testAcc.Backend.Client.HeaderByNumber(context.Background(), nil)
	require.NoError(t, err)
	currTime := hdr.Time
	now := time.Now()
	err = testAcc.Backend.AdjustTime(now.Sub(time.Unix(int64(currTime), 0)))
	require.NoError(t, err)
	testAcc.Backend.Commit()

	currTime = testAcc.Backend.Blockchain().CurrentHeader().Time
	web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentHeader().Number.Uint64()
	web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentHeader().Time
	hdr, err = testAcc.Backend.Client.HeaderByNumber(context.Background(), nil)
	require.NoError(t, err)

	currTime = hdr.Time
	web3Service.latestEth1Data.BlockHeight = hdr.Number.Uint64()
	web3Service.latestEth1Data.BlockTime = hdr.Time
	web3Service.chainStartData.GenesisTime = currTime

	// With a current slot of zero, only request follow_blocks behind.
@@ -36,8 +36,6 @@ type EngineClient struct {
	OverrideValidHash [32]byte
	GetPayloadResponse *blocks.GetPayloadResponse
	ErrGetPayload error
	BlobSidecars []blocks.VerifiedROBlob
	ErrorBlobSidecars error
}

// NewPayload --
@@ -108,11 +106,6 @@ func (e *EngineClient) ReconstructFullBellatrixBlockBatch(
	return fullBlocks, nil
}

// ReconstructBlobSidecars is a mock implementation of the ReconstructBlobSidecars method.
func (e *EngineClient) ReconstructBlobSidecars(context.Context, interfaces.ReadOnlySignedBeaconBlock, [32]byte, []bool) ([]blocks.VerifiedROBlob, error) {
	return e.BlobSidecars, e.ErrorBlobSidecars
}

// GetTerminalBlockHash --
func (e *EngineClient) GetTerminalBlockHash(ctx context.Context, transitionTime uint64) ([]byte, bool, error) {
	ttd := new(big.Int)
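The mock simply returns whatever the test preloads into the BlobSidecars and ErrorBlobSidecars fields. This field-driven stub pattern is easy to reuse anywhere a one-method collaborator needs canned outputs; a self-contained sketch with illustrative types, not the Prysm interfaces:

```go
package main

import (
	"errors"
	"fmt"
)

// Sidecar stands in for blocks.VerifiedROBlob; the real type lives in Prysm.
type Sidecar struct{ Index int }

// Reconstructor is an illustrative one-method interface for the code under test.
type Reconstructor interface {
	ReconstructBlobSidecars() ([]Sidecar, error)
}

// MockEngine is a field-driven stub: tests assign the desired outputs directly.
type MockEngine struct {
	BlobSidecars      []Sidecar
	ErrorBlobSidecars error
}

func (m *MockEngine) ReconstructBlobSidecars() ([]Sidecar, error) {
	return m.BlobSidecars, m.ErrorBlobSidecars
}

func main() {
	happy := &MockEngine{BlobSidecars: []Sidecar{{Index: 0}, {Index: 1}}}
	sad := &MockEngine{ErrorBlobSidecars: errors.New("engine unavailable")}
	for _, r := range []Reconstructor{happy, sad} {
		scs, err := r.ReconstructBlobSidecars()
		fmt.Println(len(scs), err)
	}
}
```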
@@ -18,7 +18,6 @@ go_library(
        "//beacon-chain/forkchoice/types:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//config/fieldparams:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/forkchoice:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "@com_github_pkg_errors//:go_default_library",

@@ -23,13 +23,13 @@ go_library(
        "//testing/spectest:__subpackages__",
    ],
    deps = [
        "//beacon-chain/core/blocks:go_default_library",
        "//beacon-chain/core/epoch/precompute:go_default_library",
        "//beacon-chain/forkchoice:go_default_library",
        "//beacon-chain/forkchoice/types:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/forkchoice:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//encoding/bytesutil:go_default_library",
@@ -11,3 +11,4 @@ var errInvalidOptimisticStatus = errors.New("invalid optimistic status")
var errInvalidNilCheckpoint = errors.New("invalid nil checkpoint")
var errInvalidUnrealizedJustifiedEpoch = errors.New("invalid unrealized justified epoch")
var errInvalidUnrealizedFinalizedEpoch = errors.New("invalid unrealized finalized epoch")
var errNilBlockHeader = errors.New("invalid nil block header")
@@ -6,17 +6,18 @@ import (
	"time"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice"
	forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	forkchoice2 "github.com/prysmaticlabs/prysm/v5/consensus-types/forkchoice"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
	"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v5/runtime/version"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
	"github.com/sirupsen/logrus"
)
@@ -104,10 +105,26 @@ func (f *ForkChoice) ProcessAttestation(ctx context.Context, validatorIndices []
}

// InsertNode processes a new block by inserting it to the fork choice store.
func (f *ForkChoice) InsertNode(ctx context.Context, state state.BeaconState, roblock consensus_blocks.ROBlock) error {
func (f *ForkChoice) InsertNode(ctx context.Context, state state.BeaconState, root [32]byte) error {
	ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.InsertNode")
	defer span.End()

	slot := state.Slot()
	bh := state.LatestBlockHeader()
	if bh == nil {
		return errNilBlockHeader
	}
	parentRoot := bytesutil.ToBytes32(bh.ParentRoot)
	var payloadHash [32]byte
	if state.Version() >= version.Bellatrix {
		ph, err := state.LatestExecutionPayloadHeader()
		if err != nil {
			return err
		}
		if ph != nil {
			copy(payloadHash[:], ph.BlockHash())
		}
	}
	jc := state.CurrentJustifiedCheckpoint()
	if jc == nil {
		return errInvalidNilCheckpoint
@@ -118,7 +135,7 @@ func (f *ForkChoice) InsertNode(ctx context.Context, state state.BeaconState, ro
		return errInvalidNilCheckpoint
	}
	finalizedEpoch := fc.Epoch
	node, err := f.store.insert(ctx, roblock, justifiedEpoch, finalizedEpoch)
	node, err := f.store.insert(ctx, slot, root, parentRoot, payloadHash, justifiedEpoch, finalizedEpoch)
	if err != nil {
		return err
	}
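With the ROBlock parameter gone, InsertNode re-derives everything forkchoice needs from the post-state: the slot, the parent root from the latest block header, and the payload hash from the execution payload header once the state is post-Bellatrix. A self-contained sketch of that derivation with illustrative stand-in types; it is not the Prysm implementation, whose accessors are the state methods shown above:

```go
package main

import (
	"errors"
	"fmt"
)

// header and beaconState are illustrative stand-ins for the Prysm state types.
type header struct{ ParentRoot []byte }

type beaconState struct {
	slot             uint64
	latestHeader     *header
	version          int // bellatrix and later carry an execution payload
	payloadBlockHash []byte
}

const bellatrix = 3 // illustrative version constant

// deriveInsertArgs mirrors the InsertNode logic: everything forkchoice needs
// comes from the state, so callers no longer pass a block object.
func deriveInsertArgs(st *beaconState) (slot uint64, parentRoot, payloadHash [32]byte, err error) {
	if st.latestHeader == nil {
		return 0, parentRoot, payloadHash, errors.New("nil block header")
	}
	copy(parentRoot[:], st.latestHeader.ParentRoot)
	if st.version >= bellatrix {
		copy(payloadHash[:], st.payloadBlockHash)
	}
	return st.slot, parentRoot, payloadHash, nil
}

func main() {
	st := &beaconState{slot: 7, latestHeader: &header{ParentRoot: []byte{1}}, version: bellatrix, payloadBlockHash: []byte{9}}
	slot, pr, ph, err := deriveInsertArgs(st)
	fmt.Println(slot, pr[0], ph[0], err) // 7 1 9 <nil>
}
```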
@@ -473,8 +490,15 @@ func (f *ForkChoice) InsertChain(ctx context.Context, chain []*forkchoicetypes.B
		return nil
	}
	for i := len(chain) - 1; i > 0; i-- {
		b := chain[i].Block
		r := chain[i-1].Block.ParentRoot()
		parentRoot := b.ParentRoot()
		payloadHash, err := blocks.GetBlockPayloadHash(b)
		if err != nil {
			return err
		}
		if _, err := f.store.insert(ctx,
			chain[i].Block,
			b.Slot(), r, parentRoot, payloadHash,
			chain[i].JustifiedCheckpoint.Epoch, chain[i].FinalizedCheckpoint.Epoch); err != nil {
			return err
		}
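Note how `r := chain[i-1].Block.ParentRoot()` recovers the root of block i: the chain is ordered newest-first, so each block's own root is exactly what its child recorded as a parent root, and no re-hashing is required. A minimal sketch of that recovery with a hypothetical block type:

```go
package main

import "fmt"

// blk is a hypothetical block carrying only what the sketch needs.
type blk struct {
	slot       uint64
	parentRoot [32]byte
}

func main() {
	// chain[0] is the newest block and chain[len-1] the oldest, matching the
	// loop direction in InsertChain above.
	chain := []blk{
		{slot: 3, parentRoot: [32]byte{'b'}},
		{slot: 2, parentRoot: [32]byte{'a'}},
		{slot: 1, parentRoot: [32]byte{'g'}},
	}
	for i := len(chain) - 1; i > 0; i-- {
		b := chain[i]
		// The root of chain[i] is whatever its child recorded as parent root.
		root := chain[i-1].parentRoot
		fmt.Printf("insert slot=%d root=%c parent=%c\n", b.slot, root[0], b.parentRoot[0])
	}
}
```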
@@ -32,7 +32,7 @@ func prepareForkchoiceState(
	payloadHash [32]byte,
	justifiedEpoch primitives.Epoch,
	finalizedEpoch primitives.Epoch,
) (state.BeaconState, blocks.ROBlock, error) {
) (state.BeaconState, [32]byte, error) {
	blockHeader := &ethpb.BeaconBlockHeader{
		ParentRoot: parentRoot[:],
	}
@@ -59,40 +59,21 @@ func prepareForkchoiceState(
	}

	st, err := state_native.InitializeFromProtoBellatrix(base)
	if err != nil {
		return nil, blocks.ROBlock{}, err
	}
	blk := &ethpb.SignedBeaconBlockBellatrix{
		Block: &ethpb.BeaconBlockBellatrix{
			Slot: slot,
			ParentRoot: parentRoot[:],
			Body: &ethpb.BeaconBlockBodyBellatrix{
				ExecutionPayload: &enginev1.ExecutionPayload{
					BlockHash: payloadHash[:],
				},
			},
		},
	}
	signed, err := blocks.NewSignedBeaconBlock(blk)
	if err != nil {
		return nil, blocks.ROBlock{}, err
	}
	roblock, err := blocks.NewROBlockWithRoot(signed, blockRoot)
	return st, roblock, err
	return st, blockRoot, err
}
func TestForkChoice_UpdateBalancesPositiveChange(t *testing.T) {
	f := setup(0, 0)
	ctx := context.Background()
	st, roblock, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
	st, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	st, roblock, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))
	st, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	st, roblock, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0)
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))
	st, blkRoot, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))

	f.votes = []Vote{
		{indexToHash(1), indexToHash(1), 0},
@@ -113,15 +94,15 @@ func TestForkChoice_UpdateBalancesPositiveChange(t *testing.T) {
func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
	f := setup(0, 0)
	ctx := context.Background()
	st, roblock, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
	st, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	st, roblock, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))
	st, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	st, roblock, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0)
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))
	st, blkRoot, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))
	s := f.store
	s.nodeByRoot[indexToHash(1)].balance = 100
	s.nodeByRoot[indexToHash(2)].balance = 100
@@ -144,15 +125,15 @@ func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
func TestForkChoice_UpdateBalancesUnderflow(t *testing.T) {
	f := setup(0, 0)
	ctx := context.Background()
	st, roblock, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
	st, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	st, roblock, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))
	st, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	st, roblock, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0)
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))
	st, blkRoot, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))
	s := f.store
	s.nodeByRoot[indexToHash(1)].balance = 100
	s.nodeByRoot[indexToHash(2)].balance = 100
@@ -175,24 +156,24 @@ func TestForkChoice_UpdateBalancesUnderflow(t *testing.T) {
func TestForkChoice_IsCanonical(t *testing.T) {
	f := setup(1, 1)
	ctx := context.Background()
	st, roblock, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
	st, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	st, roblock, err = prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))
	st, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	st, roblock, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))
	st, blkRoot, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	st, roblock, err = prepareForkchoiceState(ctx, 4, indexToHash(4), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))
	st, blkRoot, err = prepareForkchoiceState(ctx, 4, indexToHash(4), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	st, roblock, err = prepareForkchoiceState(ctx, 5, indexToHash(5), indexToHash(4), params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))
	st, blkRoot, err = prepareForkchoiceState(ctx, 5, indexToHash(5), indexToHash(4), params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	st, roblock, err = prepareForkchoiceState(ctx, 6, indexToHash(6), indexToHash(5), params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))
	st, blkRoot, err = prepareForkchoiceState(ctx, 6, indexToHash(6), indexToHash(5), params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))

	require.Equal(t, true, f.IsCanonical(params.BeaconConfig().ZeroHash))
	require.Equal(t, false, f.IsCanonical(indexToHash(1)))
@@ -206,24 +187,24 @@ func TestForkChoice_IsCanonical(t *testing.T) {
func TestForkChoice_IsCanonicalReorg(t *testing.T) {
	f := setup(1, 1)
	ctx := context.Background()
	st, roblock, err := prepareForkchoiceState(ctx, 1, [32]byte{'1'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
	st, blkRoot, err := prepareForkchoiceState(ctx, 1, [32]byte{'1'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	st, roblock, err = prepareForkchoiceState(ctx, 2, [32]byte{'2'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))
	st, blkRoot, err = prepareForkchoiceState(ctx, 2, [32]byte{'2'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	st, roblock, err = prepareForkchoiceState(ctx, 3, [32]byte{'3'}, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))
	st, blkRoot, err = prepareForkchoiceState(ctx, 3, [32]byte{'3'}, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	st, roblock, err = prepareForkchoiceState(ctx, 4, [32]byte{'4'}, [32]byte{'2'}, params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))
	st, blkRoot, err = prepareForkchoiceState(ctx, 4, [32]byte{'4'}, [32]byte{'2'}, params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	st, roblock, err = prepareForkchoiceState(ctx, 5, [32]byte{'5'}, [32]byte{'4'}, params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))
	st, blkRoot, err = prepareForkchoiceState(ctx, 5, [32]byte{'5'}, [32]byte{'4'}, params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	st, roblock, err = prepareForkchoiceState(ctx, 6, [32]byte{'6'}, [32]byte{'5'}, params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))
	st, blkRoot, err = prepareForkchoiceState(ctx, 6, [32]byte{'6'}, [32]byte{'5'}, params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))

	f.store.nodeByRoot[[32]byte{'3'}].balance = 10
	require.NoError(t, f.store.treeRootNode.applyWeightChanges(ctx))
@@ -252,15 +233,15 @@ func TestForkChoice_IsCanonicalReorg(t *testing.T) {
func TestForkChoice_AncestorRoot(t *testing.T) {
	f := setup(1, 1)
	ctx := context.Background()
	st, roblock, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
	st, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	st, roblock, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))
	st, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	st, roblock, err = prepareForkchoiceState(ctx, 5, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))
	st, blkRoot, err = prepareForkchoiceState(ctx, 5, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))
	f.store.treeRootNode = f.store.nodeByRoot[indexToHash(1)]
	f.store.treeRootNode.parent = nil
@@ -284,12 +265,12 @@ func TestForkChoice_AncestorRoot(t *testing.T) {
func TestForkChoice_AncestorEqualSlot(t *testing.T) {
	f := setup(1, 1)
	ctx := context.Background()
	st, roblock, err := prepareForkchoiceState(ctx, 100, [32]byte{'1'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
	st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'1'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	st, roblock, err = prepareForkchoiceState(ctx, 101, [32]byte{'3'}, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))
	st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'3'}, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))

	r, err := f.AncestorRoot(ctx, [32]byte{'3'}, 100)
	require.NoError(t, err)
@@ -299,12 +280,12 @@ func TestForkChoice_AncestorEqualSlot(t *testing.T) {
func TestForkChoice_AncestorLowerSlot(t *testing.T) {
	f := setup(1, 1)
	ctx := context.Background()
	st, roblock, err := prepareForkchoiceState(ctx, 100, [32]byte{'1'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
	st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'1'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	st, roblock, err = prepareForkchoiceState(ctx, 200, [32]byte{'3'}, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))
	st, blkRoot, err = prepareForkchoiceState(ctx, 200, [32]byte{'3'}, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))

	r, err := f.AncestorRoot(ctx, [32]byte{'3'}, 150)
	require.NoError(t, err)
@@ -315,20 +296,20 @@ func TestForkChoice_RemoveEquivocating(t *testing.T) {
	ctx := context.Background()
	f := setup(1, 1)
	// Insert a block it will be head
	st, roblock, err := prepareForkchoiceState(ctx, 1, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
	st, blkRoot, err := prepareForkchoiceState(ctx, 1, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))
	head, err := f.Head(ctx)
	require.NoError(t, err)
	require.Equal(t, [32]byte{'a'}, head)

	// Insert two extra blocks
	st, roblock, err = prepareForkchoiceState(ctx, 2, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
	st, blkRoot, err = prepareForkchoiceState(ctx, 2, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	st, roblock, err = prepareForkchoiceState(ctx, 3, [32]byte{'c'}, [32]byte{'a'}, [32]byte{'C'}, 1, 1)
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))
	st, blkRoot, err = prepareForkchoiceState(ctx, 3, [32]byte{'c'}, [32]byte{'a'}, [32]byte{'C'}, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))
	head, err = f.Head(ctx)
	require.NoError(t, err)
	require.Equal(t, [32]byte{'c'}, head)
@@ -397,36 +378,36 @@ func TestStore_CommonAncestor(t *testing.T) {
	// \-- c -- f
	//       \-- g
	//       \ -- h -- i -- j
	st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
	st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	st, roblock, err = prepareForkchoiceState(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))
	st, blkRoot, err = prepareForkchoiceState(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	st, roblock, err = prepareForkchoiceState(ctx, 2, [32]byte{'c'}, [32]byte{'a'}, [32]byte{'C'}, 1, 1)
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))
	st, blkRoot, err = prepareForkchoiceState(ctx, 2, [32]byte{'c'}, [32]byte{'a'}, [32]byte{'C'}, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	st, roblock, err = prepareForkchoiceState(ctx, 3, [32]byte{'d'}, [32]byte{'b'}, [32]byte{}, 1, 1)
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))
	st, blkRoot, err = prepareForkchoiceState(ctx, 3, [32]byte{'d'}, [32]byte{'b'}, [32]byte{}, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	st, roblock, err = prepareForkchoiceState(ctx, 4, [32]byte{'e'}, [32]byte{'d'}, [32]byte{}, 1, 1)
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))
	st, blkRoot, err = prepareForkchoiceState(ctx, 4, [32]byte{'e'}, [32]byte{'d'}, [32]byte{}, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	st, roblock, err = prepareForkchoiceState(ctx, 5, [32]byte{'f'}, [32]byte{'c'}, [32]byte{}, 1, 1)
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))
	st, blkRoot, err = prepareForkchoiceState(ctx, 5, [32]byte{'f'}, [32]byte{'c'}, [32]byte{}, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	st, roblock, err = prepareForkchoiceState(ctx, 6, [32]byte{'g'}, [32]byte{'c'}, [32]byte{}, 1, 1)
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))
	st, blkRoot, err = prepareForkchoiceState(ctx, 6, [32]byte{'g'}, [32]byte{'c'}, [32]byte{}, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	st, roblock, err = prepareForkchoiceState(ctx, 7, [32]byte{'h'}, [32]byte{'c'}, [32]byte{}, 1, 1)
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))
	st, blkRoot, err = prepareForkchoiceState(ctx, 7, [32]byte{'h'}, [32]byte{'c'}, [32]byte{}, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	st, roblock, err = prepareForkchoiceState(ctx, 8, [32]byte{'i'}, [32]byte{'h'}, [32]byte{}, 1, 1)
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))
	st, blkRoot, err = prepareForkchoiceState(ctx, 8, [32]byte{'i'}, [32]byte{'h'}, [32]byte{}, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	st, roblock, err = prepareForkchoiceState(ctx, 9, [32]byte{'j'}, [32]byte{'i'}, [32]byte{}, 1, 1)
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))
	st, blkRoot, err = prepareForkchoiceState(ctx, 9, [32]byte{'j'}, [32]byte{'i'}, [32]byte{}, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))

	tests := []struct {
		name string
@@ -517,18 +498,18 @@ func TestStore_CommonAncestor(t *testing.T) {

	// a -- b -- c -- d
	f = setup(0, 0)
	st, roblock, err = prepareForkchoiceState(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
	st, blkRoot, err = prepareForkchoiceState(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	st, roblock, err = prepareForkchoiceState(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))
	st, blkRoot, err = prepareForkchoiceState(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	st, roblock, err = prepareForkchoiceState(ctx, 2, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))
	st, blkRoot, err = prepareForkchoiceState(ctx, 2, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	st, roblock, err = prepareForkchoiceState(ctx, 3, [32]byte{'d'}, [32]byte{'c'}, [32]byte{}, 1, 1)
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))
	st, blkRoot, err = prepareForkchoiceState(ctx, 3, [32]byte{'d'}, [32]byte{'c'}, [32]byte{}, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))
	tests = []struct {
		name string
		r1   [32]byte
@@ -609,9 +590,7 @@ func TestStore_InsertChain(t *testing.T) {
	require.NoError(t, err)
	wsb, err := blocks.NewSignedBeaconBlock(blk)
	require.NoError(t, err)
	roblock, err := blocks.NewROBlockWithRoot(wsb, root)
	require.NoError(t, err)
	blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{Block: roblock,
	blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{Block: wsb.Block(),
		JustifiedCheckpoint: &ethpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
		FinalizedCheckpoint: &ethpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
	})
@@ -620,16 +599,14 @@
		blk.Block.Slot = primitives.Slot(i)
		copiedRoot := root
		blk.Block.ParentRoot = copiedRoot[:]
		root, err = blk.Block.HashTreeRoot()
		require.NoError(t, err)
		wsb, err = blocks.NewSignedBeaconBlock(blk)
		require.NoError(t, err)
		roblock, err := blocks.NewROBlockWithRoot(wsb, root)
		require.NoError(t, err)
		blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{Block: roblock,
		blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{Block: wsb.Block(),
			JustifiedCheckpoint: &ethpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
			FinalizedCheckpoint: &ethpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
		})
		root, err = blk.Block.HashTreeRoot()
		require.NoError(t, err)
	}
	args := make([]*forkchoicetypes.BlockAndCheckpoints, 10)
	for i := 0; i < len(blks); i++ {
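The test builds its chain by pointing each new block's ParentRoot at the previous block's hash-tree root before re-hashing, so every block commits to its parent. A minimal sketch of that parent-root chaining, using sha256 as a stand-in for the SSZ hash-tree-root the real code computes:

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// node is a hypothetical block: a slot plus a pointer to its parent's root.
type node struct {
	slot       uint64
	parentRoot [32]byte
}

// root stands in for HashTreeRoot; the real code hashes the SSZ block.
func (n node) root() [32]byte {
	buf := append([]byte{byte(n.slot)}, n.parentRoot[:]...)
	return sha256.Sum256(buf)
}

func main() {
	prev := [32]byte{} // genesis root placeholder
	for slot := uint64(1); slot <= 3; slot++ {
		b := node{slot: slot, parentRoot: prev}
		r := b.root()
		fmt.Printf("slot %d parent=%x... root=%x...\n", slot, b.parentRoot[:3], r[:3])
		prev = r // the next block will point here
	}
}
```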
@@ -693,26 +670,26 @@ func TestForkChoice_UpdateCheckpoints(t *testing.T) {
			fcs.store.finalizedCheckpoint = tt.finalized
			fcs.store.genesisTime = uint64(time.Now().Unix()) - uint64(tt.currentSlot)*params.BeaconConfig().SecondsPerSlot

			st, roblock, err := prepareForkchoiceState(ctx, 32, [32]byte{'f'},
			st, blkRoot, err := prepareForkchoiceState(ctx, 32, [32]byte{'f'},
				[32]byte{}, [32]byte{}, tt.finalized.Epoch, tt.finalized.Epoch)
			require.NoError(t, err)
			require.NoError(t, fcs.InsertNode(ctx, st, roblock))
			st, roblock, err = prepareForkchoiceState(ctx, 64, [32]byte{'j'},
			require.NoError(t, fcs.InsertNode(ctx, st, blkRoot))
			st, blkRoot, err = prepareForkchoiceState(ctx, 64, [32]byte{'j'},
				[32]byte{'f'}, [32]byte{}, tt.justified.Epoch, tt.finalized.Epoch)
			require.NoError(t, err)
			require.NoError(t, fcs.InsertNode(ctx, st, roblock))
			st, roblock, err = prepareForkchoiceState(ctx, 96, [32]byte{'b'},
			require.NoError(t, fcs.InsertNode(ctx, st, blkRoot))
			st, blkRoot, err = prepareForkchoiceState(ctx, 96, [32]byte{'b'},
				[32]byte{'j'}, [32]byte{}, tt.newJustified.Epoch, tt.newFinalized.Epoch)
			require.NoError(t, err)
			require.NoError(t, fcs.InsertNode(ctx, st, roblock))
			st, roblock, err = prepareForkchoiceState(ctx, 96, [32]byte{'c'},
			require.NoError(t, fcs.InsertNode(ctx, st, blkRoot))
			st, blkRoot, err = prepareForkchoiceState(ctx, 96, [32]byte{'c'},
				[32]byte{'f'}, [32]byte{}, tt.newJustified.Epoch, tt.newFinalized.Epoch)
			require.NoError(t, err)
			require.NoError(t, fcs.InsertNode(ctx, st, roblock))
			st, roblock, err = prepareForkchoiceState(ctx, 65, [32]byte{'h'},
			require.NoError(t, fcs.InsertNode(ctx, st, blkRoot))
			st, blkRoot, err = prepareForkchoiceState(ctx, 65, [32]byte{'h'},
				[32]byte{'f'}, [32]byte{}, tt.newFinalized.Epoch, tt.newFinalized.Epoch)
			require.NoError(t, err)
			require.NoError(t, fcs.InsertNode(ctx, st, roblock))
			require.NoError(t, fcs.InsertNode(ctx, st, blkRoot))
			// restart justifications cause insertion messed it up
			fcs.store.justifiedCheckpoint = tt.justified
			fcs.store.finalizedCheckpoint = tt.finalized
@@ -738,9 +715,9 @@ func TestWeight(t *testing.T) {
	f := setup(0, 0)

	root := [32]byte{'a'}
	st, roblock, err := prepareForkchoiceState(ctx, 0, root, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
	st, blkRoot, err := prepareForkchoiceState(ctx, 0, root, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))

	n, ok := f.store.nodeByRoot[root]
	require.Equal(t, true, ok)
@@ -770,9 +747,9 @@ func TestForkChoice_UnrealizedJustifiedPayloadBlockHash(t *testing.T) {
	ctx := context.Background()
	f := setup(0, 0)

	st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
	st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, roblock))
	require.NoError(t, f.InsertNode(ctx, st, blkRoot))

	f.store.unrealizedJustifiedCheckpoint.Root = [32]byte{'a'}
	got := f.UnrealizedJustifiedPayloadBlockHash()
@@ -783,90 +760,90 @@ func TestForkChoiceIsViableForCheckpoint(t *testing.T) {
	f := setup(0, 0)
	ctx := context.Background()

	st, blk, err := prepareForkchoiceState(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
	st, root, err := prepareForkchoiceState(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
	require.NoError(t, err)
	// No Node
	viable, err := f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk.Root()})
	viable, err := f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root})
	require.NoError(t, err)
	require.Equal(t, false, viable)

	// No Children
	require.NoError(t, f.InsertNode(ctx, st, blk))
	viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk.Root(), Epoch: 0})
	require.NoError(t, f.InsertNode(ctx, st, root))
	viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root, Epoch: 0})
	require.NoError(t, err)
	require.Equal(t, true, viable)

	viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk.Root(), Epoch: 1})
	viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root, Epoch: 1})
	require.NoError(t, err)
	require.Equal(t, true, viable)

	viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk.Root(), Epoch: 2})
	viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root, Epoch: 2})
	require.NoError(t, err)
	require.Equal(t, true, viable)

	st, blk2, err := prepareForkchoiceState(ctx, 1, [32]byte{'b'}, blk.Root(), [32]byte{'B'}, 0, 0)
	st, bRoot, err := prepareForkchoiceState(ctx, 1, [32]byte{'b'}, root, [32]byte{'B'}, 0, 0)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, blk2))
	require.NoError(t, f.InsertNode(ctx, st, bRoot))

	// Epoch start
	viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk.Root()})
	viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root})
	require.NoError(t, err)
	require.Equal(t, true, viable)

	viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk.Root(), Epoch: 1})
	viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root, Epoch: 1})
	require.NoError(t, err)
	require.Equal(t, false, viable)

	// No Children but impossible checkpoint
	viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk2.Root()})
	viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: bRoot})
	require.NoError(t, err)
	require.Equal(t, false, viable)

	viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk2.Root(), Epoch: 1})
	viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: bRoot, Epoch: 1})
	require.NoError(t, err)
	require.Equal(t, true, viable)

	st, blk3, err := prepareForkchoiceState(ctx, 2, [32]byte{'c'}, blk2.Root(), [32]byte{'C'}, 0, 0)
	st, cRoot, err := prepareForkchoiceState(ctx, 2, [32]byte{'c'}, bRoot, [32]byte{'C'}, 0, 0)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, blk3))
	require.NoError(t, f.InsertNode(ctx, st, cRoot))

	// Children in same epoch
	viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk2.Root()})
	viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: bRoot})
	require.NoError(t, err)
	require.Equal(t, false, viable)

	viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk2.Root(), Epoch: 1})
	viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: bRoot, Epoch: 1})
	require.NoError(t, err)
	require.Equal(t, false, viable)

	st, blk4, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch, [32]byte{'d'}, blk2.Root(), [32]byte{'D'}, 0, 0)
	st, dRoot, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch, [32]byte{'d'}, bRoot, [32]byte{'D'}, 0, 0)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, blk4))
	require.NoError(t, f.InsertNode(ctx, st, dRoot))

	// Children in next epoch but boundary
	viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk2.Root()})
	viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: bRoot})
	require.NoError(t, err)
	require.Equal(t, false, viable)

	viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk2.Root(), Epoch: 1})
	viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: bRoot, Epoch: 1})
	require.NoError(t, err)
	require.Equal(t, false, viable)

	// Boundary block
	viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk4.Root(), Epoch: 1})
	viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: dRoot, Epoch: 1})
	require.NoError(t, err)
	require.Equal(t, true, viable)

	viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk4.Root(), Epoch: 0})
	viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: dRoot, Epoch: 0})
	require.NoError(t, err)
	require.Equal(t, false, viable)

	// Children in next epoch
	st, blk5, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'e'}, blk2.Root(), [32]byte{'E'}, 0, 0)
	st, eRoot, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'e'}, bRoot, [32]byte{'E'}, 0, 0)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, blk5))
	require.NoError(t, f.InsertNode(ctx, st, eRoot))

	viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk2.Root(), Epoch: 1})
	viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: bRoot, Epoch: 1})
	require.NoError(t, err)
	require.Equal(t, true, viable)
}
@@ -874,14 +851,14 @@ func TestForkChoiceIsViableForCheckpoint(t *testing.T) {
func TestForkChoiceSlot(t *testing.T) {
	f := setup(0, 0)
	ctx := context.Background()
	st, blk, err := prepareForkchoiceState(ctx, 3, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
	st, root, err := prepareForkchoiceState(ctx, 3, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
	require.NoError(t, err)
	// No Node
	_, err = f.Slot(blk.Root())
	_, err = f.Slot(root)
	require.ErrorIs(t, ErrNilNode, err)

	require.NoError(t, f.InsertNode(ctx, st, blk))
	slot, err := f.Slot(blk.Root())
	require.NoError(t, f.InsertNode(ctx, st, root))
	slot, err := f.Slot(root)
	require.NoError(t, err)
	require.Equal(t, primitives.Slot(3), slot)
}
@@ -890,16 +867,16 @@ func TestForkchoiceParentRoot(t *testing.T) {
	f := setup(0, 0)
	ctx := context.Background()
	root1 := [32]byte{'a'}
	st, blk, err := prepareForkchoiceState(ctx, 3, root1, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
	st, root, err := prepareForkchoiceState(ctx, 3, root1, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, blk))
	require.NoError(t, f.InsertNode(ctx, st, root))

	root2 := [32]byte{'b'}
	st, blk, err = prepareForkchoiceState(ctx, 3, root2, root1, [32]byte{'A'}, 0, 0)
	st, root, err = prepareForkchoiceState(ctx, 3, root2, root1, [32]byte{'A'}, 0, 0)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, st, blk))
	require.NoError(t, f.InsertNode(ctx, st, root))

	root, err := f.ParentRoot(root2)
	root, err = f.ParentRoot(root2)
	require.NoError(t, err)
	require.Equal(t, root1, root)

@@ -915,12 +892,12 @@ func TestForkchoiceParentRoot(t *testing.T) {
func TestForkChoice_CleanupInserting(t *testing.T) {
	f := setup(0, 0)
	ctx := context.Background()
	st, roblock, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 2, 2)
	st, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 2, 2)
	f.SetBalancesByRooter(func(_ context.Context, _ [32]byte) ([]uint64, error) {
		return f.justifiedBalances, errors.New("mock err")
	})

	require.NoError(t, err)
	require.NotNil(t, f.InsertNode(ctx, st, roblock))
	require.Equal(t, false, f.HasNode(roblock.Root()))
	require.NotNil(t, f.InsertNode(ctx, st, blkRoot))
	require.Equal(t, false, f.HasNode(blkRoot))
}
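TestForkChoice_CleanupInserting injects a failure through a swappable function field and then checks that the failed insertion left no node behind. A self-contained sketch of that error-injection-plus-rollback idea with hypothetical types; it only illustrates the pattern, not the Prysm store:

```go
package main

import (
	"errors"
	"fmt"
)

// store is a hypothetical component whose collaborator is a swappable function
// field, the same injection seam the test uses via SetBalancesByRooter.
type store struct {
	nodes       map[string]bool
	balancesFor func(root string) ([]uint64, error)
}

// insert only commits the node once every fallible step has succeeded, so a
// failed insertion leaves no partial state behind.
func (s *store) insert(root string) error {
	if _, err := s.balancesFor(root); err != nil {
		return err
	}
	s.nodes[root] = true
	return nil
}

func main() {
	s := &store{
		nodes:       map[string]bool{},
		balancesFor: func(string) ([]uint64, error) { return nil, errors.New("mock err") },
	}
	err := s.insert("a")
	fmt.Println(err, s.nodes["a"]) // mock err false
}
```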
@@ -14,15 +14,15 @@ import (
func TestNode_ApplyWeightChanges_PositiveChange(t *testing.T) {
	f := setup(0, 0)
	ctx := context.Background()
	state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
	state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, state, blk))
	state, blk, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
	state, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, state, blk))
	state, blk, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0)
	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
	state, blkRoot, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, state, blk))
	require.NoError(t, f.InsertNode(ctx, state, blkRoot))

	// The updated balances of each node is 100
	s := f.store
@@ -41,15 +41,15 @@ func TestNode_ApplyWeightChanges_PositiveChange(t *testing.T) {
func TestNode_ApplyWeightChanges_NegativeChange(t *testing.T) {
	f := setup(0, 0)
	ctx := context.Background()
	state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
	state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, state, blk))
	state, blk, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
	state, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, state, blk))
	state, blk, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0)
	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
	state, blkRoot, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, state, blk))
	require.NoError(t, f.InsertNode(ctx, state, blkRoot))

	// The updated balances of each node is 100
	s := f.store
@@ -72,9 +72,9 @@ func TestNode_UpdateBestDescendant_NonViableChild(t *testing.T) {
	f := setup(1, 1)
	ctx := context.Background()
	// Input child is not viable.
	state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 2, 3)
	state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 2, 3)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, state, blk))
	require.NoError(t, f.InsertNode(ctx, state, blkRoot))

	// Verify parent's best child and best descendant are `none`.
	s := f.store
@@ -87,9 +87,9 @@ func TestNode_UpdateBestDescendant_ViableChild(t *testing.T) {
	f := setup(1, 1)
	ctx := context.Background()
	// Input child is the best descendant
	state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
	state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, state, blk))
	require.NoError(t, f.InsertNode(ctx, state, blkRoot))

	s := f.store
	assert.Equal(t, 1, len(s.treeRootNode.children))
@@ -100,12 +100,12 @@ func TestNode_UpdateBestDescendant_HigherWeightChild(t *testing.T) {
	f := setup(1, 1)
	ctx := context.Background()
	// Input child is the best descendant
	state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
	state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, state, blk))
	state, blk, err = prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
	state, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, state, blk))
	require.NoError(t, f.InsertNode(ctx, state, blkRoot))

	s := f.store
	s.nodeByRoot[indexToHash(1)].weight = 100
@@ -120,12 +120,12 @@ func TestNode_UpdateBestDescendant_LowerWeightChild(t *testing.T) {
	f := setup(1, 1)
	ctx := context.Background()
	// Input child is the best descendant
	state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
	state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, state, blk))
	state, blk, err = prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, f.InsertNode(ctx, state, blkRoot))
	state, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
	require.NoError(t, err)
	require.NoError(t, f.InsertNode(ctx, state, blk))
	require.NoError(t, f.InsertNode(ctx, state, blkRoot))

	s := f.store
	s.nodeByRoot[indexToHash(1)].weight = 200
@@ -159,21 +159,21 @@ func TestNode_ViableForHead(t *testing.T) {
|
||||
func TestNode_LeadsToViableHead(t *testing.T) {
|
||||
f := setup(4, 3)
|
||||
ctx := context.Background()
|
||||
state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blk))
|
||||
state, blk, err = prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blk))
|
||||
state, blk, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blk))
|
||||
state, blk, err = prepareForkchoiceState(ctx, 4, indexToHash(4), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 4, indexToHash(4), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blk))
|
||||
state, blk, err = prepareForkchoiceState(ctx, 5, indexToHash(5), indexToHash(3), params.BeaconConfig().ZeroHash, 4, 3)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 5, indexToHash(5), indexToHash(3), params.BeaconConfig().ZeroHash, 4, 3)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blk))
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
require.Equal(t, true, f.store.treeRootNode.leadsToViableHead(4, 5))
|
||||
require.Equal(t, true, f.store.nodeByRoot[indexToHash(5)].leadsToViableHead(4, 5))
|
||||
@@ -192,28 +192,28 @@ func TestNode_SetFullyValidated(t *testing.T) {
// \
// -- 5 (true)
//
state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk))
storeNodes[1] = f.store.nodeByRoot[blk.Root()]
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
storeNodes[1] = f.store.nodeByRoot[blkRoot]
require.NoError(t, f.SetOptimisticToValid(ctx, params.BeaconConfig().ZeroHash))
state, blk, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
state, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk))
storeNodes[2] = f.store.nodeByRoot[blk.Root()]
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
storeNodes[2] = f.store.nodeByRoot[blkRoot]
require.NoError(t, f.SetOptimisticToValid(ctx, indexToHash(1)))
state, blk, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
state, blkRoot, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk))
storeNodes[3] = f.store.nodeByRoot[blk.Root()]
state, blk, err = prepareForkchoiceState(ctx, 4, indexToHash(4), indexToHash(3), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
storeNodes[3] = f.store.nodeByRoot[blkRoot]
state, blkRoot, err = prepareForkchoiceState(ctx, 4, indexToHash(4), indexToHash(3), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk))
storeNodes[4] = f.store.nodeByRoot[blk.Root()]
state, blk, err = prepareForkchoiceState(ctx, 5, indexToHash(5), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
storeNodes[4] = f.store.nodeByRoot[blkRoot]
state, blkRoot, err = prepareForkchoiceState(ctx, 5, indexToHash(5), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk))
storeNodes[5] = f.store.nodeByRoot[blk.Root()]
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
storeNodes[5] = f.store.nodeByRoot[blkRoot]

opt, err := f.IsOptimistic(indexToHash(5))
require.NoError(t, err)
@@ -266,9 +266,9 @@ func TestNode_TimeStampsChecks(t *testing.T) {
driftGenesisTime(f, 1, 1)
root := [32]byte{'a'}
f.justifiedBalances = []uint64{10}
state, blk, err := prepareForkchoiceState(ctx, 1, root, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
state, blkRoot, err := prepareForkchoiceState(ctx, 1, root, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk))
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
headRoot, err := f.Head(ctx)
require.NoError(t, err)
require.Equal(t, root, headRoot)
@@ -283,9 +283,9 @@ func TestNode_TimeStampsChecks(t *testing.T) {
// late block
driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+1)
root = [32]byte{'b'}
state, blk, err = prepareForkchoiceState(ctx, 2, root, [32]byte{'a'}, [32]byte{'B'}, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 2, root, [32]byte{'a'}, [32]byte{'B'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk))
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
headRoot, err = f.Head(ctx)
require.NoError(t, err)
require.Equal(t, root, headRoot)
@@ -299,9 +299,9 @@ func TestNode_TimeStampsChecks(t *testing.T) {
// very late block
driftGenesisTime(f, 3, ProcessAttestationsThreshold+1)
root = [32]byte{'c'}
state, blk, err = prepareForkchoiceState(ctx, 3, root, [32]byte{'b'}, [32]byte{'C'}, 0, 0)
state, blkRoot, err = prepareForkchoiceState(ctx, 3, root, [32]byte{'b'}, [32]byte{'C'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk))
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
headRoot, err = f.Head(ctx)
require.NoError(t, err)
require.Equal(t, root, headRoot)
@@ -314,9 +314,9 @@ func TestNode_TimeStampsChecks(t *testing.T) {

// block from the future
root = [32]byte{'d'}
state, blk, err = prepareForkchoiceState(ctx, 5, root, [32]byte{'c'}, [32]byte{'D'}, 1, 1)
state, blkRoot, err = prepareForkchoiceState(ctx, 5, root, [32]byte{'c'}, [32]byte{'D'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk))
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
headRoot, err = f.Head(ctx)
require.NoError(t, err)
require.Equal(t, root, headRoot)
@@ -484,23 +484,23 @@ func TestForkChoice_missingProposerBoostRoots(t *testing.T) {
}
f.justifiedBalances = balances
driftGenesisTime(f, 1, 0)
st, blk, err := prepareForkchoiceState(ctx, 1, [32]byte{'r'}, [32]byte{}, [32]byte{}, 1, 1)
st, root, err := prepareForkchoiceState(ctx, 1, [32]byte{'r'}, [32]byte{}, [32]byte{}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blk))
require.NoError(t, f.InsertNode(ctx, st, root))

f.store.previousProposerBoostRoot = [32]byte{'p'}
headRoot, err := f.Head(ctx)
require.NoError(t, err)
require.Equal(t, blk.Root(), headRoot)
require.Equal(t, root, headRoot)
require.Equal(t, [32]byte{'r'}, f.store.proposerBoostRoot)

f.store.proposerBoostRoot = [32]byte{'p'}
driftGenesisTime(f, 3, 0)
st, blk, err = prepareForkchoiceState(ctx, 2, [32]byte{'a'}, [32]byte{'r'}, [32]byte{}, 1, 1)
st, root, err = prepareForkchoiceState(ctx, 2, [32]byte{'a'}, [32]byte{'r'}, [32]byte{}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blk))
require.NoError(t, f.InsertNode(ctx, st, root))
headRoot, err = f.Head(ctx)
require.NoError(t, err)
require.Equal(t, blk.Root(), headRoot)
require.Equal(t, root, headRoot)
require.Equal(t, [32]byte{'p'}, f.store.proposerBoostRoot)
}
@@ -19,23 +19,23 @@ func TestForkChoice_ShouldOverrideFCU(t *testing.T) {
f.store.committeeWeight /= uint64(params.BeaconConfig().SlotsPerEpoch)
ctx := context.Background()
driftGenesisTime(f, 1, 0)
st, blk, err := prepareForkchoiceState(ctx, 1, [32]byte{'a'}, [32]byte{}, [32]byte{'A'}, 0, 0)
st, root, err := prepareForkchoiceState(ctx, 1, [32]byte{'a'}, [32]byte{}, [32]byte{'A'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blk))
require.NoError(t, f.InsertNode(ctx, st, root))
attesters := make([]uint64, f.numActiveValidators-64)
for i := range attesters {
attesters[i] = uint64(i + 64)
}
f.ProcessAttestation(ctx, attesters, blk.Root(), 0)
f.ProcessAttestation(ctx, attesters, root, 0)

orphanLateBlockFirstThreshold := params.BeaconConfig().SecondsPerSlot / params.BeaconConfig().IntervalsPerSlot
driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+1)
st, blk, err = prepareForkchoiceState(ctx, 2, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 0, 0)
st, root, err = prepareForkchoiceState(ctx, 2, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blk))
require.NoError(t, f.InsertNode(ctx, st, root))
headRoot, err := f.Head(ctx)
require.NoError(t, err)
require.Equal(t, blk.Root(), headRoot)
require.Equal(t, root, headRoot)
t.Run("head is weak", func(t *testing.T) {
require.Equal(t, true, f.ShouldOverrideFCU())
@@ -117,23 +117,23 @@ func TestForkChoice_GetProposerHead(t *testing.T) {
ctx := context.Background()
driftGenesisTime(f, 1, 0)
parentRoot := [32]byte{'a'}
st, blk, err := prepareForkchoiceState(ctx, 1, parentRoot, [32]byte{}, [32]byte{'A'}, 0, 0)
st, root, err := prepareForkchoiceState(ctx, 1, parentRoot, [32]byte{}, [32]byte{'A'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blk))
require.NoError(t, f.InsertNode(ctx, st, root))
attesters := make([]uint64, f.numActiveValidators-64)
for i := range attesters {
attesters[i] = uint64(i + 64)
}
f.ProcessAttestation(ctx, attesters, blk.Root(), 0)
f.ProcessAttestation(ctx, attesters, root, 0)

driftGenesisTime(f, 3, 1)
childRoot := [32]byte{'b'}
st, blk, err = prepareForkchoiceState(ctx, 2, childRoot, [32]byte{'a'}, [32]byte{'B'}, 0, 0)
st, root, err = prepareForkchoiceState(ctx, 2, childRoot, [32]byte{'a'}, [32]byte{'B'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blk))
require.NoError(t, f.InsertNode(ctx, st, root))
headRoot, err := f.Head(ctx)
require.NoError(t, err)
require.Equal(t, blk.Root(), headRoot)
require.Equal(t, root, headRoot)
orphanLateBlockFirstThreshold := params.BeaconConfig().SecondsPerSlot / params.BeaconConfig().IntervalsPerSlot
f.store.headNode.timestamp -= params.BeaconConfig().SecondsPerSlot - orphanLateBlockFirstThreshold
t.Run("head is weak", func(t *testing.T) {
@@ -8,10 +8,8 @@ import (
"github.com/pkg/errors"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
@@ -65,24 +63,12 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
// insert registers a new block node to the fork choice store's node list.
// It then updates the new node's parent with the best child and descendant node.
func (s *Store) insert(ctx context.Context,
roblock consensus_blocks.ROBlock,
slot primitives.Slot,
root, parentRoot, payloadHash [fieldparams.RootLength]byte,
justifiedEpoch, finalizedEpoch primitives.Epoch) (*Node, error) {
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.insert")
defer span.End()

root := roblock.Root()
block := roblock.Block()
slot := block.Slot()
parentRoot := block.ParentRoot()
var payloadHash [32]byte
if block.Version() >= version.Bellatrix {
execution, err := block.Body().Execution()
if err != nil {
return nil, err
}
copy(payloadHash[:], execution.BlockHash())
}

// Return if the block has been inserted into Store before.
if n, ok := s.nodeByRoot[root]; ok {
return n, nil
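The hunk above narrows insert's inputs: instead of receiving an ROBlock and unpacking it, the store now takes slot, root, parent root, and payload hash explicitly. The unpacking the old body performed can then live at the call site; a sketch under that assumption (the helper name is hypothetical, and the surrounding imports match the ones removed above):

// deriveInsertArgs mirrors the unpacking the old insert body did itself:
// root, slot, and parent root come straight from the read-only block, and
// a payload hash only exists for Bellatrix-and-later blocks that carry an
// execution payload.
func deriveInsertArgs(roblock consensus_blocks.ROBlock) ([32]byte, primitives.Slot, [32]byte, [32]byte, error) {
	root := roblock.Root()
	block := roblock.Block()
	slot := block.Slot()
	parentRoot := block.ParentRoot()
	var payloadHash [32]byte
	if block.Version() >= version.Bellatrix {
		execution, err := block.Body().Execution()
		if err != nil {
			return root, slot, parentRoot, payloadHash, err
		}
		copy(payloadHash[:], execution.BlockHash())
	}
	return root, slot, parentRoot, payloadHash, nil
}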
@@ -26,9 +26,9 @@ func TestStore_FinalizedEpoch(t *testing.T) {

func TestStore_NodeCount(t *testing.T) {
f := setup(0, 0)
state, blk, err := prepareForkchoiceState(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
state, blkRoot, err := prepareForkchoiceState(context.Background(), 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(context.Background(), state, blk))
require.NoError(t, f.InsertNode(context.Background(), state, blkRoot))
require.Equal(t, 2, f.NodeCount())
}
@@ -133,10 +133,7 @@ func TestStore_Insert(t *testing.T) {
fc := &forkchoicetypes.Checkpoint{Epoch: 0}
s := &Store{nodeByRoot: nodeByRoot, treeRootNode: treeRootNode, nodeByPayload: nodeByPayload, justifiedCheckpoint: jc, finalizedCheckpoint: fc, highestReceivedNode: &Node{}}
payloadHash := [32]byte{'a'}
ctx := context.Background()
_, blk, err := prepareForkchoiceState(ctx, 100, indexToHash(100), indexToHash(0), payloadHash, 1, 1)
require.NoError(t, err)
_, err = s.insert(ctx, blk, 1, 1)
_, err := s.insert(context.Background(), 100, indexToHash(100), indexToHash(0), payloadHash, 1, 1)
require.NoError(t, err)
assert.Equal(t, 2, len(s.nodeByRoot), "Did not insert block")
assert.Equal(t, (*Node)(nil), treeRootNode.parent, "Incorrect parent")
@@ -330,10 +327,7 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {

// Make sure it doesn't underflow
s.genesisTime = uint64(time.Now().Add(time.Duration(-1*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second).Unix())
ctx := context.Background()
_, blk, err := prepareForkchoiceState(ctx, 1, [32]byte{'a'}, b, b, 1, 1)
require.NoError(t, err)
_, err = s.insert(ctx, blk, 1, 1)
_, err := s.insert(context.Background(), 1, [32]byte{'a'}, b, b, 1, 1)
require.NoError(t, err)
count, err := f.ReceivedBlocksLastEpoch()
require.NoError(t, err)
@@ -343,9 +337,7 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {

// 64
// Received block last epoch is 1
_, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'A'}, b, b, 1, 1)
require.NoError(t, err)
_, err = s.insert(ctx, blk, 1, 1)
_, err = s.insert(context.Background(), 64, [32]byte{'A'}, b, b, 1, 1)
require.NoError(t, err)
s.genesisTime = uint64(time.Now().Add(time.Duration((-64*int64(params.BeaconConfig().SecondsPerSlot))-1) * time.Second).Unix())
count, err = f.ReceivedBlocksLastEpoch()
@@ -356,9 +348,7 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {

// 64 65
// Received block last epoch is 2
_, blk, err = prepareForkchoiceState(ctx, 65, [32]byte{'B'}, b, b, 1, 1)
require.NoError(t, err)
_, err = s.insert(ctx, blk, 1, 1)
_, err = s.insert(context.Background(), 65, [32]byte{'B'}, b, b, 1, 1)
require.NoError(t, err)
s.genesisTime = uint64(time.Now().Add(time.Duration(-66*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second).Unix())
count, err = f.ReceivedBlocksLastEpoch()
@@ -369,9 +359,7 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {

// 64 65 66
// Received block last epoch is 3
_, blk, err = prepareForkchoiceState(ctx, 66, [32]byte{'C'}, b, b, 1, 1)
require.NoError(t, err)
_, err = s.insert(ctx, blk, 1, 1)
_, err = s.insert(context.Background(), 66, [32]byte{'C'}, b, b, 1, 1)
require.NoError(t, err)
s.genesisTime = uint64(time.Now().Add(time.Duration(-66*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second).Unix())
count, err = f.ReceivedBlocksLastEpoch()
@@ -382,9 +370,7 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
// 64 65 66
// 98
// Received block last epoch is 1
_, blk, err = prepareForkchoiceState(ctx, 98, [32]byte{'D'}, b, b, 1, 1)
require.NoError(t, err)
_, err = s.insert(ctx, blk, 1, 1)
_, err = s.insert(context.Background(), 98, [32]byte{'D'}, b, b, 1, 1)
require.NoError(t, err)
s.genesisTime = uint64(time.Now().Add(time.Duration(-98*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second).Unix())
count, err = f.ReceivedBlocksLastEpoch()
@@ -396,9 +382,7 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
// 98
// 132
// Received block last epoch is 1
_, blk, err = prepareForkchoiceState(ctx, 132, [32]byte{'E'}, b, b, 1, 1)
require.NoError(t, err)
_, err = s.insert(ctx, blk, 1, 1)
_, err = s.insert(context.Background(), 132, [32]byte{'E'}, b, b, 1, 1)
require.NoError(t, err)
s.genesisTime = uint64(time.Now().Add(time.Duration(-132*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second).Unix())
count, err = f.ReceivedBlocksLastEpoch()
@@ -411,9 +395,7 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
// 132
// 99
// Received block last epoch is still 1. 99 is outside the window
_, blk, err = prepareForkchoiceState(ctx, 99, [32]byte{'F'}, b, b, 1, 1)
require.NoError(t, err)
_, err = s.insert(ctx, blk, 1, 1)
_, err = s.insert(context.Background(), 99, [32]byte{'F'}, b, b, 1, 1)
require.NoError(t, err)
s.genesisTime = uint64(time.Now().Add(time.Duration(-132*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second).Unix())
count, err = f.ReceivedBlocksLastEpoch()
@@ -426,9 +408,7 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
// 132
// 99 100
// Received block last epoch is still 1. 100 is at the same position as 132
_, blk, err = prepareForkchoiceState(ctx, 100, [32]byte{'G'}, b, b, 1, 1)
require.NoError(t, err)
_, err = s.insert(ctx, blk, 1, 1)
_, err = s.insert(context.Background(), 100, [32]byte{'G'}, b, b, 1, 1)
require.NoError(t, err)
s.genesisTime = uint64(time.Now().Add(time.Duration(-132*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second).Unix())
count, err = f.ReceivedBlocksLastEpoch()
@@ -441,9 +421,7 @@ func TestForkChoice_ReceivedBlocksLastEpoch(t *testing.T) {
// 132
// 99 100 101
// Received block last epoch is 2. 101 is within the window
_, blk, err = prepareForkchoiceState(ctx, 101, [32]byte{'H'}, b, b, 1, 1)
require.NoError(t, err)
_, err = s.insert(ctx, blk, 1, 1)
_, err = s.insert(context.Background(), 101, [32]byte{'H'}, b, b, 1, 1)
require.NoError(t, err)
s.genesisTime = uint64(time.Now().Add(time.Duration(-132*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second).Unix())
count, err = f.ReceivedBlocksLastEpoch()
@@ -465,94 +443,94 @@ func TestStore_TargetRootForEpoch(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)

state, blk, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
state, blkRoot, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk))
target, err := f.TargetRootForEpoch(blk.Root(), 1)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
target, err := f.TargetRootForEpoch(blkRoot, 1)
require.NoError(t, err)
require.Equal(t, target, blk.Root())
require.Equal(t, target, blkRoot)

state, blk1, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'b'}, blk.Root(), params.BeaconConfig().ZeroHash, 1, 1)
state, root1, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'b'}, blkRoot, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk1))
target, err = f.TargetRootForEpoch(blk1.Root(), 1)
require.NoError(t, f.InsertNode(ctx, state, root1))
target, err = f.TargetRootForEpoch(root1, 1)
require.NoError(t, err)
require.Equal(t, target, blk.Root())
require.Equal(t, target, blkRoot)

// Insert a block for the next epoch (missed slot 0)

state, blk2, err := prepareForkchoiceState(ctx, 2*params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'c'}, blk1.Root(), params.BeaconConfig().ZeroHash, 1, 1)
state, root2, err := prepareForkchoiceState(ctx, 2*params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'c'}, root1, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk2))
target, err = f.TargetRootForEpoch(blk2.Root(), 2)
require.NoError(t, f.InsertNode(ctx, state, root2))
target, err = f.TargetRootForEpoch(root2, 2)
require.NoError(t, err)
require.Equal(t, target, blk1.Root())
require.Equal(t, target, root1)

state, blk3, err := prepareForkchoiceState(ctx, 2*params.BeaconConfig().SlotsPerEpoch+2, [32]byte{'d'}, blk2.Root(), params.BeaconConfig().ZeroHash, 1, 1)
state, root3, err := prepareForkchoiceState(ctx, 2*params.BeaconConfig().SlotsPerEpoch+2, [32]byte{'d'}, root2, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk3))
target, err = f.TargetRootForEpoch(blk2.Root(), 2)
require.NoError(t, f.InsertNode(ctx, state, root3))
target, err = f.TargetRootForEpoch(root2, 2)
require.NoError(t, err)
require.Equal(t, target, blk1.Root())
require.Equal(t, target, root1)

// Prune finalization
s := f.store
s.finalizedCheckpoint.Root = blk1.Root()
s.finalizedCheckpoint.Root = root1
require.NoError(t, s.prune(ctx))
target, err = f.TargetRootForEpoch(blk1.Root(), 1)
target, err = f.TargetRootForEpoch(root1, 1)
require.NoError(t, err)
require.Equal(t, [32]byte{}, target)

// Insert a block for next epoch (slot 0 present)

state, blk4, err := prepareForkchoiceState(ctx, 3*params.BeaconConfig().SlotsPerEpoch, [32]byte{'e'}, blk1.Root(), params.BeaconConfig().ZeroHash, 1, 1)
state, root4, err := prepareForkchoiceState(ctx, 3*params.BeaconConfig().SlotsPerEpoch, [32]byte{'e'}, root1, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk4))
target, err = f.TargetRootForEpoch(blk4.Root(), 3)
require.NoError(t, f.InsertNode(ctx, state, root4))
target, err = f.TargetRootForEpoch(root4, 3)
require.NoError(t, err)
require.Equal(t, target, blk4.Root())
require.Equal(t, target, root4)

state, blk5, err := prepareForkchoiceState(ctx, 3*params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'f'}, blk4.Root(), params.BeaconConfig().ZeroHash, 1, 1)
state, root5, err := prepareForkchoiceState(ctx, 3*params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'f'}, root4, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk5))
target, err = f.TargetRootForEpoch(blk5.Root(), 3)
require.NoError(t, f.InsertNode(ctx, state, root5))
target, err = f.TargetRootForEpoch(root5, 3)
require.NoError(t, err)
require.Equal(t, target, blk4.Root())
require.Equal(t, target, root4)

// Target root where the target epoch is same or ahead of the block slot
target, err = f.TargetRootForEpoch(blk5.Root(), 4)
target, err = f.TargetRootForEpoch(root5, 4)
require.NoError(t, err)
require.Equal(t, target, blk5.Root())
require.Equal(t, target, root5)

// Target root where the target epoch is two epochs ago
target, err = f.TargetRootForEpoch(blk5.Root(), 2)
target, err = f.TargetRootForEpoch(root5, 2)
require.NoError(t, err)
require.Equal(t, blk1.Root(), target) // the parent of root4 in epoch 3 is root 1 in epoch 1
require.Equal(t, root1, target) // the parent of root4 in epoch 3 is root 1 in epoch 1

// Target root where the target is two epochs ago, slot 0 was missed
state, blk6, err := prepareForkchoiceState(ctx, 4*params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'g'}, blk5.Root(), params.BeaconConfig().ZeroHash, 1, 1)
state, root6, err := prepareForkchoiceState(ctx, 4*params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'g'}, root5, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blk6))
target, err = f.TargetRootForEpoch(blk6.Root(), 4)
require.NoError(t, f.InsertNode(ctx, state, root6))
target, err = f.TargetRootForEpoch(root6, 4)
require.NoError(t, err)
require.Equal(t, target, blk5.Root())
target, err = f.TargetRootForEpoch(blk6.Root(), 2)
require.Equal(t, target, root5)
target, err = f.TargetRootForEpoch(root6, 2)
require.NoError(t, err)
require.Equal(t, target, blk1.Root())
require.Equal(t, target, root1)

// Prune finalization
s.finalizedCheckpoint.Root = blk4.Root()
s.finalizedCheckpoint.Root = root4
require.NoError(t, s.prune(ctx))
target, err = f.TargetRootForEpoch(blk4.Root(), 3)
target, err = f.TargetRootForEpoch(root4, 3)
require.NoError(t, err)
require.Equal(t, blk4.Root(), target)
require.Equal(t, root4, target)
}
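The test above exercises one rule: the attestation target for an epoch is the block at that epoch's first slot, falling back to the most recent ancestor when slot 0 was skipped. A compact Go sketch of that rule, consistent with every assertion in the test (the node type and function are illustrative, not Prysm's cached implementation):

// fcNode is a simplified fork choice node for illustration.
type fcNode struct {
	slot   uint64
	root   [32]byte
	parent *fcNode
}

// targetRootForEpoch walks back from a node until it reaches the first
// block at or before the epoch's boundary slot. E.g. for root5 at slot
// 3*SlotsPerEpoch+1 and epoch 2, it walks past root4 (3*SlotsPerEpoch)
// to root1 (SlotsPerEpoch+1), matching the test.
func targetRootForEpoch(n *fcNode, epoch, slotsPerEpoch uint64) [32]byte {
	boundary := epoch * slotsPerEpoch
	for n != nil && n.slot > boundary {
		n = n.parent
	}
	if n == nil {
		return [32]byte{}
	}
	return n.root
}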
func TestStore_CleanupInserting(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
st, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0)
st, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NotNil(t, f.InsertNode(ctx, st, blk))
require.Equal(t, false, f.HasNode(blk.Root()))
require.NotNil(t, f.InsertNode(ctx, st, blkRoot))
require.Equal(t, false, f.HasNode(blkRoot))
}
@@ -6,7 +6,6 @@ import (
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
forkchoice2 "github.com/prysmaticlabs/prysm/v5/consensus-types/forkchoice"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)
@@ -42,7 +41,7 @@ type HeadRetriever interface {

// BlockProcessor processes the block that's used for accounting fork choice.
type BlockProcessor interface {
InsertNode(context.Context, state.BeaconState, consensus_blocks.ROBlock) error
InsertNode(context.Context, state.BeaconState, [32]byte) error
InsertChain(context.Context, []*forkchoicetypes.BlockAndCheckpoints) error
}
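With the interface change above, callers hand fork choice a 32-byte block root instead of the block itself. A sketch of what a call site reduces to (function and variable names are assumed for illustration):

// insertByRoot illustrates the new BlockProcessor call shape: the caller
// derives the root once, and fork choice receives only the root plus the
// post-state it needs for checkpoint accounting.
func insertByRoot(ctx context.Context, fc BlockProcessor, st state.BeaconState, root [32]byte) error {
	return fc.InsertNode(ctx, st, root)
}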
@@ -7,7 +7,7 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//config/fieldparams:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
],
@@ -2,7 +2,7 @@ package types

import (
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
@@ -17,7 +17,7 @@ type Checkpoint struct {

// BlockAndCheckpoints to call the InsertOptimisticChain function
type BlockAndCheckpoints struct {
Block consensus_blocks.ROBlock
Block interfaces.ReadOnlyBeaconBlock
JustifiedCheckpoint *ethpb.Checkpoint
FinalizedCheckpoint *ethpb.Checkpoint
}
@@ -796,7 +796,6 @@ func (b *BeaconNode) registerPOWChainService() error {
execution.WithBeaconNodeStatsUpdater(bs),
execution.WithFinalizedStateAtStartup(b.finalizedStateAtStartUp),
execution.WithJwtId(b.cliCtx.String(flags.JwtId.Name)),
execution.WithVerifierWaiter(b.verifyInitWaiter),
)
web3Service, err := execution.NewService(b.ctx, opts...)
if err != nil {
@@ -839,7 +838,7 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}, bFil
regularsync.WithStateGen(b.stateGen),
regularsync.WithSlasherAttestationsFeed(b.slasherAttestationsFeed),
regularsync.WithSlasherBlockHeadersFeed(b.slasherBlockHeadersFeed),
regularsync.WithReconstructor(web3Service),
regularsync.WithPayloadReconstructor(web3Service),
regularsync.WithClockWaiter(b.clockWaiter),
regularsync.WithInitialSyncComplete(initialSyncComplete),
regularsync.WithStateNotifier(b),
@@ -954,55 +953,55 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {

p2pService := b.fetchP2P()
rpcService := rpc.NewService(b.ctx, &rpc.Config{
ExecutionEngineCaller: web3Service,
ExecutionReconstructor: web3Service,
Host: host,
Port: port,
BeaconMonitoringHost: beaconMonitoringHost,
BeaconMonitoringPort: beaconMonitoringPort,
CertFlag: cert,
KeyFlag: key,
BeaconDB: b.db,
Broadcaster: p2pService,
PeersFetcher: p2pService,
PeerManager: p2pService,
MetadataProvider: p2pService,
ChainInfoFetcher: chainService,
HeadFetcher: chainService,
CanonicalFetcher: chainService,
ForkFetcher: chainService,
ForkchoiceFetcher: chainService,
FinalizationFetcher: chainService,
BlockReceiver: chainService,
BlobReceiver: chainService,
AttestationReceiver: chainService,
GenesisTimeFetcher: chainService,
GenesisFetcher: chainService,
OptimisticModeFetcher: chainService,
AttestationsPool: b.attestationPool,
ExitPool: b.exitPool,
SlashingsPool: b.slashingsPool,
BLSChangesPool: b.blsToExecPool,
SyncCommitteeObjectPool: b.syncCommitteePool,
ExecutionChainService: web3Service,
ExecutionChainInfoFetcher: web3Service,
ChainStartFetcher: chainStartFetcher,
MockEth1Votes: mockEth1DataVotes,
SyncService: syncService,
DepositFetcher: depositFetcher,
PendingDepositFetcher: b.depositCache,
BlockNotifier: b,
StateNotifier: b,
OperationNotifier: b,
StateGen: b.stateGen,
EnableDebugRPCEndpoints: enableDebugRPCEndpoints,
MaxMsgSize: maxMsgSize,
BlockBuilder: b.fetchBuilderService(),
Router: router,
ClockWaiter: b.clockWaiter,
BlobStorage: b.BlobStorage,
TrackedValidatorsCache: b.trackedValidatorsCache,
PayloadIDCache: b.payloadIDCache,
ExecutionEngineCaller: web3Service,
ExecutionPayloadReconstructor: web3Service,
Host: host,
Port: port,
BeaconMonitoringHost: beaconMonitoringHost,
BeaconMonitoringPort: beaconMonitoringPort,
CertFlag: cert,
KeyFlag: key,
BeaconDB: b.db,
Broadcaster: p2pService,
PeersFetcher: p2pService,
PeerManager: p2pService,
MetadataProvider: p2pService,
ChainInfoFetcher: chainService,
HeadFetcher: chainService,
CanonicalFetcher: chainService,
ForkFetcher: chainService,
ForkchoiceFetcher: chainService,
FinalizationFetcher: chainService,
BlockReceiver: chainService,
BlobReceiver: chainService,
AttestationReceiver: chainService,
GenesisTimeFetcher: chainService,
GenesisFetcher: chainService,
OptimisticModeFetcher: chainService,
AttestationsPool: b.attestationPool,
ExitPool: b.exitPool,
SlashingsPool: b.slashingsPool,
BLSChangesPool: b.blsToExecPool,
SyncCommitteeObjectPool: b.syncCommitteePool,
ExecutionChainService: web3Service,
ExecutionChainInfoFetcher: web3Service,
ChainStartFetcher: chainStartFetcher,
MockEth1Votes: mockEth1DataVotes,
SyncService: syncService,
DepositFetcher: depositFetcher,
PendingDepositFetcher: b.depositCache,
BlockNotifier: b,
StateNotifier: b,
OperationNotifier: b,
StateGen: b.stateGen,
EnableDebugRPCEndpoints: enableDebugRPCEndpoints,
MaxMsgSize: maxMsgSize,
BlockBuilder: b.fetchBuilderService(),
Router: router,
ClockWaiter: b.clockWaiter,
BlobStorage: b.BlobStorage,
TrackedValidatorsCache: b.trackedValidatorsCache,
PayloadIDCache: b.payloadIDCache,
})

return b.services.RegisterService(rpcService)
@@ -209,7 +209,7 @@ func TestService_BroadcastAttestation(t *testing.T) {

func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
// Setup bootnode.
cfg := &Config{}
cfg := &Config{PingInterval: testPingInterval}
port := 2000
cfg.UDPPort = uint(port)
_, pkey := createAddrAndPrivKey(t)
@@ -241,12 +241,16 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
cfg = &Config{
Discv5BootStrapAddrs: []string{bootNode.String()},
MaxPeers: 30,
PingInterval: testPingInterval,
}
// Setup 2 different hosts
for i := 1; i <= 2; i++ {
h, pkey, ipAddr := createHost(t, port+i)
cfg.UDPPort = uint(port + i)
cfg.TCPPort = uint(port + i)
if len(listeners) > 0 {
cfg.Discv5BootStrapAddrs = append(cfg.Discv5BootStrapAddrs, listeners[len(listeners)-1].Self().String())
}
s := &Service{
cfg: cfg,
genesisTime: genesisTime,
@@ -1,6 +1,8 @@
package p2p

import (
"time"

statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
@@ -15,6 +17,7 @@ type Config struct {
NoDiscovery bool
EnableUPnP bool
StaticPeerID bool
DisableLivenessCheck bool
StaticPeers []string
Discv5BootStrapAddrs []string
RelayNodeAddr string
@@ -27,6 +30,7 @@ type Config struct {
QUICPort uint
TCPPort uint
UDPPort uint
PingInterval time.Duration
MaxPeers uint
QueueSize uint
AllowListCIDR string
@@ -330,8 +330,10 @@ func (s *Service) createListener(
}

dv5Cfg := discover.Config{
PrivateKey: privKey,
Bootnodes: bootNodes,
PrivateKey: privKey,
Bootnodes: bootNodes,
PingInterval: s.cfg.PingInterval,
NoFindnodeLivenessCheck: s.cfg.DisableLivenessCheck,
}

listener, err := discover.ListenV5(conn, localNode, dv5Cfg)
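The two new Config fields map one-to-one onto the discover.Config fields shown in the hunk: PingInterval controls how often discovery re-pings peers, and DisableLivenessCheck feeds NoFindnodeLivenessCheck. The updated tests drive both to make discovery converge quickly; roughly, assuming the field names from the hunk (values illustrative):

// Test-style wiring: a short ping interval plus disabled findnode
// liveness checks, as the updated p2p tests configure it.
cfg := &Config{
	UDPPort:              uint(2000),
	PingInterval:         100 * time.Millisecond, // testPingInterval in the tests
	DisableLivenessCheck: true,                   // becomes NoFindnodeLivenessCheck
}
_ = cfg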
@@ -85,7 +85,7 @@ func TestStartDiscV5_DiscoverAllPeers(t *testing.T) {
genesisTime := time.Now()
genesisValidatorsRoot := make([]byte, 32)
s := &Service{
cfg: &Config{UDPPort: uint(port)},
cfg: &Config{UDPPort: uint(port), PingInterval: testPingInterval, DisableLivenessCheck: true},
genesisTime: genesisTime,
genesisValidatorsRoot: genesisValidatorsRoot,
}
@@ -93,6 +93,10 @@ func TestStartDiscV5_DiscoverAllPeers(t *testing.T) {
require.NoError(t, err)
defer bootListener.Close()

// Allow bootnode's table to have its initial refresh. This allows
// inbound nodes to be added in.
time.Sleep(5 * time.Second)

bootNode := bootListener.Self()

var listeners []*listenerWrapper
@@ -101,6 +105,8 @@ func TestStartDiscV5_DiscoverAllPeers(t *testing.T) {
cfg := &Config{
Discv5BootStrapAddrs: []string{bootNode.String()},
UDPPort: uint(port),
PingInterval: testPingInterval,
DisableLivenessCheck: true,
}
ipAddr, pkey := createAddrAndPrivKey(t)
s = &Service{

@@ -34,8 +34,10 @@ func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
genesisValidatorsRoot := make([]byte, fieldparams.RootLength)
s := &Service{
cfg: &Config{
UDPPort: uint(port),
StateNotifier: &mock.MockStateNotifier{},
UDPPort: uint(port),
StateNotifier: &mock.MockStateNotifier{},
PingInterval: testPingInterval,
DisableLivenessCheck: true,
},
genesisTime: genesisTime,
genesisValidatorsRoot: genesisValidatorsRoot,
@@ -44,11 +46,17 @@ func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
require.NoError(t, err)
defer bootListener.Close()

// Allow bootnode's table to have its initial refresh. This allows
// inbound nodes to be added in.
time.Sleep(5 * time.Second)

bootNode := bootListener.Self()
cfg := &Config{
Discv5BootStrapAddrs: []string{bootNode.String()},
UDPPort: uint(port),
StateNotifier: &mock.MockStateNotifier{},
PingInterval: testPingInterval,
DisableLivenessCheck: true,
}

var listeners []*listenerWrapper
@@ -124,7 +132,7 @@ func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
genesisTime := time.Now()
genesisValidatorsRoot := make([]byte, 32)
s := &Service{
cfg: &Config{UDPPort: uint(port)},
cfg: &Config{UDPPort: uint(port), PingInterval: testPingInterval, DisableLivenessCheck: true},
genesisTime: genesisTime,
genesisValidatorsRoot: genesisValidatorsRoot,
}
@@ -132,10 +140,16 @@ func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
require.NoError(t, err)
defer bootListener.Close()

// Allow bootnode's table to have its initial refresh. This allows
// inbound nodes to be added in.
time.Sleep(5 * time.Second)

bootNode := bootListener.Self()
cfg := &Config{
Discv5BootStrapAddrs: []string{bootNode.String()},
UDPPort: uint(port),
PingInterval: testPingInterval,
DisableLivenessCheck: true,
}

var listeners []*listenerWrapper
@@ -143,7 +157,6 @@ func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
port := 3000 + i
cfg.UDPPort = uint(port)
ipAddr, pkey := createAddrAndPrivKey(t)

c := params.BeaconConfig().Copy()
nextForkEpoch := primitives.Epoch(i)
c.ForkVersionSchedule[[4]byte{'A', 'B', 'C', 'D'}] = nextForkEpoch

@@ -28,6 +28,8 @@ import (
logTest "github.com/sirupsen/logrus/hooks/test"
)

const testPingInterval = 100 * time.Millisecond

type mockListener struct {
localNode *enode.LocalNode
}
@@ -186,7 +188,7 @@ func TestListenForNewNodes(t *testing.T) {
params.SetupTestConfigCleanup(t)
// Setup bootnode.
notifier := &mock.MockStateNotifier{}
cfg := &Config{StateNotifier: notifier}
cfg := &Config{StateNotifier: notifier, PingInterval: testPingInterval, DisableLivenessCheck: true}
port := 2000
cfg.UDPPort = uint(port)
_, pkey := createAddrAndPrivKey(t)
@@ -202,6 +204,10 @@ func TestListenForNewNodes(t *testing.T) {
require.NoError(t, err)
defer bootListener.Close()

// Allow bootnode's table to have its initial refresh. This allows
// inbound nodes to be added in.
time.Sleep(5 * time.Second)

// Use shorter period for testing.
currentPeriod := pollingPeriod
pollingPeriod = 1 * time.Second
@@ -217,6 +223,8 @@ func TestListenForNewNodes(t *testing.T) {
cs := startup.NewClockSynchronizer()
cfg = &Config{
Discv5BootStrapAddrs: []string{bootNode.String()},
PingInterval: testPingInterval,
DisableLivenessCheck: true,
MaxPeers: 30,
ClockWaiter: cs,
}

@@ -66,7 +66,7 @@ func TestStartDiscV5_FindPeersWithSubnet(t *testing.T) {
genesisTime := time.Now()

bootNodeService := &Service{
cfg: &Config{UDPPort: 2000, TCPPort: 3000, QUICPort: 3000},
cfg: &Config{UDPPort: 2000, TCPPort: 3000, QUICPort: 3000, DisableLivenessCheck: true, PingInterval: testPingInterval},
genesisTime: genesisTime,
genesisValidatorsRoot: genesisValidatorsRoot,
}
@@ -78,6 +78,10 @@ func TestStartDiscV5_FindPeersWithSubnet(t *testing.T) {
require.NoError(t, err)
defer bootListener.Close()

// Allow bootnode's table to have its initial refresh. This allows
// inbound nodes to be added in.
time.Sleep(5 * time.Second)

bootNodeENR := bootListener.Self().String()

// Create 3 nodes, each subscribed to a different subnet.
@@ -92,6 +96,8 @@ func TestStartDiscV5_FindPeersWithSubnet(t *testing.T) {
UDPPort: uint(2000 + i),
TCPPort: uint(3000 + i),
QUICPort: uint(3000 + i),
PingInterval: testPingInterval,
DisableLivenessCheck: true,
})

require.NoError(t, err)
@@ -133,6 +139,8 @@ func TestStartDiscV5_FindPeersWithSubnet(t *testing.T) {

cfg := &Config{
Discv5BootStrapAddrs: []string{bootNodeENR},
PingInterval: testPingInterval,
DisableLivenessCheck: true,
MaxPeers: 30,
UDPPort: 2010,
TCPPort: 3010,
@@ -869,9 +869,16 @@ func (s *Service) ValidatorActiveSetChanges(
}
}

activeValidatorCount, err := helpers.ActiveValidatorCount(ctx, requestedState, coreTime.CurrentEpoch(requestedState))
if err != nil {
return nil, &RpcError{
Err: errors.Wrap(err, "could not get active validator count"),
Reason: Internal,
}
}
vs := requestedState.Validators()
activatedIndices := validators.ActivatedValidatorIndices(coreTime.CurrentEpoch(requestedState), vs)
exitedIndices, err := validators.ExitedValidatorIndices(coreTime.CurrentEpoch(requestedState), vs)
exitedIndices, err := validators.ExitedValidatorIndices(coreTime.CurrentEpoch(requestedState), vs, activeValidatorCount)
if err != nil {
return nil, &RpcError{
Err: errors.Wrap(err, "could not determine exited validator indices"),
@@ -879,7 +886,7 @@
}
}
slashedIndices := validators.SlashedValidatorIndices(coreTime.CurrentEpoch(requestedState), vs)
ejectedIndices, err := validators.EjectedValidatorIndices(coreTime.CurrentEpoch(requestedState), vs)
ejectedIndices, err := validators.EjectedValidatorIndices(coreTime.CurrentEpoch(requestedState), vs, activeValidatorCount)
if err != nil {
return nil, &RpcError{
Err: errors.Wrap(err, "could not determine ejected validator indices"),
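The shape of this change: the handler now computes the active validator count once and threads it through both index helpers rather than letting each recompute it internally. Stripped of the RpcError wrapping, the new flow reads roughly as follows (a sketch of the hunk above, error handling elided):

// Compute the active set size once, then reuse it for both the exit
// and ejection scans; errors are wrapped as Internal RpcErrors in the
// real handler.
epoch := coreTime.CurrentEpoch(requestedState)
activeValidatorCount, _ := helpers.ActiveValidatorCount(ctx, requestedState, epoch)
vs := requestedState.Validators()
activated := validators.ActivatedValidatorIndices(epoch, vs)
exited, _ := validators.ExitedValidatorIndices(epoch, vs, activeValidatorCount)
slashed := validators.SlashedValidatorIndices(epoch, vs)
ejected, _ := validators.EjectedValidatorIndices(epoch, vs, activeValidatorCount)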
@@ -464,30 +464,30 @@ func (s *Service) beaconEndpoints(
coreService *core.Service,
) []endpoint {
server := &beacon.Server{
CanonicalHistory: ch,
BeaconDB: s.cfg.BeaconDB,
AttestationsPool: s.cfg.AttestationsPool,
SlashingsPool: s.cfg.SlashingsPool,
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
BlockNotifier: s.cfg.BlockNotifier,
OperationNotifier: s.cfg.OperationNotifier,
Broadcaster: s.cfg.Broadcaster,
BlockReceiver: s.cfg.BlockReceiver,
StateGenService: s.cfg.StateGen,
Stater: stater,
Blocker: blocker,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
HeadFetcher: s.cfg.HeadFetcher,
TimeFetcher: s.cfg.GenesisTimeFetcher,
VoluntaryExitsPool: s.cfg.ExitPool,
V1Alpha1ValidatorServer: validatorServer,
SyncChecker: s.cfg.SyncService,
ExecutionReconstructor: s.cfg.ExecutionReconstructor,
BLSChangesPool: s.cfg.BLSChangesPool,
FinalizationFetcher: s.cfg.FinalizationFetcher,
ForkchoiceFetcher: s.cfg.ForkchoiceFetcher,
CoreService: coreService,
CanonicalHistory: ch,
BeaconDB: s.cfg.BeaconDB,
AttestationsPool: s.cfg.AttestationsPool,
SlashingsPool: s.cfg.SlashingsPool,
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
BlockNotifier: s.cfg.BlockNotifier,
OperationNotifier: s.cfg.OperationNotifier,
Broadcaster: s.cfg.Broadcaster,
BlockReceiver: s.cfg.BlockReceiver,
StateGenService: s.cfg.StateGen,
Stater: stater,
Blocker: blocker,
OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
HeadFetcher: s.cfg.HeadFetcher,
TimeFetcher: s.cfg.GenesisTimeFetcher,
VoluntaryExitsPool: s.cfg.ExitPool,
V1Alpha1ValidatorServer: validatorServer,
SyncChecker: s.cfg.SyncService,
ExecutionPayloadReconstructor: s.cfg.ExecutionPayloadReconstructor,
BLSChangesPool: s.cfg.BLSChangesPool,
FinalizationFetcher: s.cfg.FinalizationFetcher,
ForkchoiceFetcher: s.cfg.ForkchoiceFetcher,
CoreService: coreService,
}

const namespace = "beacon"
@@ -631,15 +631,6 @@ func (s *Service) beaconEndpoints(
handler: server.ListAttestations,
methods: []string{http.MethodGet},
},
{
template: "/eth/v2/beacon/pool/attestations",
name: namespace + ".ListAttestationsV2",
middleware: []middleware.Middleware{
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
},
handler: server.ListAttestationsV2,
methods: []string{http.MethodGet},
},
{
template: "/eth/v1/beacon/pool/attestations",
name: namespace + ".SubmitAttestations",
@@ -41,7 +41,6 @@ func Test_endpoints(t *testing.T) {
"/eth/v1/beacon/deposit_snapshot": {http.MethodGet},
"/eth/v1/beacon/blinded_blocks/{block_id}": {http.MethodGet},
"/eth/v1/beacon/pool/attestations": {http.MethodGet, http.MethodPost},
"/eth/v2/beacon/pool/attestations": {http.MethodGet},
"/eth/v1/beacon/pool/attester_slashings": {http.MethodGet, http.MethodPost},
"/eth/v2/beacon/pool/attester_slashings": {http.MethodGet, http.MethodPost},
"/eth/v1/beacon/pool/proposer_slashings": {http.MethodGet, http.MethodPost},
@@ -65,7 +65,7 @@ func (s *Server) GetBlockV2(w http.ResponseWriter, r *http.Request) {

// Deal with block unblinding.
if blk.Version() >= version.Bellatrix && blk.IsBlinded() {
blk, err = s.ExecutionReconstructor.ReconstructFullBlock(ctx, blk)
blk, err = s.ExecutionPayloadReconstructor.ReconstructFullBlock(ctx, blk)
if err != nil {
httputil.HandleError(w, errors.Wrapf(err, "could not reconstruct full execution payload to create signed beacon block").Error(), http.StatusBadRequest)
return
@@ -269,7 +269,6 @@ func (s *Server) GetBlockAttestationsV2(w http.ResponseWriter, r *http.Request)
Finalized: s.FinalizationFetcher.IsFinalized(ctx, root),
Data: attBytes,
}
w.Header().Set(api.VersionHeader, version.String(v))
httputil.WriteJson(w, resp)
}
@@ -55,122 +55,39 @@ func (s *Server) ListAttestations(w http.ResponseWriter, r *http.Request) {
return
}
attestations = append(attestations, unaggAtts...)
isEmptyReq := rawSlot == "" && rawCommitteeIndex == ""
if isEmptyReq {
allAtts := make([]*structs.Attestation, len(attestations))
for i, att := range attestations {
a, ok := att.(*eth.Attestation)
if ok {
allAtts[i] = structs.AttFromConsensus(a)
} else {
httputil.HandleError(w, fmt.Sprintf("unable to convert attestations of type %T", att), http.StatusInternalServerError)
return
}
}
httputil.WriteJson(w, &structs.ListAttestationsResponse{Data: allAtts})
return
}

bothDefined := rawSlot != "" && rawCommitteeIndex != ""
filteredAtts := make([]*structs.Attestation, 0, len(attestations))
for _, a := range attestations {
var includeAttestation bool
att, ok := a.(*eth.Attestation)
if !ok {
httputil.HandleError(w, fmt.Sprintf("Unable to convert attestation of type %T", a), http.StatusInternalServerError)
return
}

includeAttestation = shouldIncludeAttestation(att.GetData(), rawSlot, slot, rawCommitteeIndex, committeeIndex)
if includeAttestation {
attStruct := structs.AttFromConsensus(att)
filteredAtts = append(filteredAtts, attStruct)
}
}

attsData, err := json.Marshal(filteredAtts)
if err != nil {
httputil.HandleError(w, "Could not marshal attestations: "+err.Error(), http.StatusInternalServerError)
return
}

httputil.WriteJson(w, &structs.ListAttestationsResponse{
Data: attsData,
})
}

// ListAttestationsV2 retrieves attestations known by the node but
// not necessarily incorporated into any block. Allows filtering by committee index or slot.
func (s *Server) ListAttestationsV2(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "beacon.ListAttestationsV2")
defer span.End()

rawSlot, slot, ok := shared.UintFromQuery(w, r, "slot", false)
if !ok {
return
}
rawCommitteeIndex, committeeIndex, ok := shared.UintFromQuery(w, r, "committee_index", false)
if !ok {
return
}

headState, err := s.ChainInfoFetcher.HeadStateReadOnly(ctx)
if err != nil {
httputil.HandleError(w, "Could not get head state: "+err.Error(), http.StatusInternalServerError)
return
}

attestations := s.AttestationsPool.AggregatedAttestations()
unaggAtts, err := s.AttestationsPool.UnaggregatedAttestations()
if err != nil {
httputil.HandleError(w, "Could not get unaggregated attestations: "+err.Error(), http.StatusInternalServerError)
return
}
attestations = append(attestations, unaggAtts...)

filteredAtts := make([]interface{}, 0, len(attestations))
for _, att := range attestations {
var includeAttestation bool
if headState.Version() >= version.Electra {
attElectra, ok := att.(*eth.AttestationElectra)
if !ok {
httputil.HandleError(w, fmt.Sprintf("Unable to convert attestation of type %T", att), http.StatusInternalServerError)
committeeIndexMatch := rawCommitteeIndex != "" && att.GetData().CommitteeIndex == primitives.CommitteeIndex(committeeIndex)
slotMatch := rawSlot != "" && att.GetData().Slot == primitives.Slot(slot)
shouldAppend := (bothDefined && committeeIndexMatch && slotMatch) || (!bothDefined && (committeeIndexMatch || slotMatch))
if shouldAppend {
a, ok := att.(*eth.Attestation)
if ok {
filteredAtts = append(filteredAtts, structs.AttFromConsensus(a))
} else {
httputil.HandleError(w, fmt.Sprintf("unable to convert attestations of type %T", att), http.StatusInternalServerError)
return
}

includeAttestation = shouldIncludeAttestation(attElectra.GetData(), rawSlot, slot, rawCommitteeIndex, committeeIndex)
if includeAttestation {
attStruct := structs.AttElectraFromConsensus(attElectra)
filteredAtts = append(filteredAtts, attStruct)
}
} else {
attOld, ok := att.(*eth.Attestation)
if !ok {
httputil.HandleError(w, fmt.Sprintf("Unable to convert attestation of type %T", att), http.StatusInternalServerError)
return
}

includeAttestation = shouldIncludeAttestation(attOld.GetData(), rawSlot, slot, rawCommitteeIndex, committeeIndex)
if includeAttestation {
attStruct := structs.AttFromConsensus(attOld)
filteredAtts = append(filteredAtts, attStruct)
}
}
}

attsData, err := json.Marshal(filteredAtts)
if err != nil {
httputil.HandleError(w, "Could not marshal attestations: "+err.Error(), http.StatusInternalServerError)
return
}

httputil.WriteJson(w, &structs.ListAttestationsResponse{
Version: version.String(headState.Version()),
Data: attsData,
})
}

// Helper function to determine if an attestation should be included
func shouldIncludeAttestation(
data *eth.AttestationData,
rawSlot string,
slot uint64,
rawCommitteeIndex string,
committeeIndex uint64,
) bool {
committeeIndexMatch := true
slotMatch := true
if rawCommitteeIndex != "" && data.CommitteeIndex != primitives.CommitteeIndex(committeeIndex) {
committeeIndexMatch = false
}
if rawSlot != "" && data.Slot != primitives.Slot(slot) {
slotMatch = false
}
return committeeIndexMatch && slotMatch
httputil.WriteJson(w, &structs.ListAttestationsResponse{Data: filteredAtts})
}
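The shouldIncludeAttestation helper gives both list endpoints one filter rule: an unset query parameter matches everything, and the parameters that were supplied combine with a logical AND. A usage illustration (the wrapper function and data variable are assumed for the example):

// exampleFilters shows the three interesting cases of the helper above.
func exampleFilters(data *eth.AttestationData) (bothSet, slotOnly, none bool) {
	bothSet = shouldIncludeAttestation(data, "2", 2, "4", 4) // slot 2 AND committee index 4
	slotOnly = shouldIncludeAttestation(data, "2", 2, "", 0) // slot 2, any committee index
	none = shouldIncludeAttestation(data, "", 0, "", 0)      // no filters: always true
	return
}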
// SubmitAttestations submits an attestation object to node. If the attestation passes all validation
@@ -579,41 +496,36 @@ func (s *Server) GetAttesterSlashingsV2(w http.ResponseWriter, r *http.Request)
httputil.HandleError(w, "Could not get head state: "+err.Error(), http.StatusInternalServerError)
return
}

var attStructs []interface{}
sourceSlashings := s.SlashingsPool.PendingAttesterSlashings(ctx, headState, true /* return unlimited slashings */)

for _, slashing := range sourceSlashings {
var attStruct interface{}
if headState.Version() >= version.Electra {
if slashing.Version() >= version.Electra {
a, ok := slashing.(*eth.AttesterSlashingElectra)
if !ok {
httputil.HandleError(w, fmt.Sprintf("Unable to convert slashing of type %T to an Electra slashing", slashing), http.StatusInternalServerError)
httputil.HandleError(w, fmt.Sprintf("Unable to convert electra slashing of type %T to an Electra slashing", slashing), http.StatusInternalServerError)
return
}
attStruct = structs.AttesterSlashingElectraFromConsensus(a)
attStruct := structs.AttesterSlashingElectraFromConsensus(a)
attStructs = append(attStructs, attStruct)
} else {
a, ok := slashing.(*eth.AttesterSlashing)
if !ok {
httputil.HandleError(w, fmt.Sprintf("Unable to convert slashing of type %T to a Phase0 slashing", slashing), http.StatusInternalServerError)
return
}
attStruct = structs.AttesterSlashingFromConsensus(a)
attStruct := structs.AttesterSlashingFromConsensus(a)
attStructs = append(attStructs, attStruct)
}
attStructs = append(attStructs, attStruct)
}

attBytes, err := json.Marshal(attStructs)
if err != nil {
httputil.HandleError(w, fmt.Sprintf("Failed to marshal slashing: %v", err), http.StatusInternalServerError)
return
}

resp := &structs.GetAttesterSlashingsResponse{
Version: version.String(headState.Version()),
Version: version.String(sourceSlashings[0].Version()),
Data: attBytes,
}
w.Header().Set(api.VersionHeader, version.String(headState.Version()))
httputil.WriteJson(w, resp)
}
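Two behavior notes fall out of this hunk. Conversion now keys off each slashing's own Version() rather than the head state's, while the Eth-Consensus-Version header still reflects headState.Version(); and the response's Version field is taken from sourceSlashings[0], which assumes the pool returned at least one slashing. A guard along these lines would make the empty-pool case explicit (a sketch that is not part of the diff; field types simplified):

// Hypothetical guard, not in the change above: avoid indexing an empty
// slice when the slashing pool has nothing pending.
if len(sourceSlashings) == 0 {
	httputil.WriteJson(w, &structs.GetAttesterSlashingsResponse{
		Version: version.String(headState.Version()),
		Data:    []byte("[]"),
	})
	return
}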
@@ -114,352 +114,77 @@ func TestListAttestations(t *testing.T) {
        },
        Signature: bytesutil.PadTo([]byte("signature4"), 96),
    }
    t.Run("V1", func(t *testing.T) {
        s := &Server{
            AttestationsPool: attestations.NewPool(),
        }
        require.NoError(t, s.AttestationsPool.SaveAggregatedAttestations([]ethpbv1alpha1.Att{att1, att2}))
        require.NoError(t, s.AttestationsPool.SaveUnaggregatedAttestations([]ethpbv1alpha1.Att{att3, att4}))
    s := &Server{
        AttestationsPool: attestations.NewPool(),
    }
    require.NoError(t, s.AttestationsPool.SaveAggregatedAttestations([]ethpbv1alpha1.Att{att1, att2}))
    require.NoError(t, s.AttestationsPool.SaveUnaggregatedAttestations([]ethpbv1alpha1.Att{att3, att4}))

        t.Run("empty request", func(t *testing.T) {
            url := "http://example.com"
            request := httptest.NewRequest(http.MethodGet, url, nil)
            writer := httptest.NewRecorder()
            writer.Body = &bytes.Buffer{}
    t.Run("empty request", func(t *testing.T) {
        url := "http://example.com"
        request := httptest.NewRequest(http.MethodGet, url, nil)
        writer := httptest.NewRecorder()
        writer.Body = &bytes.Buffer{}

        s.ListAttestations(writer, request)
        assert.Equal(t, http.StatusOK, writer.Code)
        resp := &structs.ListAttestationsResponse{}
        require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp))
        require.NotNil(t, resp)
        require.NotNil(t, resp.Data)

        var atts []*structs.Attestation
        require.NoError(t, json.Unmarshal(resp.Data, &atts))
        assert.Equal(t, 4, len(atts))
    })
    t.Run("slot request", func(t *testing.T) {
        url := "http://example.com?slot=2"
        request := httptest.NewRequest(http.MethodGet, url, nil)
        writer := httptest.NewRecorder()
        writer.Body = &bytes.Buffer{}

        s.ListAttestations(writer, request)
        assert.Equal(t, http.StatusOK, writer.Code)
        resp := &structs.ListAttestationsResponse{}
        require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp))
        require.NotNil(t, resp)
        require.NotNil(t, resp.Data)

        var atts []*structs.Attestation
        require.NoError(t, json.Unmarshal(resp.Data, &atts))
        assert.Equal(t, 2, len(atts))
        for _, a := range atts {
            assert.Equal(t, "2", a.Data.Slot)
        }
    })
    t.Run("index request", func(t *testing.T) {
        url := "http://example.com?committee_index=4"
        request := httptest.NewRequest(http.MethodGet, url, nil)
        writer := httptest.NewRecorder()
        writer.Body = &bytes.Buffer{}

        s.ListAttestations(writer, request)
        assert.Equal(t, http.StatusOK, writer.Code)
        resp := &structs.ListAttestationsResponse{}
        require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp))
        require.NotNil(t, resp)
        require.NotNil(t, resp.Data)

        var atts []*structs.Attestation
        require.NoError(t, json.Unmarshal(resp.Data, &atts))
        assert.Equal(t, 2, len(atts))
        for _, a := range atts {
            assert.Equal(t, "4", a.Data.CommitteeIndex)
        }
    })
    t.Run("both slot + index request", func(t *testing.T) {
        url := "http://example.com?slot=2&committee_index=4"
        request := httptest.NewRequest(http.MethodGet, url, nil)
        writer := httptest.NewRecorder()
        writer.Body = &bytes.Buffer{}

        s.ListAttestations(writer, request)
        assert.Equal(t, http.StatusOK, writer.Code)
        resp := &structs.ListAttestationsResponse{}
        require.NoError(t, json.Unmarshal(writer.Body.Bytes(), &resp))
        require.NotNil(t, resp)
        require.NotNil(t, resp.Data)

        var atts []*structs.Attestation
        require.NoError(t, json.Unmarshal(resp.Data, &atts))
        assert.Equal(t, 1, len(atts))
        for _, a := range atts {
            assert.Equal(t, "2", a.Data.Slot)
            assert.Equal(t, "4", a.Data.CommitteeIndex)
        }
    })
        s.ListAttestations(writer, request)
        assert.Equal(t, http.StatusOK, writer.Code)
        resp := &structs.ListAttestationsResponse{}
        require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
        require.NotNil(t, resp)
        require.NotNil(t, resp.Data)
        assert.Equal(t, 4, len(resp.Data))
    })
    t.Run("V2", func(t *testing.T) {
    t.Run("Pre-Electra", func(t *testing.T) {
        bs, err := util.NewBeaconState()
        require.NoError(t, err)
        s := &Server{
            ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
            AttestationsPool: attestations.NewPool(),
        }
        require.NoError(t, s.AttestationsPool.SaveAggregatedAttestations([]ethpbv1alpha1.Att{att1, att2}))
        require.NoError(t, s.AttestationsPool.SaveUnaggregatedAttestations([]ethpbv1alpha1.Att{att3, att4}))
        t.Run("empty request", func(t *testing.T) {
            url := "http://example.com"
            request := httptest.NewRequest(http.MethodGet, url, nil)
            writer := httptest.NewRecorder()
            writer.Body = &bytes.Buffer{}
        t.Run("slot request", func(t *testing.T) {
            url := "http://example.com?slot=2"
            request := httptest.NewRequest(http.MethodGet, url, nil)
            writer := httptest.NewRecorder()
            writer.Body = &bytes.Buffer{}

            s.ListAttestationsV2(writer, request)
            assert.Equal(t, http.StatusOK, writer.Code)
            resp := &structs.ListAttestationsResponse{}
            require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
            require.NotNil(t, resp)
            require.NotNil(t, resp.Data)
            s.ListAttestations(writer, request)
            assert.Equal(t, http.StatusOK, writer.Code)
            resp := &structs.ListAttestationsResponse{}
            require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
            require.NotNil(t, resp)
            require.NotNil(t, resp.Data)
            assert.Equal(t, 2, len(resp.Data))
            for _, a := range resp.Data {
                assert.Equal(t, "2", a.Data.Slot)
            }
        })
        t.Run("index request", func(t *testing.T) {
            url := "http://example.com?committee_index=4"
            request := httptest.NewRequest(http.MethodGet, url, nil)
            writer := httptest.NewRecorder()
            writer.Body = &bytes.Buffer{}

            var atts []*structs.Attestation
            require.NoError(t, json.Unmarshal(resp.Data, &atts))
            assert.Equal(t, 4, len(atts))
            assert.Equal(t, "phase0", resp.Version)
        })
        t.Run("slot request", func(t *testing.T) {
            url := "http://example.com?slot=2"
            request := httptest.NewRequest(http.MethodGet, url, nil)
            writer := httptest.NewRecorder()
            writer.Body = &bytes.Buffer{}
            s.ListAttestations(writer, request)
            assert.Equal(t, http.StatusOK, writer.Code)
            resp := &structs.ListAttestationsResponse{}
            require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
            require.NotNil(t, resp)
            require.NotNil(t, resp.Data)
            assert.Equal(t, 2, len(resp.Data))
            for _, a := range resp.Data {
                assert.Equal(t, "4", a.Data.CommitteeIndex)
            }
        })
        t.Run("both slot + index request", func(t *testing.T) {
            url := "http://example.com?slot=2&committee_index=4"
            request := httptest.NewRequest(http.MethodGet, url, nil)
            writer := httptest.NewRecorder()
            writer.Body = &bytes.Buffer{}

            s.ListAttestationsV2(writer, request)
            assert.Equal(t, http.StatusOK, writer.Code)
            resp := &structs.ListAttestationsResponse{}
            require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
            require.NotNil(t, resp)
            require.NotNil(t, resp.Data)

            var atts []*structs.Attestation
            require.NoError(t, json.Unmarshal(resp.Data, &atts))
            assert.Equal(t, 2, len(atts))
            assert.Equal(t, "phase0", resp.Version)
            for _, a := range atts {
                assert.Equal(t, "2", a.Data.Slot)
            }
        })
        t.Run("index request", func(t *testing.T) {
            url := "http://example.com?committee_index=4"
            request := httptest.NewRequest(http.MethodGet, url, nil)
            writer := httptest.NewRecorder()
            writer.Body = &bytes.Buffer{}

            s.ListAttestationsV2(writer, request)
            assert.Equal(t, http.StatusOK, writer.Code)
            resp := &structs.ListAttestationsResponse{}
            require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
            require.NotNil(t, resp)
            require.NotNil(t, resp.Data)

            var atts []*structs.Attestation
            require.NoError(t, json.Unmarshal(resp.Data, &atts))
            assert.Equal(t, 2, len(atts))
            assert.Equal(t, "phase0", resp.Version)
            for _, a := range atts {
                assert.Equal(t, "4", a.Data.CommitteeIndex)
            }
        })
        t.Run("both slot + index request", func(t *testing.T) {
            url := "http://example.com?slot=2&committee_index=4"
            request := httptest.NewRequest(http.MethodGet, url, nil)
            writer := httptest.NewRecorder()
            writer.Body = &bytes.Buffer{}

            s.ListAttestationsV2(writer, request)
            assert.Equal(t, http.StatusOK, writer.Code)
            resp := &structs.ListAttestationsResponse{}
            require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
            require.NotNil(t, resp)
            require.NotNil(t, resp.Data)

            var atts []*structs.Attestation
            require.NoError(t, json.Unmarshal(resp.Data, &atts))
            assert.Equal(t, 1, len(atts))
            assert.Equal(t, "phase0", resp.Version)
            for _, a := range atts {
                assert.Equal(t, "2", a.Data.Slot)
                assert.Equal(t, "4", a.Data.CommitteeIndex)
            }
        })
    })
    t.Run("Post-Electra", func(t *testing.T) {
        cb := primitives.NewAttestationCommitteeBits()
        cb.SetBitAt(1, true)
        attElectra1 := &ethpbv1alpha1.AttestationElectra{
            AggregationBits: []byte{1, 10},
            Data: &ethpbv1alpha1.AttestationData{
                Slot:            1,
                CommitteeIndex:  1,
                BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot1"), 32),
                Source: &ethpbv1alpha1.Checkpoint{
                    Epoch: 1,
                    Root:  bytesutil.PadTo([]byte("sourceroot1"), 32),
                },
                Target: &ethpbv1alpha1.Checkpoint{
                    Epoch: 10,
                    Root:  bytesutil.PadTo([]byte("targetroot1"), 32),
                },
            },
            CommitteeBits: cb,
            Signature:     bytesutil.PadTo([]byte("signature1"), 96),
        }
        attElectra2 := &ethpbv1alpha1.AttestationElectra{
            AggregationBits: []byte{1, 10},
            Data: &ethpbv1alpha1.AttestationData{
                Slot:            1,
                CommitteeIndex:  4,
                BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot2"), 32),
                Source: &ethpbv1alpha1.Checkpoint{
                    Epoch: 1,
                    Root:  bytesutil.PadTo([]byte("sourceroot2"), 32),
                },
                Target: &ethpbv1alpha1.Checkpoint{
                    Epoch: 10,
                    Root:  bytesutil.PadTo([]byte("targetroot2"), 32),
                },
            },
            CommitteeBits: cb,
            Signature:     bytesutil.PadTo([]byte("signature2"), 96),
        }
        attElectra3 := &ethpbv1alpha1.AttestationElectra{
            AggregationBits: bitfield.NewBitlist(8),
            Data: &ethpbv1alpha1.AttestationData{
                Slot:            2,
                CommitteeIndex:  2,
                BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot3"), 32),
                Source: &ethpbv1alpha1.Checkpoint{
                    Epoch: 1,
                    Root:  bytesutil.PadTo([]byte("sourceroot3"), 32),
                },
                Target: &ethpbv1alpha1.Checkpoint{
                    Epoch: 10,
                    Root:  bytesutil.PadTo([]byte("targetroot3"), 32),
                },
            },
            CommitteeBits: cb,
            Signature:     bytesutil.PadTo([]byte("signature3"), 96),
        }
        attElectra4 := &ethpbv1alpha1.AttestationElectra{
            AggregationBits: bitfield.NewBitlist(8),
            Data: &ethpbv1alpha1.AttestationData{
                Slot:            2,
                CommitteeIndex:  4,
                BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot4"), 32),
                Source: &ethpbv1alpha1.Checkpoint{
                    Epoch: 1,
                    Root:  bytesutil.PadTo([]byte("sourceroot4"), 32),
                },
                Target: &ethpbv1alpha1.Checkpoint{
                    Epoch: 10,
                    Root:  bytesutil.PadTo([]byte("targetroot4"), 32),
                },
            },
            CommitteeBits: cb,
            Signature:     bytesutil.PadTo([]byte("signature4"), 96),
        }
        bs, err := util.NewBeaconStateElectra()
        require.NoError(t, err)
        s := &Server{
            AttestationsPool: attestations.NewPool(),
            ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
        }
        require.NoError(t, s.AttestationsPool.SaveAggregatedAttestations([]ethpbv1alpha1.Att{attElectra1, attElectra2}))
        require.NoError(t, s.AttestationsPool.SaveUnaggregatedAttestations([]ethpbv1alpha1.Att{attElectra3, attElectra4}))

        t.Run("empty request", func(t *testing.T) {
            url := "http://example.com"
            request := httptest.NewRequest(http.MethodGet, url, nil)
            writer := httptest.NewRecorder()
            writer.Body = &bytes.Buffer{}

            s.ListAttestationsV2(writer, request)
            assert.Equal(t, http.StatusOK, writer.Code)
            resp := &structs.ListAttestationsResponse{}
            require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
            require.NotNil(t, resp)
            require.NotNil(t, resp.Data)

            var atts []*structs.AttestationElectra
            require.NoError(t, json.Unmarshal(resp.Data, &atts))
            assert.Equal(t, 4, len(atts))
            assert.Equal(t, "electra", resp.Version)
        })
        t.Run("slot request", func(t *testing.T) {
            url := "http://example.com?slot=2"
            request := httptest.NewRequest(http.MethodGet, url, nil)
            writer := httptest.NewRecorder()
            writer.Body = &bytes.Buffer{}

            s.ListAttestationsV2(writer, request)
            assert.Equal(t, http.StatusOK, writer.Code)
            resp := &structs.ListAttestationsResponse{}
            require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
            require.NotNil(t, resp)
            require.NotNil(t, resp.Data)

            var atts []*structs.AttestationElectra
            require.NoError(t, json.Unmarshal(resp.Data, &atts))
            assert.Equal(t, 2, len(atts))
            assert.Equal(t, "electra", resp.Version)
            for _, a := range atts {
                assert.Equal(t, "2", a.Data.Slot)
            }
        })
        t.Run("index request", func(t *testing.T) {
            url := "http://example.com?committee_index=4"
            request := httptest.NewRequest(http.MethodGet, url, nil)
            writer := httptest.NewRecorder()
            writer.Body = &bytes.Buffer{}

            s.ListAttestationsV2(writer, request)
            assert.Equal(t, http.StatusOK, writer.Code)
            resp := &structs.ListAttestationsResponse{}
            require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
            require.NotNil(t, resp)
            require.NotNil(t, resp.Data)

            var atts []*structs.AttestationElectra
            require.NoError(t, json.Unmarshal(resp.Data, &atts))
            assert.Equal(t, 2, len(atts))
            assert.Equal(t, "electra", resp.Version)
            for _, a := range atts {
                assert.Equal(t, "4", a.Data.CommitteeIndex)
            }
        })
        t.Run("both slot + index request", func(t *testing.T) {
            url := "http://example.com?slot=2&committee_index=4"
            request := httptest.NewRequest(http.MethodGet, url, nil)
            writer := httptest.NewRecorder()
            writer.Body = &bytes.Buffer{}

            s.ListAttestationsV2(writer, request)
            assert.Equal(t, http.StatusOK, writer.Code)
            resp := &structs.ListAttestationsResponse{}
            require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
            require.NotNil(t, resp)
            require.NotNil(t, resp.Data)

            var atts []*structs.AttestationElectra
            require.NoError(t, json.Unmarshal(resp.Data, &atts))
            assert.Equal(t, 1, len(atts))
            assert.Equal(t, "electra", resp.Version)
            for _, a := range atts {
                assert.Equal(t, "2", a.Data.Slot)
                assert.Equal(t, "4", a.Data.CommitteeIndex)
            }
        })
    })
        s.ListAttestations(writer, request)
        assert.Equal(t, http.StatusOK, writer.Code)
        resp := &structs.ListAttestationsResponse{}
        require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
        require.NotNil(t, resp)
        require.NotNil(t, resp.Data)
        assert.Equal(t, 1, len(resp.Data))
        for _, a := range resp.Data {
            assert.Equal(t, "2", a.Data.Slot)
            assert.Equal(t, "4", a.Data.CommitteeIndex)
        }
    })
}

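The subtests above drive the endpoint through its slot and committee_index query parameters. A small sketch of the filtering those tests imply, assuming the handler matches each parameter exactly when present; the parameter names come from the test URLs, everything else here is hypothetical:

package main

import (
    "fmt"
    "net/url"
    "strconv"
)

type attData struct {
    Slot           uint64
    CommitteeIndex uint64
}

// filterAtts keeps attestations matching the optional slot and
// committee_index query parameters, mirroring what the tests expect.
func filterAtts(atts []attData, q url.Values) ([]attData, error) {
    out := make([]attData, 0, len(atts))
    for _, a := range atts {
        if s := q.Get("slot"); s != "" {
            v, err := strconv.ParseUint(s, 10, 64)
            if err != nil {
                return nil, err
            }
            if a.Slot != v {
                continue
            }
        }
        if ci := q.Get("committee_index"); ci != "" {
            v, err := strconv.ParseUint(ci, 10, 64)
            if err != nil {
                return nil, err
            }
            if a.CommitteeIndex != v {
                continue
            }
        }
        out = append(out, a)
    }
    return out, nil
}

func main() {
    q, _ := url.ParseQuery("slot=2&committee_index=4")
    atts := []attData{{2, 4}, {2, 1}, {1, 4}}
    got, _ := filterAtts(atts, q)
    fmt.Println(got) // [{2 4}]
}
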
@@ -1406,147 +1131,93 @@ func TestGetAttesterSlashings(t *testing.T) {
    }

    t.Run("V1", func(t *testing.T) {
        t.Run("ok", func(t *testing.T) {
            bs, err := util.NewBeaconState()
            require.NoError(t, err)
    bs, err := util.NewBeaconState()
    require.NoError(t, err)

            s := &Server{
                ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
                SlashingsPool:    &slashingsmock.PoolMock{PendingAttSlashings: []ethpbv1alpha1.AttSlashing{slashing1PreElectra, slashing2PreElectra}},
            }
    s := &Server{
        ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
        SlashingsPool:    &slashingsmock.PoolMock{PendingAttSlashings: []ethpbv1alpha1.AttSlashing{slashing1PreElectra, slashing2PreElectra}},
    }

            request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/beacon/pool/attester_slashings", nil)
            writer := httptest.NewRecorder()
            writer.Body = &bytes.Buffer{}
    request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/beacon/pool/attester_slashings", nil)
    writer := httptest.NewRecorder()
    writer.Body = &bytes.Buffer{}

            s.GetAttesterSlashings(writer, request)
            require.Equal(t, http.StatusOK, writer.Code)
            resp := &structs.GetAttesterSlashingsResponse{}
            require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
            require.NotNil(t, resp)
            require.NotNil(t, resp.Data)
    s.GetAttesterSlashings(writer, request)
    require.Equal(t, http.StatusOK, writer.Code)
    resp := &structs.GetAttesterSlashingsResponse{}
    require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
    require.NotNil(t, resp)
    require.NotNil(t, resp.Data)

            var slashings []*structs.AttesterSlashing
            require.NoError(t, json.Unmarshal(resp.Data, &slashings))
    var slashings []*structs.AttesterSlashing
    require.NoError(t, json.Unmarshal(resp.Data, &slashings))

            ss, err := structs.AttesterSlashingsToConsensus(slashings)
            require.NoError(t, err)
    ss, err := structs.AttesterSlashingsToConsensus(slashings)
    require.NoError(t, err)

            require.DeepEqual(t, slashing1PreElectra, ss[0])
            require.DeepEqual(t, slashing2PreElectra, ss[1])
        })
        t.Run("no slashings", func(t *testing.T) {
            bs, err := util.NewBeaconState()
            require.NoError(t, err)

            s := &Server{
                ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
                SlashingsPool:    &slashingsmock.PoolMock{PendingAttSlashings: []ethpbv1alpha1.AttSlashing{}},
            }

            request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/beacon/pool/attester_slashings", nil)
            writer := httptest.NewRecorder()
            writer.Body = &bytes.Buffer{}

            s.GetAttesterSlashings(writer, request)
            require.Equal(t, http.StatusOK, writer.Code)
            resp := &structs.GetAttesterSlashingsResponse{}
            require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
            require.NotNil(t, resp)
            require.NotNil(t, resp.Data)

            var slashings []*structs.AttesterSlashing
            require.NoError(t, json.Unmarshal(resp.Data, &slashings))
            require.Equal(t, 0, len(slashings))
        })
    require.DeepEqual(t, slashing1PreElectra, ss[0])
    require.DeepEqual(t, slashing2PreElectra, ss[1])
    })
    t.Run("V2", func(t *testing.T) {
        t.Run("post-electra-ok", func(t *testing.T) {
            bs, err := util.NewBeaconStateElectra()
            require.NoError(t, err)
    t.Run("V2-post-electra", func(t *testing.T) {
        bs, err := util.NewBeaconStateElectra()
        require.NoError(t, err)

            s := &Server{
                ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
                SlashingsPool:    &slashingsmock.PoolMock{PendingAttSlashings: []ethpbv1alpha1.AttSlashing{slashing1PostElectra, slashing2PostElectra}},
            }
        s := &Server{
            ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
            SlashingsPool:    &slashingsmock.PoolMock{PendingAttSlashings: []ethpbv1alpha1.AttSlashing{slashing1PostElectra, slashing2PostElectra}},
        }

            request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v2/beacon/pool/attester_slashings", nil)
            writer := httptest.NewRecorder()
            writer.Body = &bytes.Buffer{}
        request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v2/beacon/pool/attester_slashings", nil)
        writer := httptest.NewRecorder()
        writer.Body = &bytes.Buffer{}

            s.GetAttesterSlashingsV2(writer, request)
            require.Equal(t, http.StatusOK, writer.Code)
            resp := &structs.GetAttesterSlashingsResponse{}
            require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
            require.NotNil(t, resp)
            require.NotNil(t, resp.Data)
            assert.Equal(t, "electra", resp.Version)
        s.GetAttesterSlashingsV2(writer, request)
        require.Equal(t, http.StatusOK, writer.Code)
        resp := &structs.GetAttesterSlashingsResponse{}
        require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
        require.NotNil(t, resp)
        require.NotNil(t, resp.Data)
        assert.Equal(t, "electra", resp.Version)

            // Unmarshal resp.Data into a slice of slashings
            var slashings []*structs.AttesterSlashingElectra
            require.NoError(t, json.Unmarshal(resp.Data, &slashings))
        // Unmarshal resp.Data into a slice of slashings
        var slashings []*structs.AttesterSlashingElectra
        require.NoError(t, json.Unmarshal(resp.Data, &slashings))

            ss, err := structs.AttesterSlashingsElectraToConsensus(slashings)
            require.NoError(t, err)
        ss, err := structs.AttesterSlashingsElectraToConsensus(slashings)
        require.NoError(t, err)

            require.DeepEqual(t, slashing1PostElectra, ss[0])
            require.DeepEqual(t, slashing2PostElectra, ss[1])
        })
        t.Run("pre-electra-ok", func(t *testing.T) {
            bs, err := util.NewBeaconState()
            require.NoError(t, err)
        require.DeepEqual(t, slashing1PostElectra, ss[0])
        require.DeepEqual(t, slashing2PostElectra, ss[1])
    })
    t.Run("V2-pre-electra", func(t *testing.T) {
        bs, err := util.NewBeaconState()
        require.NoError(t, err)

            s := &Server{
                ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
                SlashingsPool:    &slashingsmock.PoolMock{PendingAttSlashings: []ethpbv1alpha1.AttSlashing{slashing1PreElectra, slashing2PreElectra}},
            }
        s := &Server{
            ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
            SlashingsPool:    &slashingsmock.PoolMock{PendingAttSlashings: []ethpbv1alpha1.AttSlashing{slashing1PreElectra, slashing2PreElectra}},
        }

            request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/beacon/pool/attester_slashings", nil)
            writer := httptest.NewRecorder()
            writer.Body = &bytes.Buffer{}
        request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/beacon/pool/attester_slashings", nil)
        writer := httptest.NewRecorder()
        writer.Body = &bytes.Buffer{}

            s.GetAttesterSlashingsV2(writer, request)
            require.Equal(t, http.StatusOK, writer.Code)
            resp := &structs.GetAttesterSlashingsResponse{}
            require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
            require.NotNil(t, resp)
            require.NotNil(t, resp.Data)
        s.GetAttesterSlashingsV2(writer, request)
        require.Equal(t, http.StatusOK, writer.Code)
        resp := &structs.GetAttesterSlashingsResponse{}
        require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
        require.NotNil(t, resp)
        require.NotNil(t, resp.Data)

            var slashings []*structs.AttesterSlashing
            require.NoError(t, json.Unmarshal(resp.Data, &slashings))
        var slashings []*structs.AttesterSlashing
        require.NoError(t, json.Unmarshal(resp.Data, &slashings))

            ss, err := structs.AttesterSlashingsToConsensus(slashings)
            require.NoError(t, err)
        ss, err := structs.AttesterSlashingsToConsensus(slashings)
        require.NoError(t, err)

            require.DeepEqual(t, slashing1PreElectra, ss[0])
            require.DeepEqual(t, slashing2PreElectra, ss[1])
        })
        t.Run("no-slashings", func(t *testing.T) {
            bs, err := util.NewBeaconStateElectra()
            require.NoError(t, err)

            s := &Server{
                ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
                SlashingsPool:    &slashingsmock.PoolMock{PendingAttSlashings: []ethpbv1alpha1.AttSlashing{}},
            }

            request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v2/beacon/pool/attester_slashings", nil)
            writer := httptest.NewRecorder()
            writer.Body = &bytes.Buffer{}

            s.GetAttesterSlashingsV2(writer, request)
            require.Equal(t, http.StatusOK, writer.Code)
            resp := &structs.GetAttesterSlashingsResponse{}
            require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
            require.NotNil(t, resp)
            require.NotNil(t, resp.Data)
            assert.Equal(t, "electra", resp.Version)

            // Unmarshal resp.Data into a slice of slashings
            var slashings []*structs.AttesterSlashingElectra
            require.NoError(t, json.Unmarshal(resp.Data, &slashings))
            require.Equal(t, 0, len(slashings))
        })
        require.DeepEqual(t, slashing1PreElectra, ss[0])
        require.DeepEqual(t, slashing2PreElectra, ss[1])
    })
}

@@ -2939,15 +2939,13 @@ func TestValidateEquivocation(t *testing.T) {
        st, err := util.NewBeaconState()
        require.NoError(t, err)
        require.NoError(t, st.SetSlot(10))
        blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
        require.NoError(t, err)
        roblock, err := blocks.NewROBlockWithRoot(blk, bytesutil.ToBytes32([]byte("root")))
        require.NoError(t, err)
        fc := doublylinkedtree.New()
        require.NoError(t, fc.InsertNode(context.Background(), st, roblock))
        require.NoError(t, fc.InsertNode(context.Background(), st, bytesutil.ToBytes32([]byte("root"))))
        server := &Server{
            ForkchoiceFetcher: &chainMock.ChainService{ForkChoiceStore: fc},
        }
        blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
        require.NoError(t, err)
        blk.SetSlot(st.Slot() + 1)

        require.NoError(t, server.validateEquivocation(blk.Block()))
@@ -2956,17 +2954,15 @@ func TestValidateEquivocation(t *testing.T) {
        st, err := util.NewBeaconState()
        require.NoError(t, err)
        require.NoError(t, st.SetSlot(10))
        blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
        require.NoError(t, err)
        blk.SetSlot(st.Slot())
        roblock, err := blocks.NewROBlockWithRoot(blk, bytesutil.ToBytes32([]byte("root")))
        require.NoError(t, err)

        fc := doublylinkedtree.New()
        require.NoError(t, fc.InsertNode(context.Background(), st, roblock))
        require.NoError(t, fc.InsertNode(context.Background(), st, bytesutil.ToBytes32([]byte("root"))))
        server := &Server{
            ForkchoiceFetcher: &chainMock.ChainService{ForkChoiceStore: fc},
        }
        blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
        require.NoError(t, err)
        blk.SetSlot(st.Slot())

        err = server.validateEquivocation(blk.Block())
        assert.ErrorContains(t, "already exists", err)
        require.ErrorIs(t, err, errEquivocatedBlock)

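The two cases above pin down the behavior under test: a block for a brand-new slot passes, while a block at a slot forkchoice has already seen fails with errEquivocatedBlock. A sketch of that check with the semantics inferred from the test expectations; the real validateEquivocation operates on forkchoice state, not bare integers:

package main

import (
    "errors"
    "fmt"
)

var errEquivocatedBlock = errors.New("block is equivocated")

// validateEquivocation sketches the rule the test exercises: a block
// proposed for a slot at or below the highest slot already seen in
// fork choice is rejected as a potential equivocation. (Assumed
// semantics, inferred from the test expectations above.)
func validateEquivocation(highestReceivedSlot, blockSlot uint64) error {
    if blockSlot <= highestReceivedSlot {
        return fmt.Errorf("block for slot %d already exists in fork choice: %w",
            blockSlot, errEquivocatedBlock)
    }
    return nil
}

func main() {
    fmt.Println(validateEquivocation(10, 11)) // <nil>
    fmt.Println(validateEquivocation(10, 10)) // ... already exists ...
}
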
@@ -24,28 +24,28 @@ import (
// Server defines a server implementation of the gRPC Beacon Chain service,
// providing RPC endpoints to access data relevant to the Ethereum Beacon Chain.
type Server struct {
    BeaconDB                db.ReadOnlyDatabase
    ChainInfoFetcher        blockchain.ChainInfoFetcher
    GenesisTimeFetcher      blockchain.TimeFetcher
    BlockReceiver           blockchain.BlockReceiver
    BlockNotifier           blockfeed.Notifier
    OperationNotifier       operation.Notifier
    Broadcaster             p2p.Broadcaster
    AttestationsPool        attestations.Pool
    SlashingsPool           slashings.PoolManager
    VoluntaryExitsPool      voluntaryexits.PoolManager
    StateGenService         stategen.StateManager
    Stater                  lookup.Stater
    Blocker                 lookup.Blocker
    HeadFetcher             blockchain.HeadFetcher
    TimeFetcher             blockchain.TimeFetcher
    OptimisticModeFetcher   blockchain.OptimisticModeFetcher
    V1Alpha1ValidatorServer eth.BeaconNodeValidatorServer
    SyncChecker             sync.Checker
    CanonicalHistory        *stategen.CanonicalHistory
    ExecutionReconstructor  execution.Reconstructor
    FinalizationFetcher     blockchain.FinalizationFetcher
    BLSChangesPool          blstoexec.PoolManager
    ForkchoiceFetcher       blockchain.ForkchoiceFetcher
    CoreService             *core.Service
    BeaconDB                      db.ReadOnlyDatabase
    ChainInfoFetcher              blockchain.ChainInfoFetcher
    GenesisTimeFetcher            blockchain.TimeFetcher
    BlockReceiver                 blockchain.BlockReceiver
    BlockNotifier                 blockfeed.Notifier
    OperationNotifier             operation.Notifier
    Broadcaster                   p2p.Broadcaster
    AttestationsPool              attestations.Pool
    SlashingsPool                 slashings.PoolManager
    VoluntaryExitsPool            voluntaryexits.PoolManager
    StateGenService               stategen.StateManager
    Stater                        lookup.Stater
    Blocker                       lookup.Blocker
    HeadFetcher                   blockchain.HeadFetcher
    TimeFetcher                   blockchain.TimeFetcher
    OptimisticModeFetcher         blockchain.OptimisticModeFetcher
    V1Alpha1ValidatorServer       eth.BeaconNodeValidatorServer
    SyncChecker                   sync.Checker
    CanonicalHistory              *stategen.CanonicalHistory
    ExecutionPayloadReconstructor execution.PayloadReconstructor
    FinalizationFetcher           blockchain.FinalizationFetcher
    BLSChangesPool                blstoexec.PoolManager
    ForkchoiceFetcher             blockchain.ForkchoiceFetcher
    CoreService                   *core.Service
}

@@ -4,7 +4,6 @@ go_library(
    name = "go_default_library",
    srcs = [
        "events.go",
        "log.go",
        "server.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/events",
@@ -59,6 +58,5 @@ go_test(
        "//testing/util:go_default_library",
        "@com_github_ethereum_go_ethereum//common:go_default_library",
        "@com_github_r3labs_sse_v2//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
    ],
)

@@ -28,6 +28,7 @@ import (
    eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v5/runtime/version"
    "github.com/prysmaticlabs/prysm/v5/time/slots"
    log "github.com/sirupsen/logrus"
)

const DefaultEventFeedDepth = 1000
@@ -73,6 +74,13 @@ var (
    errWriterUnusable = errors.New("http response writer is unusable")
)

// StreamingResponseWriter defines a type that can be used by the eventStreamer.
// This must be an http.ResponseWriter that supports flushing and hijacking.
type StreamingResponseWriter interface {
    http.ResponseWriter
    http.Flusher
}

// The eventStreamer uses lazyReaders to defer serialization until the moment the value is ready to be written to the client.
type lazyReader func() io.Reader

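The lazyReader type above defers serialization until write time. A runnable sketch of why that helps: the closure is cheap to enqueue, and the encoding cost is only paid for events that actually reach the client:

package main

import (
    "fmt"
    "io"
    "strings"
)

type lazyReader func() io.Reader

// Serialization is deferred: the closure captures the value, but the
// (possibly expensive) encoding only happens when the reader is built,
// i.e. right before the bytes are written to the client.
func eventReader(name string) lazyReader {
    return func() io.Reader {
        return strings.NewReader("event: " + name + "\n\n")
    }
}

func main() {
    lr := eventReader("head")
    b, _ := io.ReadAll(lr()) // serialization happens here, not at enqueue time
    fmt.Print(string(b))
}
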
@@ -142,7 +150,6 @@ func newTopicRequest(topics []string) (*topicRequest, error) {
// Servers may send SSE comments beginning with ':' for any purpose,
// including to keep the event stream connection alive in the presence of proxy servers.
func (s *Server) StreamEvents(w http.ResponseWriter, r *http.Request) {
    log.Debug("Starting StreamEvents handler")
    ctx, span := trace.StartSpan(r.Context(), "events.StreamEvents")
    defer span.End()

@@ -152,51 +159,47 @@ func (s *Server) StreamEvents(w http.ResponseWriter, r *http.Request) {
        return
    }

    timeout := s.EventWriteTimeout
    if timeout == 0 {
        timeout = time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
    sw, ok := w.(StreamingResponseWriter)
    if !ok {
        msg := "beacon node misconfiguration: http stack may not support required response handling features, like flushing"
        httputil.HandleError(w, msg, http.StatusInternalServerError)
        return
    }
    ka := s.KeepAliveInterval
    if ka == 0 {
        ka = timeout
    depth := s.EventFeedDepth
    if depth == 0 {
        depth = DefaultEventFeedDepth
    }
    buffSize := s.EventFeedDepth
    if buffSize == 0 {
        buffSize = DefaultEventFeedDepth
    es, err := newEventStreamer(depth, s.KeepAliveInterval)
    if err != nil {
        httputil.HandleError(w, err.Error(), http.StatusInternalServerError)
        return
    }

    api.SetSSEHeaders(w)
    sw := newStreamingResponseController(w, timeout)
    ctx, cancel := context.WithCancel(ctx)
    defer cancel()
    es := newEventStreamer(buffSize, ka)

    api.SetSSEHeaders(sw)
    go es.outboxWriteLoop(ctx, cancel, sw)
    if err := es.recvEventLoop(ctx, cancel, topics, s); err != nil {
        log.WithError(err).Debug("Shutting down StreamEvents handler.")
    }
    cleanupStart := time.Now()
    es.waitForExit()
    log.WithField("cleanup_wait", time.Since(cleanupStart)).Debug("streamEvents shutdown complete")
}

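StreamEvents relies on the SSE convention noted above: a line starting with ':' is a comment that clients ignore, which makes it a safe keep-alive through idle proxies. A sketch of the wire format; the exact comment text is illustrative:

package main

import (
    "fmt"
    "strings"
)

// keepAliveChunk returns an SSE comment frame. Per the SSE wire format,
// any line beginning with ':' is ignored by clients, so sending one
// periodically keeps intermediaries from timing out an idle stream.
func keepAliveChunk() string {
    return ": keepalive\n\n"
}

func main() {
    var sb strings.Builder
    sb.WriteString(keepAliveChunk())           // ignored by SSE clients
    sb.WriteString("event: head\ndata: {}\n\n") // a real event frame
    fmt.Print(sb.String())
}
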
func newEventStreamer(buffSize int, ka time.Duration) *eventStreamer {
    return &eventStreamer{
        outbox:        make(chan lazyReader, buffSize),
        keepAlive:     ka,
        openUntilExit: make(chan struct{}),
func newEventStreamer(buffSize int, ka time.Duration) (*eventStreamer, error) {
    if ka == 0 {
        ka = time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
    }
    return &eventStreamer{
        outbox:    make(chan lazyReader, buffSize),
        keepAlive: ka,
    }, nil
}

type eventStreamer struct {
    outbox        chan lazyReader
    keepAlive     time.Duration
    openUntilExit chan struct{}
    outbox    chan lazyReader
    keepAlive time.Duration
}

func (es *eventStreamer) recvEventLoop(ctx context.Context, cancel context.CancelFunc, req *topicRequest, s *Server) error {
    defer close(es.outbox)
    defer cancel()
    eventsChan := make(chan *feed.Event, len(es.outbox))
    if req.needOpsFeed {
        opsSub := s.OperationNotifier.OperationFeed().Subscribe(eventsChan)
@@ -225,6 +228,7 @@ func (es *eventStreamer) recvEventLoop(ctx context.Context, cancel context.Cance
        // channel should stay relatively empty, which gives this loop time to unsubscribe
        // and cleanup before the event stream channel fills and disrupts other readers.
        if err := es.safeWrite(ctx, lr); err != nil {
            cancel()
            // note: we could hijack the connection and close it here. Does that cause issues? What are the benefits?
            // A benefit of hijack and close is that it may force an error on the remote end, however just closing the context of the
            // http handler may be sufficient to cause the remote http response reader to close.
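The comment above describes the backpressure contract: the shared subscription channel must never block, so a full per-client outbox means that client gets dropped rather than stalling other readers. A sketch of that non-blocking send, assuming a buffered outbox channel; the real safeWrite also watches the request context:

package main

import (
    "errors"
    "fmt"
)

var errSlowReader = errors.New("client too slow, dropping stream")

// trySend never blocks: if this client's buffered outbox is full, the
// send fails immediately and the caller can cancel the client instead
// of backing up the shared event feed.
func trySend(outbox chan<- string, msg string) error {
    select {
    case outbox <- msg:
        return nil
    default:
        return errSlowReader
    }
}

func main() {
    outbox := make(chan string, 1)
    fmt.Println(trySend(outbox, "a")) // <nil>
    fmt.Println(trySend(outbox, "b")) // client too slow, dropping stream
}
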
@@ -261,13 +265,12 @@ func newlineReader() io.Reader {

// outboxWriteLoop runs in a separate goroutine. Its job is to write the values in the outbox to
// the client as fast as the client can read them.
func (es *eventStreamer) outboxWriteLoop(ctx context.Context, cancel context.CancelFunc, w *streamingResponseWriterController) {
func (es *eventStreamer) outboxWriteLoop(ctx context.Context, cancel context.CancelFunc, w StreamingResponseWriter) {
    var err error
    defer func() {
        if err != nil {
            log.WithError(err).Debug("Event streamer shutting down due to error.")
        }
        es.exit()
    }()
    defer cancel()
    // Write a keepalive at the start to test the connection and simplify test setup.
@@ -307,43 +310,18 @@ func (es *eventStreamer) outboxWriteLoop(ctx context.Context, cancel context.Can
    }
}

func (es *eventStreamer) exit() {
    drained := 0
    for range es.outbox {
        drained += 1
    }
    log.WithField("undelivered_events", drained).Debug("Event stream outbox drained.")
    close(es.openUntilExit)
}

// waitForExit blocks until the outboxWriteLoop has exited.
// While this function blocks, it is not yet safe to exit the http handler,
// because the outboxWriteLoop may still be writing to the http ResponseWriter.
func (es *eventStreamer) waitForExit() {
    <-es.openUntilExit
}

func writeLazyReaderWithRecover(w *streamingResponseWriterController, lr lazyReader) (err error) {
func writeLazyReaderWithRecover(w StreamingResponseWriter, lr lazyReader) (err error) {
    defer func() {
        if r := recover(); r != nil {
            log.WithField("panic", r).Error("Recovered from panic while writing event to client.")
            err = errWriterUnusable
        }
    }()
    r := lr()
    out, err := io.ReadAll(r)
    if err != nil {
        return err
    }
    _, err = w.Write(out)
    _, err = io.Copy(w, lr())
    return err
}

func (es *eventStreamer) writeOutbox(ctx context.Context, w *streamingResponseWriterController, first lazyReader) error {
// The outboxWriteLoop is responsible for managing the keep-alive timer and toggling between reading from the outbox
// when it is ready, only allowing the keep-alive to fire when there hasn't been a write in the keep-alive interval.
// Since outboxWriteLoop will get either the first event or the keep-alive, we let it pass in the first event to write,
// either the event's lazyReader, or nil for a keep-alive.
func (es *eventStreamer) writeOutbox(ctx context.Context, w StreamingResponseWriter, first lazyReader) error {
    needKeepAlive := true
    if first != nil {
        if err := writeLazyReaderWithRecover(w, first); err != nil {
@@ -359,11 +337,6 @@ func (es *eventStreamer) writeOutbox(ctx context.Context, w *streamingResponseWr
        case <-ctx.Done():
            return ctx.Err()
        case rf := <-es.outbox:
            // We don't want to call Flush until we've exhausted all the writes - it's always preferable to
            // just keep draining the outbox and rely on the underlying Write code to flush+block when it
            // needs to based on buffering. Whenever we fill the buffer with a string of writes, the underlying
            // code will flush on its own, so it's better to explicitly flush only once, after we've totally
            // drained the outbox, to catch any dangling bytes stuck in a buffer.
            if err := writeLazyReaderWithRecover(w, rf); err != nil {
                return err
            }
@@ -374,7 +347,8 @@ func (es *eventStreamer) writeOutbox(ctx context.Context, w *streamingResponseWr
                return err
            }
        }
        return w.Flush()
        w.Flush()
        return nil
    }
}
}

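The comment above argues for draining the outbox completely before flushing once. A sketch of that batch-then-flush shape, using a bufio.Writer as a stand-in for the buffered response stream:

package main

import (
    "bufio"
    "fmt"
    "os"
)

// writeBatch keeps draining queued items and only flushes once the
// queue is empty, so buffered bytes are pushed to the client once per
// batch rather than once per event.
func writeBatch(w *bufio.Writer, outbox chan string) error {
    for {
        select {
        case msg := <-outbox:
            if _, err := w.WriteString(msg); err != nil {
                return err
            }
        default:
            return w.Flush() // queue drained; catch any dangling bytes
        }
    }
}

func main() {
    outbox := make(chan string, 2)
    outbox <- "data: 1\n\n"
    outbox <- "data: 2\n\n"
    w := bufio.NewWriter(os.Stdout)
    if err := writeBatch(w, outbox); err != nil {
        fmt.Println(err)
    }
}
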
@@ -664,51 +638,3 @@ func (s *Server) currentPayloadAttributes(ctx context.Context) (lazyReader, erro
        })
    }, nil
}

func newStreamingResponseController(rw http.ResponseWriter, timeout time.Duration) *streamingResponseWriterController {
    rc := http.NewResponseController(rw)
    return &streamingResponseWriterController{
        timeout: timeout,
        rw:      rw,
        rc:      rc,
    }
}

// streamingResponseWriterController provides an interface similar to an http.ResponseWriter,
// wrapping an http.ResponseWriter and an http.ResponseController, using the ResponseController
// to set and clear deadlines for Write and Flush methods, and delegating to the underlying
// types to Write and Flush.
type streamingResponseWriterController struct {
    timeout time.Duration
    rw      http.ResponseWriter
    rc      *http.ResponseController
}

func (c *streamingResponseWriterController) Write(b []byte) (int, error) {
    if err := c.setDeadline(); err != nil {
        return 0, err
    }
    out, err := c.rw.Write(b)
    if err != nil {
        return out, err
    }
    return out, c.clearDeadline()
}

func (c *streamingResponseWriterController) setDeadline() error {
    return c.rc.SetWriteDeadline(time.Now().Add(c.timeout))
}

func (c *streamingResponseWriterController) clearDeadline() error {
    return c.rc.SetWriteDeadline(time.Time{})
}

func (c *streamingResponseWriterController) Flush() error {
    if err := c.setDeadline(); err != nil {
        return err
    }
    if err := c.rc.Flush(); err != nil {
        return err
    }
    return c.clearDeadline()
}

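The controller above wraps each Write and Flush in a write deadline via http.ResponseController. A sketch of the same pattern inline in a handler, assuming Go 1.20+; the route and timeout are illustrative:

package main

import (
    "net/http"
    "time"
)

// handler arms a per-write deadline, writes, flushes, then clears the
// deadline so an idle but healthy stream is not killed between events.
func handler(w http.ResponseWriter, r *http.Request) {
    rc := http.NewResponseController(w)
    if err := rc.SetWriteDeadline(time.Now().Add(5 * time.Second)); err != nil {
        return
    }
    if _, err := w.Write([]byte(": keepalive\n\n")); err != nil {
        return
    }
    _ = rc.Flush()
    _ = rc.SetWriteDeadline(time.Time{}) // zero time clears the deadline
}

func main() {
    http.HandleFunc("/events", handler)
    _ = http.ListenAndServe("127.0.0.1:8080", nil)
}
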
@@ -27,12 +27,9 @@ import (
    "github.com/prysmaticlabs/prysm/v5/testing/require"
    "github.com/prysmaticlabs/prysm/v5/testing/util"
    sse "github.com/r3labs/sse/v2"
    "github.com/sirupsen/logrus"
)

var testEventWriteTimeout = 100 * time.Millisecond

func requireAllEventsReceived(t *testing.T, stn, opn *mockChain.EventFeedWrapper, events []*feed.Event, req *topicRequest, s *Server, w *StreamingResponseWriterRecorder, logs chan *logrus.Entry) {
func requireAllEventsReceived(t *testing.T, stn, opn *mockChain.EventFeedWrapper, events []*feed.Event, req *topicRequest, s *Server, w *StreamingResponseWriterRecorder) {
    // maxBufferSize param copied from sse lib client code
    sseR := sse.NewEventStreamReader(w.Body(), 1<<24)
    ctx, cancel := context.WithTimeout(context.Background(), time.Second)
@@ -80,29 +77,21 @@ func requireAllEventsReceived(t *testing.T, stn, opn *mockChain.EventFeedWrapper
        }
        }
    }()
    for {
        select {
        case entry := <-logs:
            errAttr, ok := entry.Data[logrus.ErrorKey]
            if ok {
                t.Errorf("unexpected error in logs: %v", errAttr)
            }
        case <-done:
            require.Equal(t, 0, len(expected), "expected events not seen")
            return
        case <-ctx.Done():
            t.Fatalf("context canceled / timed out waiting for events, err=%v", ctx.Err())
        }
    select {
    case <-done:
        break
    case <-ctx.Done():
        t.Fatalf("context canceled / timed out waiting for events, err=%v", ctx.Err())
    }
    require.Equal(t, 0, len(expected), "expected events not seen")
}

func (tr *topicRequest) testHttpRequest(ctx context.Context, _ *testing.T) *http.Request {
func (tr *topicRequest) testHttpRequest(_ *testing.T) *http.Request {
    tq := make([]string, 0, len(tr.topics))
    for topic := range tr.topics {
        tq = append(tq, "topics="+topic)
    }
    req := httptest.NewRequest(http.MethodGet, fmt.Sprintf("http://example.com/eth/v1/events?%s", strings.Join(tq, "&")), nil)
    return req.WithContext(ctx)
    return httptest.NewRequest(http.MethodGet, fmt.Sprintf("http://example.com/eth/v1/events?%s", strings.Join(tq, "&")), nil)
}

func operationEventsFixtures(t *testing.T) (*topicRequest, []*feed.Event) {
@@ -246,77 +235,31 @@ func operationEventsFixtures(t *testing.T) (*topicRequest, []*feed.Event) {
        }
    }

type streamTestSync struct {
    done   chan struct{}
    cancel func()
    undo   func()
    logs   chan *logrus.Entry
    ctx    context.Context
    t      *testing.T
}

func (s *streamTestSync) cleanup() {
    s.cancel()
    select {
    case <-s.done:
    case <-time.After(10 * time.Millisecond):
        s.t.Fatal("timed out waiting for handler to finish")
    }
    s.undo()
}

func (s *streamTestSync) markDone() {
    close(s.done)
}

func newStreamTestSync(t *testing.T) *streamTestSync {
    logChan := make(chan *logrus.Entry, 100)
    cew := util.NewChannelEntryWriter(logChan)
    undo := util.RegisterHookWithUndo(logger, cew)
    ctx, cancel := context.WithCancel(context.Background())
    return &streamTestSync{
        t:      t,
        ctx:    ctx,
        cancel: cancel,
        logs:   logChan,
        undo:   undo,
        done:   make(chan struct{}),
    }
}

func TestStreamEvents_OperationsEvents(t *testing.T) {
    t.Run("operations", func(t *testing.T) {
        testSync := newStreamTestSync(t)
        defer testSync.cleanup()
        stn := mockChain.NewEventFeedWrapper()
        opn := mockChain.NewEventFeedWrapper()
        s := &Server{
            StateNotifier:     &mockChain.SimpleNotifier{Feed: stn},
            OperationNotifier: &mockChain.SimpleNotifier{Feed: opn},
            EventWriteTimeout: testEventWriteTimeout,
        }

        topics, events := operationEventsFixtures(t)
        request := topics.testHttpRequest(testSync.ctx, t)
        w := NewStreamingResponseWriterRecorder(testSync.ctx)
        request := topics.testHttpRequest(t)
        w := NewStreamingResponseWriterRecorder()

        go func() {
            s.StreamEvents(w, request)
            testSync.markDone()
        }()

        requireAllEventsReceived(t, stn, opn, events, topics, s, w, testSync.logs)
        requireAllEventsReceived(t, stn, opn, events, topics, s, w)
    })
    t.Run("state", func(t *testing.T) {
        testSync := newStreamTestSync(t)
        defer testSync.cleanup()

        stn := mockChain.NewEventFeedWrapper()
        opn := mockChain.NewEventFeedWrapper()
        s := &Server{
            StateNotifier:     &mockChain.SimpleNotifier{Feed: stn},
            OperationNotifier: &mockChain.SimpleNotifier{Feed: opn},
            EventWriteTimeout: testEventWriteTimeout,
        }

        topics, err := newTopicRequest([]string{
@@ -326,8 +269,8 @@ func TestStreamEvents_OperationsEvents(t *testing.T) {
            BlockTopic,
        })
        require.NoError(t, err)
        request := topics.testHttpRequest(testSync.ctx, t)
        w := NewStreamingResponseWriterRecorder(testSync.ctx)
        request := topics.testHttpRequest(t)
        w := NewStreamingResponseWriterRecorder()

        b, err := blocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlock(&eth.SignedBeaconBlock{}))
        require.NoError(t, err)
@@ -380,10 +323,9 @@ func TestStreamEvents_OperationsEvents(t *testing.T) {

        go func() {
            s.StreamEvents(w, request)
            testSync.markDone()
        }()

        requireAllEventsReceived(t, stn, opn, events, topics, s, w, testSync.logs)
        requireAllEventsReceived(t, stn, opn, events, topics, s, w)
    })
    t.Run("payload attributes", func(t *testing.T) {
        type testCase struct {
@@ -454,93 +396,59 @@ func TestStreamEvents_OperationsEvents(t *testing.T) {
            },
        }
        for _, tc := range testCases {
            t.Run(tc.name, func(t *testing.T) {
                testSync := newStreamTestSync(t)
                defer testSync.cleanup()
                st := tc.getState()
                v := &eth.Validator{ExitEpoch: math.MaxUint64}
                require.NoError(t, st.SetValidators([]*eth.Validator{v}))
                currentSlot := primitives.Slot(0)
                // to avoid slot processing
                require.NoError(t, st.SetSlot(currentSlot+1))
                b := tc.getBlock()
                mockChainService := &mockChain.ChainService{
                    Root:  make([]byte, 32),
                    State: st,
                    Block: b,
                    Slot:  &currentSlot,
                }

            st := tc.getState()
            v := &eth.Validator{ExitEpoch: math.MaxUint64}
            require.NoError(t, st.SetValidators([]*eth.Validator{v}))
            currentSlot := primitives.Slot(0)
            // to avoid slot processing
            require.NoError(t, st.SetSlot(currentSlot+1))
            b := tc.getBlock()
            mockChainService := &mockChain.ChainService{
                Root:  make([]byte, 32),
                State: st,
                Block: b,
                Slot:  &currentSlot,
            }
            stn := mockChain.NewEventFeedWrapper()
            opn := mockChain.NewEventFeedWrapper()
            s := &Server{
                StateNotifier:          &mockChain.SimpleNotifier{Feed: stn},
                OperationNotifier:      &mockChain.SimpleNotifier{Feed: opn},
                HeadFetcher:            mockChainService,
                ChainInfoFetcher:       mockChainService,
                TrackedValidatorsCache: cache.NewTrackedValidatorsCache(),
            }
            if tc.SetTrackedValidatorsCache != nil {
                tc.SetTrackedValidatorsCache(s.TrackedValidatorsCache)
            }
            topics, err := newTopicRequest([]string{PayloadAttributesTopic})
            require.NoError(t, err)
            request := topics.testHttpRequest(t)
            w := NewStreamingResponseWriterRecorder()
            events := []*feed.Event{&feed.Event{Type: statefeed.MissedSlot}}

                stn := mockChain.NewEventFeedWrapper()
                opn := mockChain.NewEventFeedWrapper()
                s := &Server{
                    StateNotifier:          &mockChain.SimpleNotifier{Feed: stn},
                    OperationNotifier:      &mockChain.SimpleNotifier{Feed: opn},
                    HeadFetcher:            mockChainService,
                    ChainInfoFetcher:       mockChainService,
                    TrackedValidatorsCache: cache.NewTrackedValidatorsCache(),
                    EventWriteTimeout:      testEventWriteTimeout,
                }
                if tc.SetTrackedValidatorsCache != nil {
                    tc.SetTrackedValidatorsCache(s.TrackedValidatorsCache)
                }
                topics, err := newTopicRequest([]string{PayloadAttributesTopic})
                require.NoError(t, err)
                request := topics.testHttpRequest(testSync.ctx, t)
                w := NewStreamingResponseWriterRecorder(testSync.ctx)
                events := []*feed.Event{&feed.Event{Type: statefeed.MissedSlot}}

                go func() {
                    s.StreamEvents(w, request)
                    testSync.markDone()
                }()
                requireAllEventsReceived(t, stn, opn, events, topics, s, w, testSync.logs)
            })
            go func() {
                s.StreamEvents(w, request)
            }()
            requireAllEventsReceived(t, stn, opn, events, topics, s, w)
        }
    })
}

func TestStuckReaderScenarios(t *testing.T) {
    cases := []struct {
        name       string
        queueDepth func([]*feed.Event) int
    }{
        {
            name: "slow reader - queue overflows",
            queueDepth: func(events []*feed.Event) int {
                return len(events) - 1
            },
        },
        {
            name: "slow reader - all queued, but writer is stuck, write timeout",
            queueDepth: func(events []*feed.Event) int {
                return len(events) + 1
            },
        },
    }
    for _, c := range cases {
        t.Run(c.name, func(t *testing.T) {
            wedgedWriterTestCase(t, c.queueDepth)
        })
    }
}

func wedgedWriterTestCase(t *testing.T, queueDepth func([]*feed.Event) int) {
func TestStuckReader(t *testing.T) {
    topics, events := operationEventsFixtures(t)
    require.Equal(t, 8, len(events))

    // set eventFeedDepth to a number lower than the events we intend to send to force the server to drop the reader.
    stn := mockChain.NewEventFeedWrapper()
    opn := mockChain.NewEventFeedWrapper()
    s := &Server{
        EventWriteTimeout: 10 * time.Millisecond,
        StateNotifier:     &mockChain.SimpleNotifier{Feed: stn},
        OperationNotifier: &mockChain.SimpleNotifier{Feed: opn},
        EventFeedDepth:    queueDepth(events),
        EventFeedDepth:    len(events) - 1,
    }

    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    defer cancel()
    eventsWritten := make(chan struct{})
    go func() {
@@ -560,8 +468,8 @@ func wedgedWriterTestCase(t *testing.T, queueDepth func([]*feed.Event) int) {
        close(eventsWritten)
    }()

    request := topics.testHttpRequest(ctx, t)
    w := NewStreamingResponseWriterRecorder(ctx)
    request := topics.testHttpRequest(t)
    w := NewStreamingResponseWriterRecorder()

    handlerFinished := make(chan struct{})
    go func() {

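Both stuck-reader cases above ultimately rely on a writer that wedges because nobody reads the other end of the stream. io.Pipe gives exactly that behavior, which makes it a handy way to simulate a stalled client in a test:

package main

import (
    "fmt"
    "io"
    "time"
)

// A pipe write blocks until a matching read happens, so never reading
// the other end wedges the writer, just like a stalled HTTP client.
func main() {
    r, w := io.Pipe()
    _ = r // intentionally never read

    done := make(chan error, 1)
    go func() {
        _, err := w.Write([]byte("data: 1\n\n"))
        done <- err
    }()

    select {
    case err := <-done:
        fmt.Println("write finished:", err)
    case <-time.After(100 * time.Millisecond):
        fmt.Println("write is wedged, as expected")
    }
}
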
@@ -1,12 +1,10 @@
package events

import (
    "context"
    "io"
    "net/http"
    "net/http/httptest"
    "testing"
    "time"

    "github.com/prysmaticlabs/prysm/v5/testing/require"
)
@@ -19,66 +17,32 @@ type StreamingResponseWriterRecorder struct {
    status        chan int
    bodyRecording []byte
    flushed       bool
    writeDeadline time.Time
    ctx           context.Context
}

func (w *StreamingResponseWriterRecorder) StatusChan() chan int {
    return w.status
}

func NewStreamingResponseWriterRecorder(ctx context.Context) *StreamingResponseWriterRecorder {
func NewStreamingResponseWriterRecorder() *StreamingResponseWriterRecorder {
    r, w := io.Pipe()
    return &StreamingResponseWriterRecorder{
        ResponseWriter: httptest.NewRecorder(),
        r:              r,
        w:              w,
        status:         make(chan int, 1),
        ctx:            ctx,
    }
}

// Write implements http.ResponseWriter.
func (w *StreamingResponseWriterRecorder) Write(data []byte) (int, error) {
    w.WriteHeader(http.StatusOK)
    written, err := writeWithDeadline(w.ctx, w.w, data, w.writeDeadline)
    n, err := w.w.Write(data)
    if err != nil {
        return written, err
        return n, err
    }
    // The test response writer is non-blocking.
    return w.ResponseWriter.Write(data)
}

var zeroTimeValue = time.Time{}

func writeWithDeadline(ctx context.Context, w io.Writer, data []byte, deadline time.Time) (int, error) {
    result := struct {
        written int
        err     error
    }{}
    done := make(chan struct{})
    go func() {
        defer close(done)
        result.written, result.err = w.Write(data)
    }()
    if deadline == zeroTimeValue {
        select {
        case <-ctx.Done():
            return 0, ctx.Err()
        case <-done:
            return result.written, result.err
        }
    }
    select {
    case <-time.After(time.Until(deadline)):
        return 0, http.ErrHandlerTimeout
    case <-done:
        return result.written, result.err
    case <-ctx.Done():
        return 0, ctx.Err()
    }
}

// WriteHeader implements http.ResponseWriter.
func (w *StreamingResponseWriterRecorder) WriteHeader(statusCode int) {
    if w.statusWritten != nil {
@@ -101,7 +65,6 @@ func (w *StreamingResponseWriterRecorder) RequireStatus(t *testing.T, status int
}

func (w *StreamingResponseWriterRecorder) Flush() {
    w.WriteHeader(200)
    fw, ok := w.ResponseWriter.(http.Flusher)
    if ok {
        fw.Flush()
@@ -109,9 +72,4 @@ func (w *StreamingResponseWriterRecorder) Flush() {
    w.flushed = true
}

func (w *StreamingResponseWriterRecorder) SetWriteDeadline(d time.Time) error {
    w.writeDeadline = d
    return nil
}

var _ http.ResponseWriter = &StreamingResponseWriterRecorder{}

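The recorder above tees handler writes into an io.Pipe so a test can consume the stream like a live client. A reduced sketch of that producer/consumer shape:

package main

import (
    "bufio"
    "fmt"
    "io"
)

// A goroutine plays the handler, writing SSE-style frames into the
// pipe; the main goroutine plays the test client, reading them back.
func main() {
    r, w := io.Pipe()

    go func() {
        defer w.Close()
        for i := 1; i <= 2; i++ {
            fmt.Fprintf(w, "data: %d\n\n", i)
        }
    }()

    scanner := bufio.NewScanner(r)
    for scanner.Scan() {
        if line := scanner.Text(); line != "" {
            fmt.Println("client saw:", line)
        }
    }
}
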
@@ -1,6 +0,0 @@
package events

import "github.com/sirupsen/logrus"

var logger = logrus.StandardLogger()
var log = logger.WithField("prefix", "events")
@@ -22,5 +22,4 @@ type Server struct {
    TrackedValidatorsCache *cache.TrackedValidatorsCache
    KeepAliveInterval      time.Duration
    EventFeedDepth         int
    EventWriteTimeout      time.Duration
}

@@ -329,7 +329,7 @@ func prepareForkchoiceState(
    payloadHash [32]byte,
    justified *eth.Checkpoint,
    finalized *eth.Checkpoint,
) (state.BeaconState, blocks.ROBlock, error) {
) (state.BeaconState, [32]byte, error) {
    blockHeader := &eth.BeaconBlockHeader{
        ParentRoot: parentRoot[:],
    }
@@ -350,24 +350,5 @@ func prepareForkchoiceState(

    base.BlockRoots[0] = append(base.BlockRoots[0], blockRoot[:]...)
    st, err := state_native.InitializeFromProtoBellatrix(base)
    if err != nil {
        return nil, blocks.ROBlock{}, err
    }
    blk := &eth.SignedBeaconBlockBellatrix{
        Block: &eth.BeaconBlockBellatrix{
            Slot:       slot,
            ParentRoot: parentRoot[:],
            Body: &eth.BeaconBlockBodyBellatrix{
                ExecutionPayload: &enginev1.ExecutionPayload{
                    BlockHash: payloadHash[:],
                },
            },
        },
    }
    signed, err := blocks.NewSignedBeaconBlock(blk)
    if err != nil {
        return nil, blocks.ROBlock{}, err
    }
    roblock, err := blocks.NewROBlockWithRoot(signed, blockRoot)
    return st, roblock, err
    return st, blockRoot, err
}

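The older signature above returns a blocks.ROBlock, a block paired with its precomputed root so downstream code never re-hashes it. A toy sketch of that idea with stand-in types; the hashing here is illustrative, not the real SSZ hash-tree-root:

package main

import (
    "crypto/sha256"
    "fmt"
)

type block struct{ slot uint64 }

// roBlock carries a block together with its already-computed root, so
// consumers can read the root without recomputing it.
type roBlock struct {
    blk  block
    root [32]byte
}

func newROBlockWithRoot(b block, root [32]byte) roBlock {
    return roBlock{blk: b, root: root}
}

func (r roBlock) Root() [32]byte { return r.root }

func main() {
    b := block{slot: 7}
    root := sha256.Sum256([]byte(fmt.Sprintf("block-%d", b.slot))) // stand-in hash
    rb := newROBlockWithRoot(b, root)
    fmt.Printf("slot=%d root=%x...\n", rb.blk.slot, rb.Root()[:4])
}
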
@@ -56,9 +56,7 @@ func (_ *Server) SetLoggingLevel(_ context.Context, req *pbrpc.LoggingLevelReque
        // Libp2p specific logging.
        golog.SetAllLoggers(golog.LevelDebug)
        // Geth specific logging.
        glogger := gethlog.NewGlogHandler(gethlog.StreamHandler(os.Stderr, gethlog.TerminalFormat(true)))
        glogger.Verbosity(gethlog.LvlTrace)
        gethlog.Root().SetHandler(glogger)
        gethlog.SetDefault(gethlog.NewLogger(gethlog.NewTerminalHandlerWithLevel(os.Stderr, gethlog.LvlTrace, true)))
    }
    return &empty.Empty{}, nil
}

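The hunk above migrates from go-ethereum's glog-style handler wiring to the newer slog-backed logger. A sketch of the new one-liner in isolation, assuming a go-ethereum version that ships the SetDefault/NewLogger/NewTerminalHandlerWithLevel API used in the diff:

package main

import (
    "os"

    gethlog "github.com/ethereum/go-ethereum/log"
)

func main() {
    // One call replaces the old NewGlogHandler/Verbosity/SetHandler trio.
    gethlog.SetDefault(gethlog.NewLogger(gethlog.NewTerminalHandlerWithLevel(os.Stderr, gethlog.LvlTrace, true)))
    gethlog.Trace("tracing enabled")
}
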
@@ -14,10 +14,8 @@ import (
    state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
    fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
    enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
    v1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
    ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v5/testing/require"
@@ -151,7 +149,7 @@ func createState(
    payloadHash [32]byte,
    justified *ethpb.Checkpoint,
    finalized *ethpb.Checkpoint,
) (state.BeaconState, blocks.ROBlock, error) {
) (state.BeaconState, [32]byte, error) {

    base := &ethpb.BeaconStateBellatrix{
        Slot: slot,
@@ -169,24 +167,5 @@ func createState(

    base.BlockRoots[0] = append(base.BlockRoots[0], blockRoot[:]...)
    st, err := state_native.InitializeFromProtoBellatrix(base)
    if err != nil {
        return nil, blocks.ROBlock{}, err
    }
    blk := &ethpb.SignedBeaconBlockBellatrix{
        Block: &ethpb.BeaconBlockBellatrix{
            Slot:       slot,
            ParentRoot: parentRoot[:],
            Body: &ethpb.BeaconBlockBodyBellatrix{
                ExecutionPayload: &enginev1.ExecutionPayload{
                    BlockHash: payloadHash[:],
                },
            },
        },
    }
    signed, err := blocks.NewSignedBeaconBlock(blk)
    if err != nil {
        return nil, blocks.ROBlock{}, err
    }
    roblock, err := blocks.NewROBlockWithRoot(signed, blockRoot)
    return st, roblock, err
    return st, blockRoot, err
}

@@ -91,55 +91,55 @@ type Service struct {
|
||||
|
||||
// Config options for the beacon node RPC server.
|
||||
type Config struct {
|
||||
ExecutionReconstructor execution.Reconstructor
|
||||
Host string
|
||||
Port string
|
||||
CertFlag string
|
||||
KeyFlag string
|
||||
BeaconMonitoringHost string
|
||||
BeaconMonitoringPort int
|
||||
BeaconDB db.HeadAccessDatabase
|
||||
ChainInfoFetcher blockchain.ChainInfoFetcher
|
||||
HeadFetcher blockchain.HeadFetcher
|
||||
CanonicalFetcher blockchain.CanonicalFetcher
|
||||
ForkFetcher blockchain.ForkFetcher
|
||||
ForkchoiceFetcher blockchain.ForkchoiceFetcher
|
||||
FinalizationFetcher blockchain.FinalizationFetcher
|
||||
AttestationReceiver blockchain.AttestationReceiver
|
||||
BlockReceiver blockchain.BlockReceiver
|
||||
BlobReceiver blockchain.BlobReceiver
|
||||
ExecutionChainService execution.Chain
|
||||
ChainStartFetcher execution.ChainStartFetcher
|
||||
ExecutionChainInfoFetcher execution.ChainInfoFetcher
|
||||
GenesisTimeFetcher blockchain.TimeFetcher
|
||||
GenesisFetcher blockchain.GenesisFetcher
|
||||
MockEth1Votes bool
|
||||
EnableDebugRPCEndpoints bool
|
||||
AttestationsPool attestations.Pool
|
||||
ExitPool voluntaryexits.PoolManager
|
||||
SlashingsPool slashings.PoolManager
|
||||
SyncCommitteeObjectPool synccommittee.Pool
|
||||
BLSChangesPool blstoexec.PoolManager
|
||||
SyncService chainSync.Checker
|
||||
Broadcaster p2p.Broadcaster
|
||||
PeersFetcher p2p.PeersProvider
|
||||
PeerManager p2p.PeerManager
|
||||
MetadataProvider p2p.MetadataProvider
|
||||
DepositFetcher cache.DepositFetcher
|
||||
PendingDepositFetcher depositsnapshot.PendingDepositsFetcher
|
||||
StateNotifier statefeed.Notifier
|
||||
BlockNotifier blockfeed.Notifier
|
||||
OperationNotifier opfeed.Notifier
|
||||
StateGen *stategen.State
|
||||
MaxMsgSize int
|
||||
ExecutionEngineCaller execution.EngineCaller
|
||||
OptimisticModeFetcher blockchain.OptimisticModeFetcher
|
||||
BlockBuilder builder.BlockBuilder
|
||||
Router *http.ServeMux
|
||||
ClockWaiter startup.ClockWaiter
|
||||
BlobStorage *filesystem.BlobStorage
|
||||
TrackedValidatorsCache *cache.TrackedValidatorsCache
|
||||
PayloadIDCache *cache.PayloadIDCache
|
||||
ExecutionPayloadReconstructor execution.PayloadReconstructor
|
||||
Host string
|
||||
Port string
|
||||
CertFlag string
|
||||
KeyFlag string
|
||||
BeaconMonitoringHost string
|
||||
BeaconMonitoringPort int
|
||||
BeaconDB db.HeadAccessDatabase
|
||||
ChainInfoFetcher blockchain.ChainInfoFetcher
|
||||
HeadFetcher blockchain.HeadFetcher
|
||||
CanonicalFetcher blockchain.CanonicalFetcher
|
||||
ForkFetcher blockchain.ForkFetcher
|
||||
ForkchoiceFetcher blockchain.ForkchoiceFetcher
|
||||
FinalizationFetcher blockchain.FinalizationFetcher
|
||||
AttestationReceiver blockchain.AttestationReceiver
|
||||
BlockReceiver blockchain.BlockReceiver
|
||||
BlobReceiver blockchain.BlobReceiver
|
||||
ExecutionChainService execution.Chain
|
||||
ChainStartFetcher execution.ChainStartFetcher
|
||||
ExecutionChainInfoFetcher execution.ChainInfoFetcher
|
||||
GenesisTimeFetcher blockchain.TimeFetcher
|
||||
GenesisFetcher blockchain.GenesisFetcher
|
||||
MockEth1Votes bool
|
||||
EnableDebugRPCEndpoints bool
|
||||
AttestationsPool attestations.Pool
|
||||
ExitPool voluntaryexits.PoolManager
|
||||
SlashingsPool slashings.PoolManager
|
||||
SyncCommitteeObjectPool synccommittee.Pool
|
||||
BLSChangesPool blstoexec.PoolManager
|
||||
SyncService chainSync.Checker
|
||||
Broadcaster p2p.Broadcaster
|
||||
PeersFetcher p2p.PeersProvider
|
||||
PeerManager p2p.PeerManager
|
||||
MetadataProvider p2p.MetadataProvider
|
||||
DepositFetcher cache.DepositFetcher
|
||||
PendingDepositFetcher depositsnapshot.PendingDepositsFetcher
|
||||
StateNotifier statefeed.Notifier
|
||||
BlockNotifier blockfeed.Notifier
|
||||
OperationNotifier opfeed.Notifier
|
||||
StateGen *stategen.State
|
||||
MaxMsgSize int
|
||||
ExecutionEngineCaller execution.EngineCaller
|
||||
OptimisticModeFetcher blockchain.OptimisticModeFetcher
|
||||
BlockBuilder builder.BlockBuilder
|
||||
Router *http.ServeMux
|
||||
ClockWaiter startup.ClockWaiter
|
||||
BlobStorage *filesystem.BlobStorage
|
||||
TrackedValidatorsCache *cache.TrackedValidatorsCache
|
||||
PayloadIDCache *cache.PayloadIDCache
|
||||
}
|
||||
|
||||
// NewService instantiates a new RPC service instance that will
|
||||
|
||||
@@ -82,7 +82,6 @@ go_library(
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_prysmaticlabs_fastssz//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -1,15 +1,12 @@
|
||||
package state_native
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
customtypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native/custom-types"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/features"
|
||||
consensus_types "github.com/prysmaticlabs/prysm/v5/consensus-types"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// ToProtoUnsafe returns the pointer value of the underlying
|
||||
@@ -18,19 +15,16 @@ func (b *BeaconState) ToProtoUnsafe() interface{} {
|
||||
if b == nil {
|
||||
return nil
|
||||
}
|
||||
startTime := time.Now()
|
||||
|
||||
gvrCopy := b.genesisValidatorsRoot
|
||||
br := b.blockRootsVal().Slice()
|
||||
sr := b.stateRootsVal().Slice()
|
||||
rm := b.randaoMixesVal().Slice()
|
||||
log.Infof("Generated slices for state in %s", time.Since(startTime).String())
|
||||
var vals []*ethpb.Validator
|
||||
var bals []uint64
|
||||
if features.Get().EnableExperimentalState {
|
||||
vals = b.validatorsVal()
|
||||
log.Infof("Generated validators for state in %s", time.Since(startTime).String())
|
||||
bals = b.balancesVal()
|
||||
log.Infof("Generated balances for state in %s", time.Since(startTime).String())
|
||||
} else {
|
||||
vals = b.validators
|
||||
bals = b.balances
|
||||
@@ -148,9 +142,6 @@ func (b *BeaconState) ToProtoUnsafe() interface{} {
|
||||
HistoricalSummaries: b.historicalSummaries,
|
||||
}
|
||||
case version.Deneb:
|
||||
defer func() {
|
||||
log.Infof("Generated roots and inactivitu scores for state in %s", time.Since(startTime).String())
|
||||
		}()
		return &ethpb.BeaconStateDeneb{
			GenesisTime: b.genesisTime,
			GenesisValidatorsRoot: gvrCopy[:],

@@ -49,59 +49,56 @@ func (b *BeaconState) NextWithdrawalValidatorIndex() (primitives.ValidatorIndex,
// Spec definition:
//
// def get_expected_withdrawals(state: BeaconState) -> Tuple[Sequence[Withdrawal], uint64]:
//     epoch = get_current_epoch(state)
//     withdrawal_index = state.next_withdrawal_index
//     validator_index = state.next_withdrawal_validator_index
//     withdrawals: List[Withdrawal] = []
//     processed_partial_withdrawals_count = 0
//     epoch = get_current_epoch(state)
//     withdrawal_index = state.next_withdrawal_index
//     validator_index = state.next_withdrawal_validator_index
//     withdrawals: List[Withdrawal] = []
//
//     # [New in Electra:EIP7251] Consume pending partial withdrawals
//     for withdrawal in state.pending_partial_withdrawals:
//         if withdrawal.withdrawable_epoch > epoch or len(withdrawals) == MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP:
//             break
//     # [New in Electra:EIP7251] Consume pending partial withdrawals
//     for withdrawal in state.pending_partial_withdrawals:
//         if withdrawal.withdrawable_epoch > epoch or len(withdrawals) == MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP:
//             break
//
//         validator = state.validators[withdrawal.index]
//         has_sufficient_effective_balance = validator.effective_balance >= MIN_ACTIVATION_BALANCE
//         has_excess_balance = state.balances[withdrawal.index] > MIN_ACTIVATION_BALANCE
//         if validator.exit_epoch == FAR_FUTURE_EPOCH and has_sufficient_effective_balance and has_excess_balance:
//             withdrawable_balance = min(state.balances[withdrawal.index] - MIN_ACTIVATION_BALANCE, withdrawal.amount)
//             withdrawals.append(Withdrawal(
//                 index=withdrawal_index,
//                 validator_index=withdrawal.index,
//                 address=ExecutionAddress(validator.withdrawal_credentials[12:]),
//                 amount=withdrawable_balance,
//             ))
//             withdrawal_index += WithdrawalIndex(1)
//         validator = state.validators[withdrawal.index]
//         has_sufficient_effective_balance = validator.effective_balance >= MIN_ACTIVATION_BALANCE
//         has_excess_balance = state.balances[withdrawal.index] > MIN_ACTIVATION_BALANCE
//         if validator.exit_epoch == FAR_FUTURE_EPOCH and has_sufficient_effective_balance and has_excess_balance:
//             withdrawable_balance = min(state.balances[withdrawal.index] - MIN_ACTIVATION_BALANCE, withdrawal.amount)
//             withdrawals.append(Withdrawal(
//                 index=withdrawal_index,
//                 validator_index=withdrawal.index,
//                 address=ExecutionAddress(validator.withdrawal_credentials[12:]),
//                 amount=withdrawable_balance,
//             ))
//             withdrawal_index += WithdrawalIndex(1)
//
//         processed_partial_withdrawals_count += 1
//     partial_withdrawals_count = len(withdrawals)
//
//     # Sweep for remaining.
//     bound = min(len(state.validators), MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP)
//     for _ in range(bound):
//         validator = state.validators[validator_index]
//         # [Modified in Electra:EIP7251]
//         partially_withdrawn_balance = sum(withdrawal.amount for withdrawal in withdrawals if withdrawal.validator_index == validator_index)
//         balance = state.balances[validator_index] - partially_withdrawn_balance
//         if is_fully_withdrawable_validator(validator, balance, epoch):
//             withdrawals.append(Withdrawal(
//                 index=withdrawal_index,
//                 validator_index=validator_index,
//                 address=ExecutionAddress(validator.withdrawal_credentials[12:]),
//                 amount=balance,
//             ))
//             withdrawal_index += WithdrawalIndex(1)
//         elif is_partially_withdrawable_validator(validator, balance):
//             withdrawals.append(Withdrawal(
//                 index=withdrawal_index,
//                 validator_index=validator_index,
//                 address=ExecutionAddress(validator.withdrawal_credentials[12:]),
//                 amount=balance - get_validator_max_effective_balance(validator), # [Modified in Electra:EIP7251]
//             ))
//             withdrawal_index += WithdrawalIndex(1)
//         if len(withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
//             break
//         validator_index = ValidatorIndex((validator_index + 1) % len(state.validators))
//     return withdrawals, processed_partial_withdrawals_count
//     # Sweep for remaining.
//     bound = min(len(state.validators), MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP)
//     for _ in range(bound):
//         validator = state.validators[validator_index]
//         balance = state.balances[validator_index]
//         if is_fully_withdrawable_validator(validator, balance, epoch):
//             withdrawals.append(Withdrawal(
//                 index=withdrawal_index,
//                 validator_index=validator_index,
//                 address=ExecutionAddress(validator.withdrawal_credentials[12:]),
//                 amount=balance,
//             ))
//             withdrawal_index += WithdrawalIndex(1)
//         elif is_partially_withdrawable_validator(validator, balance):
//             withdrawals.append(Withdrawal(
//                 index=withdrawal_index,
//                 validator_index=validator_index,
//                 address=ExecutionAddress(validator.withdrawal_credentials[12:]),
//                 amount=balance - get_validator_max_effective_balance(validator), # [Modified in Electra:EIP7251]
//             ))
//             withdrawal_index += WithdrawalIndex(1)
//         if len(withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
//             break
//         validator_index = ValidatorIndex((validator_index + 1) % len(state.validators))
//     return withdrawals, partial_withdrawals_count
func (b *BeaconState) ExpectedWithdrawals() ([]*enginev1.Withdrawal, uint64, error) {
	if b.version < version.Capella {
		return nil, 0, errNotSupported("ExpectedWithdrawals", b.version)
@@ -116,7 +113,7 @@ func (b *BeaconState) ExpectedWithdrawals() ([]*enginev1.Withdrawal, uint64, err
	epoch := slots.ToEpoch(b.slot)

	// Electra partial withdrawals functionality.
	var processedPartialWithdrawalsCount uint64
	var partialWithdrawalsCount uint64
	if b.version >= version.Electra {
		for _, w := range b.pendingPartialWithdrawals {
			if w.WithdrawableEpoch > epoch || len(withdrawals) >= int(params.BeaconConfig().MaxPendingPartialsPerWithdrawalsSweep) {
@@ -143,7 +140,7 @@ func (b *BeaconState) ExpectedWithdrawals() ([]*enginev1.Withdrawal, uint64, err
				})
				withdrawalIndex++
			}
			processedPartialWithdrawalsCount++
			partialWithdrawalsCount++
		}
	}

@@ -158,15 +155,6 @@ func (b *BeaconState) ExpectedWithdrawals() ([]*enginev1.Withdrawal, uint64, err
		if err != nil {
			return nil, 0, errors.Wrapf(err, "could not retrieve balance at index %d", validatorIndex)
		}
		if b.version >= version.Electra {
			var partiallyWithdrawnBalance uint64
			for _, w := range withdrawals {
				if w.ValidatorIndex == validatorIndex {
					partiallyWithdrawnBalance += w.Amount
				}
			}
			balance = balance - partiallyWithdrawnBalance
		}
		if helpers.IsFullyWithdrawableValidator(val, balance, epoch, b.version) {
			withdrawals = append(withdrawals, &enginev1.Withdrawal{
				Index: withdrawalIndex,
@@ -193,7 +181,7 @@ func (b *BeaconState) ExpectedWithdrawals() ([]*enginev1.Withdrawal, uint64, err
		}
	}

	return withdrawals, processedPartialWithdrawalsCount, nil
	return withdrawals, partialWithdrawalsCount, nil
}

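The Electra branch removed in the hunk above is the substance of the EIP-7521 fix: amounts already queued as pending partial withdrawals are subtracted from the balance the sweep sees, so the same gwei cannot be withdrawn twice. A self-contained sketch of that deduction with made-up numbers (all values are illustrative only):

// Illustrative only: mirrors the deduction loop inside ExpectedWithdrawals above.
balance := uint64(32_500_000_000) // hypothetical: 32.5 ETH in gwei
validatorIndex := primitives.ValidatorIndex(7)
withdrawals := []*enginev1.Withdrawal{
	{ValidatorIndex: 7, Amount: 400_000_000}, // already queued partial withdrawal
	{ValidatorIndex: 9, Amount: 100_000_000}, // different validator, ignored
}

var partiallyWithdrawnBalance uint64
for _, w := range withdrawals {
	if w.ValidatorIndex == validatorIndex {
		partiallyWithdrawnBalance += w.Amount
	}
}
balance -= partiallyWithdrawnBalance
// The sweep now evaluates 32.1 ETH, so the full-withdrawal path
// pays out only what has not already been claimed.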
func (b *BeaconState) PendingPartialWithdrawals() ([]*ethpb.PendingPartialWithdrawal, error) {

@@ -367,52 +367,4 @@ func TestExpectedWithdrawals(t *testing.T) {
		require.NoError(t, err)
		require.Equal(t, uint64(10), partialWithdrawalsCount)
	})
	t.Run("electra same validator has one partially and one fully withdrawable", func(t *testing.T) {
		s, _ := util.DeterministicGenesisStateElectra(t, 1)
		vals := make([]*ethpb.Validator, 100)
		balances := make([]uint64, 100)
		for i := range vals {
			balances[i] = params.BeaconConfig().MaxEffectiveBalance
			val := &ethpb.Validator{
				WithdrawalCredentials: make([]byte, 32),
				EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
				WithdrawableEpoch: primitives.Epoch(1),
				ExitEpoch: params.BeaconConfig().FarFutureEpoch,
			}
			val.WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
			val.WithdrawalCredentials[31] = byte(i)
			vals[i] = val
		}
		balances[1] += params.BeaconConfig().MinDepositAmount
		vals[1].WithdrawableEpoch = primitives.Epoch(0)
		require.NoError(t, s.SetValidators(vals))
		require.NoError(t, s.SetBalances(balances))
		// Give validator a pending balance to withdraw.
		require.NoError(t, s.AppendPendingPartialWithdrawal(&ethpb.PendingPartialWithdrawal{
			Index: 1,
			Amount: balances[1], // will only deduct the excess even though the balance is more than the minimum activation balance
			WithdrawableEpoch: primitives.Epoch(0),
		}))
		p, err := s.PendingPartialWithdrawals()
		require.NoError(t, err)
		require.Equal(t, 1, len(p))
		expected, _, err := s.ExpectedWithdrawals()
		require.NoError(t, err)
		require.Equal(t, 2, len(expected))

		withdrawalFull := &enginev1.Withdrawal{
			Index: 1,
			ValidatorIndex: 1,
			Address: vals[1].WithdrawalCredentials[12:],
			Amount: balances[1] - params.BeaconConfig().MinDepositAmount, // subtract the partial from this
		}
		withdrawalPartial := &enginev1.Withdrawal{
			Index: 0,
			ValidatorIndex: 1,
			Address: vals[1].WithdrawalCredentials[12:],
			Amount: params.BeaconConfig().MinDepositAmount,
		}
		require.DeepEqual(t, withdrawalPartial, expected[0])
		require.DeepEqual(t, withdrawalFull, expected[1])
	})
}

@@ -44,7 +44,7 @@ func (s *State) MigrateToCold(ctx context.Context, fRoot [32]byte) error {
		return ctx.Err()
	}

	if slot%128 == 0 && slot != 0 {
	if slot%s.slotsPerArchivedPoint == 0 && slot != 0 {
		cached, exists, err := s.epochBoundaryStateCache.getBySlot(slot)
		if err != nil {
			return fmt.Errorf("could not get epoch boundary state for slot %d", slot)

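The hunk above swaps a hardcoded 128-slot boundary for the service's configured slotsPerArchivedPoint. A minimal sketch of the boundary predicate with an assumed interval (2048 is chosen for the sketch, not taken from the flag's documented default):

// Illustrative boundary check: only non-genesis slots on the archived-point
// interval qualify, matching the condition in MigrateToCold.
slotsPerArchivedPoint := uint64(2048) // assumed value for the sketch
for _, slot := range []uint64{0, 128, 2048, 4096, 5000} {
	isBoundary := slot%slotsPerArchivedPoint == 0 && slot != 0
	fmt.Printf("slot %d archived-point boundary: %v\n", slot, isBoundary)
}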
@@ -170,20 +170,6 @@ var (
			Help: "The number of blob sidecars that were dropped due to missing parent block",
		},
	)

	blobRecoveredFromELTotal = promauto.NewCounter(
		prometheus.CounterOpts{
			Name: "blob_recovered_from_el_total",
			Help: "Count the number of times blobs have been recovered from the execution layer.",
		},
	)

	blobExistedInDBTotal = promauto.NewCounter(
		prometheus.CounterOpts{
			Name: "blob_existed_in_db_total",
			Help: "Count the number of times blobs have been found in the database.",
		},
	)
)

func (s *Service) updateMetrics() {

@@ -127,9 +127,9 @@ func WithSlasherBlockHeadersFeed(slasherBlockHeadersFeed *event.Feed) Option {
	}
}

func WithReconstructor(r execution.Reconstructor) Option {
func WithPayloadReconstructor(r execution.PayloadReconstructor) Option {
	return func(s *Service) error {
		s.cfg.executionReconstructor = r
		s.cfg.executionPayloadReconstructor = r
		return nil
	}
}

@@ -160,7 +160,7 @@ func (s *Service) writeBlockBatchToStream(ctx context.Context, batch blockBatch,
		return nil
	}

	reconstructed, err := s.cfg.executionReconstructor.ReconstructFullBellatrixBlockBatch(ctx, blinded)
	reconstructed, err := s.cfg.executionPayloadReconstructor.ReconstructFullBellatrixBlockBatch(ctx, blinded)
	if err != nil {
		log.WithError(err).Error("Could not reconstruct full bellatrix block batch from blinded bodies")
		return err

@@ -239,11 +239,11 @@ func TestRPCBeaconBlocksByRange_ReconstructsPayloads(t *testing.T) {
	// Start service with 160 as allowed blocks capacity (and almost zero capacity recovery).
	r := &Service{
		cfg: &config{
			p2p: p1,
			beaconDB: d,
			chain: &chainMock.ChainService{},
			clock: clock,
			executionReconstructor: mockEngine,
			p2p: p1,
			beaconDB: d,
			chain: &chainMock.ChainService{},
			clock: clock,
			executionPayloadReconstructor: mockEngine,
		},
		rateLimiter: newRateLimiter(p1),
		availableBlocker: mockBlocker{avail: true},

@@ -112,7 +112,7 @@ func (s *Service) beaconBlocksRootRPCHandler(ctx context.Context, msg interface{
	}

	if blk.Block().IsBlinded() {
		blk, err = s.cfg.executionReconstructor.ReconstructFullBlock(ctx, blk)
		blk, err = s.cfg.executionPayloadReconstructor.ReconstructFullBlock(ctx, blk)
		if err != nil {
			if errors.Is(err, execution.ErrEmptyBlockHash) {
				log.WithError(err).Warn("Could not reconstruct block from header with syncing execution client. Waiting to complete syncing")

@@ -151,11 +151,11 @@ func TestRecentBeaconBlocksRPCHandler_ReturnsBlocks_ReconstructsPayload(t *testi
		},
	}
	r := &Service{cfg: &config{
		p2p: p1,
		beaconDB: d,
		executionReconstructor: mockEngine,
		chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
		clock: startup.NewClock(time.Unix(0, 0), [32]byte{}),
		p2p: p1,
		beaconDB: d,
		executionPayloadReconstructor: mockEngine,
		chain: &mock.ChainService{ValidatorsRoot: [32]byte{}},
		clock: startup.NewClock(time.Unix(0, 0), [32]byte{}),
	}, rateLimiter: newRateLimiter(p1)}
	pcl := protocol.ID(p2p.RPCBlocksByRootTopicV1)
	topic := string(pcl)

@@ -77,25 +77,25 @@ type validationFn func(ctx context.Context) (pubsub.ValidationResult, error)

// config to hold dependencies for the sync service.
type config struct {
	attestationNotifier operation.Notifier
	p2p p2p.P2P
	beaconDB db.NoHeadAccessDatabase
	attPool attestations.Pool
	exitPool voluntaryexits.PoolManager
	slashingPool slashings.PoolManager
	syncCommsPool synccommittee.Pool
	blsToExecPool blstoexec.PoolManager
	chain blockchainService
	initialSync Checker
	blockNotifier blockfeed.Notifier
	operationNotifier operation.Notifier
	executionReconstructor execution.Reconstructor
	stateGen *stategen.State
	slasherAttestationsFeed *event.Feed
	slasherBlockHeadersFeed *event.Feed
	clock *startup.Clock
	stateNotifier statefeed.Notifier
	blobStorage *filesystem.BlobStorage
	attestationNotifier operation.Notifier
	p2p p2p.P2P
	beaconDB db.NoHeadAccessDatabase
	attPool attestations.Pool
	exitPool voluntaryexits.PoolManager
	slashingPool slashings.PoolManager
	syncCommsPool synccommittee.Pool
	blsToExecPool blstoexec.PoolManager
	chain blockchainService
	initialSync Checker
	blockNotifier blockfeed.Notifier
	operationNotifier operation.Notifier
	executionPayloadReconstructor execution.PayloadReconstructor
	stateGen *stategen.State
	slasherAttestationsFeed *event.Feed
	slasherBlockHeadersFeed *event.Feed
	clock *startup.Clock
	stateNotifier statefeed.Notifier
	blobStorage *filesystem.BlobStorage
}

// This defines the interface for interacting with block chain service

@@ -12,7 +12,6 @@ import (
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
	"github.com/prysmaticlabs/prysm/v5/io/file"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
	"google.golang.org/protobuf/proto"
)

@@ -34,8 +33,6 @@ func (s *Service) beaconBlockSubscriber(ctx context.Context, msg proto.Message)
		return err
	}

	go s.reconstructAndBroadcastBlobs(ctx, signed)

	if err := s.cfg.chain.ReceiveBlock(ctx, signed, root, nil); err != nil {
		if blockchain.IsInvalidBlock(err) {
			r := blockchain.InvalidBlockRoot(err)
@@ -58,79 +55,6 @@ func (s *Service) beaconBlockSubscriber(ctx context.Context, msg proto.Message)
	return err
}

// reconstructAndBroadcastBlobs processes and broadcasts blob sidecars for a given beacon block.
// This function reconstructs the blob sidecars from the EL using the block's KZG commitments,
// broadcasts the reconstructed blobs over P2P, and saves them into the blob storage.
func (s *Service) reconstructAndBroadcastBlobs(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) {
	startTime, err := slots.ToTime(uint64(s.cfg.chain.GenesisTime().Unix()), block.Block().Slot())
	if err != nil {
		log.WithError(err).Error("Failed to convert slot to time")
	}

	blockRoot, err := block.Block().HashTreeRoot()
	if err != nil {
		log.WithError(err).Error("Failed to calculate block root")
		return
	}

	if s.cfg.blobStorage == nil {
		return
	}
	indices, err := s.cfg.blobStorage.Indices(blockRoot)
	if err != nil {
		log.WithError(err).Error("Failed to retrieve indices for block")
		return
	}
	for _, index := range indices {
		if index {
			blobExistedInDBTotal.Inc()
		}
	}

	// Reconstruct blob sidecars from the EL
	blobSidecars, err := s.cfg.executionReconstructor.ReconstructBlobSidecars(ctx, block, blockRoot, indices[:])
	if err != nil {
		log.WithError(err).Error("Failed to reconstruct blob sidecars")
		return
	}
	if len(blobSidecars) == 0 {
		return
	}

	// Refresh indices as new blobs may have been added to the db
	indices, err = s.cfg.blobStorage.Indices(blockRoot)
	if err != nil {
		log.WithError(err).Error("Failed to retrieve indices for block")
		return
	}

	// Broadcast blob sidecars first, then save them to the db
	for _, sidecar := range blobSidecars {
		if sidecar.Index >= uint64(len(indices)) || indices[sidecar.Index] {
			continue
		}
		if err := s.cfg.p2p.BroadcastBlob(ctx, sidecar.Index, sidecar.BlobSidecar); err != nil {
			log.WithFields(blobFields(sidecar.ROBlob)).WithError(err).Error("Failed to broadcast blob sidecar")
		}
	}

	for _, sidecar := range blobSidecars {
		if sidecar.Index >= uint64(len(indices)) || indices[sidecar.Index] {
			blobExistedInDBTotal.Inc()
			continue
		}
		if err := s.subscribeBlob(ctx, sidecar); err != nil {
			log.WithFields(blobFields(sidecar.ROBlob)).WithError(err).Error("Failed to receive blob")
			continue
		}

		blobRecoveredFromELTotal.Inc()
		fields := blobFields(sidecar.ROBlob)
		fields["sinceSlotStartTime"] = s.cfg.clock.Now().Sub(startTime)
		log.WithFields(fields).Debug("Processed blob sidecar from EL")
	}
}

// WriteInvalidBlockToDisk as a block ssz. Writes to temp directory.
func saveInvalidBlockToTemp(block interfaces.ReadOnlySignedBeaconBlock) {
	if !features.Get().SaveInvalidBlock {

@@ -9,20 +9,14 @@ import (
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
	chainMock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
	dbtest "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/execution"
	mockExecution "github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/testing"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations"
	mockp2p "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
	lruwrpr "github.com/prysmaticlabs/prysm/v5/cache/lru"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v5/testing/assert"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
	"github.com/prysmaticlabs/prysm/v5/testing/util"
	"github.com/prysmaticlabs/prysm/v5/time"
	"google.golang.org/protobuf/proto"
)

@@ -72,9 +66,7 @@ func TestService_beaconBlockSubscriber(t *testing.T) {
				DB: db,
				Root: make([]byte, 32),
			},
			attPool: attestations.NewPool(),
			blobStorage: filesystem.NewEphemeralBlobStorage(t),
			executionReconstructor: &mockExecution.EngineClient{},
			attPool: attestations.NewPool(),
		},
	}
	s.initCaches()
@@ -132,65 +124,3 @@ func TestService_BeaconBlockSubscribe_UndefinedEeError(t *testing.T) {
	require.Equal(t, 0, len(s.badBlockCache.Keys()))
	require.Equal(t, 1, len(s.seenBlockCache.Keys()))
}

func TestReconstructAndBroadcastBlobs(t *testing.T) {
	rob, err := blocks.NewROBlob(
		&ethpb.BlobSidecar{
			SignedBlockHeader: &ethpb.SignedBeaconBlockHeader{
				Header: &ethpb.BeaconBlockHeader{
					ParentRoot: make([]byte, 32),
					BodyRoot: make([]byte, 32),
					StateRoot: make([]byte, 32),
				},
				Signature: []byte("signature"),
			},
		})
	require.NoError(t, err)

	chainService := &chainMock.ChainService{
		Genesis: time.Now(),
	}

	b := util.NewBeaconBlockDeneb()
	sb, err := blocks.NewSignedBeaconBlock(b)
	require.NoError(t, err)

	tests := []struct {
		name string
		blobSidecars []blocks.VerifiedROBlob
		expectedBlobCount int
	}{
		{
			name: "Constructed 0 blobs",
			blobSidecars: nil,
			expectedBlobCount: 0,
		},
		{
			name: "Constructed 6 blobs",
			blobSidecars: []blocks.VerifiedROBlob{
				{ROBlob: rob}, {ROBlob: rob}, {ROBlob: rob}, {ROBlob: rob}, {ROBlob: rob}, {ROBlob: rob},
			},
			expectedBlobCount: 6,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			s := Service{
				cfg: &config{
					p2p: mockp2p.NewTestP2P(t),
					chain: chainService,
					clock: startup.NewClock(time.Now(), [32]byte{}),
					blobStorage: filesystem.NewEphemeralBlobStorage(t),
					executionReconstructor: &mockExecution.EngineClient{
						BlobSidecars: tt.blobSidecars,
					},
					operationNotifier: &chainMock.MockOperationNotifier{},
				},
				seenBlobCache: lruwrpr.New(1),
			}
			s.reconstructAndBroadcastBlobs(context.Background(), sb)
			require.Equal(t, tt.expectedBlobCount, len(chainService.Blobs))
		})
	}
}

@@ -16,10 +16,6 @@ func (s *Service) blobSubscriber(ctx context.Context, msg proto.Message) error {
		return fmt.Errorf("message was not type blocks.ROBlob, type=%T", msg)
	}

	return s.subscribeBlob(ctx, b)
}

func (s *Service) subscribeBlob(ctx context.Context, b blocks.VerifiedROBlob) error {
	s.setSeenBlobIndex(b.Slot(), b.ProposerIndex(), b.Index)

	if err := s.cfg.chain.ReceiveBlob(ctx, b); err != nil {

@@ -41,7 +41,7 @@ func TestBatchVerifier(t *testing.T) {
		},
		nv: func() NewBlobVerifier {
			return func(bl blocks.ROBlob, reqs []Requirement) BlobVerifier {
				return &MockBlobVerifier{CbVerifiedROBlob: vbcb(bl, nil)}
				return &MockBlobVerifier{cbVerifiedROBlob: vbcb(bl, nil)}
			}
		},
		nblobs: 0,
@@ -50,7 +50,7 @@
		name: "happy path",
		nv: func() NewBlobVerifier {
			return func(bl blocks.ROBlob, reqs []Requirement) BlobVerifier {
				return &MockBlobVerifier{CbVerifiedROBlob: vbcb(bl, nil)}
				return &MockBlobVerifier{cbVerifiedROBlob: vbcb(bl, nil)}
			}
		},
		bandb: func(t *testing.T, nb int) (blocks.ROBlock, []blocks.ROBlob) {
@@ -62,7 +62,7 @@
		name: "partial batch",
		nv: func() NewBlobVerifier {
			return func(bl blocks.ROBlob, reqs []Requirement) BlobVerifier {
				return &MockBlobVerifier{CbVerifiedROBlob: vbcb(bl, nil)}
				return &MockBlobVerifier{cbVerifiedROBlob: vbcb(bl, nil)}
			}
		},
		bandb: func(t *testing.T, nb int) (blocks.ROBlock, []blocks.ROBlob) {
@@ -76,7 +76,7 @@
		name: "invalid commitment",
		nv: func() NewBlobVerifier {
			return func(bl blocks.ROBlob, reqs []Requirement) BlobVerifier {
				return &MockBlobVerifier{CbVerifiedROBlob: func() (blocks.VerifiedROBlob, error) {
				return &MockBlobVerifier{cbVerifiedROBlob: func() (blocks.VerifiedROBlob, error) {
					t.Fatal("Batch verifier should stop before this point")
					return blocks.VerifiedROBlob{}, nil
				}}
@@ -93,7 +93,7 @@
		name: "signature mismatch",
		nv: func() NewBlobVerifier {
			return func(bl blocks.ROBlob, reqs []Requirement) BlobVerifier {
				return &MockBlobVerifier{CbVerifiedROBlob: func() (blocks.VerifiedROBlob, error) {
				return &MockBlobVerifier{cbVerifiedROBlob: func() (blocks.VerifiedROBlob, error) {
					t.Fatal("Batch verifier should stop before this point")
					return blocks.VerifiedROBlob{}, nil
				}}
@@ -111,7 +111,7 @@
		name: "root mismatch",
		nv: func() NewBlobVerifier {
			return func(bl blocks.ROBlob, reqs []Requirement) BlobVerifier {
				return &MockBlobVerifier{CbVerifiedROBlob: func() (blocks.VerifiedROBlob, error) {
				return &MockBlobVerifier{cbVerifiedROBlob: func() (blocks.VerifiedROBlob, error) {
					t.Fatal("Batch verifier should stop before this point")
					return blocks.VerifiedROBlob{}, nil
				}}
@@ -133,7 +133,7 @@
			return func(bl blocks.ROBlob, reqs []Requirement) BlobVerifier {
				return &MockBlobVerifier{
					ErrBlobIndexInBounds: ErrBlobIndexInvalid,
					CbVerifiedROBlob: func() (blocks.VerifiedROBlob, error) {
					cbVerifiedROBlob: func() (blocks.VerifiedROBlob, error) {
						t.Fatal("Batch verifier should stop before this point")
						return blocks.VerifiedROBlob{}, nil
					}}
@@ -151,7 +151,7 @@
			return func(bl blocks.ROBlob, reqs []Requirement) BlobVerifier {
				return &MockBlobVerifier{
					ErrSidecarInclusionProven: ErrSidecarInclusionProofInvalid,
					CbVerifiedROBlob: func() (blocks.VerifiedROBlob, error) {
					cbVerifiedROBlob: func() (blocks.VerifiedROBlob, error) {
						t.Fatal("Batch verifier should stop before this point")
						return blocks.VerifiedROBlob{}, nil
					}}

@@ -67,10 +67,6 @@ var InitsyncSidecarRequirements = requirementList(GossipSidecarRequirements).exc
	RequireSidecarProposerExpected,
)

// ELMemPoolRequirements is a list of verification requirements to be used when importing blobs and proof from
// execution layer mempool. Only the KZG proof verification is required.
var ELMemPoolRequirements = []Requirement{RequireSidecarKzgProofVerified}

// BackfillSidecarRequirements is the same as InitsyncSidecarRequirements.
var BackfillSidecarRequirements = requirementList(InitsyncSidecarRequirements).excluding()

@@ -18,11 +18,11 @@ type MockBlobVerifier struct {
	ErrSidecarInclusionProven error
	ErrSidecarKzgProofVerified error
	ErrSidecarProposerExpected error
	CbVerifiedROBlob func() (blocks.VerifiedROBlob, error)
	cbVerifiedROBlob func() (blocks.VerifiedROBlob, error)
}

func (m *MockBlobVerifier) VerifiedROBlob() (blocks.VerifiedROBlob, error) {
	return m.CbVerifiedROBlob()
	return m.cbVerifiedROBlob()
}

func (m *MockBlobVerifier) BlobIndexInBounds() (err error) {

@@ -285,9 +285,7 @@ func startNode(ctx *cli.Context, cancel context.CancelFunc) error {
		// libp2p specific logging.
		golog.SetAllLoggers(golog.LevelDebug)
		// Geth specific logging.
		glogger := gethlog.NewGlogHandler(gethlog.StreamHandler(os.Stderr, gethlog.TerminalFormat(true)))
		glogger.Verbosity(gethlog.LvlTrace)
		gethlog.Root().SetHandler(glogger)
		gethlog.SetDefault(gethlog.NewLogger(gethlog.NewTerminalHandlerWithLevel(os.Stderr, gethlog.LvlTrace, true)))
	}

	blockchainFlagOpts, err := blockchaincmd.FlagOptions(ctx)

Binary file not shown.
@@ -19,7 +19,7 @@ func BeaconNodeOptions(c *cli.Context) ([]node.Option, error) {
	}
	// The zero value of this uint flag would mean genesis, so we use IsSet to distinguish an unset flag from an explicit zero.
	if c.IsSet(flags.BackfillOldestSlot.Name) {
		uv := c.Uint64(flags.BackfillOldestSlot.Name)
		uv := c.Uint64(flags.BackfillBatchSize.Name)
		bno = append(bno, backfill.WithMinimumSlot(primitives.Slot(uv)))
	}
	node.BackfillOpts = bno

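The fix above makes the read match the guard: IsSet checks --backfill-oldest-slot, so the value must come from that same flag rather than the batch-size flag. A hedged urfave/cli sketch of the guard-then-read pattern (the flag name is reused from the hunk; the surrounding app is invented for illustration):

// Sketch of the IsSet-then-read pattern with urfave/cli/v2.
app := &cli.App{
	Flags: []cli.Flag{
		// Zero means genesis, so IsSet is the only way to tell "unset" from "0".
		&cli.Uint64Flag{Name: "backfill-oldest-slot"},
	},
	Action: func(c *cli.Context) error {
		if c.IsSet("backfill-oldest-slot") {
			// Read the flag that was guarded; mixing flag names was the bug.
			fmt.Println("backfill to slot", c.Uint64("backfill-oldest-slot"))
		}
		return nil
	},
}
_ = app.Run(os.Args)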
@@ -164,8 +164,8 @@ type BeaconChainConfig struct {
|
||||
CapellaForkEpoch primitives.Epoch `yaml:"CAPELLA_FORK_EPOCH" spec:"true"` // CapellaForkEpoch is used to represent the assigned fork epoch for capella.
|
||||
DenebForkVersion []byte `yaml:"DENEB_FORK_VERSION" spec:"true"` // DenebForkVersion is used to represent the fork version for deneb.
|
||||
DenebForkEpoch primitives.Epoch `yaml:"DENEB_FORK_EPOCH" spec:"true"` // DenebForkEpoch is used to represent the assigned fork epoch for deneb.
|
||||
ElectraForkVersion []byte `yaml:"ELECTRA_FORK_VERSION" spec:"true"` // ElectraForkVersion is used to represent the fork version for electra.
|
||||
ElectraForkEpoch primitives.Epoch `yaml:"ELECTRA_FORK_EPOCH" spec:"true"` // ElectraForkEpoch is used to represent the assigned fork epoch for electra.
|
||||
ElectraForkVersion []byte `yaml:"ELECTRA_FORK_VERSION" spec:"true"` // ElectraForkVersion is used to represent the fork version for deneb.
|
||||
ElectraForkEpoch primitives.Epoch `yaml:"ELECTRA_FORK_EPOCH" spec:"true"` // ElectraForkEpoch is used to represent the assigned fork epoch for deneb.
|
||||
|
||||
ForkVersionSchedule map[[fieldparams.VersionLength]byte]primitives.Epoch // Schedule of fork epochs by version.
|
||||
ForkVersionNames map[[fieldparams.VersionLength]byte]string // Human-readable names of fork versions.
|
||||
|
||||
@@ -142,6 +142,10 @@ func (psl *settingsLoader) Load(cliCtx *cli.Context) (*proposer.Settings, error)
|
||||
for _, method := range psl.loadMethods {
|
||||
switch method {
|
||||
case defaultFlag:
|
||||
if psl.existsInDB && len(psl.loadMethods) == 1 {
|
||||
// only log the below if default flag is the only load method
|
||||
log.Warn("Previously saved proposer settings were loaded from the DB, only default settings will be updated. Please provide new proposer settings or clear DB to reset proposer settings.")
|
||||
}
|
||||
suggestedFeeRecipient := cliCtx.String(flags.SuggestedFeeRecipientFlag.Name)
|
||||
if !common.IsHexAddress(suggestedFeeRecipient) {
|
||||
return nil, errors.Errorf("--%s is not a valid Ethereum address", flags.SuggestedFeeRecipientFlag.Name)
|
||||
@@ -155,10 +159,6 @@ func (psl *settingsLoader) Load(cliCtx *cli.Context) (*proposer.Settings, error)
|
||||
if psl.options.builderConfig != nil {
|
||||
defaultConfig.Builder = psl.options.builderConfig.ToConsensus()
|
||||
}
|
||||
if psl.existsInDB && len(psl.loadMethods) == 1 {
|
||||
// only log the below if default flag is the only load method
|
||||
log.Debug("Overriding previously saved proposer default settings.")
|
||||
}
|
||||
loadConfig.DefaultConfig = defaultConfig
|
||||
case fileFlag:
|
||||
var settingFromFile *validatorpb.ProposerSettingsPayload
|
||||
|
||||
169
deps.bzl
169
deps.bzl
@@ -295,8 +295,8 @@ def prysm_deps():
|
||||
go_repository(
|
||||
name = "com_github_bits_and_blooms_bitset",
|
||||
importpath = "github.com/bits-and-blooms/bitset",
|
||||
sum = "h1:RMyy2mBBShArUAhfVRZJ2xyBO58KCBCtZFShw3umo6k=",
|
||||
version = "v1.11.0",
|
||||
sum = "h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE=",
|
||||
version = "v1.13.0",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_bradfitz_go_smtpd",
|
||||
@@ -313,8 +313,8 @@ def prysm_deps():
|
||||
go_repository(
|
||||
name = "com_github_btcsuite_btcd_btcec_v2",
|
||||
importpath = "github.com/btcsuite/btcd/btcec/v2",
|
||||
sum = "h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U=",
|
||||
version = "v2.3.2",
|
||||
sum = "h1:3EJjcN70HCu/mwqlUsGK8GcNVyLVxFDlWurTXGPFfiQ=",
|
||||
version = "v2.3.4",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_btcsuite_btcd_chaincfg_chainhash",
|
||||
@@ -452,8 +452,14 @@ def prysm_deps():
|
||||
name = "com_github_cockroachdb_errors",
|
||||
build_file_proto_mode = "disable_global",
|
||||
importpath = "github.com/cockroachdb/errors",
|
||||
sum = "h1:xSEW75zKaKCWzR3OfxXUxgrk/NtT4G1MiOv5lWZazG8=",
|
||||
version = "v1.11.1",
|
||||
sum = "h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I=",
|
||||
version = "v1.11.3",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_cockroachdb_fifo",
|
||||
importpath = "github.com/cockroachdb/fifo",
|
||||
sum = "h1:giXvy4KSc/6g/esnpM7Geqxka4WSqI1SZc7sMJFd3y4=",
|
||||
version = "v0.0.0-20240606204812-0bbfbd93a7ce",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_cockroachdb_logtags",
|
||||
@@ -464,8 +470,8 @@ def prysm_deps():
|
||||
go_repository(
|
||||
name = "com_github_cockroachdb_pebble",
|
||||
importpath = "github.com/cockroachdb/pebble",
|
||||
sum = "h1:aPEJyR4rPBvDmeyi+l/FS/VtA00IWvjeFvjen1m1l1A=",
|
||||
version = "v0.0.0-20230928194634-aa077af62593",
|
||||
sum = "h1:CUh2IPtR4swHlEj48Rhfzw6l/d0qA31fItcIszQVIsA=",
|
||||
version = "v1.1.2",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_cockroachdb_redact",
|
||||
@@ -473,12 +479,6 @@ def prysm_deps():
|
||||
sum = "h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30=",
|
||||
version = "v1.1.5",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_cockroachdb_sentry_go",
|
||||
importpath = "github.com/cockroachdb/sentry-go",
|
||||
sum = "h1:IKgmqgMQlVJIZj19CdocBeSfSaiCbEBZGKODaixqtHM=",
|
||||
version = "v0.6.1-cockroachdb.2",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_cockroachdb_tokenbucket",
|
||||
importpath = "github.com/cockroachdb/tokenbucket",
|
||||
@@ -549,14 +549,14 @@ def prysm_deps():
|
||||
go_repository(
|
||||
name = "com_github_crate_crypto_go_ipa",
|
||||
importpath = "github.com/crate-crypto/go-ipa",
|
||||
sum = "h1:DuBDHVjgGMPki7bAyh91+3cF1Vh34sAEdH8JQgbc2R0=",
|
||||
version = "v0.0.0-20230601170251-1830d0757c80",
|
||||
sum = "h1:uQYC5Z1mdLRPrZhHjHxufI8+2UG/i25QG92j0Er9p6I=",
|
||||
version = "v0.0.0-20240223125850-b1e8a79f509c",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_crate_crypto_go_kzg_4844",
|
||||
importpath = "github.com/crate-crypto/go-kzg-4844",
|
||||
sum = "h1:C0vgZRk4q4EZ/JgPfzuSoxdCq3C3mOZMBShovmncxvA=",
|
||||
version = "v0.7.0",
|
||||
sum = "h1:TsSgHwrkTKecKJ4kadtHi4b3xHW5dCFUDFnUp1TsawI=",
|
||||
version = "v1.0.0",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_creack_pty",
|
||||
@@ -597,8 +597,8 @@ def prysm_deps():
|
||||
go_repository(
|
||||
name = "com_github_deckarep_golang_set_v2",
|
||||
importpath = "github.com/deckarep/golang-set/v2",
|
||||
sum = "h1:hn6cEZtQ0h3J8kFrHR/NrzyOoTnjgW1+FmNJzQ7y/sA=",
|
||||
version = "v2.5.0",
|
||||
sum = "h1:XfcQbWM1LlMB8BsJ8N9vW5ehnnPVIw0je80NsVHagjM=",
|
||||
version = "v2.6.0",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_decred_dcrd_crypto_blake256",
|
||||
@@ -654,6 +654,12 @@ def prysm_deps():
|
||||
sum = "h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=",
|
||||
version = "v0.5.0",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_donovanhide_eventsource",
|
||||
importpath = "github.com/donovanhide/eventsource",
|
||||
sum = "h1:C7t6eeMaEQVy6e8CarIhscYQlNmw5e3G36y7l7Y21Ao=",
|
||||
version = "v0.0.0-20210830082556-c59027999da0",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_dop251_goja",
|
||||
importpath = "github.com/dop251/goja",
|
||||
@@ -740,21 +746,28 @@ def prysm_deps():
|
||||
importpath = "github.com/ethereum/c-kzg-4844",
|
||||
patch_args = ["-p1"],
|
||||
patches = ["//third_party:com_github_ethereum_c_kzg_4844.patch"],
|
||||
sum = "h1:3MS1s4JtA868KpJxroZoepdV0ZKBp3u/O5HcZ7R3nlY=",
|
||||
version = "v0.4.0",
|
||||
sum = "h1:0X1LBXxaEtYD9xsyj9B9ctQEZIpnvVDeoBx8aHEwTNA=",
|
||||
version = "v1.0.0",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_ethereum_go_ethereum",
|
||||
build_directives = [
|
||||
"gazelle:resolve go github.com/karalabe/usb @prysm//third_party/usb:go_default_library",
|
||||
"gazelle:resolve go github.com/karalabe/hid @prysm//third_party/hid:go_default_library",
|
||||
],
|
||||
importpath = "github.com/ethereum/go-ethereum",
|
||||
patch_args = ["-p1"],
|
||||
patches = [
|
||||
"//third_party:com_github_ethereum_go_ethereum_secp256k1.patch",
|
||||
],
|
||||
sum = "h1:U6TCRciCqZRe4FPXmy1sMGxTfuk8P7u2UoinF3VbaFk=",
|
||||
version = "v1.13.5",
|
||||
sum = "h1:8nFDCUUE67rPc6AKxFj7JKaOa2W/W1Rse3oS6LvvxEY=",
|
||||
version = "v1.14.11",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_ethereum_go_verkle",
|
||||
importpath = "github.com/ethereum/go-verkle",
|
||||
sum = "h1:8NfxH2iXvJ60YRB8ChToFTUzl8awsc3cJ8CbLjGIl/A=",
|
||||
version = "v0.1.1-0.20240829091221-dffa7562dbe9",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_evanphx_json_patch",
|
||||
@@ -765,8 +778,8 @@ def prysm_deps():
|
||||
go_repository(
|
||||
name = "com_github_fatih_color",
|
||||
importpath = "github.com/fatih/color",
|
||||
sum = "h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=",
|
||||
version = "v1.13.0",
|
||||
sum = "h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=",
|
||||
version = "v1.16.0",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_fatih_structs",
|
||||
@@ -783,8 +796,8 @@ def prysm_deps():
|
||||
go_repository(
|
||||
name = "com_github_ferranbt_fastssz",
|
||||
importpath = "github.com/ferranbt/fastssz",
|
||||
sum = "h1:9VDpsWq096+oGMDTT/SgBD/VgZYf4pTF+KTPmZ+OaKM=",
|
||||
version = "v0.0.0-20210120143747-11b9eff30ea9",
|
||||
sum = "h1:ZI+z3JH05h4kgmFXdHuR1aWYsgrg7o+Fw7/NCzM16Mo=",
|
||||
version = "v0.1.3",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_fjl_gencodec",
|
||||
@@ -792,12 +805,6 @@ def prysm_deps():
|
||||
sum = "h1:bBLctRc7kr01YGvaDfgLbTwjFNW5jdp5y5rj8XXBHfY=",
|
||||
version = "v0.0.0-20230517082657-f9840df7b83e",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_fjl_memsize",
|
||||
importpath = "github.com/fjl/memsize",
|
||||
sum = "h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=",
|
||||
version = "v0.0.0-20190710130421-bcb5799ab5e5",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_flosch_pongo2_v4",
|
||||
importpath = "github.com/flosch/pongo2/v4",
|
||||
@@ -879,8 +886,8 @@ def prysm_deps():
|
||||
go_repository(
|
||||
name = "com_github_gballet_go_verkle",
|
||||
importpath = "github.com/gballet/go-verkle",
|
||||
sum = "h1:vMT47RYsrftsHSTQhqXwC3BYflo38OLC3Y4LtXtLyU0=",
|
||||
version = "v0.0.0-20230607174250-df487255f46b",
|
||||
sum = "h1:BAIP2GihuqhwdILrV+7GJel5lyPV3u1+PgzrWLc0TkE=",
|
||||
version = "v0.1.1-0.20231031103413-a67434b50f46",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_gdamore_encoding",
|
||||
@@ -891,8 +898,8 @@ def prysm_deps():
|
||||
go_repository(
|
||||
name = "com_github_gdamore_tcell_v2",
|
||||
importpath = "github.com/gdamore/tcell/v2",
|
||||
sum = "h1:OKbluoP9VYmJwZwq/iLb4BxwKcwGthaa1YNBJIyCySg=",
|
||||
version = "v2.6.0",
|
||||
sum = "h1:I5LiGTQuwrysAt1KS9wg1yFfOI3arI3ucFrxtd/xqaA=",
|
||||
version = "v2.7.0",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_getkin_kin_openapi",
|
||||
@@ -903,8 +910,8 @@ def prysm_deps():
|
||||
go_repository(
|
||||
name = "com_github_getsentry_sentry_go",
|
||||
importpath = "github.com/getsentry/sentry-go",
|
||||
sum = "h1:q6Eo+hS+yoJlTO3uu/azhQadsD8V+jQn2D8VvX1eOyI=",
|
||||
version = "v0.25.0",
|
||||
sum = "h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps=",
|
||||
version = "v0.27.0",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_ghemawat_stream",
|
||||
@@ -1163,8 +1170,8 @@ def prysm_deps():
|
||||
go_repository(
|
||||
name = "com_github_golang_snappy",
|
||||
importpath = "github.com/golang/snappy",
|
||||
sum = "h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=",
|
||||
version = "v0.0.5-0.20220116011046-fa5810519dcb",
|
||||
sum = "h1:4bw4WeyTYPp0smaXiJZCNnLrvVBqirQVreixayXezGc=",
|
||||
version = "v0.0.5-0.20231225225746-43d5d4cd4e0e",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_golangci_lint_1",
|
||||
@@ -1493,14 +1500,14 @@ def prysm_deps():
|
||||
go_repository(
|
||||
name = "com_github_herumi_bls_eth_go_binary",
|
||||
importpath = "github.com/herumi/bls-eth-go-binary",
|
||||
sum = "h1:wCMygKUQhmcQAjlk2Gquzq6dLmyMv2kF+llRspoRgrk=",
|
||||
version = "v0.0.0-20210917013441-d37c07cfda4e",
|
||||
sum = "h1:9eeW3EA4epCb7FIHt2luENpAW69MvKGL5jieHlBiP+w=",
|
||||
version = "v1.31.0",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_holiman_billy",
|
||||
importpath = "github.com/holiman/billy",
|
||||
sum = "h1:3JQNjnMRil1yD0IfZKHF9GxxWKDJGj8I0IqOUol//sw=",
|
||||
version = "v0.0.0-20230718173358-1c7e68d277a7",
|
||||
sum = "h1:X4egAf/gcS1zATw6wn4Ej8vjuVGxeHdan+bRb2ebyv4=",
|
||||
version = "v0.0.0-20240216141850-2abb0c79d3c4",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_holiman_bloomfilter_v2",
|
||||
@@ -1511,14 +1518,14 @@ def prysm_deps():
|
||||
go_repository(
|
||||
name = "com_github_holiman_goevmlab",
|
||||
importpath = "github.com/holiman/goevmlab",
|
||||
sum = "h1:J973NLskKmFIj3EGfpQ1ztUQKdwyJ+fG34638ief0eA=",
|
||||
version = "v0.0.0-20231201084119-c73b3c97929c",
|
||||
sum = "h1:MnqrjbCnYO6ImEJiqza1vTcSFJno9cErjWG9ONXJgoc=",
|
||||
version = "v0.0.0-20240515165425-8414a52dc9d4",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_holiman_uint256",
|
||||
importpath = "github.com/holiman/uint256",
|
||||
sum = "h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=",
|
||||
version = "v1.2.4",
|
||||
sum = "h1:JfTzmih28bittyHM8z360dCjIA9dbPIBlcTI6lmctQs=",
|
||||
version = "v1.3.1",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_hpcloud_tail",
|
||||
@@ -1745,10 +1752,10 @@ def prysm_deps():
|
||||
version = "v0.0.0-20180517002512-3bf9e2903213",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_karalabe_usb",
|
||||
importpath = "github.com/karalabe/usb",
|
||||
sum = "h1:AqsttAyEyIEsNz5WLRwuRwjiT5CMDUfLk6cFJDVPebs=",
|
||||
version = "v0.0.3-0.20230711191512-61db3e06439c",
|
||||
name = "com_github_karalabe_hid",
|
||||
importpath = "github.com/karalabe/hid",
|
||||
sum = "h1:msKODTL1m0wigztaqILOtla9HeW1ciscYG4xjLtvk5I=",
|
||||
version = "v1.0.1-0.20240306101548-573246063e52",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_kataras_blocks",
|
||||
@@ -2064,14 +2071,14 @@ def prysm_deps():
|
||||
go_repository(
|
||||
name = "com_github_mariusvanderwijden_fuzzyvm",
|
||||
importpath = "github.com/MariusVanDerWijden/FuzzyVM",
|
||||
sum = "h1:BwEuC3xavrv4HTUDH2fUrKgKooiH3Q/nSVnFGtnzpN0=",
|
||||
version = "v0.0.0-20240209103030-ec53fa766bf8",
|
||||
sum = "h1:RQtzNvriR3Yu5CvVBTJPwDmfItBT90TWZ3fFondhc08=",
|
||||
version = "v0.0.0-20240516070431-7828990cad7d",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_mariusvanderwijden_tx_fuzz",
|
||||
importpath = "github.com/MariusVanDerWijden/tx-fuzz",
|
||||
sum = "h1:QDTh0xHorSykJ4+2VccBADMeRAVUbnHaWrCPIMtN+Vc=",
|
||||
version = "v1.3.3-0.20240227085032-f70dd7c85c97",
|
||||
sum = "h1:Tq4lXivsR8mtoP4RpasUDIUpDLHfN1YhFge/kzrzK78=",
|
||||
version = "v1.4.0",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_marten_seemann_tcp",
|
||||
@@ -2130,8 +2137,8 @@ def prysm_deps():
|
||||
go_repository(
|
||||
name = "com_github_microsoft_go_winio",
|
||||
importpath = "github.com/Microsoft/go-winio",
|
||||
sum = "h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=",
|
||||
version = "v0.6.1",
|
||||
sum = "h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=",
|
||||
version = "v0.6.2",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_miekg_dns",
|
||||
@@ -2775,8 +2782,20 @@ def prysm_deps():
|
||||
go_repository(
|
||||
name = "com_github_protolambda_bls12_381_util",
|
||||
importpath = "github.com/protolambda/bls12-381-util",
|
||||
sum = "h1:cZC+usqsYgHtlBaGulVnZ1hfKAi8iWtujBnRLQE698c=",
|
||||
version = "v0.0.0-20220416220906-d8552aa452c7",
|
||||
sum = "h1:05DU2wJN7DTU7z28+Q+zejXkIsA/MF8JZQGhtBZZiWk=",
|
||||
version = "v0.1.0",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_protolambda_zrnt",
|
||||
importpath = "github.com/protolambda/zrnt",
|
||||
sum = "h1:KZ48T+3UhsPXNdtE/5QEvGc9DGjUaRI17nJaoznoIaM=",
|
||||
version = "v0.32.2",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_protolambda_ztyp",
|
||||
importpath = "github.com/protolambda/ztyp",
|
||||
sum = "h1:rVcL3vBu9W/aV646zF6caLS/dyn9BN8NYiuJzicLNyY=",
|
||||
version = "v0.2.2",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_prysmaticlabs_fastssz",
|
||||
@@ -2854,8 +2873,8 @@ def prysm_deps():
|
||||
go_repository(
|
||||
name = "com_github_rivo_tview",
|
||||
importpath = "github.com/rivo/tview",
|
||||
sum = "h1:7UMY2qN9VlcY+x9jlhpYe5Bf1zrdhvmfZyLMk2u65BM=",
|
||||
version = "v0.0.0-20231126152417-33a1d271f2b6",
|
||||
sum = "h1:QLKAX9JLJ9RJVjnywcVg/U8nKNZvdftCtJRv1qzALYI=",
|
||||
version = "v0.0.0-20240118093911-742cf086196e",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_rivo_uniseg",
|
||||
@@ -3104,8 +3123,8 @@ def prysm_deps():
|
||||
go_repository(
|
||||
name = "com_github_sirupsen_logrus",
|
||||
importpath = "github.com/sirupsen/logrus",
|
||||
sum = "h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=",
|
||||
version = "v1.9.0",
|
||||
sum = "h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=",
|
||||
version = "v1.9.3",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_smartystreets_assertions",
|
||||
@@ -3317,6 +3336,12 @@ def prysm_deps():
|
||||
sum = "h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0=",
|
||||
version = "v1.2.7",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_umbracle_gohashtree",
|
||||
importpath = "github.com/umbracle/gohashtree",
|
||||
sum = "h1:CQh33pStIp/E30b7TxDlXfM0145bn2e8boI30IxAhTg=",
|
||||
version = "v0.0.2-alpha.0.20230207094856-5b775a815c10",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_urfave_cli",
|
||||
importpath = "github.com/urfave/cli",
|
||||
@@ -3326,8 +3351,8 @@ def prysm_deps():
|
||||
go_repository(
|
||||
name = "com_github_urfave_cli_v2",
|
||||
importpath = "github.com/urfave/cli/v2",
|
||||
sum = "h1:3f3AMg3HpThFNT4I++TKOejZO8yU55t3JnnSr4S4QEI=",
|
||||
version = "v2.26.0",
|
||||
sum = "h1:8xSQ6szndafKVRmfyeUMxkNUJQMjL1F2zmsZ+qHpfho=",
|
||||
version = "v2.27.1",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_urfave_negroni",
|
||||
@@ -3407,8 +3432,8 @@ def prysm_deps():
|
||||
"gazelle:resolve go github.com/herumi/bls-eth-go-binary/bls @herumi_bls_eth_go_binary//:go_default_library",
|
||||
],
|
||||
importpath = "github.com/wealdtech/go-eth2-types/v2",
|
||||
sum = "h1:tiA6T88M6XQIbrV5Zz53l1G5HtRERcxQfmET225V4Ls=",
|
||||
version = "v2.5.2",
|
||||
sum = "h1:b5aXlNBLKgjAg/Fft9VvGlqAUCQMP5LzYhlHRrr4yPg=",
|
||||
version = "v2.8.2",
|
||||
)
|
||||
go_repository(
|
||||
name = "com_github_wealdtech_go_eth2_util",
|
||||
@@ -4397,8 +4422,8 @@ def prysm_deps():
|
||||
go_repository(
|
||||
name = "in_gopkg_natefinch_lumberjack_v2",
|
||||
importpath = "gopkg.in/natefinch/lumberjack.v2",
|
||||
sum = "h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=",
|
||||
version = "v2.0.0",
|
||||
sum = "h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=",
|
||||
version = "v2.2.1",
|
||||
)
|
||||
go_repository(
|
||||
name = "in_gopkg_redis_v4",
|
||||
|
||||
55
go.mod
55
go.mod
@@ -5,19 +5,18 @@ go 1.22.0
toolchain go1.22.4

require (
	github.com/MariusVanDerWijden/FuzzyVM v0.0.0-20240209103030-ec53fa766bf8
	github.com/MariusVanDerWijden/tx-fuzz v1.3.3-0.20240227085032-f70dd7c85c97
	github.com/MariusVanDerWijden/FuzzyVM v0.0.0-20240516070431-7828990cad7d
	github.com/MariusVanDerWijden/tx-fuzz v1.4.0
	github.com/aristanetworks/goarista v0.0.0-20200805130819-fd197cf57d96
	github.com/bazelbuild/rules_go v0.23.2
	github.com/btcsuite/btcd/btcec/v2 v2.3.2
	github.com/btcsuite/btcd/btcec/v2 v2.3.4
	github.com/consensys/gnark-crypto v0.12.1
	github.com/crate-crypto/go-kzg-4844 v0.7.0
	github.com/crate-crypto/go-kzg-4844 v1.0.0
	github.com/d4l3k/messagediff v1.2.1
	github.com/dgraph-io/ristretto v0.0.4-0.20210318174700-74754f61e018
	github.com/dustin/go-humanize v1.0.0
	github.com/emicklei/dot v0.11.0
	github.com/ethereum/go-ethereum v1.13.5
	github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5
	github.com/ethereum/go-ethereum v1.14.11
	github.com/fsnotify/fsnotify v1.6.0
	github.com/ghodss/yaml v1.0.0
	github.com/go-yaml/yaml v2.1.0+incompatible
@@ -25,7 +24,7 @@ require (
	github.com/golang-jwt/jwt/v4 v4.5.0
	github.com/golang/gddo v0.0.0-20200528160355-8d077c1d8f4c
	github.com/golang/protobuf v1.5.4
	github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb
	github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e
	github.com/google/go-cmp v0.6.0
	github.com/google/gofuzz v1.2.0
	github.com/google/uuid v1.6.0
@@ -33,8 +32,8 @@ require (
	github.com/grpc-ecosystem/go-grpc-middleware v1.2.2
	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
	github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d
	github.com/herumi/bls-eth-go-binary v0.0.0-20210917013441-d37c07cfda4e
	github.com/holiman/uint256 v1.2.4
	github.com/herumi/bls-eth-go-binary v1.31.0
	github.com/holiman/uint256 v1.3.1
	github.com/ianlancetaylor/cgosymbolizer v0.0.0-20200424224625-be1b05b0b279
	github.com/ipfs/go-log/v2 v2.5.1
	github.com/jedib0t/go-pretty/v6 v6.5.4
@@ -69,15 +68,15 @@ require (
	github.com/r3labs/sse/v2 v2.10.0
	github.com/rs/cors v1.7.0
	github.com/schollz/progressbar/v3 v3.3.4
	github.com/sirupsen/logrus v1.9.0
	github.com/sirupsen/logrus v1.9.3
	github.com/spf13/afero v1.10.0
	github.com/status-im/keycard-go v0.2.0
	github.com/stretchr/testify v1.9.0
	github.com/supranational/blst v0.3.11
	github.com/supranational/blst v0.3.13
	github.com/thomaso-mirodin/intmath v0.0.0-20160323211736-5dc6d854e46e
	github.com/trailofbits/go-mutexasserts v0.0.0-20230328101604-8cdbc5f3d279
	github.com/tyler-smith/go-bip39 v1.1.0
	github.com/urfave/cli/v2 v2.26.0
	github.com/urfave/cli/v2 v2.27.1
	github.com/uudashr/gocognit v1.0.5
	github.com/wealdtech/go-bytesutil v1.1.1
	github.com/wealdtech/go-eth2-util v1.6.3
@@ -109,43 +108,45 @@ require (
require (
	github.com/BurntSushi/toml v1.3.2 // indirect
	github.com/DataDog/zstd v1.5.5 // indirect
	github.com/Microsoft/go-winio v0.6.1 // indirect
	github.com/Microsoft/go-winio v0.6.2 // indirect
	github.com/VictoriaMetrics/fastcache v1.12.2 // indirect
	github.com/benbjohnson/clock v1.3.5 // indirect
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/bits-and-blooms/bitset v1.11.0 // indirect
	github.com/bits-and-blooms/bitset v1.13.0 // indirect
	github.com/cespare/cp v1.1.1 // indirect
	github.com/cespare/xxhash v1.1.0 // indirect
	github.com/cespare/xxhash/v2 v2.3.0 // indirect
	github.com/chzyer/readline v1.5.1 // indirect
	github.com/cockroachdb/errors v1.11.1 // indirect
	github.com/cockroachdb/errors v1.11.3 // indirect
	github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce // indirect
	github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
	github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 // indirect
	github.com/cockroachdb/pebble v1.1.2 // indirect
	github.com/cockroachdb/redact v1.1.5 // indirect
	github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect
	github.com/consensys/bavard v0.1.13 // indirect
	github.com/containerd/cgroups v1.1.0 // indirect
	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
	github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
	github.com/crate-crypto/go-ipa v0.0.0-20240223125850-b1e8a79f509c // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
	github.com/deckarep/golang-set/v2 v2.5.0 // indirect
	github.com/deckarep/golang-set/v2 v2.6.0 // indirect
	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect
	github.com/deepmap/oapi-codegen v1.8.2 // indirect
	github.com/dlclark/regexp2 v1.7.0 // indirect
	github.com/docker/go-units v0.5.0 // indirect
	github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 // indirect
	github.com/elastic/gosigar v0.14.3 // indirect
	github.com/ethereum/c-kzg-4844 v0.4.0 // indirect
	github.com/ferranbt/fastssz v0.0.0-20210120143747-11b9eff30ea9 // indirect
	github.com/ethereum/c-kzg-4844 v1.0.0 // indirect
	github.com/ethereum/go-verkle v0.1.1-0.20240829091221-dffa7562dbe9 // indirect
	github.com/ferranbt/fastssz v0.1.3 // indirect
	github.com/flynn/noise v1.1.0 // indirect
	github.com/francoispqt/gojay v1.2.13 // indirect
	github.com/getsentry/sentry-go v0.25.0 // indirect
	github.com/getsentry/sentry-go v0.27.0 // indirect
	github.com/go-logr/stdr v1.2.2 // indirect
	github.com/go-playground/locales v0.14.1 // indirect
	github.com/go-playground/universal-translator v0.18.1 // indirect
	github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect
	github.com/go-stack/stack v1.8.1 // indirect
	github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
	github.com/godbus/dbus/v5 v5.1.0 // indirect
	github.com/gofrs/flock v0.8.1 // indirect
@@ -156,9 +157,9 @@ require (
	github.com/graph-gophers/graphql-go v1.3.0 // indirect
	github.com/hashicorp/go-bexpr v0.1.10 // indirect
	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
	github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 // indirect
	github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 // indirect
	github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
	github.com/holiman/goevmlab v0.0.0-20231201084119-c73b3c97929c // indirect
	github.com/holiman/goevmlab v0.0.0-20240515165425-8414a52dc9d4 // indirect
	github.com/huin/goupnp v1.3.0 // indirect
	github.com/influxdata/influxdb-client-go/v2 v2.4.0 // indirect
	github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c // indirect
@@ -167,7 +168,7 @@ require (
	github.com/jackpal/go-nat-pmp v1.0.2 // indirect
	github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
	github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a // indirect
	github.com/karalabe/usb v0.0.3-0.20230711191512-61db3e06439c // indirect
	github.com/karalabe/hid v1.0.1-0.20240306101548-573246063e52 // indirect
	github.com/klauspost/compress v1.17.9 // indirect
	github.com/klauspost/cpuid/v2 v2.2.8 // indirect
	github.com/koron/go-ssdp v0.0.4 // indirect
@@ -245,7 +246,7 @@ require (
	github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
	github.com/tklauser/go-sysconf v0.3.13 // indirect
	github.com/tklauser/numcpus v0.7.0 // indirect
	github.com/wealdtech/go-eth2-types/v2 v2.5.2 // indirect
	github.com/wealdtech/go-eth2-types/v2 v2.8.2 // indirect
	github.com/wlynxg/anet v0.0.4 // indirect
	github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
	github.com/yusufpapurcu/wmi v1.2.3 // indirect
@@ -262,7 +263,7 @@ require (
	golang.org/x/time v0.5.0 // indirect
	gopkg.in/cenkalti/backoff.v1 v1.1.0 // indirect
	gopkg.in/inf.v0 v0.9.1 // indirect
	gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
	gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
	lukechampine.com/blake3 v1.3.0 // indirect
	rsc.io/tmplfunc v0.0.3 // indirect
@@ -273,7 +274,7 @@ require (

require (
	github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
	github.com/fatih/color v1.13.0 // indirect
	github.com/fatih/color v1.16.0 // indirect
	github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect
	github.com/go-logr/logr v1.4.2 // indirect
	github.com/go-ole/go-ole v1.3.0 // indirect

go.sum (112 lines changed)
@@ -51,12 +51,12 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym
github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ=
github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/MariusVanDerWijden/FuzzyVM v0.0.0-20240209103030-ec53fa766bf8 h1:BwEuC3xavrv4HTUDH2fUrKgKooiH3Q/nSVnFGtnzpN0=
github.com/MariusVanDerWijden/FuzzyVM v0.0.0-20240209103030-ec53fa766bf8/go.mod h1:L1QpLBqXlboJMOC2hndG95d1eiElzKsBhjzcuy8pxeM=
github.com/MariusVanDerWijden/tx-fuzz v1.3.3-0.20240227085032-f70dd7c85c97 h1:QDTh0xHorSykJ4+2VccBADMeRAVUbnHaWrCPIMtN+Vc=
github.com/MariusVanDerWijden/tx-fuzz v1.3.3-0.20240227085032-f70dd7c85c97/go.mod h1:xcjGtET6+7KeDHcwLQp3sIfyFALtoTjzZgY8Y+RUozM=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
github.com/MariusVanDerWijden/FuzzyVM v0.0.0-20240516070431-7828990cad7d h1:RQtzNvriR3Yu5CvVBTJPwDmfItBT90TWZ3fFondhc08=
github.com/MariusVanDerWijden/FuzzyVM v0.0.0-20240516070431-7828990cad7d/go.mod h1:gWTykV/ZinShgltWofTEJY4TsletuvGhB6l4+Ai2F+E=
github.com/MariusVanDerWijden/tx-fuzz v1.4.0 h1:Tq4lXivsR8mtoP4RpasUDIUpDLHfN1YhFge/kzrzK78=
github.com/MariusVanDerWijden/tx-fuzz v1.4.0/go.mod h1:gmOVECg7o5FY5VU3DQ/fY0zTk/ExBdMkUGz0vA8qqms=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
@@ -100,12 +100,12 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bits-and-blooms/bitset v1.11.0 h1:RMyy2mBBShArUAhfVRZJ2xyBO58KCBCtZFShw3umo6k=
github.com/bits-and-blooms/bitset v1.11.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE=
github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
github.com/bradfitz/gomemcache v0.0.0-20170208213004-1952afaa557d/go.mod h1:PmM6Mmwb0LSuEubjR8N7PtNe1KxZLtOUHtbeikc5h60=
github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U=
github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
github.com/btcsuite/btcd/btcec/v2 v2.3.4 h1:3EJjcN70HCu/mwqlUsGK8GcNVyLVxFDlWurTXGPFfiQ=
github.com/btcsuite/btcd/btcec/v2 v2.3.4/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
@@ -141,12 +141,14 @@ github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnht
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4=
github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
github.com/cockroachdb/errors v1.11.1 h1:xSEW75zKaKCWzR3OfxXUxgrk/NtT4G1MiOv5lWZazG8=
github.com/cockroachdb/errors v1.11.1/go.mod h1:8MUxA3Gi6b25tYlFEBGLf+D8aISL+M4MIpiWMSNRfxw=
github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I=
github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8=
github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce h1:giXvy4KSc/6g/esnpM7Geqxka4WSqI1SZc7sMJFd3y4=
github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M=
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE=
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 h1:aPEJyR4rPBvDmeyi+l/FS/VtA00IWvjeFvjen1m1l1A=
github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593/go.mod h1:6hk1eMY/u5t+Cf18q5lFMUA1Rc+Sm5I6Ra1QuPyxXCo=
github.com/cockroachdb/pebble v1.1.2 h1:CUh2IPtR4swHlEj48Rhfzw6l/d0qA31fItcIszQVIsA=
github.com/cockroachdb/pebble v1.1.2/go.mod h1:4exszw1r40423ZsmkG/09AFEG83I0uDgfujJdbL6kYU=
github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30=
github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo=
@@ -172,8 +174,10 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:ma
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/crate-crypto/go-kzg-4844 v0.7.0 h1:C0vgZRk4q4EZ/JgPfzuSoxdCq3C3mOZMBShovmncxvA=
github.com/crate-crypto/go-kzg-4844 v0.7.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
github.com/crate-crypto/go-ipa v0.0.0-20240223125850-b1e8a79f509c h1:uQYC5Z1mdLRPrZhHjHxufI8+2UG/i25QG92j0Er9p6I=
github.com/crate-crypto/go-ipa v0.0.0-20240223125850-b1e8a79f509c/go.mod h1:geZJZH3SzKCqnz5VT0q/DyIG/tvu/dZk+VIfXicupJs=
github.com/crate-crypto/go-kzg-4844 v1.0.0 h1:TsSgHwrkTKecKJ4kadtHi4b3xHW5dCFUDFnUp1TsawI=
github.com/crate-crypto/go-kzg-4844 v1.0.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
@@ -184,8 +188,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU=
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U=
github.com/deckarep/golang-set/v2 v2.5.0 h1:hn6cEZtQ0h3J8kFrHR/NrzyOoTnjgW1+FmNJzQ7y/sA=
github.com/deckarep/golang-set/v2 v2.5.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
github.com/deckarep/golang-set/v2 v2.6.0 h1:XfcQbWM1LlMB8BsJ8N9vW5ehnnPVIw0je80NsVHagjM=
github.com/deckarep/golang-set/v2 v2.6.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y=
github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg=
@@ -231,17 +235,18 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/ethereum/c-kzg-4844 v0.4.0 h1:3MS1s4JtA868KpJxroZoepdV0ZKBp3u/O5HcZ7R3nlY=
github.com/ethereum/c-kzg-4844 v0.4.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
github.com/ethereum/go-ethereum v1.13.5 h1:U6TCRciCqZRe4FPXmy1sMGxTfuk8P7u2UoinF3VbaFk=
github.com/ethereum/go-ethereum v1.13.5/go.mod h1:yMTu38GSuyxaYzQMViqNmQ1s3cE84abZexQmTgenWk0=
github.com/ethereum/c-kzg-4844 v1.0.0 h1:0X1LBXxaEtYD9xsyj9B9ctQEZIpnvVDeoBx8aHEwTNA=
github.com/ethereum/c-kzg-4844 v1.0.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
github.com/ethereum/go-ethereum v1.14.11 h1:8nFDCUUE67rPc6AKxFj7JKaOa2W/W1Rse3oS6LvvxEY=
github.com/ethereum/go-ethereum v1.14.11/go.mod h1:+l/fr42Mma+xBnhefL/+z11/hcmJ2egl+ScIVPjhc7E=
github.com/ethereum/go-verkle v0.1.1-0.20240829091221-dffa7562dbe9 h1:8NfxH2iXvJ60YRB8ChToFTUzl8awsc3cJ8CbLjGIl/A=
github.com/ethereum/go-verkle v0.1.1-0.20240829091221-dffa7562dbe9/go.mod h1:M3b90YRnzqKyyzBEWJGqj8Qff4IDeXnzFw0P9bFw3uk=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/ferranbt/fastssz v0.0.0-20210120143747-11b9eff30ea9 h1:9VDpsWq096+oGMDTT/SgBD/VgZYf4pTF+KTPmZ+OaKM=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/ferranbt/fastssz v0.0.0-20210120143747-11b9eff30ea9/go.mod h1:DyEu2iuLBnb/T51BlsiO3yLYdJC6UbGMrIkqK1KmQxM=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/ferranbt/fastssz v0.1.3 h1:ZI+z3JH05h4kgmFXdHuR1aWYsgrg7o+Fw7/NCzM16Mo=
github.com/ferranbt/fastssz v0.1.3/go.mod h1:0Y9TEd/9XuFlh7mskMPfXiI2Dkw4Ddg9EyXt1W7MRvE=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
@@ -262,8 +267,8 @@ github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 h1:f6D9Hr8x
github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
github.com/getsentry/sentry-go v0.25.0 h1:q6Eo+hS+yoJlTO3uu/azhQadsD8V+jQn2D8VvX1eOyI=
github.com/getsentry/sentry-go v0.25.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY=
github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps=
github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
@@ -309,8 +314,6 @@ github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5Nq
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.6.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
@@ -370,8 +373,8 @@ github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8l
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e h1:4bw4WeyTYPp0smaXiJZCNnLrvVBqirQVreixayXezGc=
github.com/golang/snappy v0.0.5-0.20231225225746-43d5d4cd4e0e/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -480,16 +483,16 @@ github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0m
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/herumi/bls-eth-go-binary v0.0.0-20210130185500-57372fb27371/go.mod h1:luAnRm3OsMQeokhGzpYmc0ZKwawY7o87PUEP11Z7r7U=
github.com/herumi/bls-eth-go-binary v0.0.0-20210917013441-d37c07cfda4e h1:wCMygKUQhmcQAjlk2Gquzq6dLmyMv2kF+llRspoRgrk=
github.com/herumi/bls-eth-go-binary v0.0.0-20210917013441-d37c07cfda4e/go.mod h1:luAnRm3OsMQeokhGzpYmc0ZKwawY7o87PUEP11Z7r7U=
github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 h1:3JQNjnMRil1yD0IfZKHF9GxxWKDJGj8I0IqOUol//sw=
github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc=
github.com/herumi/bls-eth-go-binary v1.31.0 h1:9eeW3EA4epCb7FIHt2luENpAW69MvKGL5jieHlBiP+w=
github.com/herumi/bls-eth-go-binary v1.31.0/go.mod h1:luAnRm3OsMQeokhGzpYmc0ZKwawY7o87PUEP11Z7r7U=
github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 h1:X4egAf/gcS1zATw6wn4Ej8vjuVGxeHdan+bRb2ebyv4=
github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
github.com/holiman/goevmlab v0.0.0-20231201084119-c73b3c97929c h1:J973NLskKmFIj3EGfpQ1ztUQKdwyJ+fG34638ief0eA=
github.com/holiman/goevmlab v0.0.0-20231201084119-c73b3c97929c/go.mod h1:K6KFgcQq1U9ksldcRyLYcwtj4nUTPn4rEaZtX4Gjofc=
github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/holiman/goevmlab v0.0.0-20240515165425-8414a52dc9d4 h1:MnqrjbCnYO6ImEJiqza1vTcSFJno9cErjWG9ONXJgoc=
github.com/holiman/goevmlab v0.0.0-20240515165425-8414a52dc9d4/go.mod h1:3JURK6FtzpaEjCVxopnYN37phjHNI5gW9fhsKFF51Us=
github.com/holiman/uint256 v1.3.1 h1:JfTzmih28bittyHM8z360dCjIA9dbPIBlcTI6lmctQs=
github.com/holiman/uint256 v1.3.1/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
@@ -534,12 +537,11 @@ github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfV
github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a h1:FaWFmfWdAUKbSCtOU2QjDaorUexogfaMgbipgYATUMU=
github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a/go.mod h1:UJSiEoRfvx3hP73CvoARgeLjaIOjybY9vj8PUPPFGeU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213 h1:qGQQKEcAR99REcMpsXCp3lJ03zYT1PkRd3kQGPn9GVg=
github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213/go.mod h1:vNUNkEQ1e29fT/6vq2aBdFsgNPmy8qMdSay1npru+Sw=
github.com/karalabe/usb v0.0.3-0.20230711191512-61db3e06439c h1:AqsttAyEyIEsNz5WLRwuRwjiT5CMDUfLk6cFJDVPebs=
github.com/karalabe/usb v0.0.3-0.20230711191512-61db3e06439c/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
github.com/karalabe/hid v1.0.1-0.20240306101548-573246063e52 h1:msKODTL1m0wigztaqILOtla9HeW1ciscYG4xjLtvk5I=
github.com/karalabe/hid v1.0.1-0.20240306101548-573246063e52/go.mod h1:qk1sX/IBgppQNcGCRoj90u6EGC056EBoIc1oEjCWla8=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
@@ -628,7 +630,6 @@ github.com/mattn/go-colorable v0.0.10-0.20170816031813-ad5389df28cd/go.mod h1:9v
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.2/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
@@ -971,8 +972,8 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
@@ -1017,8 +1018,8 @@ github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXl
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4=
github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/supranational/blst v0.3.13 h1:AYeSxdOMacwu7FBmpfloBz5pbFXDmJL33RuwnKtmTjk=
github.com/supranational/blst v0.3.13/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
@@ -1038,11 +1039,13 @@ github.com/trailofbits/go-mutexasserts v0.0.0-20230328101604-8cdbc5f3d279 h1:+Ly
github.com/trailofbits/go-mutexasserts v0.0.0-20230328101604-8cdbc5f3d279/go.mod h1:GA3+Mq3kt3tYAfM0WZCu7ofy+GW9PuGysHfhr+6JX7s=
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U=
github.com/umbracle/gohashtree v0.0.2-alpha.0.20230207094856-5b775a815c10 h1:CQh33pStIp/E30b7TxDlXfM0145bn2e8boI30IxAhTg=
github.com/umbracle/gohashtree v0.0.2-alpha.0.20230207094856-5b775a815c10/go.mod h1:x/Pa0FF5Te9kdrlZKJK82YmAkvL8+f989USgz6Jiw7M=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli/v2 v2.26.0 h1:3f3AMg3HpThFNT4I++TKOejZO8yU55t3JnnSr4S4QEI=
github.com/urfave/cli/v2 v2.26.0/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
github.com/urfave/cli/v2 v2.27.1 h1:8xSQ6szndafKVRmfyeUMxkNUJQMjL1F2zmsZ+qHpfho=
github.com/urfave/cli/v2 v2.27.1/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
github.com/uudashr/gocognit v1.0.5 h1:rrSex7oHr3/pPLQ0xoWq108XMU8s678FJcQ+aSfOHa4=
github.com/uudashr/gocognit v1.0.5/go.mod h1:wgYz0mitoKOTysqxTDMOUXg+Jb5SvtihkfmugIZYpEA=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
@@ -1052,8 +1055,9 @@ github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49u
github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
github.com/wealdtech/go-bytesutil v1.1.1 h1:ocEg3Ke2GkZ4vQw5lp46rmO+pfqCCTgq35gqOy8JKVc=
github.com/wealdtech/go-bytesutil v1.1.1/go.mod h1:jENeMqeTEU8FNZyDFRVc7KqBdRKSnJ9CCh26TcuNb9s=
github.com/wealdtech/go-eth2-types/v2 v2.5.2 h1:tiA6T88M6XQIbrV5Zz53l1G5HtRERcxQfmET225V4Ls=
github.com/wealdtech/go-eth2-types/v2 v2.5.2/go.mod h1:8lkNUbgklSQ4LZ2oMSuxSdR7WwJW3L9ge1dcoCVyzws=
github.com/wealdtech/go-eth2-types/v2 v2.8.2 h1:b5aXlNBLKgjAg/Fft9VvGlqAUCQMP5LzYhlHRrr4yPg=
github.com/wealdtech/go-eth2-types/v2 v2.8.2/go.mod h1:IAz9Lz1NVTaHabQa+4zjk2QDKMv8LVYo0n46M9o/TXw=
github.com/wealdtech/go-eth2-util v1.6.3 h1:2INPeOR35x5LdFFpSzyw954WzTD+DFyHe3yKlJnG5As=
github.com/wealdtech/go-eth2-util v1.6.3/go.mod h1:0hFMj/qtio288oZFHmAbCnPQ9OB3c4WFzs5NVPKTY4k=
github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4 v1.1.3 h1:SxrDVSr+oXuT1x8kZt4uWqNCvv5xXEGV9zd7cuSrZS8=
@@ -1630,8 +1634,8 @@ gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eR
gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4=
gopkg.in/jcmturner/gokrb5.v7 v7.5.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM=
gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8=
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/redis.v4 v4.2.4/go.mod h1:8KREHdypkCEojGKQcjMqAODMICIVwZAONWq8RowTITA=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
@@ -5,7 +5,6 @@ import (

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/config/params"
)

type ExecutionPayloadElectra = ExecutionPayloadDeneb
@@ -20,124 +19,59 @@ var (
	crSize = crExample.SizeSSZ()
)

const (
	DepositRequestType = iota
	WithdrawalRequestType
	ConsolidationRequestType
)
const LenExecutionRequestsElectra = 3

func (ebe *ExecutionBundleElectra) GetDecodedExecutionRequests() (*ExecutionRequests, error) {
	requests := &ExecutionRequests{}
	var prevTypeNum *uint8
	for i := range ebe.ExecutionRequests {
		requestType, requestListInSSZBytes, err := decodeExecutionRequest(ebe.ExecutionRequests[i])
		if err != nil {
			return nil, err
		}
		if prevTypeNum != nil && *prevTypeNum >= requestType {
			return nil, errors.New("invalid execution request type order or duplicate requests, requests should be in sorted order and unique")
		}
		prevTypeNum = &requestType
		switch requestType {
		case DepositRequestType:
			drs, err := unmarshalDeposits(requestListInSSZBytes)
			if err != nil {
				return nil, err
			}
			requests.Deposits = drs
		case WithdrawalRequestType:
			wrs, err := unmarshalWithdrawals(requestListInSSZBytes)
			if err != nil {
				return nil, err
			}
			requests.Withdrawals = wrs
		case ConsolidationRequestType:
			crs, err := unmarshalConsolidations(requestListInSSZBytes)
			if err != nil {
				return nil, err
			}
			requests.Consolidations = crs
		default:
			return nil, errors.Errorf("unsupported request type %d", requestType)
		}

	if len(ebe.ExecutionRequests) != LenExecutionRequestsElectra /* types of requests */ {
		return nil, errors.Errorf("invalid execution request size: %d", len(ebe.ExecutionRequests))
	}

	// deposit requests
	drs, err := unmarshalItems(ebe.ExecutionRequests[0], drSize, func() *DepositRequest { return &DepositRequest{} })
	if err != nil {
		return nil, err
	}
	requests.Deposits = drs

	// withdrawal requests
	wrs, err := unmarshalItems(ebe.ExecutionRequests[1], wrSize, func() *WithdrawalRequest { return &WithdrawalRequest{} })
	if err != nil {
		return nil, err
	}
	requests.Withdrawals = wrs

	// consolidation requests
	crs, err := unmarshalItems(ebe.ExecutionRequests[2], crSize, func() *ConsolidationRequest { return &ConsolidationRequest{} })
	if err != nil {
		return nil, err
	}
	requests.Consolidations = crs

	return requests, nil
}

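For orientation (not part of the diff): under the new scheme each element of ExecutionRequests is a one-byte request type followed by the SSZ bytes of that request list, and elements must arrive sorted by type with no duplicates, as the error above enforces. A minimal hypothetical sketch of building one such element, using only names that appear in this hunk:

	// Hypothetical helper, shown only to illustrate the framing that
	// decodeExecutionRequest and GetDecodedExecutionRequests expect:
	// one leading type byte, then the concatenated fixed-size SSZ items.
	func frameWithdrawalRequests(wrs []*WithdrawalRequest) ([]byte, error) {
		body, err := marshalItems(wrs) // concatenation of wrSize-byte items
		if err != nil {
			return nil, err
		}
		return append([]byte{WithdrawalRequestType}, body...), nil
	}
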
func unmarshalDeposits(requestListInSSZBytes []byte) ([]*DepositRequest, error) {
	if len(requestListInSSZBytes) < drSize {
		return nil, errors.New("invalid deposit requests length, requests should be at least the size of 1 request")
	}
	if uint64(len(requestListInSSZBytes)) > uint64(drSize)*params.BeaconConfig().MaxDepositRequestsPerPayload {
		return nil, fmt.Errorf("invalid deposit requests length, requests should not be more than the max per payload, got %d max %d", len(requestListInSSZBytes), drSize)
	}
	return unmarshalItems(requestListInSSZBytes, drSize, func() *DepositRequest { return &DepositRequest{} })
}

func unmarshalWithdrawals(requestListInSSZBytes []byte) ([]*WithdrawalRequest, error) {
	if len(requestListInSSZBytes) < wrSize {
		return nil, errors.New("invalid withdrawal request length, requests should be at least the size of 1 request")
	}
	if uint64(len(requestListInSSZBytes)) > uint64(wrSize)*params.BeaconConfig().MaxWithdrawalRequestsPerPayload {
		return nil, fmt.Errorf("invalid withdrawal requests length, requests should not be more than the max per payload, got %d max %d", len(requestListInSSZBytes), wrSize)
	}
	return unmarshalItems(requestListInSSZBytes, wrSize, func() *WithdrawalRequest { return &WithdrawalRequest{} })
}

func unmarshalConsolidations(requestListInSSZBytes []byte) ([]*ConsolidationRequest, error) {
	if len(requestListInSSZBytes) < crSize {
		return nil, errors.New("invalid consolidations request length, requests should be at least the size of 1 request")
	}
	if uint64(len(requestListInSSZBytes)) > uint64(crSize)*params.BeaconConfig().MaxConsolidationsRequestsPerPayload {
		return nil, fmt.Errorf("invalid consolidation requests length, requests should not be more than the max per payload, got %d max %d", len(requestListInSSZBytes), crSize)
	}
	return unmarshalItems(requestListInSSZBytes, crSize, func() *ConsolidationRequest { return &ConsolidationRequest{} })
}

func decodeExecutionRequest(req []byte) (typ uint8, data []byte, err error) {
	if len(req) < 1 {
		return 0, nil, errors.New("invalid execution request, length less than 1")
	}
	return req[0], req[1:], nil
}

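The three helpers above all delegate to unmarshalItems, whose body this hunk does not show. The following is a hypothetical sketch of the shape such a helper plausibly has, assuming sszUnmarshaler (declared later in this file) exposes a fastssz-style UnmarshalSSZ([]byte) error method; that method name is an assumption, not confirmed by the diff:

	// Hypothetical sketch of unmarshalItems: split the payload into
	// fixed-size chunks and SSZ-decode each chunk independently.
	func unmarshalItems[T sszUnmarshaler](data []byte, itemSize int, newItem func() T) ([]T, error) {
		if len(data)%itemSize != 0 {
			return nil, fmt.Errorf("length %d is not a multiple of item size %d", len(data), itemSize)
		}
		items := make([]T, 0, len(data)/itemSize)
		for i := 0; i < len(data); i += itemSize {
			item := newItem()
			if err := item.UnmarshalSSZ(data[i : i+itemSize]); err != nil {
				return nil, err
			}
			items = append(items, item)
		}
		return items, nil
	}
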
func EncodeExecutionRequests(requests *ExecutionRequests) ([]hexutil.Bytes, error) {
	if requests == nil {
		return nil, errors.New("invalid execution requests")
	}

	requestsData := make([]hexutil.Bytes, 0)

	// request types MUST be in sorted order starting from 0
	if len(requests.Deposits) > 0 {
		drBytes, err := marshalItems(requests.Deposits)
		if err != nil {
			return nil, errors.Wrap(err, "failed to marshal deposit requests")
		}
		requestData := []byte{DepositRequestType}
		requestData = append(requestData, drBytes...)
		requestsData = append(requestsData, requestData)
	}
	if len(requests.Withdrawals) > 0 {
		wrBytes, err := marshalItems(requests.Withdrawals)
		if err != nil {
			return nil, errors.Wrap(err, "failed to marshal withdrawal requests")
		}
		requestData := []byte{WithdrawalRequestType}
		requestData = append(requestData, wrBytes...)
		requestsData = append(requestsData, requestData)
	}
	if len(requests.Consolidations) > 0 {
		crBytes, err := marshalItems(requests.Consolidations)
		if err != nil {
			return nil, errors.Wrap(err, "failed to marshal consolidation requests")
		}
		requestData := []byte{ConsolidationRequestType}
		requestData = append(requestData, crBytes...)
		requestsData = append(requestsData, requestData)
	drBytes, err := marshalItems(requests.Deposits)
	if err != nil {
		return nil, errors.Wrap(err, "failed to marshal deposit requests")
	}

	return requestsData, nil
	wrBytes, err := marshalItems(requests.Withdrawals)
	if err != nil {
		return nil, errors.Wrap(err, "failed to marshal withdrawal requests")
	}

	crBytes, err := marshalItems(requests.Consolidations)
	if err != nil {
		return nil, errors.Wrap(err, "failed to marshal consolidation requests")
	}
	return []hexutil.Bytes{drBytes, wrBytes, crBytes}, nil
}
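Note the behavioral change visible in this hunk: the old encoder always returned three untyped lists (drBytes, wrBytes, crBytes) even when they were empty, while the new one emits one type-prefixed entry per non-empty list and skips empty ones, so an all-empty ExecutionRequests now encodes to a zero-length slice (the test below pins this down). A short usage sketch of the new behavior, with wrs standing in for some caller-provided []*WithdrawalRequest:

	// With only withdrawals set, the new encoding yields exactly one entry,
	// whose first byte is the type tag WithdrawalRequestType (1 via iota).
	reqs := &ExecutionRequests{Withdrawals: wrs}
	encoded, err := EncodeExecutionRequests(reqs)
	// err == nil, len(encoded) == 1, encoded[0][0] == WithdrawalRequestType
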

type sszUnmarshaler interface {
@@ -4,7 +4,6 @@ import (
	"testing"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
	enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
@@ -12,189 +11,6 @@ import (

var depositRequestsSSZHex = "0x706b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000077630000000000000000000000000000000000000000000000000000000000007b00000000000000736967000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c801000000000000706b00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000776300000000000000000000000000000000000000000000000000000000000090010000000000007369670000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000"

func TestGetDecodedExecutionRequests(t *testing.T) {
	t.Run("All requests decode successfully", func(t *testing.T) {
		depositRequestBytes, err := hexutil.Decode("0x610000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" +
			"620000000000000000000000000000000000000000000000000000000000000000" +
			"4059730700000063000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" +
			"00000000000000000000000000000000000000000000000000000000000000000000000000000000")
		require.NoError(t, err)
		withdrawalRequestBytes, err := hexutil.Decode("0x6400000000000000000000000000000000000000" +
			"6500000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040597307000000")
		require.NoError(t, err)
		consolidationRequestBytes, err := hexutil.Decode("0x6600000000000000000000000000000000000000" +
			"670000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" +
			"680000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
		require.NoError(t, err)
		ebe := &enginev1.ExecutionBundleElectra{
			ExecutionRequests: [][]byte{append([]byte{uint8(enginev1.DepositRequestType)}, depositRequestBytes...),
				append([]byte{uint8(enginev1.WithdrawalRequestType)}, withdrawalRequestBytes...),
				append([]byte{uint8(enginev1.ConsolidationRequestType)}, consolidationRequestBytes...)},
		}
		requests, err := ebe.GetDecodedExecutionRequests()
		require.NoError(t, err)
		require.Equal(t, len(requests.Deposits), 1)
		require.Equal(t, len(requests.Withdrawals), 1)
		require.Equal(t, len(requests.Consolidations), 1)
	})
	t.Run("Excluded requests still decode successfully when one request is missing", func(t *testing.T) {
		depositRequestBytes, err := hexutil.Decode("0x610000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" +
			"620000000000000000000000000000000000000000000000000000000000000000" +
			"4059730700000063000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" +
			"00000000000000000000000000000000000000000000000000000000000000000000000000000000")
		require.NoError(t, err)
		consolidationRequestBytes, err := hexutil.Decode("0x6600000000000000000000000000000000000000" +
			"670000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" +
			"680000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
		require.NoError(t, err)
		ebe := &enginev1.ExecutionBundleElectra{
			ExecutionRequests: [][]byte{append([]byte{uint8(enginev1.DepositRequestType)}, depositRequestBytes...), append([]byte{uint8(enginev1.ConsolidationRequestType)}, consolidationRequestBytes...)},
		}
		requests, err := ebe.GetDecodedExecutionRequests()
		require.NoError(t, err)
		require.Equal(t, len(requests.Deposits), 1)
		require.Equal(t, len(requests.Withdrawals), 0)
		require.Equal(t, len(requests.Consolidations), 1)
	})
	t.Run("Decode execution requests should fail if ordering is not sorted", func(t *testing.T) {
		depositRequestBytes, err := hexutil.Decode("0x610000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" +
			"620000000000000000000000000000000000000000000000000000000000000000" +
			"4059730700000063000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" +
			"00000000000000000000000000000000000000000000000000000000000000000000000000000000")
		require.NoError(t, err)
		consolidationRequestBytes, err := hexutil.Decode("0x6600000000000000000000000000000000000000" +
			"670000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" +
			"680000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
		require.NoError(t, err)
		ebe := &enginev1.ExecutionBundleElectra{
			ExecutionRequests: [][]byte{append([]byte{uint8(enginev1.ConsolidationRequestType)}, consolidationRequestBytes...), append([]byte{uint8(enginev1.DepositRequestType)}, depositRequestBytes...)},
		}
		_, err = ebe.GetDecodedExecutionRequests()
		require.ErrorContains(t, "invalid execution request type order", err)
	})
	t.Run("Requests should error if the request type is shorter than 1 byte", func(t *testing.T) {
		consolidationRequestBytes, err := hexutil.Decode("0x6600000000000000000000000000000000000000" +
			"670000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" +
			"680000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
		require.NoError(t, err)
		ebe := &enginev1.ExecutionBundleElectra{
			ExecutionRequests: [][]byte{append([]byte{}, []byte{}...), append([]byte{uint8(enginev1.ConsolidationRequestType)}, consolidationRequestBytes...)},
		}
		_, err = ebe.GetDecodedExecutionRequests()
		require.ErrorContains(t, "invalid execution request, length less than 1", err)
	})
	t.Run("a duplicate request should fail", func(t *testing.T) {
		withdrawalRequestBytes, err := hexutil.Decode("0x6400000000000000000000000000000000000000" +
			"6500000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040597307000000")
		require.NoError(t, err)
		withdrawalRequestBytes2, err := hexutil.Decode("0x6400000000000000000000000000000000000000" +
			"6500000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040597307000000")
		require.NoError(t, err)
		ebe := &enginev1.ExecutionBundleElectra{
			ExecutionRequests: [][]byte{append([]byte{uint8(enginev1.WithdrawalRequestType)}, withdrawalRequestBytes...), append([]byte{uint8(enginev1.WithdrawalRequestType)}, withdrawalRequestBytes2...)},
		}
		_, err = ebe.GetDecodedExecutionRequests()
		require.ErrorContains(t, "requests should be in sorted order and unique", err)
	})
	t.Run("a duplicate withdrawals ( non 0 request type )request should fail", func(t *testing.T) {
		depositRequestBytes, err := hexutil.Decode("0x610000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" +
			"620000000000000000000000000000000000000000000000000000000000000000" +
			"4059730700000063000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" +
			"00000000000000000000000000000000000000000000000000000000000000000000000000000000")
		require.NoError(t, err)
		depositRequestBytes2, err := hexutil.Decode("0x610000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" +
			"620000000000000000000000000000000000000000000000000000000000000000" +
			"4059730700000063000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" +
			"00000000000000000000000000000000000000000000000000000000000000000000000000000000")
		require.NoError(t, err)
		ebe := &enginev1.ExecutionBundleElectra{
			ExecutionRequests: [][]byte{append([]byte{uint8(enginev1.DepositRequestType)}, depositRequestBytes...), append([]byte{uint8(enginev1.DepositRequestType)}, depositRequestBytes2...)},
		}
		_, err = ebe.GetDecodedExecutionRequests()
		require.ErrorContains(t, "requests should be in sorted order and unique", err)
	})
	t.Run("If a request type is provided, but the request list is shorter than the ssz of 1 request we error", func(t *testing.T) {
		consolidationRequestBytes, err := hexutil.Decode("0x6600000000000000000000000000000000000000" +
			"670000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" +
			"680000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
		require.NoError(t, err)
		ebe := &enginev1.ExecutionBundleElectra{
			ExecutionRequests: [][]byte{append([]byte{uint8(enginev1.DepositRequestType)}, []byte{}...), append([]byte{uint8(enginev1.ConsolidationRequestType)}, consolidationRequestBytes...)},
		}
		_, err = ebe.GetDecodedExecutionRequests()
		require.ErrorContains(t, "invalid deposit requests length", err)
	})
	t.Run("If deposit requests are over the max allowed per payload then we should error", func(t *testing.T) {
		requests := make([]*enginev1.DepositRequest, params.BeaconConfig().MaxDepositRequestsPerPayload+1)
		for i := range requests {
			requests[i] = &enginev1.DepositRequest{
				Pubkey:                bytesutil.PadTo([]byte("pk"), 48),
				WithdrawalCredentials: bytesutil.PadTo([]byte("wc"), 32),
				Amount:                123,
				Signature:             bytesutil.PadTo([]byte("sig"), 96),
				Index:                 456,
			}
		}
		by, err := enginev1.MarshalItems(requests)
		require.NoError(t, err)
		ebe := &enginev1.ExecutionBundleElectra{
			ExecutionRequests: [][]byte{
				append([]byte{uint8(enginev1.DepositRequestType)}, by...),
			},
		}
		_, err = ebe.GetDecodedExecutionRequests()
		require.ErrorContains(t, "invalid deposit requests length, requests should not be more than the max per payload", err)
	})
	t.Run("If withdrawal requests are over the max allowed per payload then we should error", func(t *testing.T) {
		requests := make([]*enginev1.WithdrawalRequest, params.BeaconConfig().MaxWithdrawalRequestsPerPayload+1)
		for i := range requests {
			requests[i] = &enginev1.WithdrawalRequest{
				SourceAddress:   bytesutil.PadTo([]byte("sa"), 20),
				ValidatorPubkey: bytesutil.PadTo([]byte("pk"), 48),
				Amount:          55555,
			}
		}
		by, err := enginev1.MarshalItems(requests)
		require.NoError(t, err)
		ebe := &enginev1.ExecutionBundleElectra{
			ExecutionRequests: [][]byte{
				append([]byte{uint8(enginev1.WithdrawalRequestType)}, by...),
			},
		}
		_, err = ebe.GetDecodedExecutionRequests()
		require.ErrorContains(t, "invalid withdrawal requests length, requests should not be more than the max per payload", err)
	})
	t.Run("If consolidation requests are over the max allowed per payload then we should error", func(t *testing.T) {
		requests := make([]*enginev1.ConsolidationRequest, params.BeaconConfig().MaxConsolidationsRequestsPerPayload+1)
		for i := range requests {
			requests[i] = &enginev1.ConsolidationRequest{
				SourceAddress: bytesutil.PadTo([]byte("sa"), 20),
				SourcePubkey:  bytesutil.PadTo([]byte("pk"), 48),
				TargetPubkey:  bytesutil.PadTo([]byte("pk"), 48),
			}
		}
		by, err := enginev1.MarshalItems(requests)
		require.NoError(t, err)
		ebe := &enginev1.ExecutionBundleElectra{
			ExecutionRequests: [][]byte{
				append([]byte{uint8(enginev1.ConsolidationRequestType)}, by...),
			},
		}
		_, err = ebe.GetDecodedExecutionRequests()
		require.ErrorContains(t, "invalid consolidation requests length, requests should not be more than the max per payload", err)
	})
}

func TestEncodeExecutionRequests(t *testing.T) {
	t.Run("Empty execution requests should return an empty response and not nil", func(t *testing.T) {
		ebe := &enginev1.ExecutionRequests{}
		b, err := enginev1.EncodeExecutionRequests(ebe)
		require.NoError(t, err)
		require.NotNil(t, b)
		require.Equal(t, len(b), 0)
	})
}

func TestUnmarshalItems_OK(t *testing.T) {
	drb, err := hexutil.Decode(depositRequestsSSZHex)
	require.NoError(t, err)

proto/engine/v1/execution_engine.pb.go (generated, 100 lines changed)
@@ -1697,61 +1697,6 @@ func (x *Blob) GetData() []byte {
	return nil
}

type BlobAndProof struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Blob     []byte `protobuf:"bytes,1,opt,name=blob,proto3" json:"blob,omitempty" ssz-size:"131072"`
	KzgProof []byte `protobuf:"bytes,2,opt,name=kzg_proof,json=kzgProof,proto3" json:"kzg_proof,omitempty" ssz-size:"48"`
}

func (x *BlobAndProof) Reset() {
	*x = BlobAndProof{}
	if protoimpl.UnsafeEnabled {
		mi := &file_proto_engine_v1_execution_engine_proto_msgTypes[16]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *BlobAndProof) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*BlobAndProof) ProtoMessage() {}

func (x *BlobAndProof) ProtoReflect() protoreflect.Message {
	mi := &file_proto_engine_v1_execution_engine_proto_msgTypes[16]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use BlobAndProof.ProtoReflect.Descriptor instead.
func (*BlobAndProof) Descriptor() ([]byte, []int) {
	return file_proto_engine_v1_execution_engine_proto_rawDescGZIP(), []int{16}
}

func (x *BlobAndProof) GetBlob() []byte {
	if x != nil {
		return x.Blob
	}
	return nil
}

func (x *BlobAndProof) GetKzgProof() []byte {
	if x != nil {
		return x.KzgProof
	}
	return nil
}

var File_proto_engine_v1_execution_engine_proto protoreflect.FileDescriptor

var file_proto_engine_v1_execution_engine_proto_rawDesc = []byte{
@@ -2127,23 +2072,17 @@ var file_proto_engine_v1_execution_engine_proto_rawDesc = []byte{
	0x31, 0x30, 0x37, 0x32, 0x92, 0xb5, 0x18, 0x04, 0x34, 0x30, 0x39, 0x36, 0x52, 0x05, 0x62, 0x6c,
	0x6f, 0x62, 0x73, 0x22, 0x26, 0x0a, 0x04, 0x42, 0x6c, 0x6f, 0x62, 0x12, 0x1e, 0x0a, 0x04, 0x64,
	0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x0a, 0x8a, 0xb5, 0x18, 0x06, 0x31,
	0x33, 0x31, 0x30, 0x37, 0x32, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x53, 0x0a, 0x0c, 0x42,
	0x6c, 0x6f, 0x62, 0x41, 0x6e, 0x64, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x1e, 0x0a, 0x04, 0x62,
	0x6c, 0x6f, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x0a, 0x8a, 0xb5, 0x18, 0x06, 0x31,
	0x33, 0x31, 0x30, 0x37, 0x32, 0x52, 0x04, 0x62, 0x6c, 0x6f, 0x62, 0x12, 0x23, 0x0a, 0x09, 0x6b,
	0x7a, 0x67, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06,
	0x8a, 0xb5, 0x18, 0x02, 0x34, 0x38, 0x52, 0x08, 0x6b, 0x7a, 0x67, 0x50, 0x72, 0x6f, 0x6f, 0x66,
	0x42, 0x96, 0x01, 0x0a, 0x16, 0x6f, 0x72, 0x67, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75,
	0x6d, 0x2e, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x14, 0x45, 0x78, 0x65,
	0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x50, 0x72, 0x6f, 0x74,
	0x6f, 0x50, 0x01, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
	0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72,
	0x79, 0x73, 0x6d, 0x2f, 0x76, 0x35, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x6e, 0x67,
	0x69, 0x6e, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x76, 0x31, 0xaa,
	0x02, 0x12, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e,
	0x65, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x12, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x5c,
	0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5c, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
	0x33,
	0x33, 0x31, 0x30, 0x37, 0x32, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x42, 0x96, 0x01, 0x0a, 0x16,
	0x6f, 0x72, 0x67, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x6e, 0x67,
	0x69, 0x6e, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x14, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f,
	0x6e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3a,
	0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d,
	0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76,
	0x35, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2f, 0x76,
	0x31, 0x3b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x76, 0x31, 0xaa, 0x02, 0x12, 0x45, 0x74, 0x68,
	0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2e, 0x56, 0x31, 0xca,
	0x02, 0x12, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x5c, 0x45, 0x6e, 0x67, 0x69, 0x6e,
	0x65, 0x5c, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var (
@@ -2159,7 +2098,7 @@ func file_proto_engine_v1_execution_engine_proto_rawDescGZIP() []byte {
}

var file_proto_engine_v1_execution_engine_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_proto_engine_v1_execution_engine_proto_msgTypes = make([]protoimpl.MessageInfo, 17)
var file_proto_engine_v1_execution_engine_proto_msgTypes = make([]protoimpl.MessageInfo, 16)
var file_proto_engine_v1_execution_engine_proto_goTypes = []interface{}{
	(PayloadStatus_Status)(0), // 0: ethereum.engine.v1.PayloadStatus.Status
	(*ExecutionPayload)(nil),  // 1: ethereum.engine.v1.ExecutionPayload
@@ -2178,7 +2117,6 @@ var file_proto_engine_v1_execution_engine_proto_goTypes = []interface{}{
	(*Withdrawal)(nil),   // 14: ethereum.engine.v1.Withdrawal
	(*BlobsBundle)(nil),  // 15: ethereum.engine.v1.BlobsBundle
	(*Blob)(nil),         // 16: ethereum.engine.v1.Blob
	(*BlobAndProof)(nil), // 17: ethereum.engine.v1.BlobAndProof
}
var file_proto_engine_v1_execution_engine_proto_depIdxs = []int32{
	14, // 0: ethereum.engine.v1.ExecutionPayloadCapella.withdrawals:type_name -> ethereum.engine.v1.Withdrawal
@@ -2394,18 +2332,6 @@ func file_proto_engine_v1_execution_engine_proto_init() {
				return nil
			}
		}
		file_proto_engine_v1_execution_engine_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*BlobAndProof); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
@@ -2413,7 +2339,7 @@ func file_proto_engine_v1_execution_engine_proto_init() {
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_proto_engine_v1_execution_engine_proto_rawDesc,
			NumEnums:      1,
			NumMessages:   17,
			NumMessages:   16,
			NumExtensions: 0,
			NumServices:   0,
		},
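The NumMessages drop from 17 to 16 in the TypeBuilder above tracks the deleted BlobAndProof message. One way to sanity-check an edit like this at runtime — a sketch assuming only standard protobuf-go behavior, namely that the generated package registers its descriptors with the global registry on import:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoregistry"

	_ "github.com/prysmaticlabs/prysm/v5/proto/engine/v1" // registers descriptors via init
)

func main() {
	fd, err := protoregistry.GlobalFiles.FindFileByPath("proto/engine/v1/execution_engine.proto")
	if err != nil {
		panic(err)
	}
	// All messages in this file are top-level, so this should report 16
	// after the change above (17 before it).
	fmt.Println("messages:", fd.Messages().Len())
}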
Some files were not shown because too many files have changed in this diff.