Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-10 05:47:59 -05:00
Compare commits: check_epoc...v5.3.0 (27 commits)
Commits (SHA1):
8c4ea850ba
4b43f13e65
26d35474e9
9fbe3564df
bed5547890
47922fe7d8
dcd25d1d97
81a2a17c5f
6b3f1de19d
7c17af2a41
ecf5a368d7
557c5be433
49405c3afd
3439122629
f6e5da6723
842f241cb9
41daac1b04
2a7fc84044
44ff0b1a14
91cdd318a8
3dc00816fb
e331d5b371
8d5090ce54
25244d906d
aa445713ac
177769a1ce
967e9255a2
WORKSPACE (10 changed lines)
@@ -255,7 +255,7 @@ filegroup(
     url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
 )
 
-consensus_spec_version = "v1.5.0-beta.1"
+consensus_spec_version = "v1.5.0-beta.2"
 
 bls_test_version = "v0.1.1"
 
@@ -271,7 +271,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-R6r60geCfEjMaB1Ag3svaMFXFIgaJvkTJhfKsf76rFE=",
+    integrity = "sha256-X/bMxbKg1clo2aFEjBoeuFq/U+BF1eQopgRP/7nI3Qg=",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
 )
 
@@ -287,7 +287,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-2Pem2gMHxW/6bBhZ2BaqkQruQSd/dTS3WMaMQO8rZ/o=",
+    integrity = "sha256-WSxdri5OJGuNApW+odKle5UzToDyEOx+F3lMiqamJAg=",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
 )
 
@@ -303,7 +303,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-5yP05JTV1MhcUZ2kSh+T+kXjG+uW3A5877veC5c1mD4=",
+    integrity = "sha256-LYE8l3y/zSt4YVrehrJ3ralqtgeYNildiIp+HR6+xAI=",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
 )
 
@@ -318,7 +318,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-O6Rg6h19T0RsJs0sBDZ9O1k4LnCJ/gu2ilHijFBVfME=",
+    integrity = "sha256-jvZQ90qcJMTOqMsPO7sgeEVQmewZTHcz7LVDkNqwTFQ=",
     strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
     url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
 )
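The integrity attributes above are Subresource Integrity strings: the hash algorithm name plus the base64-encoded digest of the downloaded archive. A minimal Go sketch of how such a value can be recomputed to verify a tarball locally (the helper name and file path are mine, not from the repo):

package main

import (
    "crypto/sha256"
    "encoding/base64"
    "fmt"
    "io"
    "os"
)

// sriSHA256 returns the Bazel-style SRI string ("sha256-<base64 digest>")
// for the file at path, matching the integrity values in the WORKSPACE diff.
func sriSHA256(path string) (string, error) {
    f, err := os.Open(path)
    if err != nil {
        return "", err
    }
    defer f.Close()
    h := sha256.New()
    if _, err := io.Copy(h, f); err != nil {
        return "", err
    }
    return "sha256-" + base64.StdEncoding.EncodeToString(h.Sum(nil)), nil
}

func main() {
    s, err := sriSHA256("general.tar.gz") // illustrative file name
    if err != nil {
        fmt.Println("error:", err)
        return
    }
    fmt.Println(s) // e.g. sha256-X/bMxbKg1clo2aFEjBoeuFq/U+BF1eQopgRP/7nI3Qg=
}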
@@ -3,7 +3,6 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
 go_library(
     name = "go_default_library",
     srcs = [
-        "checkpoint.go",
         "client.go",
         "doc.go",
         "health.go",
@@ -16,28 +15,19 @@ go_library(
         "//api/client/beacon/iface:go_default_library",
         "//api/server:go_default_library",
         "//api/server/structs:go_default_library",
         "//beacon-chain/core/helpers:go_default_library",
         "//beacon-chain/state:go_default_library",
         "//consensus-types/interfaces:go_default_library",
         "//consensus-types/primitives:go_default_library",
         "//encoding/bytesutil:go_default_library",
         "//encoding/ssz/detect:go_default_library",
         "//io/file:go_default_library",
         "//network/forks:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
         "//runtime/version:go_default_library",
         "//time/slots:go_default_library",
         "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
         "@com_github_pkg_errors//:go_default_library",
         "@com_github_sirupsen_logrus//:go_default_library",
         "@org_golang_x_mod//semver:go_default_library",
     ],
 )
 
 go_test(
     name = "go_default_test",
     srcs = [
-        "checkpoint_test.go",
         "client_test.go",
         "health_test.go",
     ],
@@ -45,19 +35,7 @@ go_test(
     deps = [
         "//api/client:go_default_library",
         "//api/client/beacon/testing:go_default_library",
         "//beacon-chain/state:go_default_library",
         "//config/params:go_default_library",
         "//consensus-types/blocks:go_default_library",
         "//consensus-types/blocks/testing:go_default_library",
         "//consensus-types/primitives:go_default_library",
         "//encoding/ssz/detect:go_default_library",
         "//network/forks:go_default_library",
         "//proto/prysm/v1alpha1:go_default_library",
         "//runtime/version:go_default_library",
         "//testing/require:go_default_library",
         "//testing/util:go_default_library",
         "//time/slots:go_default_library",
         "@com_github_pkg_errors//:go_default_library",
         "@org_uber_go_mock//gomock:go_default_library",
     ],
 )
@@ -1,276 +0,0 @@
-package beacon
-
-import (
-    "context"
-    "fmt"
-    "path"
-
-    "github.com/ethereum/go-ethereum/common/hexutil"
-    "github.com/pkg/errors"
-    base "github.com/prysmaticlabs/prysm/v5/api/client"
-    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
-    "github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
-    "github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
-    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
-    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
-    "github.com/prysmaticlabs/prysm/v5/encoding/ssz/detect"
-    "github.com/prysmaticlabs/prysm/v5/io/file"
-    "github.com/prysmaticlabs/prysm/v5/runtime/version"
-    "github.com/prysmaticlabs/prysm/v5/time/slots"
-    "github.com/sirupsen/logrus"
-    "golang.org/x/mod/semver"
-)
-
-var errCheckpointBlockMismatch = errors.New("mismatch between checkpoint sync state and block")
-
-// OriginData represents the BeaconState and ReadOnlySignedBeaconBlock necessary to start an empty Beacon Node
-// using Checkpoint Sync.
-type OriginData struct {
-    sb []byte
-    bb []byte
-    st state.BeaconState
-    b  interfaces.ReadOnlySignedBeaconBlock
-    vu *detect.VersionedUnmarshaler
-    br [32]byte
-    sr [32]byte
-}
-
-// SaveBlock saves the downloaded block to a unique file in the given path.
-// For readability and collision avoidance, the file name includes: type, config name, slot and root
-func (o *OriginData) SaveBlock(dir string) (string, error) {
-    blockPath := path.Join(dir, fname("block", o.vu, o.b.Block().Slot(), o.br))
-    return blockPath, file.WriteFile(blockPath, o.BlockBytes())
-}
-
-// SaveState saves the downloaded state to a unique file in the given path.
-// For readability and collision avoidance, the file name includes: type, config name, slot and root
-func (o *OriginData) SaveState(dir string) (string, error) {
-    statePath := path.Join(dir, fname("state", o.vu, o.st.Slot(), o.sr))
-    return statePath, file.WriteFile(statePath, o.StateBytes())
-}
-
-// StateBytes returns the ssz-encoded bytes of the downloaded BeaconState value.
-func (o *OriginData) StateBytes() []byte {
-    return o.sb
-}
-
-// BlockBytes returns the ssz-encoded bytes of the downloaded ReadOnlySignedBeaconBlock value.
-func (o *OriginData) BlockBytes() []byte {
-    return o.bb
-}
-
-func fname(prefix string, vu *detect.VersionedUnmarshaler, slot primitives.Slot, root [32]byte) string {
-    return fmt.Sprintf("%s_%s_%s_%d-%#x.ssz", prefix, vu.Config.ConfigName, version.String(vu.Fork), slot, root)
-}
-
-// DownloadFinalizedData downloads the most recently finalized state, and the block most recently applied to that state.
-// This pair can be used to initialize a new beacon node via checkpoint sync.
-func DownloadFinalizedData(ctx context.Context, client *Client) (*OriginData, error) {
-    sb, err := client.GetState(ctx, IdFinalized)
-    if err != nil {
-        return nil, err
-    }
-    vu, err := detect.FromState(sb)
-    if err != nil {
-        return nil, errors.Wrap(err, "error detecting chain config for finalized state")
-    }
-
-    log.WithFields(logrus.Fields{
-        "name": vu.Config.ConfigName,
-        "fork": version.String(vu.Fork),
-    }).Info("Detected supported config in remote finalized state")
-
-    s, err := vu.UnmarshalBeaconState(sb)
-    if err != nil {
-        return nil, errors.Wrap(err, "error unmarshaling finalized state to correct version")
-    }
-
-    slot := s.LatestBlockHeader().Slot
-    bb, err := client.GetBlock(ctx, IdFromSlot(slot))
-    if err != nil {
-        return nil, errors.Wrapf(err, "error requesting block by slot = %d", slot)
-    }
-    b, err := vu.UnmarshalBeaconBlock(bb)
-    if err != nil {
-        return nil, errors.Wrap(err, "unable to unmarshal block to a supported type using the detected fork schedule")
-    }
-    br, err := b.Block().HashTreeRoot()
-    if err != nil {
-        return nil, errors.Wrap(err, "error computing hash_tree_root of retrieved block")
-    }
-    bodyRoot, err := b.Block().Body().HashTreeRoot()
-    if err != nil {
-        return nil, errors.Wrap(err, "error computing hash_tree_root of retrieved block body")
-    }
-
-    sbr := bytesutil.ToBytes32(s.LatestBlockHeader().BodyRoot)
-    if sbr != bodyRoot {
-        return nil, errors.Wrapf(errCheckpointBlockMismatch, "state body root = %#x, block body root = %#x", sbr, bodyRoot)
-    }
-    sr, err := s.HashTreeRoot(ctx)
-    if err != nil {
-        return nil, errors.Wrapf(err, "failed to compute htr for finalized state at slot=%d", s.Slot())
-    }
-
-    log.
-        WithField("blockSlot", b.Block().Slot()).
-        WithField("stateSlot", s.Slot()).
-        WithField("stateRoot", hexutil.Encode(sr[:])).
-        WithField("blockRoot", hexutil.Encode(br[:])).
-        Info("Downloaded checkpoint sync state and block.")
-    return &OriginData{
-        st: s,
-        b:  b,
-        sb: sb,
-        bb: bb,
-        vu: vu,
-        br: br,
-        sr: sr,
-    }, nil
-}
-
-// WeakSubjectivityData represents the state root, block root and epoch of the BeaconState + ReadOnlySignedBeaconBlock
-// that falls at the beginning of the current weak subjectivity period. These values can be used to construct
-// a weak subjectivity checkpoint beacon node flag to be used for validation.
-type WeakSubjectivityData struct {
-    BlockRoot [32]byte
-    StateRoot [32]byte
-    Epoch     primitives.Epoch
-}
-
-// CheckpointString returns the standard string representation of a Checkpoint.
-// The format is a hex-encoded block root, followed by the epoch of the block, separated by a colon. For example:
-// "0x1c35540cac127315fabb6bf29181f2ae0de1a3fc909d2e76ba771e61312cc49a:74888"
-func (wsd *WeakSubjectivityData) CheckpointString() string {
-    return fmt.Sprintf("%#x:%d", wsd.BlockRoot, wsd.Epoch)
-}
-
-// ComputeWeakSubjectivityCheckpoint attempts to use the prysm weak_subjectivity api
-// to obtain the current weak_subjectivity checkpoint.
-// For non-prysm nodes, the same computation will be performed with extra steps,
-// using the head state downloaded from the beacon node api.
-func ComputeWeakSubjectivityCheckpoint(ctx context.Context, client *Client) (*WeakSubjectivityData, error) {
-    ws, err := client.GetWeakSubjectivity(ctx)
-    if err != nil {
-        // a 404/405 is expected if querying an endpoint that doesn't support the weak subjectivity checkpoint api
-        if !errors.Is(err, base.ErrNotOK) {
-            return nil, errors.Wrap(err, "unexpected API response for prysm-only weak subjectivity checkpoint API")
-        }
-        // fall back to vanilla Beacon Node API method
-        return computeBackwardsCompatible(ctx, client)
-    }
-    log.Printf("server weak subjectivity checkpoint response - epoch=%d, block_root=%#x, state_root=%#x", ws.Epoch, ws.BlockRoot, ws.StateRoot)
-    return ws, nil
-}
-
-const (
-    prysmMinimumVersion     = "v2.0.7"
-    prysmImplementationName = "Prysm"
-)
-
-// errUnsupportedPrysmCheckpointVersion indicates remote beacon node can't be used for checkpoint retrieval.
-var errUnsupportedPrysmCheckpointVersion = errors.New("node does not meet minimum version requirements for checkpoint retrieval")
-
-// for older endpoints or clients that do not support the weak_subjectivity api method
-// we gather the necessary data for a checkpoint sync by:
-// - inspecting the remote server's head state and computing the weak subjectivity epoch locally
-// - requesting the state at the first slot of the epoch
-// - using hash_tree_root(state.latest_block_header) to compute the block the state integrates
-// - requesting that block by its root
-func computeBackwardsCompatible(ctx context.Context, client *Client) (*WeakSubjectivityData, error) {
-    log.Print("falling back to generic checkpoint derivation, weak_subjectivity API not supported by server")
-    nv, err := client.GetNodeVersion(ctx)
-    if err != nil {
-        return nil, errors.Wrap(err, "unable to proceed with fallback method without confirming node version")
-    }
-    if nv.implementation == prysmImplementationName && semver.Compare(nv.semver, prysmMinimumVersion) < 0 {
-        return nil, errors.Wrapf(errUnsupportedPrysmCheckpointVersion, "%s < minimum (%s)", nv.semver, prysmMinimumVersion)
-    }
-    epoch, err := getWeakSubjectivityEpochFromHead(ctx, client)
-    if err != nil {
-        return nil, errors.Wrap(err, "error computing weak subjectivity epoch via head state inspection")
-    }
-
-    // use first slot of the epoch for the state slot
-    slot, err := slots.EpochStart(epoch)
-    if err != nil {
-        return nil, errors.Wrapf(err, "error computing first slot of epoch=%d", epoch)
-    }
-
-    log.Printf("requesting checkpoint state at slot %d", slot)
-    // get the state at the first slot of the epoch
-    sb, err := client.GetState(ctx, IdFromSlot(slot))
-    if err != nil {
-        return nil, errors.Wrapf(err, "failed to request state by slot from api, slot=%d", slot)
-    }
-
-    // ConfigFork is used to unmarshal the BeaconState so we can read the block root in latest_block_header
-    vu, err := detect.FromState(sb)
-    if err != nil {
-        return nil, errors.Wrap(err, "error detecting chain config for beacon state")
-    }
-    log.Printf("detected supported config in checkpoint state, name=%s, fork=%s", vu.Config.ConfigName, version.String(vu.Fork))
-
-    s, err := vu.UnmarshalBeaconState(sb)
-    if err != nil {
-        return nil, errors.Wrap(err, "error using detected config fork to unmarshal state bytes")
-    }
-
-    // compute state and block roots
-    sr, err := s.HashTreeRoot(ctx)
-    if err != nil {
-        return nil, errors.Wrap(err, "error computing hash_tree_root of state")
-    }
-
-    h := s.LatestBlockHeader()
-    h.StateRoot = sr[:]
-    br, err := h.HashTreeRoot()
-    if err != nil {
-        return nil, errors.Wrap(err, "error while computing block root using state data")
-    }
-
-    bb, err := client.GetBlock(ctx, IdFromRoot(br))
-    if err != nil {
-        return nil, errors.Wrapf(err, "error requesting block by root = %d", br)
-    }
-    b, err := vu.UnmarshalBeaconBlock(bb)
-    if err != nil {
-        return nil, errors.Wrap(err, "unable to unmarshal block to a supported type using the detected fork schedule")
-    }
-    br, err = b.Block().HashTreeRoot()
-    if err != nil {
-        return nil, errors.Wrap(err, "error computing hash_tree_root for block obtained via root")
-    }
-
-    return &WeakSubjectivityData{
-        Epoch:     epoch,
-        BlockRoot: br,
-        StateRoot: sr,
-    }, nil
-}
-
-// this method downloads the head state, which can be used to find the correct chain config
-// and use prysm's helper methods to compute the latest weak subjectivity epoch.
-func getWeakSubjectivityEpochFromHead(ctx context.Context, client *Client) (primitives.Epoch, error) {
-    headBytes, err := client.GetState(ctx, IdHead)
-    if err != nil {
-        return 0, err
-    }
-    vu, err := detect.FromState(headBytes)
-    if err != nil {
-        return 0, errors.Wrap(err, "error detecting chain config for beacon state")
-    }
-    log.Printf("detected supported config in remote head state, name=%s, fork=%s", vu.Config.ConfigName, version.String(vu.Fork))
-    headState, err := vu.UnmarshalBeaconState(headBytes)
-    if err != nil {
-        return 0, errors.Wrap(err, "error unmarshaling state to correct version")
-    }
-
-    epoch, err := helpers.LatestWeakSubjectivityEpoch(ctx, headState, vu.Config)
-    if err != nil {
-        return 0, errors.Wrap(err, "error computing the weak subjectivity epoch from head state")
-    }
-
-    log.Printf("(computed client-side) weak subjectivity epoch = %d", epoch)
-    return epoch, nil
-}
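For context, the deleted file above provided the checkpoint-sync origin download flow. A hedged sketch of how those helpers were typically driven, assuming the api/client/beacon import path implied by the BUILD file and an illustrative host and output directory:

package main

import (
    "context"
    "fmt"
    "log"

    beacon "github.com/prysmaticlabs/prysm/v5/api/client/beacon"
)

func main() {
    ctx := context.Background()
    // NewClient comes from client.go below; the host is illustrative.
    client, err := beacon.NewClient("http://localhost:3500")
    if err != nil {
        log.Fatal(err)
    }
    // Fetch the finalized state plus the block it integrates, verified
    // against each other via the body root check in DownloadFinalizedData.
    od, err := beacon.DownloadFinalizedData(ctx, client)
    if err != nil {
        log.Fatal(err)
    }
    statePath, err := od.SaveState("/tmp/checkpoint")
    if err != nil {
        log.Fatal(err)
    }
    blockPath, err := od.SaveBlock("/tmp/checkpoint")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("wrote", statePath, "and", blockPath)
}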
@@ -29,12 +29,13 @@ const (
 	getSignedBlockPath       = "/eth/v2/beacon/blocks"
 	getBlockRootPath         = "/eth/v1/beacon/blocks/{{.Id}}/root"
 	getForkForStatePath      = "/eth/v1/beacon/states/{{.Id}}/fork"
-	getWeakSubjectivityPath  = "/prysm/v1/beacon/weak_subjectivity"
 	getForkSchedulePath      = "/eth/v1/config/fork_schedule"
 	getConfigSpecPath        = "/eth/v1/config/spec"
 	getStatePath             = "/eth/v2/debug/beacon/states"
-	getNodeVersionPath       = "/eth/v1/node/version"
 	changeBLStoExecutionPath = "/eth/v1/beacon/pool/bls_to_execution_changes"
+
+	GetNodeVersionPath      = "/eth/v1/node/version"
+	GetWeakSubjectivityPath = "/prysm/v1/beacon/weak_subjectivity"
 )
@@ -80,7 +81,8 @@ func idTemplate(ts string) func(StateOrBlockId) string {
 	return f
 }
 
-func renderGetBlockPath(id StateOrBlockId) string {
+// RenderGetBlockPath formats a block id into a path for the GetBlock API endpoint.
+func RenderGetBlockPath(id StateOrBlockId) string {
 	return path.Join(getSignedBlockPath, string(id))
 }
 
@@ -104,7 +106,7 @@ func NewClient(host string, opts ...client.ClientOpt) (*Client, error) {
 // for the named identifiers.
 // The return value contains the ssz-encoded bytes.
 func (c *Client) GetBlock(ctx context.Context, blockId StateOrBlockId) ([]byte, error) {
-	blockPath := renderGetBlockPath(blockId)
+	blockPath := RenderGetBlockPath(blockId)
 	b, err := c.Get(ctx, blockPath, client.WithSSZEncoding())
 	if err != nil {
 		return nil, errors.Wrapf(err, "error requesting state by id = %s", blockId)
@@ -195,6 +197,10 @@ type NodeVersion struct {
 	systemInfo string
 }
 
+func (nv *NodeVersion) SetImplementation(impl string) {
+	nv.implementation = impl
+}
+
 var versionRE = regexp.MustCompile(`^(\w+)/(v\d+\.\d+\.\d+[-a-zA-Z0-9]*)\s*/?(.*)$`)
 
 func parseNodeVersion(v string) (*NodeVersion, error) {
@@ -212,7 +218,7 @@ func parseNodeVersion(v string) (*NodeVersion, error) {
 // GetNodeVersion requests that the beacon node identify information about its implementation in a format
 // similar to a HTTP User-Agent field. ex: Lighthouse/v0.1.5 (Linux x86_64)
 func (c *Client) GetNodeVersion(ctx context.Context) (*NodeVersion, error) {
-	b, err := c.Get(ctx, getNodeVersionPath)
+	b, err := c.Get(ctx, GetNodeVersionPath)
 	if err != nil {
 		return nil, errors.Wrap(err, "error requesting node version")
 	}
@@ -228,7 +234,8 @@ func (c *Client) GetNodeVersion(ctx context.Context) (*NodeVersion, error) {
 	return parseNodeVersion(d.Data.Version)
 }
 
-func renderGetStatePath(id StateOrBlockId) string {
+// RenderGetStatePath formats a state id into a path for the GetState API endpoint.
+func RenderGetStatePath(id StateOrBlockId) string {
 	return path.Join(getStatePath, string(id))
 }
 
@@ -246,13 +253,29 @@ func (c *Client) GetState(ctx context.Context, stateId StateOrBlockId) ([]byte, error) {
 	return b, nil
 }
 
+// WeakSubjectivityData represents the state root, block root and epoch of the BeaconState + ReadOnlySignedBeaconBlock
+// that falls at the beginning of the current weak subjectivity period. These values can be used to construct
+// a weak subjectivity checkpoint beacon node flag to be used for validation.
+type WeakSubjectivityData struct {
+	BlockRoot [32]byte
+	StateRoot [32]byte
+	Epoch     primitives.Epoch
+}
+
+// CheckpointString returns the standard string representation of a Checkpoint.
+// The format is a hex-encoded block root, followed by the epoch of the block, separated by a colon. For example:
+// "0x1c35540cac127315fabb6bf29181f2ae0de1a3fc909d2e76ba771e61312cc49a:74888"
+func (wsd *WeakSubjectivityData) CheckpointString() string {
+	return fmt.Sprintf("%#x:%d", wsd.BlockRoot, wsd.Epoch)
+}
+
 // GetWeakSubjectivity calls a proposed API endpoint that is unique to prysm
 // This api method does the following:
 // - computes weak subjectivity epoch
 // - finds the highest non-skipped block preceding the epoch
 // - returns the htr of the found block and returns this + the value of state_root from the block
 func (c *Client) GetWeakSubjectivity(ctx context.Context) (*WeakSubjectivityData, error) {
-	body, err := c.Get(ctx, getWeakSubjectivityPath)
+	body, err := c.Get(ctx, GetWeakSubjectivityPath)
 	if err != nil {
 		return nil, err
 	}
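As a sanity check on the version handling above, a small self-contained sketch that applies the same versionRE pattern to a User-Agent-style string and gates on the v2.0.7 minimum with golang.org/x/mod/semver, mirroring the fallback check in computeBackwardsCompatible (the main scaffolding is mine):

package main

import (
    "fmt"
    "regexp"

    "golang.org/x/mod/semver"
)

// Same pattern as versionRE in the diff above.
var versionRE = regexp.MustCompile(`^(\w+)/(v\d+\.\d+\.\d+[-a-zA-Z0-9]*)\s*/?(.*)$`)

func main() {
    groups := versionRE.FindStringSubmatch("Prysm/v2.0.6 (linux amd64)")
    // groups[1] = implementation, groups[2] = semver, groups[3] = system info.
    fmt.Println(groups[1], groups[2], groups[3])

    // The fallback path rejects Prysm nodes older than v2.0.7 using
    // exactly this comparison.
    if groups[1] == "Prysm" && semver.Compare(groups[2], "v2.0.7") < 0 {
        fmt.Println("node too old for checkpoint retrieval")
    }
}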
@@ -97,31 +97,31 @@ func TestValidHostname(t *testing.T) {
 		{
 			name:    "hostname with port",
 			hostArg: "mydomain.org:3500",
-			path:    getNodeVersionPath,
+			path:    GetNodeVersionPath,
 			joined:  "http://mydomain.org:3500/eth/v1/node/version",
 		},
 		{
 			name:    "https scheme, hostname with port",
 			hostArg: "https://mydomain.org:3500",
-			path:    getNodeVersionPath,
+			path:    GetNodeVersionPath,
 			joined:  "https://mydomain.org:3500/eth/v1/node/version",
 		},
 		{
 			name:    "http scheme, hostname without port",
 			hostArg: "http://mydomain.org",
-			path:    getNodeVersionPath,
+			path:    GetNodeVersionPath,
 			joined:  "http://mydomain.org/eth/v1/node/version",
 		},
 		{
 			name:    "http scheme, trailing slash, hostname without port",
 			hostArg: "http://mydomain.org/",
-			path:    getNodeVersionPath,
+			path:    GetNodeVersionPath,
 			joined:  "http://mydomain.org/eth/v1/node/version",
 		},
 		{
 			name:    "http scheme, hostname with basic auth creds and no port",
 			hostArg: "http://username:pass@mydomain.org/",
-			path:    getNodeVersionPath,
+			path:    GetNodeVersionPath,
 			joined:  "http://username:pass@mydomain.org/eth/v1/node/version",
 		},
 	}
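The joining behavior those cases pin down can be approximated with the standard library; this is a sketch of the expected semantics, not the client's actual implementation:

package main

import (
    "fmt"
    "net/url"
)

// joinHostPath mimics the expectations in TestValidHostname: a host with an
// optional scheme is combined with an API path into a full URL, defaulting
// to http when no scheme is given.
func joinHostPath(host, apiPath string) (string, error) {
    if u, err := url.Parse(host); err == nil && u.Scheme != "" && u.Host != "" {
        return u.JoinPath(apiPath).String(), nil
    }
    u, err := url.Parse("http://" + host)
    if err != nil {
        return "", err
    }
    return u.JoinPath(apiPath).String(), nil
}

func main() {
    s, _ := joinHostPath("mydomain.org:3500", "/eth/v1/node/version")
    fmt.Println(s) // http://mydomain.org:3500/eth/v1/node/version
}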
@@ -52,6 +52,9 @@ func HistoricalSummaryFromConsensus(s *eth.HistoricalSummary) *HistoricalSummary {
 }
 
 func (s *SignedBLSToExecutionChange) ToConsensus() (*eth.SignedBLSToExecutionChange, error) {
+	if s.Message == nil {
+		return nil, server.NewDecodeError(errNilValue, "Message")
+	}
 	change, err := s.Message.ToConsensus()
 	if err != nil {
 		return nil, server.NewDecodeError(err, "Message")
@@ -103,14 +106,17 @@ func SignedBLSChangeFromConsensus(ch *eth.SignedBLSToExecutionChange) *SignedBLSToExecutionChange {
 
 func SignedBLSChangesToConsensus(src []*SignedBLSToExecutionChange) ([]*eth.SignedBLSToExecutionChange, error) {
 	if src == nil {
-		return nil, errNilValue
+		return nil, server.NewDecodeError(errNilValue, "SignedBLSToExecutionChanges")
 	}
 	err := slice.VerifyMaxLength(src, 16)
 	if err != nil {
-		return nil, err
+		return nil, server.NewDecodeError(err, "SignedBLSToExecutionChanges")
 	}
 	changes := make([]*eth.SignedBLSToExecutionChange, len(src))
 	for i, ch := range src {
 		if ch == nil {
 			return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d]", i))
 		}
 		changes[i], err = ch.ToConsensus()
 		if err != nil {
 			return nil, server.NewDecodeError(err, fmt.Sprintf("[%d]", i))
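The recurring change in this file is one guard: check a nested message for nil before dereferencing it, and report the failing field path through server.NewDecodeError. A reduced, self-contained sketch of the pattern (the DecodeError type here is a stand-in for prysm's api/server one):

package main

import (
    "errors"
    "fmt"
)

var errNilValue = errors.New("nil value")

// DecodeError mirrors the shape of prysm's server.NewDecodeError: it carries
// the path of the field that failed to decode alongside the root cause.
type DecodeError struct {
    Field string
    Err   error
}

func (d *DecodeError) Error() string {
    return fmt.Sprintf("could not decode %q: %v", d.Field, d.Err)
}

type VoluntaryExit struct{ Epoch string }

type SignedVoluntaryExit struct {
    Message   *VoluntaryExit
    Signature string
}

func (e *SignedVoluntaryExit) ToConsensus() (*VoluntaryExit, error) {
    // Guard before dereference; without this, a nil Message would panic.
    if e.Message == nil {
        return nil, &DecodeError{Field: "Message", Err: errNilValue}
    }
    return e.Message, nil
}

func main() {
    _, err := (&SignedVoluntaryExit{}).ToConsensus()
    fmt.Println(err) // could not decode "Message": nil value
}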
@@ -156,6 +162,9 @@ func ForkFromConsensus(f *eth.Fork) *Fork {
 }
 
 func (s *SignedValidatorRegistration) ToConsensus() (*eth.SignedValidatorRegistrationV1, error) {
+	if s.Message == nil {
+		return nil, server.NewDecodeError(errNilValue, "Message")
+	}
 	msg, err := s.Message.ToConsensus()
 	if err != nil {
 		return nil, server.NewDecodeError(err, "Message")
@@ -212,6 +221,9 @@ func SignedValidatorRegistrationFromConsensus(vr *eth.SignedValidatorRegistratio
 }
 
 func (s *SignedContributionAndProof) ToConsensus() (*eth.SignedContributionAndProof, error) {
+	if s.Message == nil {
+		return nil, server.NewDecodeError(errNilValue, "Message")
+	}
 	msg, err := s.Message.ToConsensus()
 	if err != nil {
 		return nil, server.NewDecodeError(err, "Message")
@@ -236,6 +248,9 @@ func SignedContributionAndProofFromConsensus(c *eth.SignedContributionAndProof)
 }
 
 func (c *ContributionAndProof) ToConsensus() (*eth.ContributionAndProof, error) {
+	if c.Contribution == nil {
+		return nil, server.NewDecodeError(errNilValue, "Contribution")
+	}
 	contribution, err := c.Contribution.ToConsensus()
 	if err != nil {
 		return nil, server.NewDecodeError(err, "Contribution")
@@ -307,6 +322,9 @@ func SyncCommitteeContributionFromConsensus(c *eth.SyncCommitteeContribution) *S
 }
 
 func (s *SignedAggregateAttestationAndProof) ToConsensus() (*eth.SignedAggregateAttestationAndProof, error) {
+	if s.Message == nil {
+		return nil, server.NewDecodeError(errNilValue, "Message")
+	}
 	msg, err := s.Message.ToConsensus()
 	if err != nil {
 		return nil, server.NewDecodeError(err, "Message")
@@ -327,6 +345,9 @@ func (a *AggregateAttestationAndProof) ToConsensus() (*eth.AggregateAttestationA
 	if err != nil {
 		return nil, server.NewDecodeError(err, "AggregatorIndex")
 	}
+	if a.Aggregate == nil {
+		return nil, server.NewDecodeError(errNilValue, "Aggregate")
+	}
 	agg, err := a.Aggregate.ToConsensus()
 	if err != nil {
 		return nil, server.NewDecodeError(err, "Aggregate")
@@ -343,6 +364,9 @@ func (a *AggregateAttestationAndProof) ToConsensus() (*eth.AggregateAttestationA
 }
 
 func (s *SignedAggregateAttestationAndProofElectra) ToConsensus() (*eth.SignedAggregateAttestationAndProofElectra, error) {
+	if s.Message == nil {
+		return nil, server.NewDecodeError(errNilValue, "Message")
+	}
 	msg, err := s.Message.ToConsensus()
 	if err != nil {
 		return nil, server.NewDecodeError(err, "Message")
@@ -363,6 +387,9 @@ func (a *AggregateAttestationAndProofElectra) ToConsensus() (*eth.AggregateAttes
 	if err != nil {
 		return nil, server.NewDecodeError(err, "AggregatorIndex")
 	}
+	if a.Aggregate == nil {
+		return nil, server.NewDecodeError(errNilValue, "Aggregate")
+	}
 	agg, err := a.Aggregate.ToConsensus()
 	if err != nil {
 		return nil, server.NewDecodeError(err, "Aggregate")
@@ -383,6 +410,9 @@ func (a *Attestation) ToConsensus() (*eth.Attestation, error) {
 	if err != nil {
 		return nil, server.NewDecodeError(err, "AggregationBits")
 	}
+	if a.Data == nil {
+		return nil, server.NewDecodeError(errNilValue, "Data")
+	}
 	data, err := a.Data.ToConsensus()
 	if err != nil {
 		return nil, server.NewDecodeError(err, "Data")
@@ -412,6 +442,9 @@ func (a *AttestationElectra) ToConsensus() (*eth.AttestationElectra, error) {
 	if err != nil {
 		return nil, server.NewDecodeError(err, "AggregationBits")
 	}
+	if a.Data == nil {
+		return nil, server.NewDecodeError(errNilValue, "Data")
+	}
 	data, err := a.Data.ToConsensus()
 	if err != nil {
 		return nil, server.NewDecodeError(err, "Data")
@@ -433,6 +466,15 @@ func (a *AttestationElectra) ToConsensus() (*eth.AttestationElectra, error) {
 	}, nil
 }
 
+func SingleAttFromConsensus(a *eth.SingleAttestation) *SingleAttestation {
+	return &SingleAttestation{
+		CommitteeIndex: fmt.Sprintf("%d", a.CommitteeId),
+		AttesterIndex:  fmt.Sprintf("%d", a.AttesterIndex),
+		Data:           AttDataFromConsensus(a.Data),
+		Signature:      hexutil.Encode(a.Signature),
+	}
+}
+
 func (a *SingleAttestation) ToConsensus() (*eth.SingleAttestation, error) {
 	ci, err := strconv.ParseUint(a.CommitteeIndex, 10, 64)
 	if err != nil {
@@ -442,6 +484,9 @@ func (a *SingleAttestation) ToConsensus() (*eth.SingleAttestation, error) {
 	if err != nil {
 		return nil, server.NewDecodeError(err, "AttesterIndex")
 	}
+	if a.Data == nil {
+		return nil, server.NewDecodeError(errNilValue, "Data")
+	}
 	data, err := a.Data.ToConsensus()
 	if err != nil {
 		return nil, server.NewDecodeError(err, "Data")
@@ -481,10 +526,16 @@ func (a *AttestationData) ToConsensus() (*eth.AttestationData, error) {
 	if err != nil {
 		return nil, server.NewDecodeError(err, "BeaconBlockRoot")
 	}
+	if a.Source == nil {
+		return nil, server.NewDecodeError(errNilValue, "Source")
+	}
 	source, err := a.Source.ToConsensus()
 	if err != nil {
 		return nil, server.NewDecodeError(err, "Source")
 	}
+	if a.Target == nil {
+		return nil, server.NewDecodeError(errNilValue, "Target")
+	}
 	target, err := a.Target.ToConsensus()
 	if err != nil {
 		return nil, server.NewDecodeError(err, "Target")
@@ -584,15 +635,17 @@ func (b *BeaconCommitteeSubscription) ToConsensus() (*validator.BeaconCommitteeS
 }
 
 func (e *SignedVoluntaryExit) ToConsensus() (*eth.SignedVoluntaryExit, error) {
-	sig, err := bytesutil.DecodeHexWithLength(e.Signature, fieldparams.BLSSignatureLength)
-	if err != nil {
-		return nil, server.NewDecodeError(err, "Signature")
+	if e.Message == nil {
+		return nil, server.NewDecodeError(errNilValue, "Message")
 	}
 	exit, err := e.Message.ToConsensus()
 	if err != nil {
 		return nil, server.NewDecodeError(err, "Message")
 	}
 
+	sig, err := bytesutil.DecodeHexWithLength(e.Signature, fieldparams.BLSSignatureLength)
+	if err != nil {
+		return nil, server.NewDecodeError(err, "Signature")
+	}
 	return &eth.SignedVoluntaryExit{
 		Exit:      exit,
 		Signature: sig,
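bytesutil.DecodeHexWithLength, used above, decodes a 0x-prefixed hex string and enforces an exact byte length (96 bytes for a BLS signature). A stand-in sketch of that behavior using go-ethereum's hexutil; the helper name is mine:

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common/hexutil"
)

// decodeHexWithLength sketches the behavior relied on above: decode 0x-hex
// and reject values that are not exactly `length` bytes long.
func decodeHexWithLength(s string, length int) ([]byte, error) {
    b, err := hexutil.Decode(s)
    if err != nil {
        return nil, err
    }
    if len(b) != length {
        return nil, fmt.Errorf("length %d does not match expected length %d", len(b), length)
    }
    return b, nil
}

func main() {
    _, err := decodeHexWithLength("0x1234", 96) // too short for a BLS signature
    fmt.Println(err)
}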
@@ -695,10 +748,16 @@ func Eth1DataFromConsensus(e1d *eth.Eth1Data) *Eth1Data {
 }
 
 func (s *ProposerSlashing) ToConsensus() (*eth.ProposerSlashing, error) {
+	if s.SignedHeader1 == nil {
+		return nil, server.NewDecodeError(errNilValue, "SignedHeader1")
+	}
 	h1, err := s.SignedHeader1.ToConsensus()
 	if err != nil {
 		return nil, server.NewDecodeError(err, "SignedHeader1")
 	}
+	if s.SignedHeader2 == nil {
+		return nil, server.NewDecodeError(errNilValue, "SignedHeader2")
+	}
 	h2, err := s.SignedHeader2.ToConsensus()
 	if err != nil {
 		return nil, server.NewDecodeError(err, "SignedHeader2")
@@ -711,10 +770,16 @@ func (s *ProposerSlashing) ToConsensus() (*eth.ProposerSlashing, error) {
 }
 
 func (s *AttesterSlashing) ToConsensus() (*eth.AttesterSlashing, error) {
+	if s.Attestation1 == nil {
+		return nil, server.NewDecodeError(errNilValue, "Attestation1")
+	}
 	att1, err := s.Attestation1.ToConsensus()
 	if err != nil {
 		return nil, server.NewDecodeError(err, "Attestation1")
 	}
+	if s.Attestation2 == nil {
+		return nil, server.NewDecodeError(errNilValue, "Attestation2")
+	}
 	att2, err := s.Attestation2.ToConsensus()
 	if err != nil {
 		return nil, server.NewDecodeError(err, "Attestation2")
@@ -723,10 +788,16 @@ func (s *AttesterSlashing) ToConsensus() (*eth.AttesterSlashing, error) {
 }
 
 func (s *AttesterSlashingElectra) ToConsensus() (*eth.AttesterSlashingElectra, error) {
+	if s.Attestation1 == nil {
+		return nil, server.NewDecodeError(errNilValue, "Attestation1")
+	}
 	att1, err := s.Attestation1.ToConsensus()
 	if err != nil {
 		return nil, server.NewDecodeError(err, "Attestation1")
 	}
+	if s.Attestation2 == nil {
+		return nil, server.NewDecodeError(errNilValue, "Attestation2")
+	}
 	att2, err := s.Attestation2.ToConsensus()
 	if err != nil {
 		return nil, server.NewDecodeError(err, "Attestation2")
@@ -747,6 +818,9 @@ func (a *IndexedAttestation) ToConsensus() (*eth.IndexedAttestation, error) {
 			return nil, server.NewDecodeError(err, fmt.Sprintf("AttestingIndices[%d]", i))
 		}
 	}
+	if a.Data == nil {
+		return nil, server.NewDecodeError(errNilValue, "Data")
+	}
 	data, err := a.Data.ToConsensus()
 	if err != nil {
 		return nil, server.NewDecodeError(err, "Data")
@@ -779,6 +853,9 @@ func (a *IndexedAttestationElectra) ToConsensus() (*eth.IndexedAttestationElectr
 			return nil, server.NewDecodeError(err, fmt.Sprintf("AttestingIndices[%d]", i))
 		}
 	}
+	if a.Data == nil {
+		return nil, server.NewDecodeError(errNilValue, "Data")
+	}
 	data, err := a.Data.ToConsensus()
 	if err != nil {
 		return nil, server.NewDecodeError(err, "Data")
@@ -934,11 +1011,11 @@ func (d *DepositRequest) ToConsensus() (*enginev1.DepositRequest, error) {
 
 func ProposerSlashingsToConsensus(src []*ProposerSlashing) ([]*eth.ProposerSlashing, error) {
 	if src == nil {
-		return nil, errNilValue
+		return nil, server.NewDecodeError(errNilValue, "ProposerSlashings")
 	}
 	err := slice.VerifyMaxLength(src, 16)
 	if err != nil {
-		return nil, err
+		return nil, server.NewDecodeError(err, "ProposerSlashings")
 	}
 	proposerSlashings := make([]*eth.ProposerSlashing, len(src))
 	for i, s := range src {
@@ -1067,11 +1144,11 @@ func ProposerSlashingFromConsensus(src *eth.ProposerSlashing) *ProposerSlashing
 
 func AttesterSlashingsToConsensus(src []*AttesterSlashing) ([]*eth.AttesterSlashing, error) {
 	if src == nil {
-		return nil, errNilValue
+		return nil, server.NewDecodeError(errNilValue, "AttesterSlashings")
 	}
 	err := slice.VerifyMaxLength(src, 2)
 	if err != nil {
-		return nil, err
+		return nil, server.NewDecodeError(err, "AttesterSlashings")
 	}
 
 	attesterSlashings := make([]*eth.AttesterSlashing, len(src))
@@ -1082,10 +1159,19 @@ func AttesterSlashingsToConsensus(src []*AttesterSlashing) ([]*eth.AttesterSlash
 		if s.Attestation1 == nil {
 			return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation1", i))
 		}
+
+		if s.Attestation1.Data == nil {
+			return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation1.Data", i))
+		}
+
 		if s.Attestation2 == nil {
 			return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation2", i))
 		}
+
+		if s.Attestation2.Data == nil {
+			return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation2.Data", i))
+		}
+
 		a1Sig, err := bytesutil.DecodeHexWithLength(s.Attestation1.Signature, fieldparams.BLSSignatureLength)
 		if err != nil {
 			return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation1.Signature", i))
@@ -1102,6 +1188,7 @@ func AttesterSlashingsToConsensus(src []*AttesterSlashing) ([]*eth.AttesterSlash
 			}
 			a1AttestingIndices[j] = attestingIndex
 		}
+
 		a1Data, err := s.Attestation1.Data.ToConsensus()
 		if err != nil {
 			return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation1.Data", i))
@@ -1199,11 +1286,11 @@ func AttesterSlashingFromConsensus(src *eth.AttesterSlashing) *AttesterSlashing
 
 func AttesterSlashingsElectraToConsensus(src []*AttesterSlashingElectra) ([]*eth.AttesterSlashingElectra, error) {
 	if src == nil {
-		return nil, errNilValue
+		return nil, server.NewDecodeError(errNilValue, "AttesterSlashingsElectra")
 	}
 	err := slice.VerifyMaxLength(src, fieldparams.MaxAttesterSlashingsElectra)
 	if err != nil {
-		return nil, err
+		return nil, server.NewDecodeError(err, "AttesterSlashingsElectra")
 	}
 
 	attesterSlashings := make([]*eth.AttesterSlashingElectra, len(src))
@@ -1211,13 +1298,23 @@ func AttesterSlashingsElectraToConsensus(src []*AttesterSlashingElectra) ([]*eth
 		if s == nil {
 			return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d]", i))
 		}
+
 		if s.Attestation1 == nil {
 			return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation1", i))
 		}
+
+		if s.Attestation1.Data == nil {
+			return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation1.Data", i))
+		}
+
 		if s.Attestation2 == nil {
 			return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation2", i))
 		}
+
+		if s.Attestation2.Data == nil {
+			return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation2.Data", i))
+		}
+
 		a1Sig, err := bytesutil.DecodeHexWithLength(s.Attestation1.Signature, fieldparams.BLSSignatureLength)
 		if err != nil {
 			return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation1.Signature", i))
@@ -1331,15 +1428,18 @@ func AttesterSlashingElectraFromConsensus(src *eth.AttesterSlashingElectra) *Att
 
 func AttsToConsensus(src []*Attestation) ([]*eth.Attestation, error) {
 	if src == nil {
-		return nil, errNilValue
+		return nil, server.NewDecodeError(errNilValue, "Attestations")
 	}
 	err := slice.VerifyMaxLength(src, 128)
 	if err != nil {
-		return nil, err
+		return nil, server.NewDecodeError(err, "Attestations")
 	}
 
 	atts := make([]*eth.Attestation, len(src))
 	for i, a := range src {
+		if a == nil {
+			return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d]", i))
+		}
 		atts[i], err = a.ToConsensus()
 		if err != nil {
 			return nil, server.NewDecodeError(err, fmt.Sprintf("[%d]", i))
@@ -1358,15 +1458,18 @@ func AttsFromConsensus(src []*eth.Attestation) []*Attestation {
 
 func AttsElectraToConsensus(src []*AttestationElectra) ([]*eth.AttestationElectra, error) {
 	if src == nil {
-		return nil, errNilValue
+		return nil, server.NewDecodeError(errNilValue, "AttestationsElectra")
 	}
 	err := slice.VerifyMaxLength(src, 8)
 	if err != nil {
-		return nil, err
+		return nil, server.NewDecodeError(err, "AttestationsElectra")
 	}
 
 	atts := make([]*eth.AttestationElectra, len(src))
 	for i, a := range src {
+		if a == nil {
+			return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d]", i))
+		}
 		atts[i], err = a.ToConsensus()
 		if err != nil {
 			return nil, server.NewDecodeError(err, fmt.Sprintf("[%d]", i))
@@ -1385,11 +1488,11 @@ func AttsElectraFromConsensus(src []*eth.AttestationElectra) []*AttestationElect
 
 func DepositsToConsensus(src []*Deposit) ([]*eth.Deposit, error) {
 	if src == nil {
-		return nil, errNilValue
+		return nil, server.NewDecodeError(errNilValue, "Deposits")
 	}
 	err := slice.VerifyMaxLength(src, 16)
 	if err != nil {
-		return nil, err
+		return nil, server.NewDecodeError(err, "Deposits")
 	}
 
 	deposits := make([]*eth.Deposit, len(src))
@@ -1461,15 +1564,18 @@ func DepositsFromConsensus(src []*eth.Deposit) []*Deposit {
 
 func SignedExitsToConsensus(src []*SignedVoluntaryExit) ([]*eth.SignedVoluntaryExit, error) {
 	if src == nil {
-		return nil, errNilValue
+		return nil, server.NewDecodeError(errNilValue, "SignedVoluntaryExits")
 	}
 	err := slice.VerifyMaxLength(src, 16)
 	if err != nil {
-		return nil, err
+		return nil, server.NewDecodeError(err, "SignedVoluntaryExits")
 	}
 
 	exits := make([]*eth.SignedVoluntaryExit, len(src))
 	for i, e := range src {
+		if e == nil {
+			return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d]", i))
+		}
 		exits[i], err = e.ToConsensus()
 		if err != nil {
 			return nil, server.NewDecodeError(err, fmt.Sprintf("[%d]", i))
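A sketch of the list-level validation the converters in this file share: nil-slice check, maximum-length check, then per-element conversion with an indexed error path. verifyMaxLength stands in for prysm's slice.VerifyMaxLength, and the generic wrapper is illustrative rather than the repo's actual code:

package main

import (
    "errors"
    "fmt"
)

var errNilValue = errors.New("nil value")

func verifyMaxLength[T any](s []T, max int) error {
    if len(s) > max {
        return fmt.Errorf("length %d exceeds max %d", len(s), max)
    }
    return nil
}

// convertAll mirrors the shape of DepositsToConsensus and friends.
func convertAll[In, Out any](src []*In, max int, field string, conv func(*In) (Out, error)) ([]Out, error) {
    if src == nil {
        return nil, fmt.Errorf("could not decode %s: %w", field, errNilValue)
    }
    if err := verifyMaxLength(src, max); err != nil {
        return nil, fmt.Errorf("could not decode %s: %w", field, err)
    }
    out := make([]Out, len(src))
    for i, e := range src {
        if e == nil {
            return nil, fmt.Errorf("could not decode [%d]: %w", i, errNilValue)
        }
        v, err := conv(e)
        if err != nil {
            return nil, fmt.Errorf("could not decode [%d]: %w", i, err)
        }
        out[i] = v
    }
    return out, nil
}

func main() {
    type exit struct{ Epoch string }
    got, err := convertAll([]*exit{{Epoch: "1"}}, 16, "SignedVoluntaryExits",
        func(e *exit) (string, error) { return e.Epoch, nil })
    fmt.Println(got, err) // [1] <nil>
}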
@@ -24,3 +24,96 @@ func TestDepositSnapshotFromConsensus(t *testing.T) {
 	require.Equal(t, "0x1234", res.ExecutionBlockHash)
 	require.Equal(t, "67890", res.ExecutionBlockHeight)
 }
+
+func TestSignedBLSToExecutionChange_ToConsensus(t *testing.T) {
+	s := &SignedBLSToExecutionChange{Message: nil, Signature: ""}
+	_, err := s.ToConsensus()
+	require.ErrorContains(t, errNilValue.Error(), err)
+}
+
+func TestSignedValidatorRegistration_ToConsensus(t *testing.T) {
+	s := &SignedValidatorRegistration{Message: nil, Signature: ""}
+	_, err := s.ToConsensus()
+	require.ErrorContains(t, errNilValue.Error(), err)
+}
+
+func TestSignedContributionAndProof_ToConsensus(t *testing.T) {
+	s := &SignedContributionAndProof{Message: nil, Signature: ""}
+	_, err := s.ToConsensus()
+	require.ErrorContains(t, errNilValue.Error(), err)
+}
+
+func TestContributionAndProof_ToConsensus(t *testing.T) {
+	c := &ContributionAndProof{
+		Contribution:    nil,
+		AggregatorIndex: "invalid",
+		SelectionProof:  "",
+	}
+	_, err := c.ToConsensus()
+	require.ErrorContains(t, errNilValue.Error(), err)
+}
+
+func TestSignedAggregateAttestationAndProof_ToConsensus(t *testing.T) {
+	s := &SignedAggregateAttestationAndProof{Message: nil, Signature: ""}
+	_, err := s.ToConsensus()
+	require.ErrorContains(t, errNilValue.Error(), err)
+}
+
+func TestAggregateAttestationAndProof_ToConsensus(t *testing.T) {
+	a := &AggregateAttestationAndProof{
+		AggregatorIndex: "1",
+		Aggregate:       nil,
+		SelectionProof:  "",
+	}
+	_, err := a.ToConsensus()
+	require.ErrorContains(t, errNilValue.Error(), err)
+}
+
+func TestAttestation_ToConsensus(t *testing.T) {
+	a := &Attestation{
+		AggregationBits: "0x10",
+		Data:            nil,
+		Signature:       "",
+	}
+	_, err := a.ToConsensus()
+	require.ErrorContains(t, errNilValue.Error(), err)
+}
+
+func TestSingleAttestation_ToConsensus(t *testing.T) {
+	s := &SingleAttestation{
+		CommitteeIndex: "1",
+		AttesterIndex:  "1",
+		Data:           nil,
+		Signature:      "",
+	}
+	_, err := s.ToConsensus()
+	require.ErrorContains(t, errNilValue.Error(), err)
+}
+
+func TestSignedVoluntaryExit_ToConsensus(t *testing.T) {
+	s := &SignedVoluntaryExit{Message: nil, Signature: ""}
+	_, err := s.ToConsensus()
+	require.ErrorContains(t, errNilValue.Error(), err)
+}
+
+func TestProposerSlashing_ToConsensus(t *testing.T) {
+	p := &ProposerSlashing{SignedHeader1: nil, SignedHeader2: nil}
+	_, err := p.ToConsensus()
+	require.ErrorContains(t, errNilValue.Error(), err)
+}
+
+func TestAttesterSlashing_ToConsensus(t *testing.T) {
+	a := &AttesterSlashing{Attestation1: nil, Attestation2: nil}
+	_, err := a.ToConsensus()
+	require.ErrorContains(t, errNilValue.Error(), err)
+}
+
+func TestIndexedAttestation_ToConsensus(t *testing.T) {
+	a := &IndexedAttestation{
+		AttestingIndices: []string{"1"},
+		Data:             nil,
+		Signature:        "invalid",
+	}
+	_, err := a.ToConsensus()
+	require.ErrorContains(t, errNilValue.Error(), err)
+}
@@ -512,17 +512,11 @@ func missingIndices(bs *filesystem.BlobStorage, root [32]byte, expected [][]byte
 	if len(expected) > maxBlobsPerBlock {
 		return nil, errMaxBlobsExceeded
 	}
-	indices, err := bs.Indices(root, slot)
-	if err != nil {
-		return nil, err
-	}
+	indices := bs.Summary(root)
 	missing := make(map[uint64]struct{}, len(expected))
 	for i := range expected {
-		ui := uint64(i)
-		if len(expected[i]) > 0 {
-			if !indices[i] {
-				missing[ui] = struct{}{}
-			}
-		}
+		if len(expected[i]) > 0 && !indices.HasIndex(uint64(i)) {
+			missing[uint64(i)] = struct{}{}
+		}
 	}
 	return missing, nil
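A sketch of the new summary-based lookup: only Summary and HasIndex are taken from the diff, and blobSummary is a stand-in for the real summary type in prysm's filesystem package:

package main

import "fmt"

// blobSummary stands in for the value returned by bs.Summary(root): a cached
// record of which blob indices exist on disk for a given block root.
type blobSummary struct{ mask uint64 }

func (s blobSummary) HasIndex(i uint64) bool { return s.mask&(1<<i) != 0 }

// missingIndices mirrors the rewritten loop: an index is missing when a
// commitment is expected there but the summary has no blob for it.
func missingIndices(s blobSummary, expected [][]byte) map[uint64]struct{} {
    missing := make(map[uint64]struct{}, len(expected))
    for i := range expected {
        if len(expected[i]) > 0 && !s.HasIndex(uint64(i)) {
            missing[uint64(i)] = struct{}{}
        }
    }
    return missing
}

func main() {
    sum := blobSummary{mask: 0b0101} // indices 0 and 2 present
    exp := [][]byte{{1}, {1}, {1}, nil}
    fmt.Println(missingIndices(sum, exp)) // map[1:{}]
}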
@@ -7,6 +7,7 @@ import (
 	"strings"
 	"time"
 
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
+	lightclient "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/light-client"
 
 	"github.com/ethereum/go-ethereum/common"
@@ -552,7 +553,8 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed inte
 
 // inserts finalized deposits into our finalized deposit trie, needs to be
 // called in the background
-func (s *Service) insertFinalizedDeposits(ctx context.Context, fRoot [32]byte) {
+// Post-Electra: prunes all proofs and pending deposits in the cache
+func (s *Service) insertFinalizedDepositsAndPrune(ctx context.Context, fRoot [32]byte) {
 	ctx, span := trace.StartSpan(ctx, "blockChain.insertFinalizedDeposits")
 	defer span.End()
 	startTime := time.Now()
@@ -563,6 +565,16 @@ func (s *Service) insertFinalizedDeposits(ctx context.Context, fRoot [32]byte) {
 		log.WithError(err).Error("could not fetch finalized state")
 		return
 	}
+
+	// Check if we should prune all pending deposits.
+	// In post-Electra(after the legacy deposit mechanism is deprecated),
+	// we can prune all pending deposits in the deposit cache.
+	// See: https://eips.ethereum.org/EIPS/eip-6110#eth1data-poll-deprecation
+	if helpers.DepositRequestsStarted(finalizedState) {
+		s.pruneAllPendingDepositsAndProofs(ctx)
+		return
+	}
+
 	// We update the cache up to the last deposit index in the finalized block's state.
 	// We can be confident that these deposits will be included in some block
 	// because the Eth1 follow distance makes such long-range reorgs extremely unlikely.
@@ -591,6 +603,12 @@ func (s *Service) insertFinalizedDeposits(ctx context.Context, fRoot [32]byte) {
 	log.WithField("duration", time.Since(startTime).String()).Debugf("Finalized deposit insertion completed at index %d", finalizedEth1DepIdx)
 }
 
+// pruneAllPendingDepositsAndProofs prunes all proofs and pending deposits in the cache.
+func (s *Service) pruneAllPendingDepositsAndProofs(ctx context.Context) {
+	s.cfg.DepositCache.PruneAllPendingDeposits(ctx)
+	s.cfg.DepositCache.PruneAllProofs(ctx)
+}
+
 // This ensures that the input root defaults to using genesis root instead of zero hashes. This is needed for handling
 // fork choice justification routine.
 func (s *Service) ensureRootNotZeros(root [32]byte) [32]byte {
|
||||
Signature: zeroSig[:],
|
||||
}, Proof: [][]byte{root}}, 100+i, int64(i), bytesutil.ToBytes32(root)))
|
||||
}
|
||||
service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'})
|
||||
service.insertFinalizedDepositsAndPrune(ctx, [32]byte{'m', 'o', 'c', 'k'})
|
||||
fDeposits, err := depositCache.FinalizedDeposits(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 7, int(fDeposits.MerkleTrieIndex()), "Finalized deposits not inserted correctly")
|
||||
@@ -759,7 +759,7 @@ func TestInsertFinalizedDeposits_PrunePendingDeposits(t *testing.T) {
|
||||
Signature: zeroSig[:],
|
||||
}, Proof: [][]byte{root}}, 100+i, int64(i), bytesutil.ToBytes32(root))
|
||||
}
|
||||
service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'})
|
||||
service.insertFinalizedDepositsAndPrune(ctx, [32]byte{'m', 'o', 'c', 'k'})
|
||||
fDeposits, err := depositCache.FinalizedDeposits(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 7, int(fDeposits.MerkleTrieIndex()), "Finalized deposits not inserted correctly")
|
||||
@@ -799,7 +799,7 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
|
||||
}
|
||||
// Insert 3 deposits before hand.
|
||||
require.NoError(t, depositCache.InsertFinalizedDeposits(ctx, 2, [32]byte{}, 0))
|
||||
service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'})
|
||||
service.insertFinalizedDepositsAndPrune(ctx, [32]byte{'m', 'o', 'c', 'k'})
|
||||
fDeposits, err := depositCache.FinalizedDeposits(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 5, int(fDeposits.MerkleTrieIndex()), "Finalized deposits not inserted correctly")
|
||||
@@ -810,7 +810,7 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
|
||||
}
|
||||
|
||||
// Insert New Finalized State with higher deposit count.
|
||||
service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k', '2'})
|
||||
service.insertFinalizedDepositsAndPrune(ctx, [32]byte{'m', 'o', 'c', 'k', '2'})
|
||||
fDeposits, err = depositCache.FinalizedDeposits(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 12, int(fDeposits.MerkleTrieIndex()), "Finalized deposits not inserted correctly")
|
||||
@@ -2297,7 +2297,7 @@ func TestMissingIndices(t *testing.T) {
|
||||
for _, c := range cases {
|
||||
bm, bs := filesystem.NewEphemeralBlobStorageWithMocker(t)
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
require.NoError(t, bm.CreateFakeIndices(c.root, c.present...))
|
||||
require.NoError(t, bm.CreateFakeIndices(c.root, 0, c.present...))
|
||||
missing, err := missingIndices(bs, c.root, c.expected, 0)
|
||||
if c.err != nil {
|
||||
require.ErrorIs(t, err, c.err)
|
||||
|
||||
@@ -279,9 +279,10 @@ func (s *Service) executePostFinalizationTasks(ctx context.Context, finalizedSta
 	go func() {
 		s.sendNewFinalizedEvent(ctx, finalizedState)
 	}()
+
 	depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
 	go func() {
-		s.insertFinalizedDeposits(depCtx, finalized.Root)
+		s.insertFinalizedDepositsAndPrune(depCtx, finalized.Root)
 		cancel()
 	}()
 }
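The surrounding pattern is worth noting: the deposit work is detached from the caller's context and bounded by its own deadline, with cancel invoked inside the goroutine once the work finishes. A self-contained sketch of that shape; the duration and the worker body are illustrative:

package main

import (
    "context"
    "fmt"
    "time"
)

const depositDeadline = 20 * time.Second // illustrative; the real value is package config

func main() {
    // Detach from the caller's context so finishing the block doesn't cancel
    // the deposit insertion, but still bound it with its own deadline.
    depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
    done := make(chan struct{})
    go func() {
        insertFinalizedDepositsAndPrune(depCtx)
        cancel() // release the timer as soon as the work completes
        close(done)
    }()
    <-done
}

func insertFinalizedDepositsAndPrune(ctx context.Context) {
    select {
    case <-ctx.Done():
        fmt.Println("deadline hit:", ctx.Err())
    case <-time.After(10 * time.Millisecond):
        fmt.Println("deposits inserted and pruned")
    }
}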
@@ -455,41 +455,81 @@ func Test_executePostFinalizationTasks(t *testing.T) {
|
||||
Root: headRoot[:],
|
||||
}))
|
||||
require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:]))
|
||||
t.Run("pre deposit request", func(t *testing.T) {
|
||||
require.NoError(t, headState.SetEth1DepositIndex(1))
|
||||
s, tr := minimalTestService(t, WithFinalizedStateAtStartUp(headState))
|
||||
ctx, beaconDB, stateGen := tr.ctx, tr.db, tr.sg
|
||||
|
||||
s, tr := minimalTestService(t, WithFinalizedStateAtStartUp(headState))
|
||||
ctx, beaconDB, stateGen := tr.ctx, tr.db, tr.sg
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
|
||||
util.SaveBlock(t, ctx, beaconDB, genesis)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
|
||||
util.SaveBlock(t, ctx, beaconDB, headBlock)
|
||||
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, ðpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
|
||||
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
|
||||
util.SaveBlock(t, ctx, beaconDB, genesis)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
|
||||
util.SaveBlock(t, ctx, beaconDB, headBlock)
|
||||
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, ðpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, stateGen.SaveState(ctx, headRoot, headState))
|
||||
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, ðpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, stateGen.SaveState(ctx, headRoot, headState))
|
||||
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, ðpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
|
||||
notifier := &blockchainTesting.MockStateNotifier{RecordEvents: true}
|
||||
s.cfg.StateNotifier = notifier
|
||||
s.executePostFinalizationTasks(s.ctx, headState)
|
||||
|
||||
notifier := &blockchainTesting.MockStateNotifier{RecordEvents: true}
|
||||
s.cfg.StateNotifier = notifier
|
||||
s.executePostFinalizationTasks(s.ctx, headState)
|
||||
time.Sleep(1 * time.Second) // sleep for a second because event is in a separate go routine
|
||||
require.Equal(t, 1, len(notifier.ReceivedEvents()))
|
||||
e := notifier.ReceivedEvents()[0]
|
||||
assert.Equal(t, statefeed.FinalizedCheckpoint, int(e.Type))
|
||||
fc, ok := e.Data.(*ethpbv1.EventFinalizedCheckpoint)
|
||||
require.Equal(t, true, ok, "event has wrong data type")
|
||||
assert.Equal(t, primitives.Epoch(123), fc.Epoch)
|
||||
assert.DeepEqual(t, headRoot[:], fc.Block)
|
||||
assert.DeepEqual(t, finalizedStRoot[:], fc.State)
|
||||
assert.Equal(t, false, fc.ExecutionOptimistic)
|
||||
|
||||
time.Sleep(1 * time.Second) // sleep for a second because event is in a separate go routine
|
||||
require.Equal(t, 1, len(notifier.ReceivedEvents()))
|
||||
e := notifier.ReceivedEvents()[0]
|
||||
assert.Equal(t, statefeed.FinalizedCheckpoint, int(e.Type))
|
||||
fc, ok := e.Data.(*ethpbv1.EventFinalizedCheckpoint)
|
||||
require.Equal(t, true, ok, "event has wrong data type")
|
||||
assert.Equal(t, primitives.Epoch(123), fc.Epoch)
|
||||
assert.DeepEqual(t, headRoot[:], fc.Block)
|
||||
assert.DeepEqual(t, finalizedStRoot[:], fc.State)
|
||||
assert.Equal(t, false, fc.ExecutionOptimistic)
|
||||
// check the cache
|
||||
index, ok := headState.ValidatorIndexByPubkey(bytesutil.ToBytes48(key))
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, primitives.ValidatorIndex(0), index) // first index
|
||||
|
||||
// check the cache
|
||||
index, ok := headState.ValidatorIndexByPubkey(bytesutil.ToBytes48(key))
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, primitives.ValidatorIndex(0), index) // first index
|
||||
// check deposit
|
||||
require.LogsContain(t, logHook, "Finalized deposit insertion completed at index")
|
||||
})
    t.Run("deposit requests started", func(t *testing.T) {
        require.NoError(t, headState.SetEth1DepositIndex(1))
        require.NoError(t, headState.SetDepositRequestsStartIndex(1))
        s, tr := minimalTestService(t, WithFinalizedStateAtStartUp(headState))
        ctx, beaconDB, stateGen := tr.ctx, tr.db, tr.sg

        require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
        util.SaveBlock(t, ctx, beaconDB, genesis)
        require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
        require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
        util.SaveBlock(t, ctx, beaconDB, headBlock)
        require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))

        require.NoError(t, err)
        require.NoError(t, stateGen.SaveState(ctx, headRoot, headState))
        require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))

        notifier := &blockchainTesting.MockStateNotifier{RecordEvents: true}
        s.cfg.StateNotifier = notifier
        s.executePostFinalizationTasks(s.ctx, headState)

        time.Sleep(1 * time.Second) // sleep for a second because event is in a separate go routine
        require.Equal(t, 1, len(notifier.ReceivedEvents()))
        e := notifier.ReceivedEvents()[0]
        assert.Equal(t, statefeed.FinalizedCheckpoint, int(e.Type))
        fc, ok := e.Data.(*ethpbv1.EventFinalizedCheckpoint)
        require.Equal(t, true, ok, "event has wrong data type")
        assert.Equal(t, primitives.Epoch(123), fc.Epoch)
        assert.DeepEqual(t, headRoot[:], fc.Block)
        assert.DeepEqual(t, finalizedStRoot[:], fc.State)
        assert.Equal(t, false, fc.ExecutionOptimistic)

        // check the cache
        index, ok := headState.ValidatorIndexByPubkey(bytesutil.ToBytes48(key))
        require.Equal(t, true, ok)
        require.Equal(t, primitives.ValidatorIndex(0), index) // first index
    })

    // check deposit
    require.LogsContain(t, logHook, "Finalized deposit insertion completed at index")
}

@@ -5,6 +5,7 @@ go_library(
    srcs = [
        "deposit_fetcher.go",
        "deposit_inserter.go",
        "deposit_pruner.go",
        "deposit_tree.go",
        "deposit_tree_snapshot.go",
        "merkle_tree.go",
@@ -35,6 +36,7 @@ go_test(
    srcs = [
        "deposit_cache_test.go",
        "deposit_fetcher_test.go",
        "deposit_pruner_test.go",
        "deposit_tree_snapshot_test.go",
        "merkle_tree_test.go",
        "spec_test.go",

@@ -903,189 +903,6 @@ func TestMin(t *testing.T) {
}

func TestPruneProofs_Ok(t *testing.T) {
    dc, err := New()
    require.NoError(t, err)

    deposits := []struct {
        blkNum  uint64
        deposit *ethpb.Deposit
        index   int64
    }{
        {
            blkNum: 0,
            deposit: &ethpb.Deposit{Proof: makeDepositProof(),
                Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
            index: 0,
        },
        {
            blkNum: 0,
            deposit: &ethpb.Deposit{Proof: makeDepositProof(),
                Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}},
            index: 1,
        },
        {
            blkNum: 0,
            deposit: &ethpb.Deposit{Proof: makeDepositProof(),
                Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
            index: 2,
        },
        {
            blkNum: 0,
            deposit: &ethpb.Deposit{Proof: makeDepositProof(),
                Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
            index: 3,
        },
    }

    for _, ins := range deposits {
        assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
    }

    require.NoError(t, dc.PruneProofs(context.Background(), 1))

    assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof)
    assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof)
    assert.NotNil(t, dc.deposits[2].Deposit.Proof)
    assert.NotNil(t, dc.deposits[3].Deposit.Proof)
}

func TestPruneProofs_SomeAlreadyPruned(t *testing.T) {
    dc, err := New()
    require.NoError(t, err)

    deposits := []struct {
        blkNum  uint64
        deposit *ethpb.Deposit
        index   int64
    }{
        {
            blkNum: 0,
            deposit: &ethpb.Deposit{Proof: nil, Data: &ethpb.Deposit_Data{
                PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
            index: 0,
        },
        {
            blkNum: 0,
            deposit: &ethpb.Deposit{Proof: nil, Data: &ethpb.Deposit_Data{
                PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}}, index: 1,
        },
        {
            blkNum: 0,
            deposit: &ethpb.Deposit{Proof: makeDepositProof(), Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
            index: 2,
        },
        {
            blkNum: 0,
            deposit: &ethpb.Deposit{Proof: makeDepositProof(),
                Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
            index: 3,
        },
    }

    for _, ins := range deposits {
        assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
    }

    require.NoError(t, dc.PruneProofs(context.Background(), 2))

    assert.DeepEqual(t, [][]byte(nil), dc.deposits[2].Deposit.Proof)
}

func TestPruneProofs_PruneAllWhenDepositIndexTooBig(t *testing.T) {
    dc, err := New()
    require.NoError(t, err)

    deposits := []struct {
        blkNum  uint64
        deposit *ethpb.Deposit
        index   int64
    }{
        {
            blkNum: 0,
            deposit: &ethpb.Deposit{Proof: makeDepositProof(),
                Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
            index: 0,
        },
        {
            blkNum: 0,
            deposit: &ethpb.Deposit{Proof: makeDepositProof(),
                Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}},
            index: 1,
        },
        {
            blkNum: 0,
            deposit: &ethpb.Deposit{Proof: makeDepositProof(),
                Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
            index: 2,
        },
        {
            blkNum: 0,
            deposit: &ethpb.Deposit{Proof: makeDepositProof(),
                Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
            index: 3,
        },
    }

    for _, ins := range deposits {
        assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
    }

    require.NoError(t, dc.PruneProofs(context.Background(), 99))

    assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof)
    assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof)
    assert.DeepEqual(t, [][]byte(nil), dc.deposits[2].Deposit.Proof)
    assert.DeepEqual(t, [][]byte(nil), dc.deposits[3].Deposit.Proof)
}

func TestPruneProofs_CorrectlyHandleLastIndex(t *testing.T) {
    dc, err := New()
    require.NoError(t, err)

    deposits := []struct {
        blkNum  uint64
        deposit *ethpb.Deposit
        index   int64
    }{
        {
            blkNum: 0,
            deposit: &ethpb.Deposit{Proof: makeDepositProof(),
                Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
            index: 0,
        },
        {
            blkNum: 0,
            deposit: &ethpb.Deposit{Proof: makeDepositProof(),
                Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}},
            index: 1,
        },
        {
            blkNum: 0,
            deposit: &ethpb.Deposit{Proof: makeDepositProof(),
                Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
            index: 2,
        },
        {
            blkNum: 0,
            deposit: &ethpb.Deposit{Proof: makeDepositProof(),
                Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
            index: 3,
        },
    }

    for _, ins := range deposits {
        assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
    }

    require.NoError(t, dc.PruneProofs(context.Background(), 4))

    assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof)
    assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof)
    assert.DeepEqual(t, [][]byte(nil), dc.deposits[2].Deposit.Proof)
    assert.DeepEqual(t, [][]byte(nil), dc.deposits[3].Deposit.Proof)
}

func TestDepositMap_WorksCorrectly(t *testing.T) {
    dc, err := New()
    require.NoError(t, err)

@@ -178,52 +178,6 @@ func (c *Cache) NonFinalizedDeposits(ctx context.Context, lastFinalizedIndex int
    return deposits
}

// PruneProofs removes proofs from all deposits whose index is equal or less than untilDepositIndex.
func (c *Cache) PruneProofs(ctx context.Context, untilDepositIndex int64) error {
    _, span := trace.StartSpan(ctx, "Cache.PruneProofs")
    defer span.End()
    c.depositsLock.Lock()
    defer c.depositsLock.Unlock()

    if untilDepositIndex >= int64(len(c.deposits)) {
        untilDepositIndex = int64(len(c.deposits) - 1)
    }

    for i := untilDepositIndex; i >= 0; i-- {
        // Finding a nil proof means that all proofs up to this deposit have been already pruned.
        if c.deposits[i].Deposit.Proof == nil {
            break
        }
        c.deposits[i].Deposit.Proof = nil
    }

    return nil
}

// PrunePendingDeposits removes any deposit which is older than the given deposit merkle tree index.
func (c *Cache) PrunePendingDeposits(ctx context.Context, merkleTreeIndex int64) {
    _, span := trace.StartSpan(ctx, "Cache.PrunePendingDeposits")
    defer span.End()

    if merkleTreeIndex == 0 {
        log.Debug("Ignoring 0 deposit removal")
        return
    }

    c.depositsLock.Lock()
    defer c.depositsLock.Unlock()

    cleanDeposits := make([]*ethpb.DepositContainer, 0, len(c.pendingDeposits))
    for _, dp := range c.pendingDeposits {
        if dp.Index >= merkleTreeIndex {
            cleanDeposits = append(cleanDeposits, dp)
        }
    }

    c.pendingDeposits = cleanDeposits
    pendingDepositsCount.Set(float64(len(c.pendingDeposits)))
}

// InsertPendingDeposit into the database. If deposit or block number are nil
// then this method does nothing.
func (c *Cache) InsertPendingDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) {

@@ -44,67 +44,3 @@ func TestPendingDeposits_OK(t *testing.T) {
    all := dc.PendingDeposits(context.Background(), nil)
    assert.Equal(t, len(dc.pendingDeposits), len(all), "PendingDeposits(ctx, nil) did not return all deposits")
}

func TestPrunePendingDeposits_ZeroMerkleIndex(t *testing.T) {
    dc := Cache{}

    dc.pendingDeposits = []*ethpb.DepositContainer{
        {Eth1BlockHeight: 2, Index: 2},
        {Eth1BlockHeight: 4, Index: 4},
        {Eth1BlockHeight: 6, Index: 6},
        {Eth1BlockHeight: 8, Index: 8},
        {Eth1BlockHeight: 10, Index: 10},
        {Eth1BlockHeight: 12, Index: 12},
    }

    dc.PrunePendingDeposits(context.Background(), 0)
    expected := []*ethpb.DepositContainer{
        {Eth1BlockHeight: 2, Index: 2},
        {Eth1BlockHeight: 4, Index: 4},
        {Eth1BlockHeight: 6, Index: 6},
        {Eth1BlockHeight: 8, Index: 8},
        {Eth1BlockHeight: 10, Index: 10},
        {Eth1BlockHeight: 12, Index: 12},
    }
    assert.DeepEqual(t, expected, dc.pendingDeposits)
}

func TestPrunePendingDeposits_OK(t *testing.T) {
    dc := Cache{}

    dc.pendingDeposits = []*ethpb.DepositContainer{
        {Eth1BlockHeight: 2, Index: 2},
        {Eth1BlockHeight: 4, Index: 4},
        {Eth1BlockHeight: 6, Index: 6},
        {Eth1BlockHeight: 8, Index: 8},
        {Eth1BlockHeight: 10, Index: 10},
        {Eth1BlockHeight: 12, Index: 12},
    }

    dc.PrunePendingDeposits(context.Background(), 6)
    expected := []*ethpb.DepositContainer{
        {Eth1BlockHeight: 6, Index: 6},
        {Eth1BlockHeight: 8, Index: 8},
        {Eth1BlockHeight: 10, Index: 10},
        {Eth1BlockHeight: 12, Index: 12},
    }

    assert.DeepEqual(t, expected, dc.pendingDeposits)

    dc.pendingDeposits = []*ethpb.DepositContainer{
        {Eth1BlockHeight: 2, Index: 2},
        {Eth1BlockHeight: 4, Index: 4},
        {Eth1BlockHeight: 6, Index: 6},
        {Eth1BlockHeight: 8, Index: 8},
        {Eth1BlockHeight: 10, Index: 10},
        {Eth1BlockHeight: 12, Index: 12},
    }

    dc.PrunePendingDeposits(context.Background(), 10)
    expected = []*ethpb.DepositContainer{
        {Eth1BlockHeight: 10, Index: 10},
        {Eth1BlockHeight: 12, Index: 12},
    }

    assert.DeepEqual(t, expected, dc.pendingDeposits)
}
beacon-chain/cache/depositsnapshot/deposit_pruner.go (new file, 88 lines)
@@ -0,0 +1,88 @@
package depositsnapshot

import (
    "context"

    "github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
    ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

// PruneProofs removes proofs from all deposits whose index is equal or less than untilDepositIndex.
func (c *Cache) PruneProofs(ctx context.Context, untilDepositIndex int64) error {
    _, span := trace.StartSpan(ctx, "Cache.PruneProofs")
    defer span.End()
    c.depositsLock.Lock()
    defer c.depositsLock.Unlock()

    if untilDepositIndex >= int64(len(c.deposits)) {
        untilDepositIndex = int64(len(c.deposits) - 1)
    }

    for i := untilDepositIndex; i >= 0; i-- {
        // Finding a nil proof means that all proofs up to this deposit have been already pruned.
        if c.deposits[i].Deposit.Proof == nil {
            break
        }
        c.deposits[i].Deposit.Proof = nil
    }

    return nil
}
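
// A worked example of the early-exit scan above (an illustration, not part of
// this diff): assume four cached deposits whose proofs were previously pruned
// up to index 1, i.e. proofs = [nil, nil, p2, p3]. PruneProofs(ctx, 3) visits
// i = 3 and i = 2 and nils both proofs, then sees the nil proof at i = 1 and
// breaks, because everything at or below an already-pruned index must itself
// already be pruned. Repeat calls therefore never rescan the full history.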

// PruneAllProofs removes proofs from all deposits.
// As EIP-6110 applies and the legacy deposit mechanism is deprecated,
// proofs in deposit snapshot are no longer needed.
// See: https://eips.ethereum.org/EIPS/eip-6110#eth1data-poll-deprecation
func (c *Cache) PruneAllProofs(ctx context.Context) {
    _, span := trace.StartSpan(ctx, "Cache.PruneAllProofs")
    defer span.End()

    c.depositsLock.Lock()
    defer c.depositsLock.Unlock()

    for i := len(c.deposits) - 1; i >= 0; i-- {
        if c.deposits[i].Deposit.Proof == nil {
            break
        }
        c.deposits[i].Deposit.Proof = nil
    }
}

// PrunePendingDeposits removes any deposit which is older than the given deposit merkle tree index.
func (c *Cache) PrunePendingDeposits(ctx context.Context, merkleTreeIndex int64) {
    _, span := trace.StartSpan(ctx, "Cache.PrunePendingDeposits")
    defer span.End()

    if merkleTreeIndex == 0 {
        log.Debug("Ignoring 0 deposit removal")
        return
    }

    c.depositsLock.Lock()
    defer c.depositsLock.Unlock()

    cleanDeposits := make([]*ethpb.DepositContainer, 0, len(c.pendingDeposits))
    for _, dp := range c.pendingDeposits {
        if dp.Index >= merkleTreeIndex {
            cleanDeposits = append(cleanDeposits, dp)
        }
    }

    c.pendingDeposits = cleanDeposits
    pendingDepositsCount.Set(float64(len(c.pendingDeposits)))
}

// PruneAllPendingDeposits removes all pending deposits from the cache.
// As EIP-6110 applies and the legacy deposit mechanism is deprecated,
// pending deposits in deposit snapshot are no longer needed.
// See: https://eips.ethereum.org/EIPS/eip-6110#eth1data-poll-deprecation
func (c *Cache) PruneAllPendingDeposits(ctx context.Context) {
    _, span := trace.StartSpan(ctx, "Cache.PruneAllPendingDeposits")
    defer span.End()

    c.depositsLock.Lock()
    defer c.depositsLock.Unlock()

    c.pendingDeposits = make([]*ethpb.DepositContainer, 0)
    pendingDepositsCount.Set(float64(0))
}
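
A minimal caller-side sketch of the two prune-all methods above, assuming only the exported depositsnapshot API used in the tests below (`New`, `PruneAllProofs`, `PruneAllPendingDeposits`); the startup wiring and the moment the node decides EIP-6110 deposit requests are active are hypothetical and not part of this change.

package main

import (
    "context"

    "github.com/prysmaticlabs/prysm/v5/beacon-chain/cache/depositsnapshot"
)

func main() {
    ctx := context.Background()
    dc, err := depositsnapshot.New()
    if err != nil {
        panic(err)
    }
    // Once the legacy eth1 deposit mechanism is deprecated, proofs and
    // pending deposits in the snapshot cache are dead weight; drop them all.
    dc.PruneAllProofs(ctx)
    dc.PruneAllPendingDeposits(ctx)
}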

beacon-chain/cache/depositsnapshot/deposit_pruner_test.go (new file, 323 lines)
@@ -0,0 +1,323 @@
package depositsnapshot

import (
    "context"
    "testing"

    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
    ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v5/testing/assert"
    "github.com/prysmaticlabs/prysm/v5/testing/require"
)

func TestPrunePendingDeposits_ZeroMerkleIndex(t *testing.T) {
    dc := Cache{}

    dc.pendingDeposits = []*ethpb.DepositContainer{
        {Eth1BlockHeight: 2, Index: 2},
        {Eth1BlockHeight: 4, Index: 4},
        {Eth1BlockHeight: 6, Index: 6},
        {Eth1BlockHeight: 8, Index: 8},
        {Eth1BlockHeight: 10, Index: 10},
        {Eth1BlockHeight: 12, Index: 12},
    }

    dc.PrunePendingDeposits(context.Background(), 0)
    expected := []*ethpb.DepositContainer{
        {Eth1BlockHeight: 2, Index: 2},
        {Eth1BlockHeight: 4, Index: 4},
        {Eth1BlockHeight: 6, Index: 6},
        {Eth1BlockHeight: 8, Index: 8},
        {Eth1BlockHeight: 10, Index: 10},
        {Eth1BlockHeight: 12, Index: 12},
    }
    assert.DeepEqual(t, expected, dc.pendingDeposits)
}

func TestPrunePendingDeposits_OK(t *testing.T) {
    dc := Cache{}

    dc.pendingDeposits = []*ethpb.DepositContainer{
        {Eth1BlockHeight: 2, Index: 2},
        {Eth1BlockHeight: 4, Index: 4},
        {Eth1BlockHeight: 6, Index: 6},
        {Eth1BlockHeight: 8, Index: 8},
        {Eth1BlockHeight: 10, Index: 10},
        {Eth1BlockHeight: 12, Index: 12},
    }

    dc.PrunePendingDeposits(context.Background(), 6)
    expected := []*ethpb.DepositContainer{
        {Eth1BlockHeight: 6, Index: 6},
        {Eth1BlockHeight: 8, Index: 8},
        {Eth1BlockHeight: 10, Index: 10},
        {Eth1BlockHeight: 12, Index: 12},
    }

    assert.DeepEqual(t, expected, dc.pendingDeposits)

    dc.pendingDeposits = []*ethpb.DepositContainer{
        {Eth1BlockHeight: 2, Index: 2},
        {Eth1BlockHeight: 4, Index: 4},
        {Eth1BlockHeight: 6, Index: 6},
        {Eth1BlockHeight: 8, Index: 8},
        {Eth1BlockHeight: 10, Index: 10},
        {Eth1BlockHeight: 12, Index: 12},
    }

    dc.PrunePendingDeposits(context.Background(), 10)
    expected = []*ethpb.DepositContainer{
        {Eth1BlockHeight: 10, Index: 10},
        {Eth1BlockHeight: 12, Index: 12},
    }

    assert.DeepEqual(t, expected, dc.pendingDeposits)
}

func TestPruneAllPendingDeposits(t *testing.T) {
    dc := Cache{}

    dc.pendingDeposits = []*ethpb.DepositContainer{
        {Eth1BlockHeight: 2, Index: 2},
        {Eth1BlockHeight: 4, Index: 4},
        {Eth1BlockHeight: 6, Index: 6},
        {Eth1BlockHeight: 8, Index: 8},
        {Eth1BlockHeight: 10, Index: 10},
        {Eth1BlockHeight: 12, Index: 12},
    }

    dc.PruneAllPendingDeposits(context.Background())
    expected := []*ethpb.DepositContainer{}

    assert.DeepEqual(t, expected, dc.pendingDeposits)
}

func TestPruneProofs_Ok(t *testing.T) {
    dc, err := New()
    require.NoError(t, err)

    deposits := []struct {
        blkNum  uint64
        deposit *ethpb.Deposit
        index   int64
    }{
        {
            blkNum: 0,
            deposit: &ethpb.Deposit{Proof: makeDepositProof(),
                Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
            index: 0,
        },
        {
            blkNum: 0,
            deposit: &ethpb.Deposit{Proof: makeDepositProof(),
                Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}},
            index: 1,
        },
        {
            blkNum: 0,
            deposit: &ethpb.Deposit{Proof: makeDepositProof(),
                Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
            index: 2,
        },
        {
            blkNum: 0,
            deposit: &ethpb.Deposit{Proof: makeDepositProof(),
                Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
            index: 3,
        },
    }

    for _, ins := range deposits {
        assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
    }

    require.NoError(t, dc.PruneProofs(context.Background(), 1))

    assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof)
    assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof)
    assert.NotNil(t, dc.deposits[2].Deposit.Proof)
    assert.NotNil(t, dc.deposits[3].Deposit.Proof)
}

func TestPruneProofs_SomeAlreadyPruned(t *testing.T) {
    dc, err := New()
    require.NoError(t, err)

    deposits := []struct {
        blkNum  uint64
        deposit *ethpb.Deposit
        index   int64
    }{
        {
            blkNum: 0,
            deposit: &ethpb.Deposit{Proof: nil, Data: &ethpb.Deposit_Data{
                PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
            index: 0,
        },
        {
            blkNum: 0,
            deposit: &ethpb.Deposit{Proof: nil, Data: &ethpb.Deposit_Data{
                PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}}, index: 1,
        },
        {
            blkNum: 0,
            deposit: &ethpb.Deposit{Proof: makeDepositProof(), Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
            index: 2,
        },
        {
            blkNum: 0,
            deposit: &ethpb.Deposit{Proof: makeDepositProof(),
                Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
            index: 3,
        },
    }

    for _, ins := range deposits {
        assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
    }

    require.NoError(t, dc.PruneProofs(context.Background(), 2))

    assert.DeepEqual(t, [][]byte(nil), dc.deposits[2].Deposit.Proof)
}

func TestPruneProofs_PruneAllWhenDepositIndexTooBig(t *testing.T) {
    dc, err := New()
    require.NoError(t, err)

    deposits := []struct {
        blkNum  uint64
        deposit *ethpb.Deposit
        index   int64
    }{
        {
            blkNum: 0,
            deposit: &ethpb.Deposit{Proof: makeDepositProof(),
                Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
            index: 0,
        },
        {
            blkNum: 0,
            deposit: &ethpb.Deposit{Proof: makeDepositProof(),
                Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}},
            index: 1,
        },
        {
            blkNum: 0,
            deposit: &ethpb.Deposit{Proof: makeDepositProof(),
                Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
            index: 2,
        },
        {
            blkNum: 0,
            deposit: &ethpb.Deposit{Proof: makeDepositProof(),
                Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
            index: 3,
        },
    }

    for _, ins := range deposits {
        assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
    }

    require.NoError(t, dc.PruneProofs(context.Background(), 99))

    assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof)
    assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof)
    assert.DeepEqual(t, [][]byte(nil), dc.deposits[2].Deposit.Proof)
    assert.DeepEqual(t, [][]byte(nil), dc.deposits[3].Deposit.Proof)
}

func TestPruneProofs_CorrectlyHandleLastIndex(t *testing.T) {
    dc, err := New()
    require.NoError(t, err)

    deposits := []struct {
        blkNum  uint64
        deposit *ethpb.Deposit
        index   int64
    }{
        {
            blkNum: 0,
            deposit: &ethpb.Deposit{Proof: makeDepositProof(),
                Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
            index: 0,
        },
        {
            blkNum: 0,
            deposit: &ethpb.Deposit{Proof: makeDepositProof(),
                Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}},
            index: 1,
        },
        {
            blkNum: 0,
            deposit: &ethpb.Deposit{Proof: makeDepositProof(),
                Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
            index: 2,
        },
        {
            blkNum: 0,
            deposit: &ethpb.Deposit{Proof: makeDepositProof(),
                Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
            index: 3,
        },
    }

    for _, ins := range deposits {
        assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
    }

    require.NoError(t, dc.PruneProofs(context.Background(), 4))

    assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof)
    assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof)
    assert.DeepEqual(t, [][]byte(nil), dc.deposits[2].Deposit.Proof)
    assert.DeepEqual(t, [][]byte(nil), dc.deposits[3].Deposit.Proof)
}

func TestPruneAllProofs(t *testing.T) {
    dc, err := New()
    require.NoError(t, err)

    deposits := []struct {
        blkNum  uint64
        deposit *ethpb.Deposit
        index   int64
    }{
        {
            blkNum: 0,
            deposit: &ethpb.Deposit{Proof: makeDepositProof(),
                Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
            index: 0,
        },
        {
            blkNum: 0,
            deposit: &ethpb.Deposit{Proof: makeDepositProof(),
                Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}},
            index: 1,
        },
        {
            blkNum: 0,
            deposit: &ethpb.Deposit{Proof: makeDepositProof(),
                Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
            index: 2,
        },
        {
            blkNum: 0,
            deposit: &ethpb.Deposit{Proof: makeDepositProof(),
                Data: &ethpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
            index: 3,
        },
    }

    for _, ins := range deposits {
        assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
    }

    dc.PruneAllProofs(context.Background())

    assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof)
    assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof)
    assert.DeepEqual(t, [][]byte(nil), dc.deposits[2].Deposit.Proof)
    assert.DeepEqual(t, [][]byte(nil), dc.deposits[3].Deposit.Proof)
}

beacon-chain/cache/interfaces.go (11 lines changed)
@@ -12,6 +12,7 @@ import (
type DepositCache interface {
    DepositFetcher
    DepositInserter
    DepositPruner
}

// DepositFetcher defines a struct which can retrieve deposit information from a store.
@@ -23,8 +24,6 @@ type DepositFetcher interface {
    InsertPendingDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte)
    PendingDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit
    PendingContainers(ctx context.Context, untilBlk *big.Int) []*ethpb.DepositContainer
    PrunePendingDeposits(ctx context.Context, merkleTreeIndex int64)
    PruneProofs(ctx context.Context, untilDepositIndex int64) error
    FinalizedFetcher
}

@@ -42,6 +41,14 @@ type FinalizedFetcher interface {
    NonFinalizedDeposits(ctx context.Context, lastFinalizedIndex int64, untilBlk *big.Int) []*ethpb.Deposit
}

// DepositPruner is an interface for pruning deposits and proofs.
type DepositPruner interface {
    PrunePendingDeposits(ctx context.Context, merkleTreeIndex int64)
    PruneAllPendingDeposits(ctx context.Context)
    PruneProofs(ctx context.Context, untilDepositIndex int64) error
    PruneAllProofs(ctx context.Context)
}
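
A sketch of consuming the new interface; the helper below is hypothetical and not part of this diff, but it shows why the pruning methods were factored out of DepositFetcher: a component that only prunes can now depend on the narrow DepositPruner instead of the full DepositCache.

// pruneLegacyDepositData is a hypothetical helper accepting the narrow
// DepositPruner interface; any DepositCache implementation satisfies it.
func pruneLegacyDepositData(ctx context.Context, p DepositPruner, finalizedIndex int64) error {
    // Proofs at or below the finalized deposit index are no longer needed.
    if err := p.PruneProofs(ctx, finalizedIndex); err != nil {
        return err
    }
    p.PrunePendingDeposits(ctx, finalizedIndex)
    return nil
}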

// FinalizedDeposits defines a method to access a merkle tree containing deposits and their indexes.
type FinalizedDeposits interface {
    Deposits() MerkleTree

@@ -177,9 +177,9 @@ func TestComputeConsolidationEpochAndUpdateChurn(t *testing.T) {
                require.NoError(t, err)
                return s
            }(t),
            consolidationBalance: helpers.ConsolidationChurnLimit(32000000000000000)+1,
            consolidationBalance: helpers.ConsolidationChurnLimit(32000000000000000) + 1,
            expectedEpoch: 18, // Flows into another epoch.
            expectedConsolidationBalanceToConsume: helpers.ConsolidationChurnLimit(32000000000000000)-1,
            expectedConsolidationBalanceToConsume: helpers.ConsolidationChurnLimit(32000000000000000) - 1,
        },
    }

@@ -32,6 +32,9 @@ const (

    // AttesterSlashingReceived is sent after an attester slashing is received from gossip or rpc
    AttesterSlashingReceived = 8

    // SingleAttReceived is sent after a single attestation object is received from gossip or rpc
    SingleAttReceived = 9
)

// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
@@ -43,7 +46,7 @@ type UnAggregatedAttReceivedData struct {
// AggregatedAttReceivedData is the data sent with AggregatedAttReceived events.
type AggregatedAttReceivedData struct {
    // Attestation is the aggregated attestation object.
    Attestation *ethpb.AggregateAttestationAndProof
    Attestation ethpb.AggregateAttAndProof
}

// ExitReceivedData is the data sent with ExitReceived events.
@@ -77,3 +80,8 @@ type ProposerSlashingReceivedData struct {
type AttesterSlashingReceivedData struct {
    AttesterSlashing ethpb.AttSlashing
}

// SingleAttReceivedData is the data sent with SingleAttReceived events.
type SingleAttReceivedData struct {
    Attestation ethpb.Att
}
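
A consumer-side sketch of the new event type; the `feed.Event` wrapper and the handler wiring are assumptions and not part of this diff, while the constant and data struct are the ones defined above. The tests elsewhere in this change unpack events the same way (`e.Type`, `e.Data`).

func handleStateEvent(e *feed.Event) {
    if int(e.Type) != SingleAttReceived {
        return
    }
    data, ok := e.Data.(*SingleAttReceivedData)
    if !ok {
        return
    }
    // data.Attestation is the version-agnostic ethpb.Att interface, so one
    // handler covers both pre-Electra and Electra single attestations.
    _ = data.Attestation.GetData()
}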

@@ -22,6 +22,15 @@ import (
)

func TestLightClient_NewLightClientOptimisticUpdateFromBeaconState(t *testing.T) {
    params.SetupTestConfigCleanup(t)
    cfg := params.BeaconConfig()
    cfg.AltairForkEpoch = 1
    cfg.BellatrixForkEpoch = 2
    cfg.CapellaForkEpoch = 3
    cfg.DenebForkEpoch = 4
    cfg.ElectraForkEpoch = 5
    params.OverrideBeaconConfig(cfg)

    t.Run("Altair", func(t *testing.T) {
        l := util.NewTestLightClient(t).SetupTestAltair()

@@ -59,9 +68,31 @@ func TestLightClient_NewLightClientOptimisticUpdateFromBeaconState(t *testing.T)
        l.CheckSyncAggregate(update.SyncAggregate())
        l.CheckAttestedHeader(update.AttestedHeader())
    })

    t.Run("Electra", func(t *testing.T) {
        l := util.NewTestLightClient(t).SetupTestElectra(false)

        update, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock)
        require.NoError(t, err)
        require.NotNil(t, update, "update is nil")

        require.Equal(t, l.Block.Block().Slot(), update.SignatureSlot(), "Signature slot is not equal")

        l.CheckSyncAggregate(update.SyncAggregate())
        l.CheckAttestedHeader(update.AttestedHeader())
    })
}

func TestLightClient_NewLightClientFinalityUpdateFromBeaconState(t *testing.T) {
    params.SetupTestConfigCleanup(t)
    cfg := params.BeaconConfig()
    cfg.AltairForkEpoch = 1
    cfg.BellatrixForkEpoch = 2
    cfg.CapellaForkEpoch = 3
    cfg.DenebForkEpoch = 4
    cfg.ElectraForkEpoch = 5
    params.OverrideBeaconConfig(cfg)

    t.Run("Altair", func(t *testing.T) {
        l := util.NewTestLightClient(t).SetupTestAltair()

@@ -356,6 +387,157 @@ func TestLightClient_NewLightClientFinalityUpdateFromBeaconState(t *testing.T) {
            require.DeepSSZEqual(t, execution, updateExecution.Proto(), "Finalized Block Execution is not equal")
        })
    })

    t.Run("Electra", func(t *testing.T) {
        t.Run("FinalizedBlock Not Nil", func(t *testing.T) {
            l := util.NewTestLightClient(t).SetupTestElectra(false)

            update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
            require.NoError(t, err)
            require.NotNil(t, update, "update is nil")

            require.Equal(t, l.Block.Block().Slot(), update.SignatureSlot(), "Signature slot is not equal")

            l.CheckSyncAggregate(update.SyncAggregate())
            l.CheckAttestedHeader(update.AttestedHeader())

            //zeroHash := params.BeaconConfig().ZeroHash[:]
            finalizedBlockHeader, err := l.FinalizedBlock.Header()
            require.NoError(t, err)
            require.NotNil(t, update.FinalizedHeader(), "Finalized header is nil")
            updateFinalizedHeaderBeacon := update.FinalizedHeader().Beacon()
            require.Equal(t, finalizedBlockHeader.Header.Slot, updateFinalizedHeaderBeacon.Slot, "Finalized header slot is not equal")
            require.Equal(t, finalizedBlockHeader.Header.ProposerIndex, updateFinalizedHeaderBeacon.ProposerIndex, "Finalized header proposer index is not equal")
            require.DeepSSZEqual(t, finalizedBlockHeader.Header.ParentRoot, updateFinalizedHeaderBeacon.ParentRoot, "Finalized header parent root is not equal")
            require.DeepSSZEqual(t, finalizedBlockHeader.Header.StateRoot, updateFinalizedHeaderBeacon.StateRoot, "Finalized header state root is not equal")
            require.DeepSSZEqual(t, finalizedBlockHeader.Header.BodyRoot, updateFinalizedHeaderBeacon.BodyRoot, "Finalized header body root is not equal")
            fb, err := update.FinalityBranchElectra()
            require.NoError(t, err)
            proof, err := l.AttestedState.FinalizedRootProof(l.Ctx)
            require.NoError(t, err)
            for i, leaf := range fb {
                require.DeepSSZEqual(t, proof[i], leaf[:], "Leaf is not equal")
            }

            // Check Execution BlockHash
            payloadInterface, err := l.FinalizedBlock.Block().Body().Execution()
            require.NoError(t, err)
            transactionsRoot, err := payloadInterface.TransactionsRoot()
            if errors.Is(err, consensustypes.ErrUnsupportedField) {
                transactions, err := payloadInterface.Transactions()
                require.NoError(t, err)
                transactionsRootArray, err := ssz.TransactionsRoot(transactions)
                require.NoError(t, err)
                transactionsRoot = transactionsRootArray[:]
            } else {
                require.NoError(t, err)
            }
            withdrawalsRoot, err := payloadInterface.WithdrawalsRoot()
            if errors.Is(err, consensustypes.ErrUnsupportedField) {
                withdrawals, err := payloadInterface.Withdrawals()
                require.NoError(t, err)
                withdrawalsRootArray, err := ssz.WithdrawalSliceRoot(withdrawals, fieldparams.MaxWithdrawalsPerPayload)
                require.NoError(t, err)
                withdrawalsRoot = withdrawalsRootArray[:]
            } else {
                require.NoError(t, err)
            }
            execution := &v11.ExecutionPayloadHeaderDeneb{
                ParentHash:       payloadInterface.ParentHash(),
                FeeRecipient:     payloadInterface.FeeRecipient(),
                StateRoot:        payloadInterface.StateRoot(),
                ReceiptsRoot:     payloadInterface.ReceiptsRoot(),
                LogsBloom:        payloadInterface.LogsBloom(),
                PrevRandao:       payloadInterface.PrevRandao(),
                BlockNumber:      payloadInterface.BlockNumber(),
                GasLimit:         payloadInterface.GasLimit(),
                GasUsed:          payloadInterface.GasUsed(),
                Timestamp:        payloadInterface.Timestamp(),
                ExtraData:        payloadInterface.ExtraData(),
                BaseFeePerGas:    payloadInterface.BaseFeePerGas(),
                BlockHash:        payloadInterface.BlockHash(),
                TransactionsRoot: transactionsRoot,
                WithdrawalsRoot:  withdrawalsRoot,
            }
            updateExecution, err := update.FinalizedHeader().Execution()
            require.NoError(t, err)
            require.DeepSSZEqual(t, execution, updateExecution.Proto(), "Finalized Block Execution is not equal")
        })

        t.Run("FinalizedBlock In Previous Fork", func(t *testing.T) {
            l := util.NewTestLightClient(t).SetupTestElectraFinalizedBlockDeneb(false)

            update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State.Slot(), l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
            require.NoError(t, err)
            require.NotNil(t, update, "update is nil")

            require.Equal(t, l.Block.Block().Slot(), update.SignatureSlot(), "Signature slot is not equal")

            l.CheckSyncAggregate(update.SyncAggregate())
            l.CheckAttestedHeader(update.AttestedHeader())

            finalizedBlockHeader, err := l.FinalizedBlock.Header()
            require.NoError(t, err)
            require.NotNil(t, update.FinalizedHeader(), "Finalized header is nil")
            updateFinalizedHeaderBeacon := update.FinalizedHeader().Beacon()
            require.Equal(t, finalizedBlockHeader.Header.Slot, updateFinalizedHeaderBeacon.Slot, "Finalized header slot is not equal")
            require.Equal(t, finalizedBlockHeader.Header.ProposerIndex, updateFinalizedHeaderBeacon.ProposerIndex, "Finalized header proposer index is not equal")
            require.DeepSSZEqual(t, finalizedBlockHeader.Header.ParentRoot, updateFinalizedHeaderBeacon.ParentRoot, "Finalized header parent root is not equal")
            require.DeepSSZEqual(t, finalizedBlockHeader.Header.StateRoot, updateFinalizedHeaderBeacon.StateRoot, "Finalized header state root is not equal")
            require.DeepSSZEqual(t, finalizedBlockHeader.Header.BodyRoot, updateFinalizedHeaderBeacon.BodyRoot, "Finalized header body root is not equal")
            fb, err := update.FinalityBranchElectra()
            require.NoError(t, err)
            proof, err := l.AttestedState.FinalizedRootProof(l.Ctx)
            require.NoError(t, err)
            for i, leaf := range fb {
                require.DeepSSZEqual(t, proof[i], leaf[:], "Leaf is not equal")
            }

            // Check Execution BlockHash
            payloadInterface, err := l.FinalizedBlock.Block().Body().Execution()
            require.NoError(t, err)
            transactionsRoot, err := payloadInterface.TransactionsRoot()
            if errors.Is(err, consensustypes.ErrUnsupportedField) {
                transactions, err := payloadInterface.Transactions()
                require.NoError(t, err)
                transactionsRootArray, err := ssz.TransactionsRoot(transactions)
                require.NoError(t, err)
                transactionsRoot = transactionsRootArray[:]
            } else {
                require.NoError(t, err)
            }
            withdrawalsRoot, err := payloadInterface.WithdrawalsRoot()
            if errors.Is(err, consensustypes.ErrUnsupportedField) {
                withdrawals, err := payloadInterface.Withdrawals()
                require.NoError(t, err)
                withdrawalsRootArray, err := ssz.WithdrawalSliceRoot(withdrawals, fieldparams.MaxWithdrawalsPerPayload)
                require.NoError(t, err)
                withdrawalsRoot = withdrawalsRootArray[:]
            } else {
                require.NoError(t, err)
            }
            execution := &v11.ExecutionPayloadHeaderDeneb{
                ParentHash:       payloadInterface.ParentHash(),
                FeeRecipient:     payloadInterface.FeeRecipient(),
                StateRoot:        payloadInterface.StateRoot(),
                ReceiptsRoot:     payloadInterface.ReceiptsRoot(),
                LogsBloom:        payloadInterface.LogsBloom(),
                PrevRandao:       payloadInterface.PrevRandao(),
                BlockNumber:      payloadInterface.BlockNumber(),
                GasLimit:         payloadInterface.GasLimit(),
                GasUsed:          payloadInterface.GasUsed(),
                Timestamp:        payloadInterface.Timestamp(),
                ExtraData:        payloadInterface.ExtraData(),
                BaseFeePerGas:    payloadInterface.BaseFeePerGas(),
                BlockHash:        payloadInterface.BlockHash(),
                TransactionsRoot: transactionsRoot,
                WithdrawalsRoot:  withdrawalsRoot,
            }
            updateExecution, err := update.FinalizedHeader().Execution()
            require.NoError(t, err)
            require.DeepSSZEqual(t, execution, updateExecution.Proto(), "Finalized Block Execution is not equal")
        })
    })
}

func TestLightClient_BlockToLightClientHeader(t *testing.T) {
@@ -94,14 +94,7 @@ func (s *LazilyPersistentStore) IsDataAvailable(ctx context.Context, current pri
    entry := s.cache.ensure(key)
    defer s.cache.delete(key)
    root := b.Root()
    sumz, err := s.store.WaitForSummarizer(ctx)
    if err != nil {
        log.WithField("root", fmt.Sprintf("%#x", b.Root())).
            WithError(err).
            Debug("Failed to receive BlobStorageSummarizer within IsDataAvailable")
    } else {
        entry.setDiskSummary(sumz.Summary(root))
    }
    entry.setDiskSummary(s.store.Summary(root))

    // Verify we have all the expected sidecars, and fail fast if any are missing or inconsistent.
    // We don't try to salvage problematic batches because this indicates a misbehaving peer and we'd rather

@@ -5,6 +5,10 @@ go_library(
    srcs = [
        "blob.go",
        "cache.go",
        "iteration.go",
        "layout.go",
        "layout_by_epoch.go",
        "layout_flat.go",
        "log.go",
        "metrics.go",
        "mock.go",
@@ -13,6 +17,7 @@ go_library(
    importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem",
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/verification:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
@@ -20,7 +25,6 @@ go_library(
        "//consensus-types/primitives:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//io/file:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/logging:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
@@ -37,10 +41,14 @@ go_test(
    srcs = [
        "blob_test.go",
        "cache_test.go",
        "iteration_test.go",
        "layout_test.go",
        "migration_test.go",
        "pruner_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/verification:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/primitives:go_default_library",
@@ -48,6 +56,7 @@ go_test(
        "//proto/prysm/v1alpha1:go_default_library",
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_prysmaticlabs_fastssz//:go_default_library",
        "@com_github_spf13_afero//:go_default_library",
    ],

@@ -1,42 +1,31 @@
package filesystem

import (
    "context"
    "fmt"
    "math"
    "os"
    "path"
    "strconv"
    "strings"
    "time"

    "github.com/ethereum/go-ethereum/common/hexutil"
    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
    "github.com/prysmaticlabs/prysm/v5/io/file"
    ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v5/runtime/logging"
    "github.com/sirupsen/logrus"
    "github.com/spf13/afero"
)

func directoryPermissions() os.FileMode {
    return params.BeaconIoConfig().ReadWriteExecutePermissions
}

var (
    errIndexOutOfBounds = errors.New("blob index in file name >= DeprecatedMaxBlobsPerBlock")
    errEmptyBlobWritten = errors.New("zero bytes written to disk when saving blob sidecar")
    errIndexOutOfBounds = errors.New("blob index in file name >= MAX_BLOBS_PER_BLOCK")
    errSidecarEmptySSZData = errors.New("sidecar marshalled to an empty ssz byte slice")
    errNoBasePath = errors.New("BlobStorage base path not specified in init")
    errInvalidRootString = errors.New("Could not parse hex string as a [32]byte")
)

const (
    sszExt = "ssz"
    partExt = "part"

    directoryPermissions = 0700
)

// BlobStorageOption is a functional option for configuring a BlobStorage.
@@ -66,6 +55,23 @@ func WithSaveFsync(fsync bool) BlobStorageOption {
    }
}

// WithFs allows the afero.Fs implementation to be customized. Used by tests
// to substitute an in-memory filesystem.
func WithFs(fs afero.Fs) BlobStorageOption {
    return func(b *BlobStorage) error {
        b.fs = fs
        return nil
    }
}

// WithLayout enables the user to specify which layout scheme to use, dictating how blob files are stored on disk.
func WithLayout(name string) BlobStorageOption {
    return func(b *BlobStorage) error {
        b.layoutName = name
        return nil
    }
}

// NewBlobStorage creates a new instance of the BlobStorage object. Note that the implementation of BlobStorage may
// attempt to hold a file lock to guarantee exclusive control of the blob storage directory, so this should only be
// initialized once per beacon node.
@@ -76,19 +82,27 @@ func NewBlobStorage(opts ...BlobStorageOption) (*BlobStorage, error) {
            return nil, errors.Wrap(err, "failed to create blob storage")
        }
    }
    if b.base == "" {
        return nil, errNoBasePath
    // Allow tests to set up a different fs using WithFs.
    if b.fs == nil {
        if b.base == "" {
            return nil, errNoBasePath
        }
        b.base = path.Clean(b.base)
        if err := file.MkdirAll(b.base); err != nil {
            return nil, errors.Wrapf(err, "failed to create blob storage at %s", b.base)
        }
        b.fs = afero.NewBasePathFs(afero.NewOsFs(), b.base)
    }
    b.base = path.Clean(b.base)
    if err := file.MkdirAll(b.base); err != nil {
        return nil, errors.Wrapf(err, "failed to create blob storage at %s", b.base)
    b.cache = newBlobStorageCache()
    pruner := newBlobPruner(b.retentionEpochs)
    if b.layoutName == "" {
        b.layoutName = LayoutNameFlat
    }
    b.fs = afero.NewBasePathFs(afero.NewOsFs(), b.base)
    pruner, err := newBlobPruner(b.fs, b.retentionEpochs)
    layout, err := newLayout(b.layoutName, b.fs, b.cache, pruner)
    if err != nil {
        return nil, err
    }
    b.pruner = pruner
    b.layout = layout
    return b, nil
}
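
A construction sketch combining the options above; the helper is hypothetical, but `NewBlobStorage`, `WithFs`, `WithLayout`, `LayoutNameFlat`, and `WarmCache` all appear in this diff. Supplying an in-memory afero filesystem skips the base-path/MkdirAll branch, which is the shape the ephemeral test storage relies on.

// newInMemoryBlobStorage is a hypothetical test helper: a BlobStorage backed
// by an in-memory afero filesystem using the flat layout.
func newInMemoryBlobStorage() (*BlobStorage, error) {
    bs, err := NewBlobStorage(
        WithFs(afero.NewMemMapFs()), // bypasses the on-disk base path setup
        WithLayout(LayoutNameFlat),
    )
    if err != nil {
        return nil, err
    }
    bs.WarmCache() // populate the summary cache and run any layout migration
    return bs, nil
}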

@@ -96,47 +110,103 @@ func NewBlobStorage(opts ...BlobStorageOption) (*BlobStorage, error) {
type BlobStorage struct {
    base string
    retentionEpochs primitives.Epoch
    layoutName string
    fsync bool
    fs afero.Fs
    pruner *blobPruner
    layout fsLayout
    cache *blobStorageSummaryCache
}

// WarmCache runs the prune routine with an expiration of slot of 0, so nothing will be pruned, but the pruner's cache
// will be populated at node startup, avoiding a costly cold prune (~4s in syscalls) during syncing.
func (bs *BlobStorage) WarmCache() {
    if bs.pruner == nil {
        return
    }
    go func() {
        start := time.Now()
    start := time.Now()
    if bs.layoutName == LayoutNameFlat {
        log.Info("Blob filesystem cache warm-up started. This may take a few minutes.")
        if err := bs.pruner.warmCache(); err != nil {
            log.WithError(err).Error("Error encountered while warming up blob pruner cache")
        }
        log.WithField("elapsed", time.Since(start)).Info("Blob filesystem cache warm-up complete")
    }()
    } else {
        log.Info("Blob filesystem cache warm-up started.")
    }

    if err := warmCache(bs.layout, bs.cache); err != nil {
        log.WithError(err).Error("Error encountered while warming up blob filesystem cache.")
    }
    if err := bs.migrateLayouts(); err != nil {
        log.WithError(err).Error("Error encountered while migrating blob storage.")
    }
    log.WithField("elapsed", time.Since(start)).Info("Blob filesystem cache warm-up complete.")
}

// ErrBlobStorageSummarizerUnavailable is a sentinel error returned when there is no pruner/cache available.
// This should be used by code that optionally uses the summarizer to optimize rpc requests. Being able to
// fallback when there is no summarizer allows client code to avoid test complexity where the summarizer doesn't matter.
var ErrBlobStorageSummarizerUnavailable = errors.New("BlobStorage not initialized with a pruner or cache")

// WaitForSummarizer blocks until the BlobStorageSummarizer is ready to use.
// BlobStorageSummarizer is not ready immediately on node startup because it needs to sample the blob filesystem to
// determine which blobs are available.
func (bs *BlobStorage) WaitForSummarizer(ctx context.Context) (BlobStorageSummarizer, error) {
    if bs == nil || bs.pruner == nil {
        return nil, ErrBlobStorageSummarizerUnavailable
// If any blob storage directories are found for layouts besides the configured layout, migrate them.
func (bs *BlobStorage) migrateLayouts() error {
    for _, name := range LayoutNames {
        if name == bs.layoutName {
            continue
        }
        from, err := newLayout(name, bs.fs, bs.cache, nil)
        if err != nil {
            return err
        }
        if err := migrateLayout(bs.fs, from, bs.layout, bs.cache); err != nil {
            if errors.Is(err, errLayoutNotDetected) {
                continue
            }
            return errors.Wrapf(err, "failed to migrate layout from %s to %s", name, bs.layoutName)
        }
    }
    return bs.pruner.waitForCache(ctx)
    return nil
}

func (bs *BlobStorage) writePart(sidecar blocks.VerifiedROBlob) (ppath string, err error) {
    ident := identForSidecar(sidecar)
    sidecarData, err := sidecar.MarshalSSZ()
    if err != nil {
        return "", errors.Wrap(err, "failed to serialize sidecar data")
    }
    if len(sidecarData) == 0 {
        return "", errSidecarEmptySSZData
    }

    if err := bs.fs.MkdirAll(bs.layout.dir(ident), directoryPermissions()); err != nil {
        return "", err
    }
    ppath = bs.layout.partPath(ident, fmt.Sprintf("%p", sidecarData))

    // Create a partial file and write the serialized data to it.
    partialFile, err := bs.fs.Create(ppath)
    if err != nil {
        return "", errors.Wrap(err, "failed to create partial file")
    }
    defer func() {
        cerr := partialFile.Close()
        // The close error is probably less important than any existing error, so only overwrite nil err.
        if cerr != nil && err == nil {
            err = cerr
        }
    }()

    n, err := partialFile.Write(sidecarData)
    if err != nil {
        return ppath, errors.Wrap(err, "failed to write to partial file")
    }
    if bs.fsync {
        if err := partialFile.Sync(); err != nil {
            return ppath, err
        }
    }

    if n != len(sidecarData) {
        return ppath, fmt.Errorf("failed to write the full bytes of sidecarData, wrote only %d of %d bytes", n, len(sidecarData))
    }

    return ppath, nil
}

// Save saves blobs given a list of sidecars.
func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
    startTime := time.Now()
    fname := namerForSidecar(sidecar)
    sszPath := fname.path()

    ident := identForSidecar(sidecar)
    sszPath := bs.layout.sszPath(ident)
    exists, err := afero.Exists(bs.fs, sszPath)
    if err != nil {
        return err
@@ -145,78 +215,36 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
        log.WithFields(logging.BlobFields(sidecar.ROBlob)).Debug("Ignoring a duplicate blob sidecar save attempt")
        return nil
    }
    if bs.pruner != nil {
        if err := bs.pruner.notify(sidecar.BlockRoot(), sidecar.Slot(), sidecar.Index); err != nil {
            return errors.Wrapf(err, "problem maintaining pruning cache/metrics for sidecar with root=%#x", sidecar.BlockRoot())
        }
    }

    // Serialize the ethpb.BlobSidecar to binary data using SSZ.
    sidecarData, err := sidecar.MarshalSSZ()
    if err != nil {
        return errors.Wrap(err, "failed to serialize sidecar data")
    } else if len(sidecarData) == 0 {
        return errSidecarEmptySSZData
    }

    if err := bs.fs.MkdirAll(fname.dir(), directoryPermissions); err != nil {
        return err
    }
    partPath := fname.partPath(fmt.Sprintf("%p", sidecarData))

    partialMoved := false
    partPath, err := bs.writePart(sidecar)
    // Ensure the partial file is deleted.
    defer func() {
        if partialMoved {
        if partialMoved || partPath == "" {
            return
        }
        // It's expected to error if the save is successful.
        err = bs.fs.Remove(partPath)
        err := bs.fs.Remove(partPath)
        if err == nil {
            log.WithFields(logrus.Fields{
                "partPath": partPath,
            }).Debugf("Removed partial file")
        }
    }()

    // Create a partial file and write the serialized data to it.
    partialFile, err := bs.fs.Create(partPath)
    if err != nil {
        return errors.Wrap(err, "failed to create partial file")
    }

    n, err := partialFile.Write(sidecarData)
    if err != nil {
        closeErr := partialFile.Close()
        if closeErr != nil {
            return closeErr
        }
        return errors.Wrap(err, "failed to write to partial file")
    }
    if bs.fsync {
        if err := partialFile.Sync(); err != nil {
            return err
        }
    }

    if err := partialFile.Close(); err != nil {
        return err
    }

    if n != len(sidecarData) {
        return fmt.Errorf("failed to write the full bytes of sidecarData, wrote only %d of %d bytes", n, len(sidecarData))
    }

    if n == 0 {
        return errEmptyBlobWritten
    }

    // Atomically rename the partial file to its final name.
    err = bs.fs.Rename(partPath, sszPath)
    if err != nil {
        return errors.Wrap(err, "failed to rename partial file to final name")
    }
    partialMoved = true

    if err := bs.layout.notify(ident); err != nil {
        return errors.Wrapf(err, "problem maintaining pruning cache/metrics for sidecar with root=%#x", sidecar.BlockRoot())
    }
    blobsWrittenCounter.Inc()
    blobSaveLatency.Observe(float64(time.Since(startTime).Milliseconds()))
|
||||
@@ -228,70 +256,30 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
|
||||
// value is always a VerifiedROBlob.
|
||||
func (bs *BlobStorage) Get(root [32]byte, idx uint64) (blocks.VerifiedROBlob, error) {
|
||||
startTime := time.Now()
|
||||
expected := blobNamer{root: root, index: idx}
|
||||
encoded, err := afero.ReadFile(bs.fs, expected.path())
|
||||
var v blocks.VerifiedROBlob
|
||||
ident, err := bs.layout.ident(root, idx)
|
||||
if err != nil {
|
||||
return v, err
|
||||
}
|
||||
s := ðpb.BlobSidecar{}
|
||||
if err := s.UnmarshalSSZ(encoded); err != nil {
|
||||
return v, err
|
||||
}
|
||||
ro, err := blocks.NewROBlobWithRoot(s, root)
|
||||
if err != nil {
|
||||
return blocks.VerifiedROBlob{}, err
|
||||
return verification.VerifiedROBlobError(err)
|
||||
}
|
||||
defer func() {
|
||||
blobFetchLatency.Observe(float64(time.Since(startTime).Milliseconds()))
|
||||
}()
|
||||
return verification.BlobSidecarNoop(ro)
|
||||
return verification.VerifiedROBlobFromDisk(bs.fs, root, bs.layout.sszPath(ident))
|
||||
}
|
||||
|
||||
// Remove removes all blobs for a given root.
|
||||
func (bs *BlobStorage) Remove(root [32]byte) error {
|
||||
rootDir := blobNamer{root: root}.dir()
|
||||
return bs.fs.RemoveAll(rootDir)
|
||||
dirIdent, err := bs.layout.dirIdent(root)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = bs.layout.remove(dirIdent)
|
||||
return err
|
||||
}
|
||||
|
||||
// Indices generates a bitmap representing which BlobSidecar.Index values are present on disk for a given root.
// This value can be compared to the commitments observed in a block to determine which indices need to be found
// on the network to confirm data availability.
func (bs *BlobStorage) Indices(root [32]byte, s primitives.Slot) ([]bool, error) {
	maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(s)
	mask := make([]bool, maxBlobsPerBlock)

	rootDir := blobNamer{root: root}.dir()
	entries, err := afero.ReadDir(bs.fs, rootDir)
	if err != nil {
		if os.IsNotExist(err) {
			return mask, nil
		}
		return mask, err
	}

	for i := range entries {
		if entries[i].IsDir() {
			continue
		}
		name := entries[i].Name()
		if !strings.HasSuffix(name, sszExt) {
			continue
		}
		parts := strings.Split(name, ".")
		if len(parts) != 2 {
			continue
		}
		u, err := strconv.ParseUint(parts[0], 10, 64)
		if err != nil {
			return mask, errors.Wrapf(err, "unexpected directory entry breaks listing, %s", parts[0])
		}
		if u >= uint64(maxBlobsPerBlock) {
			return mask, errIndexOutOfBounds
		}
		mask[u] = true
	}
	return mask, nil
// Summary returns the BlobStorageSummary from the layout.
// Internally, this is a cached representation of the directory listing for the given root.
func (bs *BlobStorage) Summary(root [32]byte) BlobStorageSummary {
	return bs.layout.summary(root)
}

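Summary replaces the removed Indices directory scan with a cache lookup; callers query the returned value through HasIndex and AllAvailable instead of reading the filesystem. A hedged usage sketch (the function name and its caller are illustrative only):

// checkAvailability decides which sidecar indices still need to be fetched
// for a block with `count` commitments, using only the cached summary.
func checkAvailability(bs *BlobStorage, root [32]byte, count int) []uint64 {
	sum := bs.Summary(root)
	if sum.AllAvailable(count) {
		return nil // everything in 0..count-1 is already on disk
	}
	missing := make([]uint64, 0, count)
	for idx := uint64(0); idx < uint64(count); idx++ {
		if !sum.HasIndex(idx) {
			missing = append(missing, idx)
		}
	}
	return missing
}
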
// Clear deletes all files on the filesystem.
@@ -316,36 +304,3 @@ func (bs *BlobStorage) WithinRetentionPeriod(requested, current primitives.Epoch
	}
	return requested+bs.retentionEpochs >= current
}

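WithinRetentionPeriod is plain epoch arithmetic over the configured retention window; an illustrative trace, assuming retentionEpochs = 4096 (the mainnet blob-sidecar retention minimum, used here only for the example):

// WithinRetentionPeriod(100, 4196) == true  // 100 + 4096 = 4196 >= 4196
// WithinRetentionPeriod(100, 4197) == false // 100 + 4096 = 4196 <  4197
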
type blobNamer struct {
	root [32]byte
	index uint64
}

func namerForSidecar(sc blocks.VerifiedROBlob) blobNamer {
	return blobNamer{root: sc.BlockRoot(), index: sc.Index}
}

func (p blobNamer) dir() string {
	return rootString(p.root)
}

func (p blobNamer) partPath(entropy string) string {
	return path.Join(p.dir(), fmt.Sprintf("%s-%d.%s", entropy, p.index, partExt))
}

func (p blobNamer) path() string {
	return path.Join(p.dir(), fmt.Sprintf("%d.%s", p.index, sszExt))
}

func rootString(root [32]byte) string {
	return fmt.Sprintf("%#x", root)
}

func stringToRoot(str string) ([32]byte, error) {
	slice, err := hexutil.Decode(str)
	if err != nil {
		return [32]byte{}, errors.Wrapf(errInvalidRootString, "input=%s", str)
	}
	return bytesutil.ToBytes32(slice), nil
}

@@ -9,26 +9,26 @@ import (
	"testing"

	ssz "github.com/prysmaticlabs/fastssz"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
	"github.com/prysmaticlabs/prysm/v5/testing/util"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
	"github.com/spf13/afero"
)

func TestBlobStorage_SaveBlobData(t *testing.T) {
	_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, params.BeaconConfig().MaxBlobsPerBlock(1))
	testSidecars, err := verification.BlobSidecarSliceNoop(sidecars)
	require.NoError(t, err)
	testSidecars := verification.FakeVerifySliceForTest(t, sidecars)

	t.Run("no error for duplicate", func(t *testing.T) {
		fs, bs := NewEphemeralBlobStorageWithFs(t)
		fs, bs := NewEphemeralBlobStorageAndFs(t)
		existingSidecar := testSidecars[0]

		blobPath := namerForSidecar(existingSidecar).path()
		blobPath := bs.layout.sszPath(identForSidecar(existingSidecar))
		// Serialize the existing BlobSidecar to binary data.
		existingSidecarData, err := ssz.MarshalSSZ(existingSidecar)
		require.NoError(t, err)
@@ -56,8 +56,8 @@ func TestBlobStorage_SaveBlobData(t *testing.T) {
		require.NoError(t, bs.Save(sc))
		actualSc, err := bs.Get(sc.BlockRoot(), sc.Index)
		require.NoError(t, err)
		expectedIdx := []bool{false, false, true, false, false, false}
		actualIdx, err := bs.Indices(actualSc.BlockRoot(), 100)
		expectedIdx := blobIndexMask{false, false, true, false, false, false}
		actualIdx := bs.Summary(actualSc.BlockRoot()).mask
		require.NoError(t, err)
		require.DeepEqual(t, expectedIdx, actualIdx)
	})
@@ -85,7 +85,7 @@ func TestBlobStorage_SaveBlobData(t *testing.T) {

	require.NoError(t, bs.Remove(expected.BlockRoot()))
	_, err = bs.Get(expected.BlockRoot(), expected.Index)
	require.ErrorContains(t, "file does not exist", err)
	require.Equal(t, true, db.IsNotFound(err))
	})

	t.Run("clear", func(t *testing.T) {
@@ -126,16 +126,14 @@ func TestBlobStorage_SaveBlobData(t *testing.T) {
	})
}

// pollUntil polls a condition function until it returns true or a timeout is reached.

func TestBlobIndicesBounds(t *testing.T) {
	fs, bs := NewEphemeralBlobStorageWithFs(t)
	fs := afero.NewMemMapFs()
	root := [32]byte{}

	okIdx := uint64(params.BeaconConfig().MaxBlobsPerBlock(0)) - 1
	writeFakeSSZ(t, fs, root, okIdx)
	indices, err := bs.Indices(root, 100)
	require.NoError(t, err)
	writeFakeSSZ(t, fs, root, 0, okIdx)
	bs := NewWarmedEphemeralBlobStorageUsingFs(t, fs, WithLayout(LayoutNameByEpoch))
	indices := bs.Summary(root).mask
	expected := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
	expected[okIdx] = true
	for i := range expected {
@@ -143,102 +141,23 @@ func TestBlobIndicesBounds(t *testing.T) {
	}

	oobIdx := uint64(params.BeaconConfig().MaxBlobsPerBlock(0))
	writeFakeSSZ(t, fs, root, oobIdx)
	_, err = bs.Indices(root, 100)
	require.ErrorIs(t, err, errIndexOutOfBounds)
	writeFakeSSZ(t, fs, root, 0, oobIdx)
	// This now fails at cache warmup time.
	require.ErrorIs(t, warmCache(bs.layout, bs.cache), errIndexOutOfBounds)
}

func writeFakeSSZ(t *testing.T, fs afero.Fs, root [32]byte, idx uint64) {
	namer := blobNamer{root: root, index: idx}
	require.NoError(t, fs.MkdirAll(namer.dir(), 0700))
	fh, err := fs.Create(namer.path())
func writeFakeSSZ(t *testing.T, fs afero.Fs, root [32]byte, slot primitives.Slot, idx uint64) {
	epoch := slots.ToEpoch(slot)
	namer := newBlobIdent(root, epoch, idx)
	layout := periodicEpochLayout{}
	require.NoError(t, fs.MkdirAll(layout.dir(namer), 0700))
	fh, err := fs.Create(layout.sszPath(namer))
	require.NoError(t, err)
	_, err = fh.Write([]byte("derp"))
	require.NoError(t, err)
	require.NoError(t, fh.Close())
}

func TestBlobStoragePrune(t *testing.T) {
	currentSlot := primitives.Slot(200000)
	fs, bs := NewEphemeralBlobStorageWithFs(t)

	t.Run("PruneOne", func(t *testing.T) {
		_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 300, params.BeaconConfig().MaxBlobsPerBlock(0))
		testSidecars, err := verification.BlobSidecarSliceNoop(sidecars)
		require.NoError(t, err)

		for _, sidecar := range testSidecars {
			require.NoError(t, bs.Save(sidecar))
		}

		require.NoError(t, bs.pruner.prune(currentSlot-bs.pruner.windowSize))

		remainingFolders, err := afero.ReadDir(fs, ".")
		require.NoError(t, err)
		require.Equal(t, 0, len(remainingFolders))
	})
	t.Run("Prune dangling blob", func(t *testing.T) {
		_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 299, params.BeaconConfig().MaxBlobsPerBlock(0))
		testSidecars, err := verification.BlobSidecarSliceNoop(sidecars)
		require.NoError(t, err)

		for _, sidecar := range testSidecars[4:] {
			require.NoError(t, bs.Save(sidecar))
		}

		require.NoError(t, bs.pruner.prune(currentSlot-bs.pruner.windowSize))

		remainingFolders, err := afero.ReadDir(fs, ".")
		require.NoError(t, err)
		require.Equal(t, 0, len(remainingFolders))
	})
	t.Run("PruneMany", func(t *testing.T) {
		blockQty := 10
		slot := primitives.Slot(1)

		for j := 0; j <= blockQty; j++ {
			root := bytesutil.ToBytes32(bytesutil.ToBytes(uint64(slot), 32))
			_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, root, slot, params.BeaconConfig().MaxBlobsPerBlock(0))
			testSidecars, err := verification.BlobSidecarSliceNoop(sidecars)
			require.NoError(t, err)
			require.NoError(t, bs.Save(testSidecars[0]))

			slot += 10000
		}

		require.NoError(t, bs.pruner.prune(currentSlot-bs.pruner.windowSize))

		remainingFolders, err := afero.ReadDir(fs, ".")
		require.NoError(t, err)
		require.Equal(t, 4, len(remainingFolders))
	})
}

func BenchmarkPruning(b *testing.B) {
	var t *testing.T
	_, bs := NewEphemeralBlobStorageWithFs(t)

	blockQty := 10000
	currentSlot := primitives.Slot(150000)
	slot := primitives.Slot(0)

	for j := 0; j <= blockQty; j++ {
		root := bytesutil.ToBytes32(bytesutil.ToBytes(uint64(slot), 32))
		_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, root, slot, params.BeaconConfig().MaxBlobsPerBlock(0))
		testSidecars, err := verification.BlobSidecarSliceNoop(sidecars)
		require.NoError(t, err)
		require.NoError(t, bs.Save(testSidecars[0]))

		slot += 100
	}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := bs.pruner.prune(currentSlot)
		require.NoError(b, err)
	}
}

func TestNewBlobStorage(t *testing.T) {
	_, err := NewBlobStorage()
	require.ErrorIs(t, err, errNoBasePath)
@@ -292,3 +211,13 @@ func TestConfig_WithinRetentionPeriod(t *testing.T) {
		require.Equal(t, true, storage.WithinRetentionPeriod(1, 1))
	})
}

func TestLayoutNames(t *testing.T) {
	badLayoutName := "bad"
	for _, name := range LayoutNames {
		_, err := newLayout(name, nil, nil, nil)
		require.NoError(t, err)
	}
	_, err := newLayout(badLayoutName, nil, nil, nil)
	require.ErrorIs(t, err, errInvalidLayoutName)
}

@@ -1,8 +1,10 @@
package filesystem

import (
	"fmt"
	"sync"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -13,17 +15,12 @@ type blobIndexMask []bool

// BlobStorageSummary represents cached information about the BlobSidecars on disk for each root the cache knows about.
type BlobStorageSummary struct {
	slot primitives.Slot
	mask blobIndexMask
	epoch primitives.Epoch
	mask blobIndexMask
}

// HasIndex returns true if the BlobSidecar at the given index is available in the filesystem.
func (s BlobStorageSummary) HasIndex(idx uint64) bool {
	// Protect from panic, but assume callers are sophisticated enough to not need an error telling them they have an invalid idx.
	maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(s.slot)
	if idx >= uint64(maxBlobsPerBlock) {
		return false
	}
	if idx >= uint64(len(s.mask)) {
		return false
	}
@@ -32,10 +29,6 @@ func (s BlobStorageSummary) HasIndex(idx uint64) bool {

// AllAvailable returns true if we have all blobs for all indices from 0 to count-1.
func (s BlobStorageSummary) AllAvailable(count int) bool {
	maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(s.slot)
	if count > maxBlobsPerBlock {
		return false
	}
	if count > len(s.mask) {
		return false
	}
@@ -47,83 +40,121 @@ func (s BlobStorageSummary) AllAvailable(count int) bool {
	return true
}

func (s BlobStorageSummary) MaxBlobsForEpoch() uint64 {
	return uint64(params.BeaconConfig().MaxBlobsPerBlockAtEpoch(s.epoch))
}

// NewBlobStorageSummary creates a new BlobStorageSummary for a given epoch and mask.
func NewBlobStorageSummary(epoch primitives.Epoch, mask []bool) (BlobStorageSummary, error) {
	c := params.BeaconConfig().MaxBlobsPerBlockAtEpoch(epoch)
	if len(mask) != c {
		return BlobStorageSummary{}, fmt.Errorf("mask length %d does not match expected %d for epoch %d", len(mask), c, epoch)
	}
	return BlobStorageSummary{
		epoch: epoch,
		mask: mask,
	}, nil
}

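NewBlobStorageSummary validates the mask length against the epoch's blob limit before wrapping it; a sketch of how a caller might build one (the helper name and inputs are illustrative):

// buildSummary constructs a summary by hand from a list of on-disk indices.
func buildSummary(epoch primitives.Epoch, onDisk []uint64) (BlobStorageSummary, error) {
	mask := make([]bool, params.BeaconConfig().MaxBlobsPerBlockAtEpoch(epoch))
	for _, idx := range onDisk {
		mask[idx] = true // caller must ensure idx is in bounds for the epoch
	}
	return NewBlobStorageSummary(epoch, mask)
}
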
// BlobStorageSummarizer can be used to receive a summary of metadata about blobs on disk for a given root.
// The BlobStorageSummary can be used to check which indices (if any) are available for a given block by root.
type BlobStorageSummarizer interface {
	Summary(root [32]byte) BlobStorageSummary
}

type blobStorageCache struct {
type blobStorageSummaryCache struct {
	mu sync.RWMutex
	nBlobs float64
	cache map[[32]byte]BlobStorageSummary
}

var _ BlobStorageSummarizer = &blobStorageCache{}
var _ BlobStorageSummarizer = &blobStorageSummaryCache{}

func newBlobStorageCache() *blobStorageCache {
	return &blobStorageCache{
		cache: make(map[[32]byte]BlobStorageSummary, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest*fieldparams.SlotsPerEpoch),
func newBlobStorageCache() *blobStorageSummaryCache {
	return &blobStorageSummaryCache{
		cache: make(map[[32]byte]BlobStorageSummary),
	}
}

// Summary returns the BlobStorageSummary for `root`. The BlobStorageSummary can be used to check for the presence of
// BlobSidecars based on Index.
func (s *blobStorageCache) Summary(root [32]byte) BlobStorageSummary {
func (s *blobStorageSummaryCache) Summary(root [32]byte) BlobStorageSummary {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.cache[root]
}

func (s *blobStorageCache) ensure(key [32]byte, slot primitives.Slot, idx uint64) error {
	maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
	if idx >= uint64(maxBlobsPerBlock) {
func (s *blobStorageSummaryCache) ensure(ident blobIdent) error {
	maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlockAtEpoch(ident.epoch)
	if ident.index >= uint64(maxBlobsPerBlock) {
		return errIndexOutOfBounds
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	v := s.cache[key]
	v.slot = slot
	v := s.cache[ident.root]
	v.epoch = ident.epoch
	if v.mask == nil {
		v.mask = make(blobIndexMask, maxBlobsPerBlock)
	}
	if !v.mask[idx] {
	if !v.mask[ident.index] {
		s.updateMetrics(1)
	}
	v.mask[idx] = true
	s.cache[key] = v
	v.mask[ident.index] = true
	s.cache[ident.root] = v
	return nil
}

func (s *blobStorageCache) slot(key [32]byte) (primitives.Slot, bool) {
func (s *blobStorageSummaryCache) get(key [32]byte) (BlobStorageSummary, bool) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	v, ok := s.cache[key]
	if !ok {
		return 0, false
	}
	return v.slot, ok
	return v, ok
}

func (s *blobStorageCache) evict(key [32]byte) {
	var deleted float64
func (s *blobStorageSummaryCache) identForIdx(key [32]byte, idx uint64) (blobIdent, error) {
	v, ok := s.get(key)
	if !ok || !v.HasIndex(idx) {
		return blobIdent{}, db.ErrNotFound
	}
	return blobIdent{
		root: key,
		index: idx,
		epoch: v.epoch,
	}, nil
}

func (s *blobStorageSummaryCache) identForRoot(key [32]byte) (blobIdent, error) {
	v, ok := s.get(key)
	if !ok {
		return blobIdent{}, db.ErrNotFound
	}
	return blobIdent{
		root: key,
		epoch: v.epoch,
	}, nil
}

func (s *blobStorageSummaryCache) evict(key [32]byte) int {
	deleted := 0
	s.mu.Lock()
	defer s.mu.Unlock()
	v, ok := s.cache[key]
	if ok {
		for i := range v.mask {
			if v.mask[i] {
				deleted += 1
			}
	if !ok {
		return 0
	}
	for i := range v.mask {
		if v.mask[i] {
			deleted += 1
		}
	}
	delete(s.cache, key)
	s.mu.Unlock()
	if deleted > 0 {
		s.updateMetrics(-deleted)
		s.updateMetrics(-float64(deleted))
	}
	return deleted
}

func (s *blobStorageCache) updateMetrics(delta float64) {
func (s *blobStorageSummaryCache) updateMetrics(delta float64) {
	s.nBlobs += delta
	blobDiskCount.Set(s.nBlobs)
	blobDiskSize.Set(s.nBlobs * fieldparams.BlobSidecarSize)

@@ -53,7 +53,7 @@ func TestSlotByRoot_Summary(t *testing.T) {
	for _, c := range cases {
		if c.expected != nil {
			key := bytesutil.ToBytes32([]byte(c.name))
			sc.cache[key] = BlobStorageSummary{slot: 0, mask: c.expected}
			sc.cache[key] = BlobStorageSummary{epoch: 0, mask: c.expected}
		}
	}
	for _, c := range cases {

238 beacon-chain/db/filesystem/iteration.go (Normal file)
@@ -0,0 +1,238 @@
package filesystem

import (
	"fmt"
	"io"
	"path"
	"path/filepath"
	"strconv"
	"strings"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
	"github.com/sirupsen/logrus"
	"github.com/spf13/afero"
)

var errIdentFailure = errors.New("failed to determine blob metadata, ignoring all sub-paths.")

type identificationError struct {
	err error
	path string
	ident blobIdent
}

func (ide *identificationError) Error() string {
	return fmt.Sprintf("%s path=%s, err=%s", errIdentFailure.Error(), ide.path, ide.err.Error())
}

func (ide *identificationError) Unwrap() error {
	return ide.err
}

func (*identificationError) Is(err error) bool {
	return err == errIdentFailure
}

func (ide *identificationError) LogFields() logrus.Fields {
	fields := ide.ident.logFields()
	fields["path"] = ide.path
	return fields
}

func newIdentificationError(path string, ident blobIdent, err error) *identificationError {
	return &identificationError{path: path, ident: ident, err: err}
}

func listDir(fs afero.Fs, dir string) ([]string, error) {
	top, err := fs.Open(dir)
	if err != nil {
		return nil, errors.Wrap(err, "failed to open directory descriptor")
	}
	defer func() {
		if err := top.Close(); err != nil {
			log.WithError(err).Errorf("Could not close file %s", dir)
		}
	}()
	// re the -1 param: "If n <= 0, Readdirnames returns all the names from the directory in a single slice"
	dirs, err := top.Readdirnames(-1)
	if err != nil {
		return nil, errors.Wrap(err, "failed to read directory listing")
	}
	return dirs, nil
}

// identPopulator is a function that sets values in the blobIdent for a given layer of the filesystem layout.
type identPopulator func(blobIdent, string) (blobIdent, error)

// layoutLayer represents a layer of the nested directory scheme. Each layer is defined by a filter function that
// ensures any entries at that layer of the scheme are named in a valid way, and a populateIdent function that
// parses the directory name into a blobIdent object, used for iterating across the layout in a layout-independent way.
type layoutLayer struct {
	populateIdent identPopulator
	filter func(string) bool
}

// identIterator moves through the filesystem in order to yield blobIdents.
// layoutLayers (in the 'layers' field) allows a filesystem layout to control how the
// layout is traversed. A layoutLayer can filter out entries from the directory listing
// via the filter function, and populate fields in the blobIdent via the populateIdent function.
// The blobIdent is populated from an empty value at the root, accumulating values for its fields at each layer.
// The fully populated blobIdent is returned when the iterator reaches the leaf layer.
type identIterator struct {
	fs afero.Fs
	path string
	child *identIterator
	ident blobIdent
	// layoutLayers are the heart of how the layout defines the nesting of the components of the path.
	// Each layer of the layout represents a different layer of the directory layout hierarchy,
	// from the relative root at the zero index to the blob files at the end.
	layers []layoutLayer
	entries []string
	offset int
	eof bool
}

// atEOF can be used to peek at the iterator to see if it's already finished. This is useful for the
// migration code, which uses it to check whether the source layout has any entries at all.
func (iter *identIterator) atEOF() bool {
	return iter.eof
}

// next is the only method that a user of the identIterator needs to call.
// identIterator will yield blobIdents in a depth-first fashion,
// returning an empty blobIdent and io.EOF once all branches have been traversed.
func (iter *identIterator) next() (blobIdent, error) {
	if iter.eof {
		return blobIdent{}, io.EOF
	}
	if iter.child != nil {
		next, err := iter.child.next()
		if err == nil {
			return next, nil
		}
		if !errors.Is(err, io.EOF) {
			return blobIdent{}, err
		}
	}
	return iter.advanceChild()
}

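A consumer drains the iterator with a next() loop until io.EOF, exactly as warmCache and migrateLayout do later in this change; a minimal sketch, assuming an fsLayout value l (the helper name is illustrative):

// drainIdents collects every blobIdent a layout can see on disk.
func drainIdents(l fsLayout) ([]blobIdent, error) {
	iter, err := l.iterateIdents(0) // 0 disables the epoch cutoff filter
	if err != nil {
		return nil, err
	}
	idents := make([]blobIdent, 0)
	for ident, err := iter.next(); !errors.Is(err, io.EOF); ident, err = iter.next() {
		if err != nil {
			return nil, err // warmCache instead skips errIdentFailure entries
		}
		idents = append(idents, ident) // fully populated: root, epoch, index
	}
	return idents, nil
}
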
// advanceChild is used to move to the next directory at each layer of the tree, either when
// the nodes are first being initialized at a layer, or when a sub-branch has been exhausted.
func (iter *identIterator) advanceChild() (blobIdent, error) {
	defer func() {
		iter.offset += 1
	}()
	for i := iter.offset; i < len(iter.entries); i++ {
		iter.offset = i
		nextPath := filepath.Join(iter.path, iter.entries[iter.offset])
		nextLayer := iter.layers[0]
		if !nextLayer.filter(nextPath) {
			continue
		}
		ident, err := nextLayer.populateIdent(iter.ident, nextPath)
		if err != nil {
			return ident, newIdentificationError(nextPath, ident, err)
		}
		// If we're at the leaf layer, we can return the updated ident.
		if len(iter.layers) == 1 {
			return ident, nil
		}

		entries, err := listDir(iter.fs, nextPath)
		if err != nil {
			return blobIdent{}, err
		}
		if len(entries) == 0 {
			continue
		}
		iter.child = &identIterator{
			fs: iter.fs,
			path: nextPath,
			ident: ident,
			layers: iter.layers[1:],
			entries: entries,
		}
		return iter.child.next()
	}

	return blobIdent{}, io.EOF
}

func populateNoop(namer blobIdent, _ string) (blobIdent, error) {
	return namer, nil
}

func populateRoot(namer blobIdent, dir string) (blobIdent, error) {
	root, err := rootFromPath(dir)
	if err != nil {
		return namer, err
	}
	namer.root = root
	return namer, nil
}

func populateIndex(namer blobIdent, fname string) (blobIdent, error) {
	idx, err := idxFromPath(fname)
	if err != nil {
		return namer, err
	}
	namer.index = idx
	return namer, nil
}

func rootFromPath(p string) ([32]byte, error) {
	subdir := filepath.Base(p)
	root, err := stringToRoot(subdir)
	if err != nil {
		return root, errors.Wrapf(err, "invalid directory, could not parse subdir as root %s", p)
	}
	return root, nil
}

func idxFromPath(p string) (uint64, error) {
	p = path.Base(p)

	if !isSszFile(p) {
		return 0, errors.Wrap(errNotBlobSSZ, "does not have .ssz extension")
	}
	parts := strings.Split(p, ".")
	if len(parts) != 2 {
		return 0, errors.Wrap(errNotBlobSSZ, "unexpected filename structure (want <index>.ssz)")
	}
	idx, err := strconv.ParseUint(parts[0], 10, 64)
	if err != nil {
		return 0, err
	}
	return idx, nil
}

func filterNoop(_ string) bool {
	return true
}

func isRootDir(p string) bool {
	dir := filepath.Base(p)
	return len(dir) == rootStringLen && strings.HasPrefix(dir, "0x")
}

func isSszFile(s string) bool {
	return filepath.Ext(s) == "."+sszExt
}

func rootToString(root [32]byte) string {
	return fmt.Sprintf("%#x", root)
}

func stringToRoot(str string) ([32]byte, error) {
	if len(str) != rootStringLen {
		return [32]byte{}, errors.Wrapf(errInvalidRootString, "incorrect len for input=%s", str)
	}
	slice, err := hexutil.Decode(str)
	if err != nil {
		return [32]byte{}, errors.Wrapf(errInvalidRootString, "input=%s", str)
	}
	return bytesutil.ToBytes32(slice), nil
}
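
rootToString and stringToRoot are inverses over the 66-character 0x-prefixed form, which is what makes the length check in isRootDir safe; an illustrative round trip (the helper name is hypothetical):

// roundTripRoot demonstrates that encoding then decoding returns the input.
func roundTripRoot(root [32]byte) bool {
	s := rootToString(root) // len(s) == rootStringLen == 66
	back, err := stringToRoot(s)
	return err == nil && back == root
}
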
304 beacon-chain/db/filesystem/iteration_test.go (Normal file)
@@ -0,0 +1,304 @@
package filesystem

import (
	"bytes"
	"fmt"
	"io"
	"math"
	"os"
	"path"
	"sort"
	"testing"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
	"github.com/prysmaticlabs/prysm/v5/testing/util"
	"github.com/spf13/afero"
)

func TestRootFromDir(t *testing.T) {
	cases := []struct {
		name string
		dir string
		err error
		root [32]byte
	}{
		{
			name: "happy path",
			dir: "0xffff875e1d985c5ccb214894983f2428edb271f0f87b68ba7010e4a99df3b5cb",
			root: [32]byte{255, 255, 135, 94, 29, 152, 92, 92, 203, 33, 72, 148, 152, 63, 36, 40,
				237, 178, 113, 240, 248, 123, 104, 186, 112, 16, 228, 169, 157, 243, 181, 203},
		},
		{
			name: "too short",
			dir: "0xffff875e1d985c5ccb214894983f2428edb271f0f87b68ba7010e4a99df3b5c",
			err: errInvalidRootString,
		},
		{
			name: "too long",
			dir: "0xffff875e1d985c5ccb214894983f2428edb271f0f87b68ba7010e4a99df3b5cbb",
			err: errInvalidRootString,
		},
		{
			name: "missing prefix",
			dir: "ffff875e1d985c5ccb214894983f2428edb271f0f87b68ba7010e4a99df3b5cb",
			err: errInvalidRootString,
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			root, err := stringToRoot(c.dir)
			if c.err != nil {
				require.ErrorIs(t, err, c.err)
				return
			}
			require.NoError(t, err)
			require.Equal(t, c.root, root)
		})
	}
}

func TestSlotFromFile(t *testing.T) {
	cases := []struct {
		slot primitives.Slot
	}{
		{slot: 0},
		{slot: 2},
		{slot: 1123581321},
		{slot: math.MaxUint64},
	}
	for _, c := range cases {
		t.Run(fmt.Sprintf("slot %d", c.slot), func(t *testing.T) {
			fs, bs := NewEphemeralBlobStorageAndFs(t)
			_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, c.slot, 1)
			sc := verification.FakeVerifyForTest(t, sidecars[0])
			require.NoError(t, bs.Save(sc))
			namer := identForSidecar(sc)
			sszPath := bs.layout.sszPath(namer)
			slot, err := slotFromFile(sszPath, fs)
			require.NoError(t, err)
			require.Equal(t, c.slot, slot)
		})
	}
}

type dirFiles struct {
	name string
	isDir bool
	children []dirFiles
}

func (df dirFiles) reify(t *testing.T, fs afero.Fs, base string) {
	fullPath := path.Join(base, df.name)
	if df.isDir {
		if df.name != "" {
			require.NoError(t, fs.Mkdir(fullPath, directoryPermissions()))
		}
		for _, c := range df.children {
			c.reify(t, fs, fullPath)
		}
	} else {
		fp, err := fs.Create(fullPath)
		require.NoError(t, err)
		_, err = fp.WriteString("derp")
		require.NoError(t, err)
	}
}

func (df dirFiles) childNames() []string {
	cn := make([]string, len(df.children))
	for i := range df.children {
		cn[i] = df.children[i].name
	}
	return cn
}

func TestListDir(t *testing.T) {
	fs := afero.NewMemMapFs()
	rootStrs := []string{
		"0x0023dc5d063c7c1b37016bb54963c6ff4bfe5dfdf6dac29e7ceeb2b8fa81ed7a",
		"0xff30526cd634a5af3a09cc9bff67f33a621fc5b975750bb4432f74df077554b4",
		"0x23f5f795aaeb78c01fadaf3d06da2e99bd4b3622ae4dfea61b05b7d9adb119c2",
	}

	// parent directory
	tree := dirFiles{isDir: true}
	// break out each subdir for easier assertions
	notABlob := dirFiles{name: "notABlob", isDir: true}
	childlessBlob := dirFiles{name: rootStrs[0], isDir: true}
	blobWithSsz := dirFiles{name: rootStrs[1], isDir: true,
		children: []dirFiles{{name: "1.ssz"}, {name: "2.ssz"}},
	}
	blobWithSszAndTmp := dirFiles{name: rootStrs[2], isDir: true,
		children: []dirFiles{{name: "5.ssz"}, {name: "0.part"}}}
	tree.children = append(tree.children,
		notABlob, childlessBlob, blobWithSsz, blobWithSszAndTmp)

	topChildren := make([]string, len(tree.children))
	for i := range tree.children {
		topChildren[i] = tree.children[i].name
	}

	var filter = func(entries []string, filt func(string) bool) []string {
		filtered := make([]string, 0, len(entries))
		for i := range entries {
			if filt(entries[i]) {
				filtered = append(filtered, entries[i])
			}
		}
		return filtered
	}

	tree.reify(t, fs, "")
	cases := []struct {
		name string
		dirPath string
		expected []string
		filter func(string) bool
		err error
	}{
		{
			name: "non-existent",
			dirPath: "derp",
			expected: []string{},
			err: os.ErrNotExist,
		},
		{
			name: "empty",
			dirPath: childlessBlob.name,
			expected: []string{},
		},
		{
			name: "top",
			dirPath: ".",
			expected: topChildren,
		},
		{
			name: "custom filter: only notABlob",
			dirPath: ".",
			expected: []string{notABlob.name},
			filter: func(s string) bool {
				return s == notABlob.name
			},
		},
		{
			name: "root filter",
			dirPath: ".",
			expected: []string{childlessBlob.name, blobWithSsz.name, blobWithSszAndTmp.name},
			filter: isRootDir,
		},
		{
			name: "ssz filter",
			dirPath: blobWithSsz.name,
			expected: blobWithSsz.childNames(),
			filter: isSszFile,
		},
		{
			name: "ssz mixed filter",
			dirPath: blobWithSszAndTmp.name,
			expected: []string{"5.ssz"},
			filter: isSszFile,
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			result, err := listDir(fs, c.dirPath)
			if c.filter != nil {
				result = filter(result, c.filter)
			}
			if c.err != nil {
				require.ErrorIs(t, err, c.err)
				require.Equal(t, 0, len(result))
			} else {
				require.NoError(t, err)
				sort.Strings(c.expected)
				sort.Strings(result)
				require.DeepEqual(t, c.expected, result)
			}
		})
	}
}

func TestSlotFromBlob(t *testing.T) {
	cases := []struct {
		slot primitives.Slot
	}{
		{slot: 0},
		{slot: 2},
		{slot: 1123581321},
		{slot: math.MaxUint64},
	}
	for _, c := range cases {
		t.Run(fmt.Sprintf("slot %d", c.slot), func(t *testing.T) {
			_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, c.slot, 1)
			sc := sidecars[0]
			enc, err := sc.MarshalSSZ()
			require.NoError(t, err)
			slot, err := slotFromBlob(bytes.NewReader(enc))
			require.NoError(t, err)
			require.Equal(t, c.slot, slot)
		})
	}
}

func TestIterationComplete(t *testing.T) {
	targets := []migrationTestTarget{
		{
			ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", 1234, 0),
			path: "by-epoch/0/1234/0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b/0.ssz",
		},
		{
			ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 0),
			slotOffset: 31,
			path: "by-epoch/1/5330/0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86/0.ssz",
		},
		{
			ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 1),
			slotOffset: 31,
			path: "by-epoch/1/5330/0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86/1.ssz",
		},
		{
			ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", 16777216, 0),
			slotOffset: 16,
			path: "by-epoch/4096/16777216/0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c/0.ssz",
		},
		{
			ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", 16777216, 1),
			slotOffset: 16,
			path: "by-epoch/4096/16777216/0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c/1.ssz",
		},
		{
			ident: ezIdent(t, "0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba", 16777217, 0),
			slotOffset: 16,
			path: "by-epoch/4096/16777217/0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba/0.ssz",
		},
		{
			ident: ezIdent(t, "0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d", 11235, 1),
			path: "by-epoch/2/11235/0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d/1.ssz",
		},
	}
	fs := afero.NewMemMapFs()
	cache := newBlobStorageCache()
	byEpoch, err := newLayout(LayoutNameByEpoch, fs, cache, nil)
	require.NoError(t, err)
	for _, tar := range targets {
		setupTestBlobFile(t, tar.ident, tar.slotOffset, fs, byEpoch)
	}
	iter, err := byEpoch.iterateIdents(0)
	require.NoError(t, err)
	nIdents := 0
	for ident, err := iter.next(); err != io.EOF; ident, err = iter.next() {
		require.NoError(t, err)
		nIdents++
		require.NoError(t, cache.ensure(ident))
	}
	require.Equal(t, len(targets), nIdents)
	for _, tar := range targets {
		entry, ok := cache.get(tar.ident.root)
		require.Equal(t, true, ok)
		require.Equal(t, tar.ident.epoch, entry.epoch)
		require.Equal(t, true, entry.HasIndex(tar.ident.index))
		require.Equal(t, tar.path, byEpoch.sszPath(tar.ident))
	}
}
291 beacon-chain/db/filesystem/layout.go (Normal file)
@@ -0,0 +1,291 @@
package filesystem

import (
	"fmt"
	"io"
	"path/filepath"
	"strings"
	"time"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
	"github.com/sirupsen/logrus"
	"github.com/spf13/afero"
)

const (
	// Full root in directory will be 66 chars, eg:
	// >>> len('0x0002fb4db510b8618b04dc82d023793739c26346a8b02eb73482e24b0fec0555') == 66
	rootStringLen = 66
	sszExt = "ssz"
	partExt = "part"
	periodicEpochBaseDir = "by-epoch"
)

const (
	LayoutNameFlat = "flat"
	LayoutNameByEpoch = "by-epoch"
)

var LayoutNames = []string{LayoutNameFlat, LayoutNameByEpoch}

var (
	errMigrationFailure = errors.New("unable to migrate blob directory between old and new layout")
	errCacheWarmFailed = errors.New("failed to warm blob filesystem cache")
	errPruneFailed = errors.New("failed to prune root")
	errInvalidRootString = errors.New("Could not parse hex string as a [32]byte")
	errInvalidDirectoryLayout = errors.New("Could not parse blob directory path")
	errInvalidLayoutName = errors.New("unknown layout name")
	errLayoutNotDetected = errors.New("given layout not observed in the blob filesystem tree")
)

type blobIdent struct {
	root [32]byte
	epoch primitives.Epoch
	index uint64
}

func newBlobIdent(root [32]byte, epoch primitives.Epoch, index uint64) blobIdent {
	return blobIdent{root: root, epoch: epoch, index: index}
}

func identForSidecar(sc blocks.VerifiedROBlob) blobIdent {
	return newBlobIdent(sc.BlockRoot(), slots.ToEpoch(sc.Slot()), sc.Index)
}

func (n blobIdent) sszFname() string {
	return fmt.Sprintf("%d.%s", n.index, sszExt)
}

func (n blobIdent) partFname(entropy string) string {
	return fmt.Sprintf("%s-%d.%s", entropy, n.index, partExt)
}

func (n blobIdent) logFields() logrus.Fields {
	return logrus.Fields{
		"root": fmt.Sprintf("%#x", n.root),
		"epoch": n.epoch,
		"index": n.index,
	}
}

type fsLayout interface {
	name() string
	dir(n blobIdent) string
	sszPath(n blobIdent) string
	partPath(n blobIdent, entropy string) string
	iterateIdents(before primitives.Epoch) (*identIterator, error)
	ident(root [32]byte, idx uint64) (blobIdent, error)
	dirIdent(root [32]byte) (blobIdent, error)
	summary(root [32]byte) BlobStorageSummary
	notify(ident blobIdent) error
	pruneBefore(before primitives.Epoch) (*pruneSummary, error)
	remove(ident blobIdent) (int, error)
	blockParentDirs(ident blobIdent) []string
}

func newLayout(name string, fs afero.Fs, cache *blobStorageSummaryCache, pruner *blobPruner) (fsLayout, error) {
	switch name {
	case LayoutNameFlat:
		return newFlatLayout(fs, cache, pruner), nil
	case LayoutNameByEpoch:
		return newPeriodicEpochLayout(fs, cache, pruner), nil
	default:
		return nil, errors.Wrapf(errInvalidLayoutName, "name=%s", name)
	}
}
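
Layout selection is by name only; a hedged construction sketch mirroring the call in TestIterationComplete above (which passes a nil pruner for read-only use; the wrapper name is hypothetical):

// newByEpochForTest wires up an in-memory by-epoch layout.
func newByEpochForTest() (fsLayout, error) {
	fs := afero.NewMemMapFs()
	cache := newBlobStorageCache()
	return newLayout(LayoutNameByEpoch, fs, cache, nil) // errInvalidLayoutName for unknown names
}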

func warmCache(l fsLayout, cache *blobStorageSummaryCache) error {
	iter, err := l.iterateIdents(0)
	if err != nil {
		return errors.Wrap(errCacheWarmFailed, err.Error())
	}
	for ident, err := iter.next(); !errors.Is(err, io.EOF); ident, err = iter.next() {
		if errors.Is(err, errIdentFailure) {
			idf := &identificationError{}
			if errors.As(err, &idf) {
				log.WithFields(idf.LogFields()).WithError(err).Error("Failed to cache blob data for path")
			}
			continue
		}
		if err != nil {
			return fmt.Errorf("%w: failed to populate blob data cache: %w", errCacheWarmFailed, err)
		}
		if err := cache.ensure(ident); err != nil {
			return fmt.Errorf("%w: failed to write cache entry for %s: %w", errCacheWarmFailed, l.sszPath(ident), err)
		}
	}
	return nil
}

func migrateLayout(fs afero.Fs, from, to fsLayout, cache *blobStorageSummaryCache) error {
	start := time.Now()
	iter, err := from.iterateIdents(0)
	if err != nil {
		return errors.Wrapf(errMigrationFailure, "failed to iterate legacy structure while migrating blobs, err=%s", err.Error())
	}
	if iter.atEOF() {
		return errLayoutNotDetected
	}
	log.WithField("fromLayout", from.name()).WithField("toLayout", to.name()).Info("Migrating blob filesystem layout. This one-time operation can take extra time (up to a few minutes for systems with extended blob storage and a cold disk cache).")
	lastMoved := ""
	parentDirs := make(map[string]bool) // this map should have < 65k keys by design
	moved := 0
	dc := newDirCleaner()
	for ident, err := iter.next(); !errors.Is(err, io.EOF); ident, err = iter.next() {
		if err != nil {
			if errors.Is(err, errIdentFailure) {
				idf := &identificationError{}
				if errors.As(err, &idf) {
					log.WithFields(idf.LogFields()).WithError(err).Error("Failed to migrate blob path")
				}
				continue
			}
			return errors.Wrapf(errMigrationFailure, "failed to iterate previous layout structure while migrating blobs, err=%s", err.Error())
		}
		src := from.dir(ident)
		target := to.dir(ident)
		if src != lastMoved {
			targetParent := filepath.Dir(target)
			if targetParent != "" && targetParent != "." && !parentDirs[targetParent] {
				if err := fs.MkdirAll(targetParent, directoryPermissions()); err != nil {
					return errors.Wrapf(errMigrationFailure, "failed to make enclosing path before moving %s to %s", src, target)
				}
				parentDirs[targetParent] = true
			}
			if err := fs.Rename(src, target); err != nil {
				return errors.Wrapf(errMigrationFailure, "could not rename %s to %s", src, target)
			}
			moved += 1
			lastMoved = src
			for _, dir := range from.blockParentDirs(ident) {
				dc.add(dir)
			}
		}
		if err := cache.ensure(ident); err != nil {
			return errors.Wrapf(errMigrationFailure, "could not cache path %s, err=%s", to.sszPath(ident), err.Error())
		}
	}
	dc.clean(fs)
	if moved > 0 {
		log.WithField("dirsMoved", moved).WithField("elapsed", time.Since(start)).
			Info("Blob filesystem migration complete.")
	}
	return nil
}

type dirCleaner struct {
	maxDepth int
	layers map[int]map[string]struct{}
}

func newDirCleaner() *dirCleaner {
	return &dirCleaner{layers: make(map[int]map[string]struct{})}
}

func (d *dirCleaner) add(dir string) {
	nLayers := len(strings.Split(dir, string(filepath.Separator)))
	_, ok := d.layers[nLayers]
	if !ok {
		d.layers[nLayers] = make(map[string]struct{})
	}
	d.layers[nLayers][dir] = struct{}{}
	if nLayers > d.maxDepth {
		d.maxDepth = nLayers
	}
}

func (d *dirCleaner) clean(fs afero.Fs) {
	for i := d.maxDepth; i >= 0; i-- {
		d.cleanLayer(fs, i)
	}
}

func (d *dirCleaner) cleanLayer(fs afero.Fs, layer int) {
	dirs, ok := d.layers[layer]
	if !ok {
		return
	}
	for dir := range dirs {
		// Use Remove rather than RemoveAll to make sure we're only removing empty directories
		if err := fs.Remove(dir); err != nil {
			log.WithField("dir", dir).WithError(err).Error("Failed to remove blob directory, please remove it manually if desired.")
			contents, err := listDir(fs, dir)
			if err != nil {
				log.WithField("dir", dir).WithError(err).Error("Could not list blob directory contents to find reason for removal failure.")
				continue
			}
			for _, c := range contents {
				log.WithField("file", c).WithField("dir", dir).Debug("Unexpected file blocking migrated blob directory cleanup.")
			}
		}
	}
}

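dirCleaner batches candidate directories by path depth and clean removes the deepest layer first, so each parent is only attempted after its children are gone; a worked illustration (the paths and wrapper are hypothetical, fs is any afero.Fs):

// cleanExample queues directories at depths 3, 2, 1; clean removes them
// deepest-first, and Remove (not RemoveAll) fails harmlessly on non-empty dirs.
func cleanExample(fs afero.Fs) {
	dc := newDirCleaner()
	dc.add("by-epoch/0/1234") // depth 3
	dc.add("by-epoch/0")      // depth 2
	dc.add("by-epoch")        // depth 1
	dc.clean(fs)
}
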
type pruneSummary struct {
	blobsPruned int
	failedRemovals []string
}

func (s pruneSummary) LogFields() logrus.Fields {
	return logrus.Fields{
		"blobsPruned": s.blobsPruned,
		"failedRemovals": len(s.failedRemovals),
	}
}

func pruneBefore(before primitives.Epoch, l fsLayout) (map[primitives.Epoch]*pruneSummary, error) {
	sums := make(map[primitives.Epoch]*pruneSummary)
	iter, err := l.iterateIdents(before)
	if err != nil {
		return nil, errors.Wrap(err, "failed to iterate blob paths for pruning")
	}

	// We will get an ident for each index, but want to prune all indexes for the given root together.
	var lastIdent blobIdent
	for ident, err := iter.next(); !errors.Is(err, io.EOF); ident, err = iter.next() {
		if err != nil {
			if errors.Is(err, errIdentFailure) {
				idf := &identificationError{}
				if errors.As(err, &idf) {
					log.WithFields(idf.LogFields()).WithError(err).Error("Failed to prune blob path due to identification errors")
				}
				continue
			}
			log.WithError(err).Error("encountered unhandled error during pruning")
			return nil, errors.Wrap(errPruneFailed, err.Error())
		}
		if ident.epoch >= before {
			continue
		}
		if lastIdent.root != ident.root {
			pruneOne(lastIdent, l, sums)
			lastIdent = ident
		}
	}
	// handle the final ident
	pruneOne(lastIdent, l, sums)

	return sums, nil
}

func pruneOne(ident blobIdent, l fsLayout, sums map[primitives.Epoch]*pruneSummary) {
	// Skip pruning the n-1 ident if we're on the first real ident (lastIdent will be zero value).
	if ident.root == params.BeaconConfig().ZeroHash {
		return
	}
	_, ok := sums[ident.epoch]
	if !ok {
		sums[ident.epoch] = &pruneSummary{}
	}
	s := sums[ident.epoch]
	removed, err := l.remove(ident)
	if err != nil {
		s.failedRemovals = append(s.failedRemovals, l.dir(ident))
		log.WithField("root", fmt.Sprintf("%#x", ident.root)).Error("Failed to delete blob directory for root")
	}
	s.blobsPruned += removed
}
212 beacon-chain/db/filesystem/layout_by_epoch.go (Normal file)
@@ -0,0 +1,212 @@
package filesystem

import (
	"fmt"
	"os"
	"path"
	"path/filepath"
	"strconv"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/spf13/afero"
)

const epochsPerDirectory = 4096

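Together with the path helpers below, this constant fixes the on-disk scheme as by-epoch/<period>/<epoch>/<root>/<index>.ssz, where a period spans 4096 epochs (matching the mainnet MinEpochsForBlobsSidecarsRequest that periodForEpoch divides by); the paths asserted in TestIterationComplete above follow exactly this shape.
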
type periodicEpochLayout struct {
	fs afero.Fs
	cache *blobStorageSummaryCache
	pruner *blobPruner
}

var _ fsLayout = &periodicEpochLayout{}

func newPeriodicEpochLayout(fs afero.Fs, cache *blobStorageSummaryCache, pruner *blobPruner) fsLayout {
	l := &periodicEpochLayout{fs: fs, cache: cache, pruner: pruner}
	return l
}

func (l *periodicEpochLayout) name() string {
	return LayoutNameByEpoch
}

func (l *periodicEpochLayout) blockParentDirs(ident blobIdent) []string {
	return []string{
		periodicEpochBaseDir,
		l.periodDir(ident.epoch),
		l.epochDir(ident.epoch),
	}
}

func (l *periodicEpochLayout) notify(ident blobIdent) error {
	if err := l.cache.ensure(ident); err != nil {
		return err
	}
	l.pruner.notify(ident.epoch, l)
	return nil
}

// If before == 0, it won't be used as a filter and all idents will be returned.
func (l *periodicEpochLayout) iterateIdents(before primitives.Epoch) (*identIterator, error) {
	_, err := l.fs.Stat(periodicEpochBaseDir)
	if err != nil {
		if os.IsNotExist(err) {
			return &identIterator{eof: true}, nil // The directory is non-existent, which is fine; stop iteration.
		}
		return nil, errors.Wrapf(err, "error reading path %s", periodicEpochBaseDir)
	}
	// iterate root, which should have directories named by "period"
	entries, err := listDir(l.fs, periodicEpochBaseDir)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to list %s", periodicEpochBaseDir)
	}

	return &identIterator{
		fs: l.fs,
		path: periodicEpochBaseDir,
		// Please see comments on the `layers` field in `identIterator` if the role of the layers is unclear.
		layers: []layoutLayer{
			{populateIdent: populateNoop, filter: isBeforePeriod(before)},
			{populateIdent: populateEpoch, filter: isBeforeEpoch(before)},
			{populateIdent: populateRoot, filter: isRootDir}, // extract root from path
			{populateIdent: populateIndex, filter: isSszFile}, // extract index from filename
		},
		entries: entries,
	}, nil
}

func (l *periodicEpochLayout) ident(root [32]byte, idx uint64) (blobIdent, error) {
	return l.cache.identForIdx(root, idx)
}

func (l *periodicEpochLayout) dirIdent(root [32]byte) (blobIdent, error) {
	return l.cache.identForRoot(root)
}

func (l *periodicEpochLayout) summary(root [32]byte) BlobStorageSummary {
	return l.cache.Summary(root)
}

func (l *periodicEpochLayout) dir(n blobIdent) string {
	return filepath.Join(l.epochDir(n.epoch), rootToString(n.root))
}

func (l *periodicEpochLayout) epochDir(epoch primitives.Epoch) string {
	return filepath.Join(l.periodDir(epoch), fmt.Sprintf("%d", epoch))
}

func (l *periodicEpochLayout) periodDir(epoch primitives.Epoch) string {
	return filepath.Join(periodicEpochBaseDir, fmt.Sprintf("%d", periodForEpoch(epoch)))
}

func (l *periodicEpochLayout) sszPath(n blobIdent) string {
	return filepath.Join(l.dir(n), n.sszFname())
}

func (l *periodicEpochLayout) partPath(n blobIdent, entropy string) string {
	return path.Join(l.dir(n), n.partFname(entropy))
}

func (l *periodicEpochLayout) pruneBefore(before primitives.Epoch) (*pruneSummary, error) {
	sums, err := pruneBefore(before, l)
	if err != nil {
		return nil, err
	}
	// Roll up summaries and clean up per-epoch directories.
	rollup := &pruneSummary{}
	for epoch, sum := range sums {
		rollup.blobsPruned += sum.blobsPruned
		rollup.failedRemovals = append(rollup.failedRemovals, sum.failedRemovals...)
		rmdir := l.epochDir(epoch)
		if len(sum.failedRemovals) == 0 {
			if err := l.fs.Remove(rmdir); err != nil {
				log.WithField("dir", rmdir).WithError(err).Error("Failed to remove epoch directory while pruning")
			}
		} else {
			log.WithField("dir", rmdir).WithField("numFailed", len(sum.failedRemovals)).WithError(err).Error("Unable to remove epoch directory due to pruning failures")
		}
	}

	return rollup, nil
}

func (l *periodicEpochLayout) remove(ident blobIdent) (int, error) {
	removed := l.cache.evict(ident.root)
	// Skip the syscall if there are no blobs to remove.
	if removed == 0 {
		return 0, nil
	}
	if err := l.fs.RemoveAll(l.dir(ident)); err != nil {
		return removed, err
	}
	return removed, nil
}

func periodForEpoch(epoch primitives.Epoch) primitives.Epoch {
	return epoch / params.BeaconConfig().MinEpochsForBlobsSidecarsRequest
}

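Worked examples of the period bucketing, taken from the paths asserted in TestIterationComplete (mainnet MinEpochsForBlobsSidecarsRequest = 4096):

// periodForEpoch(1234)     == 0    // by-epoch/0/1234/...
// periodForEpoch(5330)     == 1    // by-epoch/1/5330/...
// periodForEpoch(16777216) == 4096 // by-epoch/4096/16777216/...
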
// Funcs below this line are iteration support methods that are specific to the epoch layout.

func isBeforePeriod(before primitives.Epoch) func(string) bool {
	if before == 0 {
		return filterNoop
	}
	beforePeriod := periodForEpoch(before)
	if before%epochsPerDirectory != 0 {
		// Add one because we need to include the period the epoch is in, unless it is the first epoch in the period,
		// in which case we can just look at any previous period.
		beforePeriod += 1
	}
	return func(p string) bool {
		period, err := periodFromPath(p)
		if err != nil {
			return false
		}
		return primitives.Epoch(period) < beforePeriod
	}
}
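
The rounding keeps any period that could still contain epochs before the cutoff; a worked trace, assuming the mainnet 4096-epoch period:

// before = 4096: 4096 % 4096 == 0, so beforePeriod stays 1;
//   only period 0 (epochs 0..4095) passes the filter.
// before = 4097: 4097 % 4096 != 0, so beforePeriod becomes 1+1 = 2;
//   period 1 also passes, because epoch 4096 (< 4097) lives there.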

func isBeforeEpoch(before primitives.Epoch) func(string) bool {
	if before == 0 {
		return filterNoop
	}
	return func(p string) bool {
		epoch, err := epochFromPath(p)
		if err != nil {
			return false
		}
		return epoch < before
	}
}

func epochFromPath(p string) (primitives.Epoch, error) {
	subdir := filepath.Base(p)
	epoch, err := strconv.ParseUint(subdir, 10, 64)
	if err != nil {
		return 0, errors.Wrapf(errInvalidDirectoryLayout,
			"failed to decode epoch as uint, err=%s, dir=%s", err.Error(), p)
	}
	return primitives.Epoch(epoch), nil
}

func periodFromPath(p string) (uint64, error) {
	subdir := filepath.Base(p)
	period, err := strconv.ParseUint(subdir, 10, 64)
	if err != nil {
		return 0, errors.Wrapf(errInvalidDirectoryLayout,
			"failed to decode period from path as uint, err=%s, dir=%s", err.Error(), p)
	}
	return period, nil
}

func populateEpoch(namer blobIdent, dir string) (blobIdent, error) {
	epoch, err := epochFromPath(dir)
	if err != nil {
		return namer, err
	}
	namer.epoch = epoch
	return namer, nil
}
219 beacon-chain/db/filesystem/layout_flat.go (Normal file)
@@ -0,0 +1,219 @@
package filesystem

import (
	"encoding/binary"
	"io"
	"os"
	"path"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
	"github.com/spf13/afero"
)

type flatLayout struct {
	fs afero.Fs
	cache *blobStorageSummaryCache
	pruner *blobPruner
}

var _ fsLayout = &flatLayout{}

func newFlatLayout(fs afero.Fs, cache *blobStorageSummaryCache, pruner *blobPruner) fsLayout {
	l := &flatLayout{fs: fs, cache: cache, pruner: pruner}
	return l
}

func (l *flatLayout) iterateIdents(before primitives.Epoch) (*identIterator, error) {
	if _, err := l.fs.Stat("."); err != nil {
		if os.IsNotExist(err) {
			return &identIterator{eof: true}, nil // The directory is non-existent, which is fine; stop iteration.
		}
		return nil, errors.Wrapf(err, "error reading path %s", periodicEpochBaseDir)
	}
	entries, err := listDir(l.fs, ".")
	if err != nil {
		return nil, errors.Wrapf(err, "could not list root directory")
	}
	slotAndIndex := &flatSlotReader{fs: l.fs, cache: l.cache, before: before}
	return &identIterator{
		fs: l.fs,
		// Please see comments on the `layers` field in `identIterator` if the role of the layers is unclear.
		layers: []layoutLayer{
			{populateIdent: populateRoot, filter: isFlatCachedAndBefore(l.cache, before)},
			{populateIdent: slotAndIndex.populateEpoch, filter: slotAndIndex.isSSZAndBefore}},
		entries: entries,
	}, nil
}

func (*flatLayout) name() string {
	return LayoutNameFlat
}

func (l *flatLayout) blockParentDirs(ident blobIdent) []string {
	return []string{}
}

func (*flatLayout) dir(n blobIdent) string {
	return rootToString(n.root)
}

func (l *flatLayout) sszPath(n blobIdent) string {
	return path.Join(l.dir(n), n.sszFname())
}

func (l *flatLayout) partPath(n blobIdent, entropy string) string {
	return path.Join(l.dir(n), n.partFname(entropy))
}

func (l *flatLayout) ident(root [32]byte, idx uint64) (blobIdent, error) {
	return l.cache.identForIdx(root, idx)
}

func (l *flatLayout) dirIdent(root [32]byte) (blobIdent, error) {
	return l.cache.identForRoot(root)
}

func (l *flatLayout) summary(root [32]byte) BlobStorageSummary {
	return l.cache.Summary(root)
}

func (l *flatLayout) remove(ident blobIdent) (int, error) {
	removed := l.cache.evict(ident.root)
	if err := l.fs.RemoveAll(l.dir(ident)); err != nil {
		return removed, err
	}
	return removed, nil
}

func (l *flatLayout) notify(ident blobIdent) error {
	if err := l.cache.ensure(ident); err != nil {
		return err
	}
	l.pruner.notify(ident.epoch, l)
	return nil
}

func (l *flatLayout) pruneBefore(before primitives.Epoch) (*pruneSummary, error) {
	sums, err := pruneBefore(before, l)
	if err != nil {
		return nil, err
	}

	// Roll up the per-epoch summaries; the flat layout has no epoch directories to clean up.
	rollup := &pruneSummary{}
	for _, sum := range sums {
		rollup.blobsPruned += sum.blobsPruned
		rollup.failedRemovals = append(rollup.failedRemovals, sum.failedRemovals...)
	}

	return rollup, nil
}

// Below this line are iteration support funcs and types that are specific to the flat layout.

// Read slot from marshaled BlobSidecar data in the given file. See slotFromBlob for details.
func slotFromFile(name string, fs afero.Fs) (primitives.Slot, error) {
	f, err := fs.Open(name)
	if err != nil {
		return 0, err
	}
	defer func() {
		if err := f.Close(); err != nil {
			log.WithError(err).Errorf("Could not close blob file")
		}
	}()
	return slotFromBlob(f)
}

// slotFromBlob reads the slot directly out of marshaled BlobSidecar data at a fixed offset
// (8 + 131072 + 48 + 48 = 131176 bytes): the combined size of the fixed-length fields that
// precede the slot, which is the first field of the header within SignedBeaconBlockHeader.
func slotFromBlob(at io.ReaderAt) (primitives.Slot, error) {
	b := make([]byte, 8)
	_, err := at.ReadAt(b, 131176)
	if err != nil {
		return 0, err
	}
	rawSlot := binary.LittleEndian.Uint64(b)
	return primitives.Slot(rawSlot), nil
}
|
||||
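The 131176-byte offset is just the sum of the fixed-size BlobSidecar fields that precede the header; a sketch of the arithmetic (field sizes per the Deneb consensus spec, constant names illustrative):

    const (
        indexBytes      = 8      // index: uint64
        blobBytes       = 131072 // blob: 4096 field elements * 32 bytes
        commitmentBytes = 48     // kzg_commitment
        proofBytes      = 48     // kzg_proof

        // signed_block_header.message.slot is the first field after these,
        // so its little-endian uint64 starts at this offset.
        slotOffsetBytes = indexBytes + blobBytes + commitmentBytes + proofBytes // 131176
    )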
type flatSlotReader struct {
    before primitives.Epoch
    fs     afero.Fs
    cache  *blobStorageSummaryCache
}

func (l *flatSlotReader) populateEpoch(ident blobIdent, fname string) (blobIdent, error) {
    ident, err := populateIndex(ident, fname)
    if err != nil {
        return ident, err
    }
    sum, ok := l.cache.get(ident.root)
    if ok {
        ident.epoch = sum.epoch
        // Return early if the index is already known to the cache.
        if sum.HasIndex(ident.index) {
            return ident, nil
        }
    } else {
        // If the root is not in the cache, we need to read the slot from the file.
        slot, err := slotFromFile(fname, l.fs)
        if err != nil {
            return ident, err
        }
        ident.epoch = slots.ToEpoch(slot)
    }
    return ident, l.cache.ensure(ident)
}

func (l *flatSlotReader) isSSZAndBefore(fname string) bool {
    if !isSszFile(fname) {
        return false
    }
    // When 'before' != 0 and isSSZAndBefore is used as a filter on the same layer as populateEpoch,
    // this method will typically run populateEpoch before the iteration code calls it. The cache is
    // guaranteed to be populated either way: a path filtered out here is either malformed (its root
    // can't be determined, so the iteration code would never call populateEpoch for it), or it is
    // valid, in which case populateEpoch runs here and populates the cache before the epoch is compared.
    if l.before == 0 {
        return true
    }
    ident, err := populateRoot(blobIdent{}, path.Dir(fname))
    // Filter out the path if we can't determine its root.
    if err != nil {
        return false
    }
    ident, err = l.populateEpoch(ident, fname)
    // Filter out the path if we can't determine its epoch or properly cache it.
    if err != nil {
        return false
    }
    return ident.epoch < l.before
}

// isFlatCachedAndBefore returns a filter callback that excludes roots known, per the cache, to be
// at or after the given epoch. It's an opportunistic filter; if the cache is not populated, it will
// not attempt to populate it. isSSZAndBefore, on the other hand, is a strict filter that only
// returns true if the file is an SSZ file and its epoch can be determined.
func isFlatCachedAndBefore(cache *blobStorageSummaryCache, before primitives.Epoch) func(string) bool {
    if before == 0 {
        return isRootDir
    }
    return func(p string) bool {
        if !isRootDir(p) {
            return false
        }
        root, err := rootFromPath(p)
        if err != nil {
            return false
        }
        sum, ok := cache.get(root)
        // If we don't know the epoch by looking at the root, don't try to filter it.
        if !ok {
            return true
        }
        return sum.epoch < before
    }
}
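For orientation, the on-disk shape the flat layout produces, inferred from dir/sszPath above and the fixtures in the tests below (root abbreviated):

    0x0125e54c…a00b/
        0.ssz   // BlobSidecar index 0
        1.ssz   // BlobSidecar index 1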
beacon-chain/db/filesystem/layout_test.go (new file, 75 lines)
@@ -0,0 +1,75 @@
package filesystem

import (
    "testing"

    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v5/testing/require"
)

type mockLayout struct {
    pruneBeforeFunc func(primitives.Epoch) (*pruneSummary, error)
}

var _ fsLayout = &mockLayout{}

func (m *mockLayout) name() string {
    return "mock"
}

func (*mockLayout) dir(_ blobIdent) string {
    return ""
}

func (*mockLayout) blockParentDirs(id blobIdent) []string {
    return []string{}
}

func (*mockLayout) sszPath(_ blobIdent) string {
    return ""
}

func (*mockLayout) partPath(_ blobIdent, _ string) string {
    return ""
}

func (*mockLayout) iterateIdents(_ primitives.Epoch) (*identIterator, error) {
    return nil, nil
}

func (*mockLayout) ident(_ [32]byte, _ uint64) (blobIdent, error) {
    return blobIdent{}, nil
}

func (*mockLayout) dirIdent(_ [32]byte) (blobIdent, error) {
    return blobIdent{}, nil
}

func (*mockLayout) summary(_ [32]byte) BlobStorageSummary {
    return BlobStorageSummary{}
}

func (*mockLayout) notify(blobIdent) error {
    return nil
}

func (m *mockLayout) pruneBefore(before primitives.Epoch) (*pruneSummary, error) {
    return m.pruneBeforeFunc(before)
}

func (*mockLayout) remove(ident blobIdent) (int, error) {
    return 0, nil
}

var _ fsLayout = &mockLayout{}

func TestCleaner(t *testing.T) {
    l := &periodicEpochLayout{}
    p := l.periodDir(11235813)
    e := l.epochDir(11235813)
    dc := newDirCleaner()
    dc.add(p)
    require.Equal(t, 2, dc.maxDepth)
    dc.add(e)
    require.Equal(t, 3, dc.maxDepth)
}
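The assertions in TestCleaner suggest the dirCleaner tracks the deepest path it has been asked to sweep: a period directory (roughly <base>/<period>) registers a maxDepth of 2, while an epoch directory nested one level deeper (<base>/<period>/<epoch>) raises it to 3. The exact segment layout is an assumption here; the depths themselves are pinned by the test.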
beacon-chain/db/filesystem/migration_test.go (new file, 180 lines)
@@ -0,0 +1,180 @@
package filesystem

import (
    "os"
    "testing"

    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v5/testing/require"
    "github.com/prysmaticlabs/prysm/v5/testing/util"
    "github.com/prysmaticlabs/prysm/v5/time/slots"
    "github.com/spf13/afero"
)

func ezIdent(t *testing.T, rootStr string, epoch primitives.Epoch, index uint64) blobIdent {
    r, err := stringToRoot(rootStr)
    require.NoError(t, err)
    return blobIdent{root: r, epoch: epoch, index: index}
}

func setupTestBlobFile(t *testing.T, ident blobIdent, offset primitives.Slot, fs afero.Fs, l fsLayout) {
    slot, err := slots.EpochStart(ident.epoch)
    require.NoError(t, err)
    slot += offset
    _, sc := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 1)
    scb, err := sc[0].MarshalSSZ()
    require.NoError(t, err)
    dir := l.dir(ident)
    require.NoError(t, fs.MkdirAll(dir, directoryPermissions()))
    p := l.sszPath(ident)
    require.NoError(t, afero.WriteFile(fs, p, scb, 0666))
    _, err = fs.Stat(p)
    require.NoError(t, err)
}

type migrationTestTarget struct {
    ident      blobIdent
    slotOffset primitives.Slot
    migrated   bool
    path       string
}

func testAssertFsMigrated(t *testing.T, fs afero.Fs, ident blobIdent, before, after fsLayout) {
    // Assert the pre-migration path is gone.
    _, err := fs.Stat(before.sszPath(ident))
    require.ErrorIs(t, err, os.ErrNotExist)
    dir := before.dir(ident)
    _, err = listDir(fs, dir)
    require.ErrorIs(t, err, os.ErrNotExist)

    // Assert the post-migration path is present.
    _, err = fs.Stat(after.sszPath(ident))
    require.NoError(t, err)
}

func TestMigrations(t *testing.T) {
    cases := []struct {
        name           string
        forwardLayout  string
        backwardLayout string
        targets        []migrationTestTarget
    }{
        {
            name:           "all need migration",
            backwardLayout: LayoutNameFlat,
            forwardLayout:  LayoutNameByEpoch,
            targets: []migrationTestTarget{
                {
                    ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", 1234, 0),
                },
                {
                    ident:      ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 0),
                    slotOffset: 31,
                },
                {
                    ident:      ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 1),
                    slotOffset: 31,
                },
                {
                    ident:      ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", 16777216, 0),
                    slotOffset: 16,
                },
            },
        },
        {
            name:           "mix old and new",
            backwardLayout: LayoutNameFlat,
            forwardLayout:  LayoutNameByEpoch,
            targets: []migrationTestTarget{
                {
                    ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", 1234, 0),
                },
                {
                    ident:      ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 0),
                    slotOffset: 31,
                },
                {
                    ident:      ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 1),
                    slotOffset: 31,
                },
                {
                    ident:      ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", 16777216, 0),
                    slotOffset: 16,
                    migrated:   true,
                },
                {
                    ident:      ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", 16777216, 1),
                    slotOffset: 16,
                    migrated:   true,
                },
                {
                    ident:      ezIdent(t, "0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba", 16777217, 0),
                    slotOffset: 16,
                    migrated:   true,
                },
                {
                    ident:    ezIdent(t, "0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d", 11235, 1),
                    migrated: true,
                },
            },
        },
    }

    for _, c := range cases {
        t.Run(c.name, func(t *testing.T) {
            t.Run("forward", func(t *testing.T) {
                testMigration(t, c.forwardLayout, c.backwardLayout, c.targets)
            })
            // run the same test in reverse - to cover both directions while making the test table smaller.
            t.Run("backward", func(t *testing.T) {
                testMigration(t, c.backwardLayout, c.forwardLayout, c.targets)
            })
        })
    }
}

func testMigration(t *testing.T, forwardName, backwardName string, targets []migrationTestTarget) {
    fs := afero.NewMemMapFs()
    cache := newBlobStorageCache()
    forward, err := newLayout(forwardName, fs, cache, nil)
    require.NoError(t, err)
    backward, err := newLayout(backwardName, fs, cache, nil)
    require.NoError(t, err)
    for _, tar := range targets {
        if tar.migrated {
            setupTestBlobFile(t, tar.ident, tar.slotOffset, fs, forward)
        } else {
            setupTestBlobFile(t, tar.ident, tar.slotOffset, fs, backward)
        }
    }
    require.NoError(t, migrateLayout(fs, backward, forward, cache))
    for _, tar := range targets {
        // Make sure the file wound up in the right spot, according to the forward layout,
        // and that the old file is gone, according to the backward layout.
        testAssertFsMigrated(t, fs, tar.ident, backward, forward)
        entry, ok := cache.get(tar.ident.root)
        // we only expect cache to be populated here by files that needed to be moved.
        if !tar.migrated {
            require.Equal(t, true, ok)
            require.Equal(t, true, entry.HasIndex(tar.ident.index))
            require.Equal(t, tar.ident.epoch, entry.epoch)
        }
    }

    // Run migration in reverse - testing "undo"
    cache = newBlobStorageCache()
    forward, err = newLayout(forwardName, fs, cache, nil)
    require.NoError(t, err)
    backward, err = newLayout(backwardName, fs, cache, nil)
    require.NoError(t, err)
    // forward and backward are flipped compared to the above
    require.NoError(t, migrateLayout(fs, forward, backward, cache))
    for _, tar := range targets {
        // just like the above, but forward and backward are flipped
        testAssertFsMigrated(t, fs, tar.ident, forward, backward)
        entry, ok := cache.get(tar.ident.root)
        require.Equal(t, true, ok)
        require.Equal(t, true, entry.HasIndex(tar.ident.index))
        require.Equal(t, tar.ident.epoch, entry.epoch)
    }
}
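For contrast with the flat layout sketched earlier, the by-epoch layout targeted by these migrations nests the same root directory under period and epoch segments (a sketch; the exact segment values come from periodDir/epochDir elsewhere in this diff):

    flat (LayoutNameFlat):        0x0125e54c…a00b/0.ssz
    by-epoch (LayoutNameByEpoch): <period>/<epoch>/0x0125e54c…a00b/0.ssz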
@@ -4,30 +4,41 @@ import (
    "testing"

    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v5/time/slots"
    "github.com/spf13/afero"
)

// NewEphemeralBlobStorage should only be used for tests.
// The instance of BlobStorage returned is backed by an in-memory virtual filesystem,
// improving test performance and simplifying cleanup.
func NewEphemeralBlobStorage(t testing.TB) *BlobStorage {
    fs := afero.NewMemMapFs()
    pruner, err := newBlobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest, withWarmedCache())
    if err != nil {
        t.Fatal("test setup issue", err)
    }
    return &BlobStorage{fs: fs, pruner: pruner}
func NewEphemeralBlobStorage(t testing.TB, opts ...BlobStorageOption) *BlobStorage {
    return NewWarmedEphemeralBlobStorageUsingFs(t, afero.NewMemMapFs(), opts...)
}

// NewEphemeralBlobStorageWithFs can be used by tests that want access to the virtual filesystem
// NewEphemeralBlobStorageAndFs can be used by tests that want access to the virtual filesystem
// in order to interact with it outside the parameters of the BlobStorage api.
func NewEphemeralBlobStorageWithFs(t testing.TB) (afero.Fs, *BlobStorage) {
func NewEphemeralBlobStorageAndFs(t testing.TB, opts ...BlobStorageOption) (afero.Fs, *BlobStorage) {
    fs := afero.NewMemMapFs()
    pruner, err := newBlobPruner(fs, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest, withWarmedCache())
    bs := NewWarmedEphemeralBlobStorageUsingFs(t, fs, opts...)
    return fs, bs
}

func NewEphemeralBlobStorageUsingFs(t testing.TB, fs afero.Fs, opts ...BlobStorageOption) *BlobStorage {
    opts = append(opts,
        WithBlobRetentionEpochs(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest),
        WithFs(fs))
    bs, err := NewBlobStorage(opts...)
    if err != nil {
        t.Fatal("test setup issue", err)
        t.Fatalf("error initializing test BlobStorage, err=%s", err.Error())
    }
    return fs, &BlobStorage{fs: fs, pruner: pruner}
    return bs
}

func NewWarmedEphemeralBlobStorageUsingFs(t testing.TB, fs afero.Fs, opts ...BlobStorageOption) *BlobStorage {
    bs := NewEphemeralBlobStorageUsingFs(t, fs, opts...)
    bs.WarmCache()
    return bs
}

type BlobMocker struct {
@@ -37,17 +48,9 @@ type BlobMocker struct {

// CreateFakeIndices creates empty blob sidecar files at the expected path for the given
// root and indices to influence the result of Indices().
func (bm *BlobMocker) CreateFakeIndices(root [32]byte, indices ...uint64) error {
func (bm *BlobMocker) CreateFakeIndices(root [32]byte, slot primitives.Slot, indices ...uint64) error {
    for i := range indices {
        n := blobNamer{root: root, index: indices[i]}
        if err := bm.fs.MkdirAll(n.dir(), directoryPermissions); err != nil {
            return err
        }
        f, err := bm.fs.Create(n.path())
        if err != nil {
            return err
        }
        if err := f.Close(); err != nil {
        if err := bm.bs.layout.notify(newBlobIdent(root, slots.ToEpoch(slot), indices[i])); err != nil {
            return err
        }
    }
@@ -56,9 +59,8 @@ func (bm *BlobMocker) CreateFakeIndices(root [32]byte, indices ...uint64) error

// NewEphemeralBlobStorageWithMocker returns a *BlobMocker value in addition to the BlobStorage value.
// BlobMocker encapsulates details like blob path construction to avoid leaking implementation details.
func NewEphemeralBlobStorageWithMocker(_ testing.TB) (*BlobMocker, *BlobStorage) {
    fs := afero.NewMemMapFs()
    bs := &BlobStorage{fs: fs}
func NewEphemeralBlobStorageWithMocker(t testing.TB) (*BlobMocker, *BlobStorage) {
    fs, bs := NewEphemeralBlobStorageAndFs(t)
    return &BlobMocker{fs: fs, bs: bs}, bs
}

@@ -66,7 +68,7 @@ func NewMockBlobStorageSummarizer(t *testing.T, set map[[32]byte][]int) BlobStor
    c := newBlobStorageCache()
    for k, v := range set {
        for i := range v {
            if err := c.ensure(k, 0, uint64(v[i])); err != nil {
            if err := c.ensure(blobIdent{root: k, epoch: 0, index: uint64(v[i])}); err != nil {
                t.Fatal(err)
            }
        }
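A sketch of how the reworked helpers compose in a test (WithLayout and LayoutNameByEpoch are used together this way in the pruner tests later in this diff):

    bs := NewEphemeralBlobStorage(t) // fresh in-memory fs, cache warmed
    fs, bs2 := NewEphemeralBlobStorageAndFs(t, WithLayout(LayoutNameByEpoch))
    _ = fs // direct afero.Fs handle for assertions outside the BlobStorage API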
@@ -1,319 +1,67 @@
package filesystem

import (
    "context"
    "encoding/binary"
    "io"
    "path"
    "path/filepath"
    "strconv"
    "strings"
    "sync"
    "sync/atomic"
    "time"

    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v5/time/slots"
    "github.com/sirupsen/logrus"
    "github.com/spf13/afero"
)

const retentionBuffer primitives.Epoch = 2

var (
    errPruningFailures = errors.New("blobs could not be pruned for some roots")
    errNotBlobSSZ      = errors.New("not a blob ssz file")
)
var errNotBlobSSZ = errors.New("not a blob ssz file")

// blobPruner keeps track of the tail end of the retention period, based only on the blobs it has seen via the notify method.
// If the retention period advances in response to notify being called,
// the pruner will invoke the pruneBefore method of the given layout in a new goroutine.
// The details of pruning are left entirely to the layout, with the pruner's only responsibility being to
// schedule just one pruning operation at a time, for each forward movement of the minimum retention epoch.
type blobPruner struct {
    sync.Mutex
    prunedBefore atomic.Uint64
    windowSize   primitives.Slot
    cache        *blobStorageCache
    cacheReady   chan struct{}
    warmed       bool
    fs           afero.Fs
    mu              sync.Mutex
    prunedBefore    atomic.Uint64
    retentionPeriod primitives.Epoch
}

type prunerOpt func(*blobPruner) error

func withWarmedCache() prunerOpt {
    return func(p *blobPruner) error {
        return p.warmCache()
    }
func newBlobPruner(retain primitives.Epoch) *blobPruner {
    p := &blobPruner{retentionPeriod: retain + retentionBuffer}
    return p
}

func newBlobPruner(fs afero.Fs, retain primitives.Epoch, opts ...prunerOpt) (*blobPruner, error) {
    r, err := slots.EpochStart(retain + retentionBuffer)
    if err != nil {
        return nil, errors.Wrap(err, "could not set retentionSlots")
    }
    cw := make(chan struct{})
    p := &blobPruner{fs: fs, windowSize: r, cache: newBlobStorageCache(), cacheReady: cw}
    for _, o := range opts {
        if err := o(p); err != nil {
            return nil, err
        }
    }
    return p, nil
}

// notify updates the pruner's view of root->blob mappings. This allows the pruner to build a cache
// of root->slot mappings and decide when to evict old blobs based on the age of present blobs.
func (p *blobPruner) notify(root [32]byte, latest primitives.Slot, idx uint64) error {
    if err := p.cache.ensure(root, latest, idx); err != nil {
        return err
    }
    pruned := uint64(windowMin(latest, p.windowSize))
    if p.prunedBefore.Swap(pruned) == pruned {
        return nil
// notify returns a channel that is closed when the pruning operation is complete.
// This is useful for tests, but at runtime fsLayouts or BlobStorage should not wait for completion.
func (p *blobPruner) notify(latest primitives.Epoch, layout fsLayout) chan struct{} {
    done := make(chan struct{})
    floor := periodFloor(latest, p.retentionPeriod)
    if primitives.Epoch(p.prunedBefore.Swap(uint64(floor))) >= floor {
        // Only trigger pruning when the atomic swap actually advances prunedBefore.
        close(done)
        return done
    }
    go func() {
        p.Lock()
        defer p.Unlock()
        if err := p.prune(primitives.Slot(pruned)); err != nil {
            log.WithError(err).Errorf("Failed to prune blobs from slot %d", latest)
        p.mu.Lock()
        start := time.Now()
        defer p.mu.Unlock()
        sum, err := layout.pruneBefore(floor)
        if err != nil {
            log.WithError(err).WithFields(sum.LogFields()).Warn("Encountered errors during blob pruning.")
        }
        log.WithFields(logrus.Fields{
            "upToEpoch":    floor,
            "duration":     time.Since(start).String(),
            "filesRemoved": sum.blobsPruned,
        }).Debug("Pruned old blobs")
        blobsPrunedCounter.Add(float64(sum.blobsPruned))
        close(done)
    }()
    return nil
    return done
}

func windowMin(latest, offset primitives.Slot) primitives.Slot {
    // Safely compute the first slot in the epoch for the latest slot
    latest = latest - latest%params.BeaconConfig().SlotsPerEpoch
    if latest < offset {
func periodFloor(latest, period primitives.Epoch) primitives.Epoch {
    if latest < period {
        return 0
    }
    return latest - offset
}
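A worked example of the new retention arithmetic, assuming the mainnet default MinEpochsForBlobsSidecarsRequest of 4096 plus the retentionBuffer of 2, i.e. retentionPeriod == 4098:

    periodFloor(4097, 4098)  // == 0: still inside the first retention period
    periodFloor(4099, 4098)  // == 1: epochs before 1 become prunable
    periodFloor(15333, 4098) // == 11235

Tests can block on the returned channel to await the asynchronous prune; runtime callers should not:

    <-pruner.notify(latest, layout)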
func (p *blobPruner) warmCache() error {
    p.Lock()
    defer func() {
        if !p.warmed {
            p.warmed = true
            close(p.cacheReady)
        }
        p.Unlock()
    }()
    if err := p.prune(0); err != nil {
        return err
    }
    return nil
}

func (p *blobPruner) waitForCache(ctx context.Context) (*blobStorageCache, error) {
    select {
    case <-p.cacheReady:
        return p.cache, nil
    case <-ctx.Done():
        return nil, ctx.Err()
    }
}

// Prune prunes blobs in the base directory based on the retention epoch.
// It deletes blobs older than currentEpoch - (retentionEpochs+bufferEpochs).
// This is so that we keep a slight buffer and blobs are deleted after n+2 epochs.
func (p *blobPruner) prune(pruneBefore primitives.Slot) error {
    start := time.Now()
    totalPruned, totalErr := 0, 0
    // Customize logging/metrics behavior for the initial cache warmup when slot=0.
    // We'll never see a prune request for slot 0, unless this is the initial call to warm up the cache.
    if pruneBefore == 0 {
        defer func() {
            log.WithField("duration", time.Since(start).String()).Debug("Warmed up pruner cache")
        }()
    } else {
        defer func() {
            log.WithFields(logrus.Fields{
                "upToEpoch":    slots.ToEpoch(pruneBefore),
                "duration":     time.Since(start).String(),
                "filesRemoved": totalPruned,
            }).Debug("Pruned old blobs")
            blobsPrunedCounter.Add(float64(totalPruned))
        }()
    }

    entries, err := listDir(p.fs, ".")
    if err != nil {
        return errors.Wrap(err, "unable to list root blobs directory")
    }
    dirs := filter(entries, filterRoot)
    for _, dir := range dirs {
        pruned, err := p.tryPruneDir(dir, pruneBefore)
        if err != nil {
            totalErr += 1
            log.WithError(err).WithField("directory", dir).Error("Unable to prune directory")
        }
        totalPruned += pruned
    }

    if totalErr > 0 {
        return errors.Wrapf(errPruningFailures, "pruning failed for %d root directories", totalErr)
    }
    return nil
}

func shouldRetain(slot, pruneBefore primitives.Slot) bool {
    return slot >= pruneBefore
}

func (p *blobPruner) tryPruneDir(dir string, pruneBefore primitives.Slot) (int, error) {
    root, err := rootFromDir(dir)
    if err != nil {
        return 0, errors.Wrapf(err, "invalid directory, could not parse subdir as root %s", dir)
    }
    slot, slotCached := p.cache.slot(root)
    // Return early if the slot is cached and doesn't need pruning.
    if slotCached && shouldRetain(slot, pruneBefore) {
        return 0, nil
    }

    // entries will include things that aren't ssz files, like dangling .part files. We need these to
    // completely clean up the directory.
    entries, err := listDir(p.fs, dir)
    if err != nil {
        return 0, errors.Wrapf(err, "failed to list blobs in directory %s", dir)
    }
    // scFiles filters the dir listing down to the ssz encoded BlobSidecar files. This allows us to peek
    // at the first one in the list to figure out the slot.
    scFiles := filter(entries, filterSsz)
    if len(scFiles) == 0 {
        log.WithField("dir", dir).Warn("Pruner ignoring directory with no blob files")
        return 0, nil
    }
    if !slotCached {
        slot, err = slotFromFile(path.Join(dir, scFiles[0]), p.fs)
        if err != nil {
            return 0, errors.Wrapf(err, "slot could not be read from blob file %s", scFiles[0])
        }
        for i := range scFiles {
            idx, err := idxFromPath(scFiles[i])
            if err != nil {
                return 0, errors.Wrapf(err, "index could not be determined for blob file %s", scFiles[i])
            }
            if err := p.cache.ensure(root, slot, idx); err != nil {
                return 0, errors.Wrapf(err, "could not update prune cache for blob file %s", scFiles[i])
            }
        }
        if shouldRetain(slot, pruneBefore) {
            return 0, nil
        }
    }

    removed := 0
    for _, fname := range entries {
        fullName := path.Join(dir, fname)
        if err := p.fs.Remove(fullName); err != nil {
            return removed, errors.Wrapf(err, "unable to remove %s", fullName)
        }
        // Don't count other files that happen to be in the dir, like dangling .part files.
        if filterSsz(fname) {
            removed += 1
        }
        // Log a warning whenever we clean up a .part file
        if filterPart(fullName) {
            log.WithField("file", fullName).Warn("Deleting abandoned blob .part file")
        }
    }
    if err := p.fs.Remove(dir); err != nil {
        return removed, errors.Wrapf(err, "unable to remove blob directory %s", dir)
    }

    p.cache.evict(root)
    return len(scFiles), nil
}

func idxFromPath(fname string) (uint64, error) {
    fname = path.Base(fname)

    if filepath.Ext(fname) != dotSszExt {
        return 0, errors.Wrap(errNotBlobSSZ, "does not have .ssz extension")
    }
    parts := strings.Split(fname, ".")
    if len(parts) != 2 {
        return 0, errors.Wrap(errNotBlobSSZ, "unexpected filename structure (want <index>.ssz)")
    }
    return strconv.ParseUint(parts[0], 10, 64)
}

func rootFromDir(dir string) ([32]byte, error) {
    subdir := filepath.Base(dir) // end of the path should be the blob directory, named by hex encoding of root
    root, err := stringToRoot(subdir)
    if err != nil {
        return root, errors.Wrapf(err, "invalid directory, could not parse subdir as root %s", dir)
    }
    return root, nil
}

// Read slot from marshaled BlobSidecar data in the given file. See slotFromBlob for details.
func slotFromFile(file string, fs afero.Fs) (primitives.Slot, error) {
    f, err := fs.Open(file)
    if err != nil {
        return 0, err
    }
    defer func() {
        if err := f.Close(); err != nil {
            log.WithError(err).Errorf("Could not close blob file")
        }
    }()
    return slotFromBlob(f)
}

// slotFromBlob reads 8 bytes of the file's ssz data at offset 131176 (8 + 131072 + 48 + 48),
// the combined size of the fixed-length BlobSidecar fields that precede the slot at the
// start of the SignedBeaconBlockHeader.
func slotFromBlob(at io.ReaderAt) (primitives.Slot, error) {
    b := make([]byte, 8)
    _, err := at.ReadAt(b, 131176)
    if err != nil {
        return 0, err
    }
    rawSlot := binary.LittleEndian.Uint64(b)
    return primitives.Slot(rawSlot), nil
}

func listDir(fs afero.Fs, dir string) ([]string, error) {
    top, err := fs.Open(dir)
    if err != nil {
        return nil, errors.Wrap(err, "failed to open directory descriptor")
    }
    defer func() {
        if err := top.Close(); err != nil {
            log.WithError(err).Errorf("Could not close file %s", dir)
        }
    }()
    // re the -1 param: "If n <= 0, Readdirnames returns all the names from the directory in a single slice"
    dirs, err := top.Readdirnames(-1)
    if err != nil {
        return nil, errors.Wrap(err, "failed to read directory listing")
    }
    return dirs, nil
}

func filter(entries []string, filt func(string) bool) []string {
    filtered := make([]string, 0, len(entries))
    for i := range entries {
        if filt(entries[i]) {
            filtered = append(filtered, entries[i])
        }
    }
    return filtered
}

func filterRoot(s string) bool {
    return strings.HasPrefix(s, "0x")
}

var dotSszExt = "." + sszExt
var dotPartExt = "." + partExt

func filterSsz(s string) bool {
    return filepath.Ext(s) == dotSszExt
}

func filterPart(s string) bool {
    return filepath.Ext(s) == dotPartExt
    return latest - period
}
@@ -1,394 +1,197 @@
package filesystem

import (
    "bytes"
    "context"
    "fmt"
    "math"
    "encoding/binary"
    "os"
    "path"
    "sort"
    "testing"
    "time"

    "github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
    "github.com/prysmaticlabs/prysm/v5/testing/require"
    "github.com/prysmaticlabs/prysm/v5/testing/util"
    "github.com/prysmaticlabs/prysm/v5/time/slots"
    "github.com/spf13/afero"
)

func TestTryPruneDir_CachedNotExpired(t *testing.T) {
    fs := afero.NewMemMapFs()
    pr, err := newBlobPruner(fs, 0)
    require.NoError(t, err)
    slot := pr.windowSize
    _, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, params.BeaconConfig().MaxBlobsPerBlock(slot))
    sc, err := verification.BlobSidecarNoop(sidecars[0])
    require.NoError(t, err)
    rootStr := rootString(sc.BlockRoot())
    // This slot is right on the edge of what would need to be pruned, so by adding it to the cache and
    // skipping any other test setup, we can be certain the hot cache path never touches the filesystem.
    require.NoError(t, pr.cache.ensure(sc.BlockRoot(), sc.Slot(), 0))
    pruned, err := pr.tryPruneDir(rootStr, pr.windowSize)
    require.NoError(t, err)
    require.Equal(t, 0, pruned)
type prunerScenario struct {
    name            string
    prunedBefore    primitives.Epoch
    retentionPeriod primitives.Epoch
    latest          primitives.Epoch
    expected        pruneExpectation
}

func TestCacheWarmFail(t *testing.T) {
    fs := afero.NewMemMapFs()
    n := blobNamer{root: bytesutil.ToBytes32([]byte("derp")), index: 0}
    bp := n.path()
    mkdir := path.Dir(bp)
    require.NoError(t, fs.MkdirAll(mkdir, directoryPermissions))

    // Create an empty blob index in the fs by touching the file at a seemingly valid path.
    fi, err := fs.Create(bp)
    require.NoError(t, err)
    require.NoError(t, fi.Close())

    // Cache warm should fail due to the unexpected EOF.
    pr, err := newBlobPruner(fs, 0)
    require.NoError(t, err)
    require.ErrorIs(t, pr.warmCache(), errPruningFailures)

    // The cache warm has finished, so calling waitForCache with a super short deadline
    // should not block or hit the context deadline.
    ctx := context.Background()
    ctx, cancel := context.WithDeadline(ctx, time.Now().Add(1*time.Millisecond))
    defer cancel()
    c, err := pr.waitForCache(ctx)
    // We will get an error and a nil value for the cache if we hit the deadline.
    require.NoError(t, err)
    require.NotNil(t, c)
type pruneExpectation struct {
    called  bool
    arg     primitives.Epoch
    summary *pruneSummary
    err     error
}

func TestTryPruneDir_CachedExpired(t *testing.T) {
    t.Run("empty directory", func(t *testing.T) {
        fs := afero.NewMemMapFs()
        pr, err := newBlobPruner(fs, 0)
        require.NoError(t, err)
        var slot primitives.Slot = 0
        _, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 1)
        sc, err := verification.BlobSidecarNoop(sidecars[0])
        require.NoError(t, err)
        rootStr := rootString(sc.BlockRoot())
        require.NoError(t, fs.Mkdir(rootStr, directoryPermissions)) // make empty directory
        require.NoError(t, pr.cache.ensure(sc.BlockRoot(), sc.Slot(), 0))
        pruned, err := pr.tryPruneDir(rootStr, slot+1)
        require.NoError(t, err)
        require.Equal(t, 0, pruned)
    })
    t.Run("blobs to delete", func(t *testing.T) {
        fs, bs := NewEphemeralBlobStorageWithFs(t)
        var slot primitives.Slot = 0
        _, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 2)
        scs, err := verification.BlobSidecarSliceNoop(sidecars)
        require.NoError(t, err)

        require.NoError(t, bs.Save(scs[0]))
        require.NoError(t, bs.Save(scs[1]))

        // check that the root->slot is cached
        root := scs[0].BlockRoot()
        rootStr := rootString(root)
        cs, cok := bs.pruner.cache.slot(scs[0].BlockRoot())
        require.Equal(t, true, cok)
        require.Equal(t, slot, cs)

        // ensure that we see the saved files in the filesystem
        files, err := listDir(fs, rootStr)
        require.NoError(t, err)
        require.Equal(t, 2, len(files))

        pruned, err := bs.pruner.tryPruneDir(rootStr, slot+1)
        require.NoError(t, err)
        require.Equal(t, 2, pruned)
        files, err = listDir(fs, rootStr)
        require.ErrorIs(t, err, os.ErrNotExist)
        require.Equal(t, 0, len(files))
    })
func (e *pruneExpectation) record(before primitives.Epoch) (*pruneSummary, error) {
    e.called = true
    e.arg = before
    if e.summary == nil {
        e.summary = &pruneSummary{}
    }
    return e.summary, e.err
}

func TestTryPruneDir_SlotFromFile(t *testing.T) {
    t.Run("expired blobs deleted", func(t *testing.T) {
        fs, bs := NewEphemeralBlobStorageWithFs(t)
        var slot primitives.Slot = 0
        _, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 2)
        scs, err := verification.BlobSidecarSliceNoop(sidecars)
        require.NoError(t, err)

        require.NoError(t, bs.Save(scs[0]))
        require.NoError(t, bs.Save(scs[1]))

        // check that the root->slot is cached
        root := scs[0].BlockRoot()
        rootStr := rootString(root)
        cs, ok := bs.pruner.cache.slot(root)
        require.Equal(t, true, ok)
        require.Equal(t, slot, cs)
        // evict it from the cache so that we trigger the file read path
        bs.pruner.cache.evict(root)
        _, ok = bs.pruner.cache.slot(root)
        require.Equal(t, false, ok)

        // ensure that we see the saved files in the filesystem
        files, err := listDir(fs, rootStr)
        require.NoError(t, err)
        require.Equal(t, 2, len(files))

        pruned, err := bs.pruner.tryPruneDir(rootStr, slot+1)
        require.NoError(t, err)
        require.Equal(t, 2, pruned)
        files, err = listDir(fs, rootStr)
        require.ErrorIs(t, err, os.ErrNotExist)
        require.Equal(t, 0, len(files))
    })
    t.Run("not expired, intact", func(t *testing.T) {
        fs, bs := NewEphemeralBlobStorageWithFs(t)
        // Set slot equal to the window size, so it should be retained.
        slot := bs.pruner.windowSize
        _, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 2)
        scs, err := verification.BlobSidecarSliceNoop(sidecars)
        require.NoError(t, err)

        require.NoError(t, bs.Save(scs[0]))
        require.NoError(t, bs.Save(scs[1]))

        // Evict slot mapping from the cache so that we trigger the file read path.
        root := scs[0].BlockRoot()
        rootStr := rootString(root)
        bs.pruner.cache.evict(root)
        _, ok := bs.pruner.cache.slot(root)
        require.Equal(t, false, ok)

        // Ensure that we see the saved files in the filesystem.
        files, err := listDir(fs, rootStr)
        require.NoError(t, err)
        require.Equal(t, 2, len(files))

        // This should use the slotFromFile code (simulating restart).
        // Setting pruneBefore == slot, so that the slot will be outside the window (at the boundary).
        pruned, err := bs.pruner.tryPruneDir(rootStr, slot)
        require.NoError(t, err)
        require.Equal(t, 0, pruned)

        // Ensure files are still present.
        files, err = listDir(fs, rootStr)
        require.NoError(t, err)
        require.Equal(t, 2, len(files))
    })
}

func TestSlotFromBlob(t *testing.T) {
    cases := []struct {
        slot primitives.Slot
    }{
        {slot: 0},
        {slot: 2},
        {slot: 1123581321},
        {slot: math.MaxUint64},
func TestPrunerNotify(t *testing.T) {
    defaultRetention := params.BeaconConfig().MinEpochsForBlobsSidecarsRequest
    cases := []prunerScenario{
        {
            name:            "last epoch of period",
            retentionPeriod: defaultRetention,
            prunedBefore:    11235,
            latest:          defaultRetention + 11235,
            expected:        pruneExpectation{called: false},
        },
        {
            name:            "within period",
            retentionPeriod: defaultRetention,
            prunedBefore:    11235,
            latest:          11235 + defaultRetention - 1,
            expected:        pruneExpectation{called: false},
        },
        {
            name:            "triggers",
            retentionPeriod: defaultRetention,
            prunedBefore:    11235,
            latest:          11235 + 1 + defaultRetention,
            expected:        pruneExpectation{called: true, arg: 11235 + 1},
        },
        {
            name:            "from zero - before first period",
            retentionPeriod: defaultRetention,
            prunedBefore:    0,
            latest:          defaultRetention - 1,
            expected:        pruneExpectation{called: false},
        },
        {
            name:            "from zero - at boundary",
            retentionPeriod: defaultRetention,
            prunedBefore:    0,
            latest:          defaultRetention,
            expected:        pruneExpectation{called: false},
        },
        {
            name:            "from zero - triggers",
            retentionPeriod: defaultRetention,
            prunedBefore:    0,
            latest:          defaultRetention + 1,
            expected:        pruneExpectation{called: true, arg: 1},
        },
    }
    for _, c := range cases {
        t.Run(fmt.Sprintf("slot %d", c.slot), func(t *testing.T) {
            _, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, c.slot, 1)
            sc := sidecars[0]
            enc, err := sc.MarshalSSZ()
            require.NoError(t, err)
            slot, err := slotFromBlob(bytes.NewReader(enc))
            require.NoError(t, err)
            require.Equal(t, c.slot, slot)
        t.Run(c.name, func(t *testing.T) {
            actual := &pruneExpectation{}
            l := &mockLayout{pruneBeforeFunc: actual.record}
            pruner := &blobPruner{retentionPeriod: c.retentionPeriod}
            pruner.prunedBefore.Store(uint64(c.prunedBefore))
            done := pruner.notify(c.latest, l)
            <-done
            require.Equal(t, c.expected.called, actual.called)
            require.Equal(t, c.expected.arg, actual.arg)
        })
    }
}

func TestSlotFromFile(t *testing.T) {
    cases := []struct {
        slot primitives.Slot
    }{
        {slot: 0},
        {slot: 2},
        {slot: 1123581321},
        {slot: math.MaxUint64},
    }
    for _, c := range cases {
        t.Run(fmt.Sprintf("slot %d", c.slot), func(t *testing.T) {
            fs, bs := NewEphemeralBlobStorageWithFs(t)
            _, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, c.slot, 1)
            sc, err := verification.BlobSidecarNoop(sidecars[0])
            require.NoError(t, err)
            require.NoError(t, bs.Save(sc))
            fname := namerForSidecar(sc)
            sszPath := fname.path()
            slot, err := slotFromFile(sszPath, fs)
            require.NoError(t, err)
            require.Equal(t, c.slot, slot)
        })
    }
}

type dirFiles struct {
    name     string
    isDir    bool
    children []dirFiles
}

func (df dirFiles) reify(t *testing.T, fs afero.Fs, base string) {
    fullPath := path.Join(base, df.name)
    if df.isDir {
        if df.name != "" {
            require.NoError(t, fs.Mkdir(fullPath, directoryPermissions))
        }
        for _, c := range df.children {
            c.reify(t, fs, fullPath)
        }
    } else {
        fp, err := fs.Create(fullPath)
func testSetupBlobIdentPaths(t *testing.T, fs afero.Fs, bs *BlobStorage, idents []testIdent) []blobIdent {
    created := make([]blobIdent, len(idents))
    for i, id := range idents {
        slot, err := slots.EpochStart(id.epoch)
        require.NoError(t, err)
        _, err = fp.WriteString("derp")
        slot += id.offset
        _, scs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 1)
        sc := verification.FakeVerifyForTest(t, scs[0])
        require.NoError(t, bs.Save(sc))
        ident := identForSidecar(sc)
        _, err = fs.Stat(bs.layout.sszPath(ident))
        require.NoError(t, err)
        created[i] = ident
    }
    return created
}

func testAssertBlobsPruned(t *testing.T, fs afero.Fs, bs *BlobStorage, pruned, remain []blobIdent) {
    for _, id := range pruned {
        _, err := fs.Stat(bs.layout.sszPath(id))
        require.NotNil(t, err)
        require.Equal(t, true, os.IsNotExist(err))
    }
    for _, id := range remain {
        _, err := fs.Stat(bs.layout.sszPath(id))
        require.NoError(t, err)
    }
}

func (df dirFiles) childNames() []string {
    cn := make([]string, len(df.children))
    for i := range df.children {
        cn[i] = df.children[i].name
    }
    return cn
type testIdent struct {
    blobIdent
    offset primitives.Slot
}

func TestListDir(t *testing.T) {
    fs := afero.NewMemMapFs()

    // parent directory
    fsLayout := dirFiles{isDir: true}
    // break out each subdir for easier assertions
    notABlob := dirFiles{name: "notABlob", isDir: true}
    childlessBlob := dirFiles{name: "0x0987654321", isDir: true}
    blobWithSsz := dirFiles{name: "0x1123581321", isDir: true,
        children: []dirFiles{{name: "1.ssz"}, {name: "2.ssz"}},
func testRoots(n int) [][32]byte {
    roots := make([][32]byte, n)
    for i := range roots {
        binary.LittleEndian.PutUint32(roots[i][:], uint32(1+i))
    }
    blobWithSszAndTmp := dirFiles{name: "0x1234567890", isDir: true,
        children: []dirFiles{{name: "5.ssz"}, {name: "0.part"}}}
    fsLayout.children = append(fsLayout.children,
        notABlob, childlessBlob, blobWithSsz, blobWithSszAndTmp)
    return roots
}

    topChildren := make([]string, len(fsLayout.children))
    for i := range fsLayout.children {
        topChildren[i] = fsLayout.children[i].name
    }

    fsLayout.reify(t, fs, "")
func TestLayoutPruneBefore(t *testing.T) {
    roots := testRoots(10)
    cases := []struct {
        name     string
        dirPath  string
        expected []string
        filter   func(string) bool
        err      error
        name        string
        pruned      []testIdent
        remain      []testIdent
        pruneBefore primitives.Epoch
        err         error
        sum         pruneSummary
    }{
        {
            name:     "non-existent",
            dirPath:  "derp",
            expected: []string{},
            err:      os.ErrNotExist,
        },
        {
            name:     "empty",
            dirPath:  childlessBlob.name,
            expected: []string{},
        },
        {
            name:     "top",
            dirPath:  ".",
            expected: topChildren,
        },
        {
            name:     "custom filter: only notABlob",
            dirPath:  ".",
            expected: []string{notABlob.name},
            filter: func(s string) bool {
                return s == notABlob.name
            name:        "none pruned",
            pruneBefore: 1,
            pruned:      []testIdent{},
            remain: []testIdent{
                {offset: 1, blobIdent: blobIdent{root: roots[0], epoch: 1, index: 0}},
                {offset: 1, blobIdent: blobIdent{root: roots[1], epoch: 1, index: 0}},
            },
        },
        {
            name:     "root filter",
            dirPath:  ".",
            expected: []string{childlessBlob.name, blobWithSsz.name, blobWithSszAndTmp.name},
            filter:   filterRoot,
        },
        {
            name:     "ssz filter",
            dirPath:  blobWithSsz.name,
            expected: blobWithSsz.childNames(),
            filter:   filterSsz,
        },
        {
            name:     "ssz mixed filter",
            dirPath:  blobWithSszAndTmp.name,
            expected: []string{"5.ssz"},
            filter:   filterSsz,
            name:        "expected pruned before epoch",
            pruneBefore: 3,
            pruned: []testIdent{
                {offset: 0, blobIdent: blobIdent{root: roots[0], epoch: 1, index: 0}},
                {offset: 31, blobIdent: blobIdent{root: roots[1], epoch: 1, index: 5}},
                {offset: 0, blobIdent: blobIdent{root: roots[2], epoch: 2, index: 0}},
                {offset: 31, blobIdent: blobIdent{root: roots[3], epoch: 2, index: 3}},
            },
            remain: []testIdent{
                {offset: 0, blobIdent: blobIdent{root: roots[4], epoch: 3, index: 2}},  // boundary
                {offset: 31, blobIdent: blobIdent{root: roots[5], epoch: 3, index: 0}}, // boundary
                {offset: 0, blobIdent: blobIdent{root: roots[6], epoch: 4, index: 1}},
                {offset: 31, blobIdent: blobIdent{root: roots[7], epoch: 4, index: 5}},
            },
            sum: pruneSummary{blobsPruned: 4},
        },
    }
    for _, c := range cases {
        t.Run(c.name, func(t *testing.T) {
            result, err := listDir(fs, c.dirPath)
            if c.filter != nil {
                result = filter(result, c.filter)
            }
            if c.err != nil {
                require.ErrorIs(t, err, c.err)
                require.Equal(t, 0, len(result))
            } else {
                require.NoError(t, err)
                sort.Strings(c.expected)
                sort.Strings(result)
                require.DeepEqual(t, c.expected, result)
            }
        })
    }
}

func TestRootFromDir(t *testing.T) {
    cases := []struct {
        name string
        dir  string
        err  error
        root [32]byte
    }{
        {
            name: "happy path",
            dir:  "0xffff875e1d985c5ccb214894983f2428edb271f0f87b68ba7010e4a99df3b5cb",
            root: [32]byte{255, 255, 135, 94, 29, 152, 92, 92, 203, 33, 72, 148, 152, 63, 36, 40,
                237, 178, 113, 240, 248, 123, 104, 186, 112, 16, 228, 169, 157, 243, 181, 203},
        },
        {
            name: "too short",
            dir:  "0xffff875e1d985c5ccb214894983f2428edb271f0f87b68ba7010e4a99df3b5c",
            err:  errInvalidRootString,
        },
        {
            name: "too long",
            dir:  "0xffff875e1d985c5ccb214894983f2428edb271f0f87b68ba7010e4a99df3b5cbb",
            err:  errInvalidRootString,
        },
        {
            name: "missing prefix",
            dir:  "ffff875e1d985c5ccb214894983f2428edb271f0f87b68ba7010e4a99df3b5cb",
            err:  errInvalidRootString,
        },
    }
    for _, c := range cases {
        t.Run(c.name, func(t *testing.T) {
            root, err := stringToRoot(c.dir)
            fs, bs := NewEphemeralBlobStorageAndFs(t, WithLayout(LayoutNameByEpoch))
            pruned := testSetupBlobIdentPaths(t, fs, bs, c.pruned)
            remain := testSetupBlobIdentPaths(t, fs, bs, c.remain)
            sum, err := bs.layout.pruneBefore(c.pruneBefore)
            if c.err != nil {
                require.ErrorIs(t, err, c.err)
                return
            }
            require.NoError(t, err)
            require.Equal(t, c.root, root)
            testAssertBlobsPruned(t, fs, bs, pruned, remain)
            require.Equal(t, c.sum.blobsPruned, sum.blobsPruned)
            require.Equal(t, len(c.pruned), sum.blobsPruned)
            require.Equal(t, len(c.sum.failedRemovals), len(sum.failedRemovals))
        })
    }
}
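Note the boundary semantics the table above pins down: pruneBefore(3) removes only idents with epoch < 3, while an ident at exactly epoch 3 sits on the boundary and is retained — matching the strict `epoch < before` comparisons in the layout filters earlier in this diff.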
@@ -3,15 +3,13 @@ package kv
import (
    "context"
    "fmt"
    bolt "go.etcd.io/bbolt"
    "testing"
    "time"

    "github.com/prysmaticlabs/prysm/v5/beacon-chain/state"

    "github.com/ethereum/go-ethereum/common"
    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filters"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
@@ -22,6 +20,7 @@ import (
    "github.com/prysmaticlabs/prysm/v5/testing/assert"
    "github.com/prysmaticlabs/prysm/v5/testing/require"
    "github.com/prysmaticlabs/prysm/v5/testing/util"
    bolt "go.etcd.io/bbolt"
    "google.golang.org/protobuf/proto"
)
@@ -103,6 +103,7 @@ go_test(
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/db/filesystem:go_default_library",
        "//beacon-chain/db/testing:go_default_library",
        "//beacon-chain/execution/testing:go_default_library",
        "//beacon-chain/execution/types:go_default_library",
@@ -105,7 +105,7 @@ type Reconstructor interface {
    ReconstructFullBellatrixBlockBatch(
        ctx context.Context, blindedBlocks []interfaces.ReadOnlySignedBeaconBlock,
    ) ([]interfaces.SignedBeaconBlock, error)
    ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, indices []bool) ([]blocks.VerifiedROBlob, error)
    ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, hi func(uint64) bool) ([]blocks.VerifiedROBlob, error)
}

// EngineCaller defines a client that can interact with an Ethereum
@@ -531,32 +531,23 @@ func (s *Service) ReconstructFullBellatrixBlockBatch(
 // It retrieves the KZG commitments from the block body, fetches the associated blobs and proofs,
 // and constructs the corresponding verified read-only blob sidecars.
 //
-// The 'exists' argument is a boolean list (must be the same length as body.BlobKzgCommitments), where each element corresponds to whether a
-// particular blob sidecar already exists. If exists[i] is true, the blob for the i-th KZG commitment
-// has already been retrieved and does not need to be fetched again from the execution layer (EL).
-//
-// For example:
-// - len(block.Body().BlobKzgCommitments()) == 6
-// - If exists = [true, false, true, false, true, false], the function will fetch the blobs
-//   associated with indices 1, 3, and 5 (since those are marked as non-existent).
-// - If exists = [false ... x 6], the function will attempt to fetch all blobs.
-//
-// Only the blobs that do not already exist (where exists[i] is false) are fetched using the KZG commitments from the block body.
-func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, exists []bool) ([]blocks.VerifiedROBlob, error) {
+// The 'hasIndex' argument is a function that returns true if the given uint64 blob index already exists on disk.
+// Only the blobs that do not already exist (where hasIndex(i) is false)
+// will be fetched from the execution engine using the KZG commitments from the block body.
+func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, hasIndex func(uint64) bool) ([]blocks.VerifiedROBlob, error) {
 	blockBody := block.Block().Body()
 	kzgCommitments, err := blockBody.BlobKzgCommitments()
 	if err != nil {
 		return nil, errors.Wrap(err, "could not get blob KZG commitments")
 	}
-	if len(kzgCommitments) > len(exists) {
-		return nil, fmt.Errorf("length of KZG commitments (%d) is greater than length of exists (%d)", len(kzgCommitments), len(exists))
-	}
 
 	// Collect KZG hashes for non-existing blobs
 	var kzgHashes []common.Hash
 	var kzgIndexes []int
 	for i, commitment := range kzgCommitments {
-		if !exists[i] {
+		if !hasIndex(uint64(i)) {
 			kzgHashes = append(kzgHashes, primitives.ConvertKzgCommitmentToVersionedHash(commitment))
 			kzgIndexes = append(kzgIndexes, i)
 		}
 	}
 	if len(kzgHashes) == 0 {

@@ -579,27 +570,21 @@ func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.
 
 	// Reconstruct verified blob sidecars
 	var verifiedBlobs []blocks.VerifiedROBlob
-	for i, blobIndex := 0, 0; i < len(kzgCommitments); i++ {
-		if exists[i] {
+	for i := 0; i < len(kzgHashes); i++ {
+		if blobs[i] == nil {
 			continue
 		}
-
-		if blobIndex >= len(blobs) || blobs[blobIndex] == nil {
-			blobIndex++
-			continue
-		}
-		blob := blobs[blobIndex]
-		blobIndex++
-
-		proof, err := blocks.MerkleProofKZGCommitment(blockBody, i)
+		blob := blobs[i]
+		blobIndex := kzgIndexes[i]
+		proof, err := blocks.MerkleProofKZGCommitment(blockBody, blobIndex)
 		if err != nil {
-			log.WithError(err).WithField("index", i).Error("failed to get Merkle proof for KZG commitment")
+			log.WithError(err).WithField("index", blobIndex).Error("failed to get Merkle proof for KZG commitment")
 			continue
 		}
 		sidecar := &ethpb.BlobSidecar{
-			Index:                    uint64(i),
+			Index:                    uint64(blobIndex),
 			Blob:                     blob.Blob,
-			KzgCommitment:            kzgCommitments[i],
+			KzgCommitment:            kzgCommitments[blobIndex],
 			KzgProof:                 blob.KzgProof,
 			SignedBlockHeader:        header,
 			CommitmentInclusionProof: proof,

@@ -607,14 +592,14 @@ func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.
 
 		roBlob, err := blocks.NewROBlobWithRoot(sidecar, blockRoot)
 		if err != nil {
-			log.WithError(err).WithField("index", i).Error("failed to create RO blob with root")
+			log.WithError(err).WithField("index", blobIndex).Error("failed to create RO blob with root")
 			continue
 		}
 
 		v := s.blobVerifier(roBlob, verification.ELMemPoolRequirements)
 		verifiedBlob, err := v.VerifiedROBlob()
 		if err != nil {
-			log.WithError(err).WithField("index", i).Error("failed to verify RO blob")
+			log.WithError(err).WithField("index", blobIndex).Error("failed to verify RO blob")
 			continue
 		}
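For orientation, a minimal caller-side sketch of the new predicate-based signature (illustrative only, not part of this commit; it assumes the filesystem.BlobStorage Summary/HasIndex pair that the RPC blocker change further down actually uses, and the same package/imports as the diffed execution service code):

    // Sketch: derive hasIndex from the on-disk blob summary for this block root.
    func reconstructMissing(ctx context.Context, s *Service, store *filesystem.BlobStorage,
        blk interfaces.ReadOnlySignedBeaconBlock, root [32]byte) ([]blocks.VerifiedROBlob, error) {
        sum := store.Summary(root) // which blob indices are already stored on disk
        // Indices with sum.HasIndex(i) == true are skipped; the rest are fetched from the EL.
        return s.ReconstructBlobSidecars(ctx, blk, root, sum.HasIndex)
    }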
@@ -20,6 +20,7 @@ import (
 	"github.com/ethereum/go-ethereum/rpc"
 	"github.com/holiman/uint256"
 	"github.com/pkg/errors"
+	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
 	mocks "github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/testing"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
 	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"

@@ -2395,6 +2396,12 @@ func Test_ExchangeCapabilities(t *testing.T) {
 	})
 }
 
+func mockSummary(t *testing.T, exists []bool) func(uint64) bool {
+	hi, err := filesystem.NewBlobStorageSummary(params.BeaconConfig().DenebForkEpoch, exists)
+	require.NoError(t, err)
+	return hi.HasIndex
+}
+
 func TestReconstructBlobSidecars(t *testing.T) {
 	client := &Service{capabilityCache: &capabilityCache{}}
 	b := util.NewBeaconBlockDeneb()
@@ -2408,15 +2415,15 @@ func TestReconstructBlobSidecars(t *testing.T) {
 
 	ctx := context.Background()
 	t.Run("all seen", func(t *testing.T) {
-		exists := []bool{true, true, true, true, true, true}
-		verifiedBlobs, err := client.ReconstructBlobSidecars(ctx, sb, r, exists)
+		hi := mockSummary(t, []bool{true, true, true, true, true, true})
+		verifiedBlobs, err := client.ReconstructBlobSidecars(ctx, sb, r, hi)
 		require.NoError(t, err)
 		require.Equal(t, 0, len(verifiedBlobs))
 	})
 
 	t.Run("get-blobs end point is not supported", func(t *testing.T) {
-		exists := []bool{true, true, true, true, true, false}
-		verifiedBlobs, err := client.ReconstructBlobSidecars(ctx, sb, r, exists)
+		hi := mockSummary(t, []bool{true, true, true, true, true, false})
+		verifiedBlobs, err := client.ReconstructBlobSidecars(ctx, sb, r, hi)
 		require.NoError(t, err)
 		require.Equal(t, 0, len(verifiedBlobs))
 	})

@@ -2430,8 +2437,8 @@ func TestReconstructBlobSidecars(t *testing.T) {
 		rpcClient, client := setupRpcClient(t, srv.URL, client)
 		defer rpcClient.Close()
 
-		exists := [6]bool{}
-		verifiedBlobs, err := client.ReconstructBlobSidecars(ctx, sb, r, exists[:])
+		hi := mockSummary(t, make([]bool, 6))
+		verifiedBlobs, err := client.ReconstructBlobSidecars(ctx, sb, r, hi)
 		require.NoError(t, err)
 		require.Equal(t, 6, len(verifiedBlobs))
 	})

@@ -2443,22 +2450,29 @@ func TestReconstructBlobSidecars(t *testing.T) {
 		rpcClient, client := setupRpcClient(t, srv.URL, client)
 		defer rpcClient.Close()
 
-		exists := []bool{true, false, true, false, true, false}
-		verifiedBlobs, err := client.ReconstructBlobSidecars(ctx, sb, r, exists)
+		hi := mockSummary(t, []bool{true, false, true, false, true, false})
+		verifiedBlobs, err := client.ReconstructBlobSidecars(ctx, sb, r, hi)
 		require.NoError(t, err)
 		require.Equal(t, 3, len(verifiedBlobs))
 	})
 
-	t.Run("kzg is longer than exist", func(t *testing.T) {
-		srv := createBlobServer(t, 3)
+	t.Run("recovered 3 missing blobs with mutated blob mask", func(t *testing.T) {
+		exists := []bool{true, false, true, false, true, false}
+		hi := mockSummary(t, exists)
+
+		srv := createBlobServer(t, 3, func() {
+			// Mutate blob mask
+			exists[1] = true
+			exists[3] = true
+		})
 		defer srv.Close()
 
 		rpcClient, client := setupRpcClient(t, srv.URL, client)
 		defer rpcClient.Close()
 
-		exists := []bool{true, false, true, false, true}
-		_, err := client.ReconstructBlobSidecars(ctx, sb, r, exists)
-		require.ErrorContains(t, "length of KZG commitments (6) is greater than length of exists (5)", err)
+		verifiedBlobs, err := client.ReconstructBlobSidecars(ctx, sb, r, hi)
+		require.NoError(t, err)
+		require.Equal(t, 3, len(verifiedBlobs))
 	})
 }
@@ -2472,12 +2486,16 @@ func createRandomKzgCommitments(t *testing.T, num int) [][]byte {
 	return kzgCommitments
 }
 
-func createBlobServer(t *testing.T, numBlobs int) *httptest.Server {
+func createBlobServer(t *testing.T, numBlobs int, callbackFuncs ...func()) *httptest.Server {
 	return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		w.Header().Set("Content-Type", "application/json")
 		defer func() {
 			require.NoError(t, r.Body.Close())
 		}()
+		// Execute callback functions for each request.
+		for _, f := range callbackFuncs {
+			f()
+		}
 
 		blobs := make([]pb.BlobAndProofJson, numBlobs)
 		for i := range blobs {

@@ -109,7 +109,7 @@ func (e *EngineClient) ReconstructFullBellatrixBlockBatch(
 }
 
 // ReconstructBlobSidecars is a mock implementation of the ReconstructBlobSidecars method.
-func (e *EngineClient) ReconstructBlobSidecars(context.Context, interfaces.ReadOnlySignedBeaconBlock, [32]byte, []bool) ([]blocks.VerifiedROBlob, error) {
+func (e *EngineClient) ReconstructBlobSidecars(context.Context, interfaces.ReadOnlySignedBeaconBlock, [32]byte, func(uint64) bool) ([]blocks.VerifiedROBlob, error) {
 	return e.BlobSidecars, e.ErrorBlobSidecars
 }
@@ -78,7 +78,7 @@ func (f *ForkChoice) ShouldOverrideFCU() (override bool) {
 	// }
 
 	// Only orphan a block if the head LMD vote is weak
-	if head.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgWeightThreshold {
+	if head.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgHeadWeightThreshold {
 		return
 	}

@@ -143,7 +143,7 @@ func (f *ForkChoice) GetProposerHead() [32]byte {
 	}
 
 	// Only orphan a block if the head LMD vote is weak
-	if head.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgWeightThreshold {
+	if head.weight*100 > f.store.committeeWeight*params.BeaconConfig().ReorgHeadWeightThreshold {
 		return head.root
 	}
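The rename does not change the arithmetic; the spec test further down pins the mainnet value REORG_HEAD_WEIGHT_THRESHOLD = "20". A standalone sketch of the guard, with made-up weights:

    package main

    import "fmt"

    func main() {
        const reorgHeadWeightThreshold = 20 // percent of committee weight (mainnet value)
        headWeight, committeeWeight := uint64(2_100), uint64(10_000)
        // Mirrors the guard above: a head backed by more than 20% of one slot's
        // committee weight is considered strong, so it is not orphaned.
        fmt.Println(headWeight*100 > committeeWeight*reorgHeadWeightThreshold) // true: 210000 > 200000
    }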
@@ -34,11 +34,17 @@ func (s *Service) canUpdateAttestedValidator(idx primitives.ValidatorIndex, slot
 
 // attestingIndices returns the indices of validators that participated in the given aggregated attestation.
 func attestingIndices(ctx context.Context, state state.BeaconState, att ethpb.Att) ([]uint64, error) {
-	committee, err := helpers.BeaconCommitteeFromState(ctx, state, att.GetData().Slot, att.GetData().CommitteeIndex)
-	if err != nil {
-		return nil, err
+	committeeBits := att.CommitteeBitsVal().BitIndices()
+	committees := make([][]primitives.ValidatorIndex, len(committeeBits))
+	var err error
+	for i, ci := range committeeBits {
+		committees[i], err = helpers.BeaconCommitteeFromState(ctx, state, att.GetData().Slot, primitives.CommitteeIndex(ci))
+		if err != nil {
+			return nil, err
+		}
 	}
-	return attestation.AttestingIndices(att, committee)
+
+	return attestation.AttestingIndices(att, committees...)
 }
 
 // logMessageTimelyFlagsForIndex returns the log message with performance info for the attestation (head, source, target)
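A toy model of this lookup change (repo types elided and aggregation-bit filtering ignored): an Electra aggregate can span several committees, so member indices are resolved once per set committee bit and flattened, instead of being read from a single committee:

    package main

    import "fmt"

    func main() {
        // Stand-ins for state-derived committees and att.CommitteeBitsVal().BitIndices().
        committees := map[uint64][]uint64{1: {10, 11, 12}, 3: {30, 31}}
        bits := []uint64{1, 3}
        var indices []uint64
        for _, ci := range bits {
            // One BeaconCommitteeFromState lookup per set committee bit.
            indices = append(indices, committees[ci]...)
        }
        fmt.Println(indices) // [10 11 12 30 31]
    }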
@@ -236,6 +236,13 @@ func (s *Service) monitorRoutine(stateChannel chan *feed.Event, stateSub event.S
 		} else {
 			s.processAggregatedAttestation(s.ctx, data.Attestation)
 		}
+	case operation.SingleAttReceived:
+		data, ok := e.Data.(*operation.SingleAttReceivedData)
+		if !ok {
+			log.Error("Event feed data is not of type *operation.SingleAttReceivedData")
+		} else {
+			s.processUnaggregatedAttestation(s.ctx, data.Attestation)
+		}
 	case operation.ExitReceived:
 		data, ok := e.Data.(*operation.ExitReceivedData)
 		if !ok {

@@ -50,7 +50,6 @@ func (p *PoolService) run() {
 
 	electraSlot, err := slots.EpochStart(params.BeaconConfig().ElectraForkEpoch)
 	if err != nil {
 		log.WithError(err).Error("Could not get Electra start slot")
 		return
 	}

@@ -64,7 +63,6 @@ func (p *PoolService) run() {
 
 	electraTime, err := slots.ToTime(uint64(p.clock.GenesisTime().Unix()), electraSlot)
 	if err != nil {
 		log.WithError(err).Error("Could not get Electra start time")
 		return
 	}
@@ -724,6 +724,48 @@ func (s *Server) PublishBlockV2(w http.ResponseWriter, r *http.Request) {
 	}
 }
 
+type signedBlockContentPeeker struct {
+	Block json.RawMessage `json:"signed_block"`
+}
+type slotPeeker struct {
+	Block struct {
+		Slot primitives.Slot `json:"slot,string"`
+	} `json:"message"`
+}
+
+func versionHeaderFromRequest(body []byte) (string, error) {
+	// check is required for post-Deneb fork block contents
+	p := &signedBlockContentPeeker{}
+	if err := json.Unmarshal(body, p); err != nil {
+		return "", errors.Wrap(err, "unable to peek slot from block contents")
+	}
+	data := body
+	if len(p.Block) > 0 {
+		data = p.Block
+	}
+	sp := &slotPeeker{}
+	if err := json.Unmarshal(data, sp); err != nil {
+		return "", errors.Wrap(err, "unable to peek slot from block")
+	}
+	ce := slots.ToEpoch(sp.Block.Slot)
+	if ce >= params.BeaconConfig().FuluForkEpoch {
+		return version.String(version.Fulu), nil
+	} else if ce >= params.BeaconConfig().ElectraForkEpoch {
+		return version.String(version.Electra), nil
+	} else if ce >= params.BeaconConfig().DenebForkEpoch {
+		return version.String(version.Deneb), nil
+	} else if ce >= params.BeaconConfig().CapellaForkEpoch {
+		return version.String(version.Capella), nil
+	} else if ce >= params.BeaconConfig().BellatrixForkEpoch {
+		return version.String(version.Bellatrix), nil
+	} else if ce >= params.BeaconConfig().AltairForkEpoch {
+		return version.String(version.Altair), nil
+	} else {
+		return version.String(version.Phase0), nil
+	}
+}
+
 // nolint:gocognit
 func (s *Server) publishBlockSSZ(ctx context.Context, w http.ResponseWriter, r *http.Request, versionRequired bool) { // nolint:gocognit
 	body, err := io.ReadAll(r.Body)
 	if err != nil {
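A sketch of the new helper in use (same package as the handler; the JSON body is a made-up example). With ElectraForkEpoch = 6 and 32 slots per epoch, slot 192 falls in epoch 6, so the peeked version is "electra":

    body := []byte(`{"signed_block": {"message": {"slot": "192"}, "signature": "0x..."}}`)
    v, err := versionHeaderFromRequest(body)
    if err != nil {
        // Malformed JSON surfaces as "unable to peek slot from block contents".
    }
    fmt.Println(v) // "electra"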
@@ -735,9 +777,28 @@ func (s *Server) publishBlockSSZ(ctx context.Context, w http.ResponseWriter, r *
 		httputil.HandleError(w, api.VersionHeader+" header is required", http.StatusBadRequest)
 		return
 	}
+	if !versionRequired && versionHeader == "" {
+		versionHeader, err = versionHeaderFromRequest(body)
+		if err != nil {
+			httputil.HandleError(
+				w,
+				fmt.Sprintf("Could not decode request body for version header: %s", err.Error()),
+				http.StatusBadRequest,
+			)
+		}
+	}
 
-	fuluBlock := &eth.SignedBeaconBlockContentsFulu{}
-	if err = fuluBlock.UnmarshalSSZ(body); err == nil {
+	if versionHeader == version.String(version.Fulu) {
+		fuluBlock := &eth.SignedBeaconBlockContentsFulu{}
+		if err = fuluBlock.UnmarshalSSZ(body); err != nil {
+			httputil.HandleError(
+				w,
+				fmt.Sprintf("Could not decode request body into %s consensus block: %v", version.String(version.Fulu), err.Error()),
+				http.StatusBadRequest,
+			)
+			return
+		}
 		genericBlock := &eth.GenericSignedBeaconBlock{
 			Block: &eth.GenericSignedBeaconBlock_Fulu{
 				Fulu: fuluBlock,

@@ -760,17 +821,17 @@ func (s *Server) publishBlockSSZ(ctx context.Context, w http.ResponseWriter, r *
 		s.proposeBlock(ctx, w, genericBlock)
 		return
 	}
-	if versionHeader == version.String(version.Fulu) {
-		httputil.HandleError(
-			w,
-			fmt.Sprintf("Could not decode request body into %s consensus block: %v", version.String(version.Fulu), err.Error()),
-			http.StatusBadRequest,
-		)
-		return
-	}
 
-	electraBlock := &eth.SignedBeaconBlockContentsElectra{}
-	if err = electraBlock.UnmarshalSSZ(body); err == nil {
+	if versionHeader == version.String(version.Electra) {
+		electraBlock := &eth.SignedBeaconBlockContentsElectra{}
+		if err = electraBlock.UnmarshalSSZ(body); err != nil {
+			httputil.HandleError(
+				w,
+				fmt.Sprintf("Could not decode request body into %s consensus block: %v", version.String(version.Electra), err.Error()),
+				http.StatusBadRequest,
+			)
+			return
+		}
 		genericBlock := &eth.GenericSignedBeaconBlock{
 			Block: &eth.GenericSignedBeaconBlock_Electra{
 				Electra: electraBlock,

@@ -793,17 +854,17 @@ func (s *Server) publishBlockSSZ(ctx context.Context, w http.ResponseWriter, r *
 		s.proposeBlock(ctx, w, genericBlock)
 		return
 	}
-	if versionHeader == version.String(version.Electra) {
-		httputil.HandleError(
-			w,
-			fmt.Sprintf("Could not decode request body into %s consensus block: %v", version.String(version.Electra), err.Error()),
-			http.StatusBadRequest,
-		)
-		return
-	}
 
-	denebBlock := &eth.SignedBeaconBlockContentsDeneb{}
-	if err = denebBlock.UnmarshalSSZ(body); err == nil {
+	if versionHeader == version.String(version.Deneb) {
+		denebBlock := &eth.SignedBeaconBlockContentsDeneb{}
+		if err = denebBlock.UnmarshalSSZ(body); err != nil {
+			httputil.HandleError(
+				w,
+				fmt.Sprintf("Could not decode request body into %s consensus block: %v", version.String(version.Deneb), err.Error()),
+				http.StatusBadRequest,
+			)
+			return
+		}
 		genericBlock := &eth.GenericSignedBeaconBlock{
 			Block: &eth.GenericSignedBeaconBlock_Deneb{
 				Deneb: denebBlock,

@@ -826,17 +887,18 @@ func (s *Server) publishBlockSSZ(ctx context.Context, w http.ResponseWriter, r *
 		s.proposeBlock(ctx, w, genericBlock)
 		return
 	}
-	if versionHeader == version.String(version.Deneb) {
-		httputil.HandleError(
-			w,
-			fmt.Sprintf("Could not decode request body into %s consensus block: %v", version.String(version.Deneb), err.Error()),
-			http.StatusBadRequest,
-		)
-		return
-	}
 
-	capellaBlock := &eth.SignedBeaconBlockCapella{}
-	if err = capellaBlock.UnmarshalSSZ(body); err == nil {
+	if versionHeader == version.String(version.Capella) {
+		capellaBlock := &eth.SignedBeaconBlockCapella{}
+		if err = capellaBlock.UnmarshalSSZ(body); err != nil {
+			httputil.HandleError(
+				w,
+				fmt.Sprintf("Could not decode request body into %s consensus block: %v", version.String(version.Capella), err.Error()),
+				http.StatusBadRequest,
+			)
+			return
+		}
+
 		genericBlock := &eth.GenericSignedBeaconBlock{
 			Block: &eth.GenericSignedBeaconBlock_Capella{
 				Capella: capellaBlock,

@@ -849,17 +911,17 @@ func (s *Server) publishBlockSSZ(ctx context.Context, w http.ResponseWriter, r *
 		s.proposeBlock(ctx, w, genericBlock)
 		return
 	}
-	if versionHeader == version.String(version.Capella) {
-		httputil.HandleError(
-			w,
-			fmt.Sprintf("Could not decode request body into %s consensus block: %v", version.String(version.Capella), err.Error()),
-			http.StatusBadRequest,
-		)
-		return
-	}
 
-	bellatrixBlock := &eth.SignedBeaconBlockBellatrix{}
-	if err = bellatrixBlock.UnmarshalSSZ(body); err == nil {
+	if versionHeader == version.String(version.Bellatrix) {
+		bellatrixBlock := &eth.SignedBeaconBlockBellatrix{}
+		if err = bellatrixBlock.UnmarshalSSZ(body); err != nil {
+			httputil.HandleError(
+				w,
+				fmt.Sprintf("Could not decode request body into %s consensus block: %v", version.String(version.Bellatrix), err.Error()),
+				http.StatusBadRequest,
+			)
+			return
+		}
 		genericBlock := &eth.GenericSignedBeaconBlock{
 			Block: &eth.GenericSignedBeaconBlock_Bellatrix{
 				Bellatrix: bellatrixBlock,

@@ -872,17 +934,18 @@ func (s *Server) publishBlockSSZ(ctx context.Context, w http.ResponseWriter, r *
 		s.proposeBlock(ctx, w, genericBlock)
 		return
 	}
-	if versionHeader == version.String(version.Bellatrix) {
-		httputil.HandleError(
-			w,
-			fmt.Sprintf("Could not decode request body into %s consensus block: %v", version.String(version.Bellatrix), err.Error()),
-			http.StatusBadRequest,
-		)
-		return
-	}
 
-	altairBlock := &eth.SignedBeaconBlockAltair{}
-	if err = altairBlock.UnmarshalSSZ(body); err == nil {
+	if versionHeader == version.String(version.Altair) {
+		altairBlock := &eth.SignedBeaconBlockAltair{}
+		if err = altairBlock.UnmarshalSSZ(body); err != nil {
+			httputil.HandleError(
+				w,
+				fmt.Sprintf("Could not decode request body into %s consensus block: %v", version.String(version.Altair), err.Error()),
+				http.StatusBadRequest,
+			)
+			return
+		}
+
 		genericBlock := &eth.GenericSignedBeaconBlock{
 			Block: &eth.GenericSignedBeaconBlock_Altair{
 				Altair: altairBlock,

@@ -895,17 +958,17 @@ func (s *Server) publishBlockSSZ(ctx context.Context, w http.ResponseWriter, r *
 		s.proposeBlock(ctx, w, genericBlock)
 		return
 	}
-	if versionHeader == version.String(version.Altair) {
-		httputil.HandleError(
-			w,
-			fmt.Sprintf("Could not decode request body into %s consensus block: %v", version.String(version.Altair), err.Error()),
-			http.StatusBadRequest,
-		)
-		return
-	}
 
-	phase0Block := &eth.SignedBeaconBlock{}
-	if err = phase0Block.UnmarshalSSZ(body); err == nil {
+	if versionHeader == version.String(version.Phase0) {
+		phase0Block := &eth.SignedBeaconBlock{}
+		if err = phase0Block.UnmarshalSSZ(body); err != nil {
+			httputil.HandleError(
+				w,
+				fmt.Sprintf("Could not decode request body into %s consensus block: %v", version.String(version.Phase0), err.Error()),
+				http.StatusBadRequest,
+			)
+			return
+		}
 		genericBlock := &eth.GenericSignedBeaconBlock{
 			Block: &eth.GenericSignedBeaconBlock_Phase0{
 				Phase0: phase0Block,

@@ -918,18 +981,11 @@ func (s *Server) publishBlockSSZ(ctx context.Context, w http.ResponseWriter, r *
 		s.proposeBlock(ctx, w, genericBlock)
 		return
 	}
-	if versionHeader == version.String(version.Phase0) {
-		httputil.HandleError(
-			w,
-			fmt.Sprintf("Could not decode request body into %s consensus block: %v", version.String(version.Phase0), err.Error()),
-			http.StatusBadRequest,
-		)
-		return
-	}
 
 	httputil.HandleError(w, "Body does not represent a valid block type", http.StatusBadRequest)
 }
 
 // nolint:gocognit
 func (s *Server) publishBlock(ctx context.Context, w http.ResponseWriter, r *http.Request, versionRequired bool) { // nolint:gocognit
 	body, err := io.ReadAll(r.Body)
 	if err != nil {
@@ -941,32 +997,41 @@ func (s *Server) publishBlock(ctx context.Context, w http.ResponseWriter, r *htt
 		httputil.HandleError(w, api.VersionHeader+" header is required", http.StatusBadRequest)
 		return
 	}
-
-	var consensusBlock *eth.GenericSignedBeaconBlock
-
-	var fuluBlockContents *structs.SignedBeaconBlockContentsFulu
-	if err = unmarshalStrict(body, &fuluBlockContents); err == nil {
-		consensusBlock, err = fuluBlockContents.ToGeneric()
-		if err == nil {
-			if err = s.validateBroadcast(ctx, r, consensusBlock); err != nil {
-				if errors.Is(err, errEquivocatedBlock) {
-					b, err := blocks.NewSignedBeaconBlock(consensusBlock)
-					if err != nil {
-						httputil.HandleError(w, err.Error(), http.StatusBadRequest)
-						return
-					}
-					if err := s.broadcastSeenBlockSidecars(ctx, b, consensusBlock.GetFulu().Blobs, consensusBlock.GetFulu().KzgProofs); err != nil {
-						log.WithError(err).Error("Failed to broadcast blob sidecars")
-					}
-				}
-				httputil.HandleError(w, err.Error(), http.StatusBadRequest)
-				return
-			}
-			s.proposeBlock(ctx, w, consensusBlock)
-			return
+	if !versionRequired && versionHeader == "" {
+		versionHeader, err = versionHeaderFromRequest(body)
+		if err != nil {
+			httputil.HandleError(
+				w,
+				fmt.Sprintf("Could not decode request body for version header: %s", err.Error()),
+				http.StatusBadRequest,
+			)
+		}
+	}
+
+	var consensusBlock *eth.GenericSignedBeaconBlock
+	if versionHeader == version.String(version.Fulu) {
+		var fuluBlockContents *structs.SignedBeaconBlockContentsFulu
+		if err = unmarshalStrict(body, &fuluBlockContents); err == nil {
+			consensusBlock, err = fuluBlockContents.ToGeneric()
+			if err == nil {
+				if err = s.validateBroadcast(ctx, r, consensusBlock); err != nil {
+					if errors.Is(err, errEquivocatedBlock) {
+						b, err := blocks.NewSignedBeaconBlock(consensusBlock)
+						if err != nil {
+							httputil.HandleError(w, err.Error(), http.StatusBadRequest)
+							return
+						}
+						if err := s.broadcastSeenBlockSidecars(ctx, b, consensusBlock.GetFulu().Blobs, consensusBlock.GetFulu().KzgProofs); err != nil {
+							log.WithError(err).Error("Failed to broadcast blob sidecars")
+						}
+					}
+					httputil.HandleError(w, err.Error(), http.StatusBadRequest)
+					return
+				}
+				s.proposeBlock(ctx, w, consensusBlock)
+				return
+			}
 		}
-	}
 		httputil.HandleError(
 			w,
 			fmt.Sprintf("Could not decode request body into %s consensus block: %v", version.String(version.Fulu), err.Error()),

@@ -975,29 +1040,29 @@ func (s *Server) publishBlock(ctx context.Context, w http.ResponseWriter, r *htt
 		return
 	}
 
-	var electraBlockContents *structs.SignedBeaconBlockContentsElectra
-	if err = unmarshalStrict(body, &electraBlockContents); err == nil {
-		consensusBlock, err = electraBlockContents.ToGeneric()
-		if err == nil {
-			if err = s.validateBroadcast(ctx, r, consensusBlock); err != nil {
-				if errors.Is(err, errEquivocatedBlock) {
-					b, err := blocks.NewSignedBeaconBlock(consensusBlock)
-					if err != nil {
-						httputil.HandleError(w, err.Error(), http.StatusBadRequest)
-						return
-					}
-					if err := s.broadcastSeenBlockSidecars(ctx, b, consensusBlock.GetElectra().Blobs, consensusBlock.GetElectra().KzgProofs); err != nil {
-						log.WithError(err).Error("Failed to broadcast blob sidecars")
-					}
-				}
-				httputil.HandleError(w, err.Error(), http.StatusBadRequest)
-				return
-			}
-			s.proposeBlock(ctx, w, consensusBlock)
-			return
-		}
-	}
-	if versionHeader == version.String(version.Electra) {
+	if versionHeader == version.String(version.Electra) {
+		var electraBlockContents *structs.SignedBeaconBlockContentsElectra
+		if err = unmarshalStrict(body, &electraBlockContents); err == nil {
+			consensusBlock, err = electraBlockContents.ToGeneric()
+			if err == nil {
+				if err = s.validateBroadcast(ctx, r, consensusBlock); err != nil {
+					if errors.Is(err, errEquivocatedBlock) {
+						b, err := blocks.NewSignedBeaconBlock(consensusBlock)
+						if err != nil {
+							httputil.HandleError(w, err.Error(), http.StatusBadRequest)
+							return
+						}
+						if err := s.broadcastSeenBlockSidecars(ctx, b, consensusBlock.GetElectra().Blobs, consensusBlock.GetElectra().KzgProofs); err != nil {
+							log.WithError(err).Error("Failed to broadcast blob sidecars")
+						}
+					}
+					httputil.HandleError(w, err.Error(), http.StatusBadRequest)
+					return
+				}
+				s.proposeBlock(ctx, w, consensusBlock)
+				return
+			}
+		}
 		httputil.HandleError(
 			w,
 			fmt.Sprintf("Could not decode request body into %s consensus block: %v", version.String(version.Electra), err.Error()),

@@ -1006,29 +1071,29 @@ func (s *Server) publishBlock(ctx context.Context, w http.ResponseWriter, r *htt
 		return
 	}
 
-	var denebBlockContents *structs.SignedBeaconBlockContentsDeneb
-	if err = unmarshalStrict(body, &denebBlockContents); err == nil {
-		consensusBlock, err = denebBlockContents.ToGeneric()
-		if err == nil {
-			if err = s.validateBroadcast(ctx, r, consensusBlock); err != nil {
-				if errors.Is(err, errEquivocatedBlock) {
-					b, err := blocks.NewSignedBeaconBlock(consensusBlock)
-					if err != nil {
-						httputil.HandleError(w, err.Error(), http.StatusBadRequest)
-						return
-					}
-					if err := s.broadcastSeenBlockSidecars(ctx, b, consensusBlock.GetDeneb().Blobs, consensusBlock.GetDeneb().KzgProofs); err != nil {
-						log.WithError(err).Error("Failed to broadcast blob sidecars")
-					}
-				}
-				httputil.HandleError(w, err.Error(), http.StatusBadRequest)
-				return
-			}
-			s.proposeBlock(ctx, w, consensusBlock)
-			return
-		}
-	}
-	if versionHeader == version.String(version.Deneb) {
+	if versionHeader == version.String(version.Deneb) {
+		var denebBlockContents *structs.SignedBeaconBlockContentsDeneb
+		if err = unmarshalStrict(body, &denebBlockContents); err == nil {
+			consensusBlock, err = denebBlockContents.ToGeneric()
+			if err == nil {
+				if err = s.validateBroadcast(ctx, r, consensusBlock); err != nil {
+					if errors.Is(err, errEquivocatedBlock) {
+						b, err := blocks.NewSignedBeaconBlock(consensusBlock)
+						if err != nil {
+							httputil.HandleError(w, err.Error(), http.StatusBadRequest)
+							return
+						}
+						if err := s.broadcastSeenBlockSidecars(ctx, b, consensusBlock.GetDeneb().Blobs, consensusBlock.GetDeneb().KzgProofs); err != nil {
+							log.WithError(err).Error("Failed to broadcast blob sidecars")
+						}
+					}
+					httputil.HandleError(w, err.Error(), http.StatusBadRequest)
+					return
+				}
+				s.proposeBlock(ctx, w, consensusBlock)
+				return
+			}
+		}
 		httputil.HandleError(
 			w,
 			fmt.Sprintf("Could not decode request body into %s consensus block: %v", version.String(version.Deneb), err.Error()),

@@ -1037,19 +1102,20 @@ func (s *Server) publishBlock(ctx context.Context, w http.ResponseWriter, r *htt
 		return
 	}
 
-	var capellaBlock *structs.SignedBeaconBlockCapella
-	if err = unmarshalStrict(body, &capellaBlock); err == nil {
-		consensusBlock, err = capellaBlock.ToGeneric()
-		if err == nil {
-			if err = s.validateBroadcast(ctx, r, consensusBlock); err != nil {
-				httputil.HandleError(w, err.Error(), http.StatusBadRequest)
-				return
-			}
-			s.proposeBlock(ctx, w, consensusBlock)
-			return
-		}
-	}
-	if versionHeader == version.String(version.Capella) {
+	if versionHeader == version.String(version.Capella) {
+		var capellaBlock *structs.SignedBeaconBlockCapella
+		if err = unmarshalStrict(body, &capellaBlock); err == nil {
+			consensusBlock, err = capellaBlock.ToGeneric()
+			if err == nil {
+				if err = s.validateBroadcast(ctx, r, consensusBlock); err != nil {
+					httputil.HandleError(w, err.Error(), http.StatusBadRequest)
+					return
+				}
+				s.proposeBlock(ctx, w, consensusBlock)
+				return
+			}
+		}
+
 		httputil.HandleError(
 			w,
 			fmt.Sprintf("Could not decode request body into %s consensus block: %v", version.String(version.Capella), err.Error()),

@@ -1058,19 +1124,20 @@ func (s *Server) publishBlock(ctx context.Context, w http.ResponseWriter, r *htt
 		return
 	}
 
-	var bellatrixBlock *structs.SignedBeaconBlockBellatrix
-	if err = unmarshalStrict(body, &bellatrixBlock); err == nil {
-		consensusBlock, err = bellatrixBlock.ToGeneric()
-		if err == nil {
-			if err = s.validateBroadcast(ctx, r, consensusBlock); err != nil {
-				httputil.HandleError(w, err.Error(), http.StatusBadRequest)
-				return
-			}
-			s.proposeBlock(ctx, w, consensusBlock)
-			return
-		}
-	}
-	if versionHeader == version.String(version.Bellatrix) {
+	if versionHeader == version.String(version.Bellatrix) {
+		var bellatrixBlock *structs.SignedBeaconBlockBellatrix
+		if err = unmarshalStrict(body, &bellatrixBlock); err == nil {
+			consensusBlock, err = bellatrixBlock.ToGeneric()
+			if err == nil {
+				if err = s.validateBroadcast(ctx, r, consensusBlock); err != nil {
+					httputil.HandleError(w, err.Error(), http.StatusBadRequest)
+					return
+				}
+				s.proposeBlock(ctx, w, consensusBlock)
+				return
+			}
+		}
+
 		httputil.HandleError(
 			w,
 			fmt.Sprintf("Could not decode request body into %s consensus block: %v", version.String(version.Bellatrix), err.Error()),

@@ -1079,19 +1146,20 @@ func (s *Server) publishBlock(ctx context.Context, w http.ResponseWriter, r *htt
 		return
 	}
 
-	var altairBlock *structs.SignedBeaconBlockAltair
-	if err = unmarshalStrict(body, &altairBlock); err == nil {
-		consensusBlock, err = altairBlock.ToGeneric()
-		if err == nil {
-			if err = s.validateBroadcast(ctx, r, consensusBlock); err != nil {
-				httputil.HandleError(w, err.Error(), http.StatusBadRequest)
-				return
-			}
-			s.proposeBlock(ctx, w, consensusBlock)
-			return
-		}
-	}
-	if versionHeader == version.String(version.Altair) {
+	if versionHeader == version.String(version.Altair) {
+		var altairBlock *structs.SignedBeaconBlockAltair
+		if err = unmarshalStrict(body, &altairBlock); err == nil {
+			consensusBlock, err = altairBlock.ToGeneric()
+			if err == nil {
+				if err = s.validateBroadcast(ctx, r, consensusBlock); err != nil {
+					httputil.HandleError(w, err.Error(), http.StatusBadRequest)
+					return
+				}
+				s.proposeBlock(ctx, w, consensusBlock)
+				return
+			}
+		}
+
 		httputil.HandleError(
 			w,
 			fmt.Sprintf("Could not decode request body into %s consensus block: %v", version.String(version.Altair), err.Error()),

@@ -1100,19 +1168,20 @@ func (s *Server) publishBlock(ctx context.Context, w http.ResponseWriter, r *htt
 		return
 	}
 
-	var phase0Block *structs.SignedBeaconBlock
-	if err = unmarshalStrict(body, &phase0Block); err == nil {
-		consensusBlock, err = phase0Block.ToGeneric()
-		if err == nil {
-			if err = s.validateBroadcast(ctx, r, consensusBlock); err != nil {
-				httputil.HandleError(w, err.Error(), http.StatusBadRequest)
-				return
-			}
-			s.proposeBlock(ctx, w, consensusBlock)
-			return
-		}
-	}
-	if versionHeader == version.String(version.Phase0) {
+	if versionHeader == version.String(version.Phase0) {
+		var phase0Block *structs.SignedBeaconBlock
+		if err = unmarshalStrict(body, &phase0Block); err == nil {
+			consensusBlock, err = phase0Block.ToGeneric()
+			if err == nil {
+				if err = s.validateBroadcast(ctx, r, consensusBlock); err != nil {
+					httputil.HandleError(w, err.Error(), http.StatusBadRequest)
+					return
+				}
+				s.proposeBlock(ctx, w, consensusBlock)
+				return
+			}
+		}
+
 		httputil.HandleError(
 			w,
 			fmt.Sprintf("Could not decode request body into %s consensus block: %v", version.String(version.Phase0), err.Error()),
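In test form the behavioral change reads roughly like this (a sketch mirroring the new tests below; blockJSON stands for a marshaled block at a post-fork slot):

    request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(blockJSON))
    writer := httptest.NewRecorder()
    server.PublishBlock(writer, request) // note: no api.VersionHeader set on the request
    // The handler now falls back to versionHeaderFromRequest(body) before dispatching,
    // so a well-formed body succeeds even without the Eth-Consensus-Version header.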
@@ -320,6 +320,13 @@ func (s *Server) handleAttestationsElectra(
 	}
 
 	for i, singleAtt := range validAttestations {
+		s.OperationNotifier.OperationFeed().Send(&feed.Event{
+			Type: operation.SingleAttReceived,
+			Data: &operation.SingleAttReceivedData{
+				Attestation: singleAtt,
+			},
+		})
+
 		targetState, err := s.AttestationStateFetcher.AttestationTargetState(ctx, singleAtt.Data.Target)
 		if err != nil {
 			return nil, nil, errors.Wrap(err, "could not get target state for attestation")

@@ -330,13 +337,6 @@ func (s *Server) handleAttestationsElectra(
 		}
 		att := singleAtt.ToAttestationElectra(committee)
 
-		s.OperationNotifier.OperationFeed().Send(&feed.Event{
-			Type: operation.UnaggregatedAttReceived,
-			Data: &operation.UnAggregatedAttReceivedData{
-				Attestation: att,
-			},
-		})
-
 		wantedEpoch := slots.ToEpoch(att.Data.Slot)
 		vals, err := s.HeadFetcher.HeadValidatorsIndices(ctx, wantedEpoch)
 		if err != nil {
@@ -1407,6 +1407,38 @@ func TestPublishBlock(t *testing.T) {
 		server.PublishBlock(writer, request)
 		assert.Equal(t, http.StatusOK, writer.Code)
 	})
+	t.Run("Capella block without version header succeeds", func(t *testing.T) {
+		cfg := params.BeaconConfig().Copy()
+		cfg.CapellaForkEpoch = 4
+		params.OverrideBeaconConfig(cfg)
+		params.SetupTestConfigCleanup(t)
+		var signedblock *structs.SignedBeaconBlockCapella
+		require.NoError(t, json.Unmarshal([]byte(rpctesting.CapellaBlock), &signedblock))
+		signedblock.Message.Slot = fmt.Sprintf("%d", uint64(params.BeaconConfig().SlotsPerEpoch)*uint64(params.BeaconConfig().CapellaForkEpoch))
+		newBlock, err := json.Marshal(signedblock)
+		require.NoError(t, err)
+		v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
+		v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), mock.MatchedBy(func(req *eth.GenericSignedBeaconBlock) bool {
+			block, ok := req.Block.(*eth.GenericSignedBeaconBlock_Capella)
+			converted, err := structs.BeaconBlockCapellaFromConsensus(block.Capella.Block)
+			require.NoError(t, err)
+			var signedblock *structs.SignedBeaconBlockCapella
+			err = json.Unmarshal(newBlock, &signedblock)
+			require.NoError(t, err)
+			require.DeepEqual(t, converted, signedblock.Message)
+			return ok
+		}))
+		server := &Server{
+			V1Alpha1ValidatorServer: v1alpha1Server,
+			SyncChecker:             &mockSync.Sync{IsSyncing: false},
+		}
+
+		request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(newBlock))
+		writer := httptest.NewRecorder()
+		writer.Body = &bytes.Buffer{}
+		server.PublishBlock(writer, request)
+		assert.Equal(t, http.StatusOK, writer.Code)
+	})
 	t.Run("Deneb", func(t *testing.T) {
 		v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
 		v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), mock.MatchedBy(func(req *eth.GenericSignedBeaconBlock) bool {

@@ -1433,11 +1465,10 @@ func TestPublishBlock(t *testing.T) {
 	t.Run("Electra", func(t *testing.T) {
 		v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
 		v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), mock.MatchedBy(func(req *eth.GenericSignedBeaconBlock) bool {
-			// Convert back Fulu to Electra when there is at least one difference between Electra and Fulu blocks.
-			block, ok := req.Block.(*eth.GenericSignedBeaconBlock_Fulu)
-			converted, err := structs.SignedBeaconBlockContentsFuluFromConsensus(block.Fulu)
+			block, ok := req.Block.(*eth.GenericSignedBeaconBlock_Electra)
+			converted, err := structs.SignedBeaconBlockContentsElectraFromConsensus(block.Electra)
 			require.NoError(t, err)
-			var signedblock *structs.SignedBeaconBlockContentsFulu
+			var signedblock *structs.SignedBeaconBlockContentsElectra
 			err = json.Unmarshal([]byte(rpctesting.FuluBlockContents), &signedblock)
 			require.NoError(t, err)
 			require.DeepEqual(t, converted, signedblock)

@@ -1454,6 +1485,51 @@ func TestPublishBlock(t *testing.T) {
 		server.PublishBlock(writer, request)
 		assert.Equal(t, http.StatusOK, writer.Code)
 	})
+	t.Run("Electra block without version header succeeds", func(t *testing.T) {
+		cfg := params.BeaconConfig().Copy()
+		cfg.ElectraForkEpoch = 6
+		params.OverrideBeaconConfig(cfg)
+		params.SetupTestConfigCleanup(t)
+		var signedblock *structs.SignedBeaconBlockContentsElectra
+		require.NoError(t, json.Unmarshal([]byte(rpctesting.ElectraBlockContents), &signedblock))
+		signedblock.SignedBlock.Message.Slot = fmt.Sprintf("%d", uint64(params.BeaconConfig().SlotsPerEpoch)*uint64(params.BeaconConfig().ElectraForkEpoch))
+		newContents, err := json.Marshal(signedblock)
+		require.NoError(t, err)
+		v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
+		v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), mock.MatchedBy(func(req *eth.GenericSignedBeaconBlock) bool {
+			block, ok := req.Block.(*eth.GenericSignedBeaconBlock_Electra)
+			converted, err := structs.SignedBeaconBlockContentsElectraFromConsensus(block.Electra)
+			require.NoError(t, err)
+			var signedblock *structs.SignedBeaconBlockContentsElectra
+			err = json.Unmarshal(newContents, &signedblock)
+			require.NoError(t, err)
+			require.DeepEqual(t, converted, signedblock)
+			return ok
+		}))
+		server := &Server{
+			V1Alpha1ValidatorServer: v1alpha1Server,
+			SyncChecker:             &mockSync.Sync{IsSyncing: false},
+		}
+
+		request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader(newContents))
+		writer := httptest.NewRecorder()
+		writer.Body = &bytes.Buffer{}
+		server.PublishBlock(writer, request)
+		assert.Equal(t, http.StatusOK, writer.Code)
+	})
+	t.Run("Electra block without version header on wrong fork", func(t *testing.T) {
+		v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
+		server := &Server{
+			V1Alpha1ValidatorServer: v1alpha1Server,
+			SyncChecker:             &mockSync.Sync{IsSyncing: false},
+		}
+		request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(rpctesting.ElectraBlockContents)))
+		writer := httptest.NewRecorder()
+		writer.Body = &bytes.Buffer{}
+		server.PublishBlock(writer, request)
+		assert.Equal(t, http.StatusBadRequest, writer.Code)
+		assert.StringContains(t, fmt.Sprintf("Could not decode request body into %s consensus block", version.String(version.Phase0)), writer.Body.String())
+	})
 	t.Run("Fulu", func(t *testing.T) {
 		v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
 		v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), mock.MatchedBy(func(req *eth.GenericSignedBeaconBlock) bool {
@@ -1521,6 +1597,107 @@ func TestPublishBlock(t *testing.T) {
 	})
 }
 
+func TestVersionHeaderFromRequest(t *testing.T) {
+	t.Run("Fulu block contents returns fulu header", func(t *testing.T) {
+		cfg := params.BeaconConfig().Copy()
+		cfg.FuluForkEpoch = 7
+		params.OverrideBeaconConfig(cfg)
+		params.SetupTestConfigCleanup(t)
+		var signedblock *structs.SignedBeaconBlockContentsFulu
+		require.NoError(t, json.Unmarshal([]byte(rpctesting.FuluBlockContents), &signedblock))
+		signedblock.SignedBlock.Message.Slot = fmt.Sprintf("%d", uint64(params.BeaconConfig().SlotsPerEpoch)*uint64(params.BeaconConfig().FuluForkEpoch))
+		newContents, err := json.Marshal(signedblock)
+		require.NoError(t, err)
+		versionHead, err := versionHeaderFromRequest(newContents)
+		require.NoError(t, err)
+		require.Equal(t, version.String(version.Fulu), versionHead)
+	})
+	t.Run("Electra block contents returns electra header", func(t *testing.T) {
+		cfg := params.BeaconConfig().Copy()
+		cfg.ElectraForkEpoch = 6
+		params.OverrideBeaconConfig(cfg)
+		params.SetupTestConfigCleanup(t)
+		var signedblock *structs.SignedBeaconBlockContentsElectra
+		require.NoError(t, json.Unmarshal([]byte(rpctesting.ElectraBlockContents), &signedblock))
+		signedblock.SignedBlock.Message.Slot = fmt.Sprintf("%d", uint64(params.BeaconConfig().SlotsPerEpoch)*uint64(params.BeaconConfig().ElectraForkEpoch))
+		newContents, err := json.Marshal(signedblock)
+		require.NoError(t, err)
+		versionHead, err := versionHeaderFromRequest(newContents)
+		require.NoError(t, err)
+		require.Equal(t, version.String(version.Electra), versionHead)
+	})
+	t.Run("Deneb block contents returns deneb header", func(t *testing.T) {
+		cfg := params.BeaconConfig().Copy()
+		cfg.DenebForkEpoch = 5
+		params.OverrideBeaconConfig(cfg)
+		params.SetupTestConfigCleanup(t)
+		var signedblock *structs.SignedBeaconBlockContentsDeneb
+		require.NoError(t, json.Unmarshal([]byte(rpctesting.DenebBlockContents), &signedblock))
+		signedblock.SignedBlock.Message.Slot = fmt.Sprintf("%d", uint64(params.BeaconConfig().SlotsPerEpoch)*uint64(params.BeaconConfig().DenebForkEpoch))
+		newContents, err := json.Marshal(signedblock)
+		require.NoError(t, err)
+		versionHead, err := versionHeaderFromRequest(newContents)
+		require.NoError(t, err)
+		require.Equal(t, version.String(version.Deneb), versionHead)
+	})
+	t.Run("Capella block returns capella header", func(t *testing.T) {
+		cfg := params.BeaconConfig().Copy()
+		cfg.CapellaForkEpoch = 4
+		params.OverrideBeaconConfig(cfg)
+		params.SetupTestConfigCleanup(t)
+		var signedblock *structs.SignedBeaconBlockCapella
+		require.NoError(t, json.Unmarshal([]byte(rpctesting.CapellaBlock), &signedblock))
+		signedblock.Message.Slot = fmt.Sprintf("%d", uint64(params.BeaconConfig().SlotsPerEpoch)*uint64(params.BeaconConfig().CapellaForkEpoch))
+		newBlock, err := json.Marshal(signedblock)
+		require.NoError(t, err)
+		versionHead, err := versionHeaderFromRequest(newBlock)
+		require.NoError(t, err)
+		require.Equal(t, version.String(version.Capella), versionHead)
+	})
+	t.Run("Bellatrix block returns capella header", func(t *testing.T) {
+		cfg := params.BeaconConfig().Copy()
+		cfg.BellatrixForkEpoch = 3
+		params.OverrideBeaconConfig(cfg)
+		params.SetupTestConfigCleanup(t)
+		var signedblock *structs.SignedBeaconBlockBellatrix
+		require.NoError(t, json.Unmarshal([]byte(rpctesting.BellatrixBlock), &signedblock))
+		signedblock.Message.Slot = fmt.Sprintf("%d", uint64(params.BeaconConfig().SlotsPerEpoch)*uint64(params.BeaconConfig().BellatrixForkEpoch))
+		newBlock, err := json.Marshal(signedblock)
+		require.NoError(t, err)
+		versionHead, err := versionHeaderFromRequest(newBlock)
+		require.NoError(t, err)
+		require.Equal(t, version.String(version.Bellatrix), versionHead)
+	})
+	t.Run("Altair block returns capella header", func(t *testing.T) {
+		cfg := params.BeaconConfig().Copy()
+		cfg.AltairForkEpoch = 2
+		params.OverrideBeaconConfig(cfg)
+		params.SetupTestConfigCleanup(t)
+		var signedblock *structs.SignedBeaconBlockAltair
+		require.NoError(t, json.Unmarshal([]byte(rpctesting.AltairBlock), &signedblock))
+		signedblock.Message.Slot = fmt.Sprintf("%d", uint64(params.BeaconConfig().SlotsPerEpoch)*uint64(params.BeaconConfig().AltairForkEpoch))
+		newBlock, err := json.Marshal(signedblock)
+		require.NoError(t, err)
+		versionHead, err := versionHeaderFromRequest(newBlock)
+		require.NoError(t, err)
+		require.Equal(t, version.String(version.Altair), versionHead)
+	})
+	t.Run("Phase0 block returns capella header", func(t *testing.T) {
+		var signedblock *structs.SignedBeaconBlock
+		require.NoError(t, json.Unmarshal([]byte(rpctesting.Phase0Block), &signedblock))
+		newBlock, err := json.Marshal(signedblock)
+		require.NoError(t, err)
+		versionHead, err := versionHeaderFromRequest(newBlock)
+		require.NoError(t, err)
+		require.Equal(t, version.String(version.Phase0), versionHead)
+	})
+	t.Run("Malformed json returns error unable to peek slot from block contents", func(t *testing.T) {
+		malformedJSON := []byte(`{"age": 30,}`)
+		_, err := versionHeaderFromRequest(malformedJSON)
+		require.ErrorContains(t, "unable to peek slot", err)
+	})
+}
+
 func TestPublishBlockSSZ(t *testing.T) {
 	ctrl := gomock.NewController(t)
 	t.Run("Phase 0", func(t *testing.T) {
@@ -2352,12 +2529,11 @@ func TestPublishBlockV2(t *testing.T) {
 	t.Run("Electra", func(t *testing.T) {
 		v1alpha1Server := mock2.NewMockBeaconNodeValidatorServer(ctrl)
 		v1alpha1Server.EXPECT().ProposeBeaconBlock(gomock.Any(), mock.MatchedBy(func(req *eth.GenericSignedBeaconBlock) bool {
-			// Convert back Fulu to Electra when there is at least one difference between Electra and Fulu blocks.
-			block, ok := req.Block.(*eth.GenericSignedBeaconBlock_Fulu)
-			converted, err := structs.SignedBeaconBlockContentsFuluFromConsensus(block.Fulu)
+			block, ok := req.Block.(*eth.GenericSignedBeaconBlock_Electra)
+			converted, err := structs.SignedBeaconBlockContentsElectraFromConsensus(block.Electra)
 			require.NoError(t, err)
-			var signedblock *structs.SignedBeaconBlockContentsFulu
-			err = json.Unmarshal([]byte(rpctesting.FuluBlockContents), &signedblock)
+			var signedblock *structs.SignedBeaconBlockContentsElectra
+			err = json.Unmarshal([]byte(rpctesting.ElectraBlockContents), &signedblock)
 			require.NoError(t, err)
 			require.DeepEqual(t, converted, signedblock)
 			return ok
@@ -42,8 +42,7 @@ func TestBlobs(t *testing.T) {
 	denebBlock, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 123, 4)
 	require.NoError(t, db.SaveBlock(context.Background(), denebBlock))
 	bs := filesystem.NewEphemeralBlobStorage(t)
-	testSidecars, err := verification.BlobSidecarSliceNoop(blobs)
-	require.NoError(t, err)
+	testSidecars := verification.FakeVerifySliceForTest(t, blobs)
 	for i := range testSidecars {
 		require.NoError(t, bs.Save(testSidecars[i]))
 	}

@@ -418,8 +417,7 @@ func TestBlobs_Electra(t *testing.T) {
 	electraBlock, blobs := util.GenerateTestElectraBlockWithSidecar(t, [32]byte{}, 123, params.BeaconConfig().MaxBlobsPerBlockByVersion(version.Electra))
 	require.NoError(t, db.SaveBlock(context.Background(), electraBlock))
 	bs := filesystem.NewEphemeralBlobStorage(t)
-	testSidecars, err := verification.BlobSidecarSliceNoop(blobs)
-	require.NoError(t, err)
+	testSidecars := verification.FakeVerifySliceForTest(t, blobs)
 	for i := range testSidecars {
 		require.NoError(t, bs.Save(testSidecars[i]))
 	}
@@ -153,6 +153,13 @@ func TestGetSpec(t *testing.T) {
 	config.UnsetDepositRequestsStartIndex = 92
 	config.MaxDepositRequestsPerPayload = 93
 	config.MaxPendingDepositsPerEpoch = 94
+	config.MaxBlobCommitmentsPerBlock = 95
+	config.MaxBytesPerTransaction = 96
+	config.MaxExtraDataBytes = 97
+	config.BytesPerLogsBloom = 98
+	config.MaxTransactionsPerPayload = 99
+	config.FieldElementsPerBlob = 100
+	config.KzgCommitmentInclusionProofDepth = 101
 
 	var dbp [4]byte
 	copy(dbp[:], []byte{'0', '0', '0', '1'})

@@ -191,7 +198,7 @@ func TestGetSpec(t *testing.T) {
 	data, ok := resp.Data.(map[string]interface{})
 	require.Equal(t, true, ok)
 
-	assert.Equal(t, 161, len(data))
+	assert.Equal(t, 168, len(data))
 	for k, v := range data {
 		t.Run(k, func(t *testing.T) {
 			switch k {

@@ -437,7 +444,7 @@ func TestGetSpec(t *testing.T) {
 				assert.Equal(t, "76", v)
 			case "REORG_MAX_EPOCHS_SINCE_FINALIZATION":
 				assert.Equal(t, "2", v)
-			case "REORG_WEIGHT_THRESHOLD":
+			case "REORG_HEAD_WEIGHT_THRESHOLD":
 				assert.Equal(t, "20", v)
 			case "REORG_PARENT_WEIGHT_THRESHOLD":
 				assert.Equal(t, "160", v)

@@ -538,6 +545,20 @@ func TestGetSpec(t *testing.T) {
 				assert.Equal(t, "9", v)
 			case "MAX_REQUEST_BLOB_SIDECARS_ELECTRA":
 				assert.Equal(t, "1152", v)
+			case "MAX_BLOB_COMMITMENTS_PER_BLOCK":
+				assert.Equal(t, "95", v)
+			case "MAX_BYTES_PER_TRANSACTION":
+				assert.Equal(t, "96", v)
+			case "MAX_EXTRA_DATA_BYTES":
+				assert.Equal(t, "97", v)
+			case "BYTES_PER_LOGS_BLOOM":
+				assert.Equal(t, "98", v)
+			case "MAX_TRANSACTIONS_PER_PAYLOAD":
+				assert.Equal(t, "99", v)
+			case "FIELD_ELEMENTS_PER_BLOB":
+				assert.Equal(t, "100", v)
+			case "KZG_COMMITMENT_INCLUSION_PROOF_DEPTH":
+				assert.Equal(t, "101", v)
 			default:
 				t.Errorf("Incorrect key: %s", k)
 			}
@@ -45,6 +45,8 @@ const (
 	BlockTopic = "block"
 	// AttestationTopic represents a new submitted attestation event topic.
 	AttestationTopic = "attestation"
+	// SingleAttestationTopic represents a new submitted single attestation event topic.
+	SingleAttestationTopic = "single_attestation"
 	// VoluntaryExitTopic represents a new performed voluntary exit event topic.
 	VoluntaryExitTopic = "voluntary_exit"
 	// FinalizedCheckpointTopic represents a new finalized checkpoint event topic.

@@ -92,6 +94,7 @@ type lazyReader func() io.Reader
 var opsFeedEventTopics = map[feed.EventType]string{
 	operation.AggregatedAttReceived:             AttestationTopic,
 	operation.UnaggregatedAttReceived:           AttestationTopic,
+	operation.SingleAttReceived:                 SingleAttestationTopic,
 	operation.ExitReceived:                      VoluntaryExitTopic,
 	operation.SyncCommitteeContributionReceived: SyncCommitteeContributionTopic,
 	operation.BLSToExecutionChangeReceived:      BLSToExecutionChangeTopic,
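Consumers can pick up the new topic over the standard events endpoint; a standalone sketch (the /eth/v1/events route and topics query are the usual Beacon API conventions; host and port are placeholders):

    package main

    import (
        "bufio"
        "fmt"
        "log"
        "net/http"
    )

    func main() {
        resp, err := http.Get("http://localhost:3500/eth/v1/events?topics=single_attestation")
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()
        sc := bufio.NewScanner(resp.Body)
        for sc.Scan() {
            // SSE frames arrive as "event: single_attestation" then "data: {...}".
            fmt.Println(sc.Text())
        }
    }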
@@ -403,7 +406,7 @@ func (es *eventStreamer) writeOutbox(ctx context.Context, w *streamingResponseWr
 func jsonMarshalReader(name string, v any) io.Reader {
 	d, err := json.Marshal(v)
 	if err != nil {
-		log.WithError(err).WithField("type_name", fmt.Sprintf("%T", v)).Error("Could not marshal event data.")
+		log.WithError(err).WithField("type_name", fmt.Sprintf("%T", v)).Error("Could not marshal event data")
 		return nil
 	}
 	return bytes.NewBufferString("event: " + name + "\ndata: " + string(d) + "\n\n")

@@ -415,6 +418,8 @@ func topicForEvent(event *feed.Event) string {
 		return AttestationTopic
 	case *operation.UnAggregatedAttReceivedData:
 		return AttestationTopic
+	case *operation.SingleAttReceivedData:
+		return SingleAttestationTopic
 	case *operation.ExitReceivedData:
 		return VoluntaryExitTopic
 	case *operation.SyncCommitteeContributionReceivedData:

@@ -464,10 +469,20 @@ func (s *Server) lazyReaderForEvent(ctx context.Context, event *feed.Event, topi
 			return jsonMarshalReader(eventName, structs.HeadEventFromV1(v))
 		}, nil
 	case *operation.AggregatedAttReceivedData:
-		return func() io.Reader {
-			att := structs.AttFromConsensus(v.Attestation.Aggregate)
-			return jsonMarshalReader(eventName, att)
-		}, nil
+		switch att := v.Attestation.AggregateVal().(type) {
+		case *eth.Attestation:
+			return func() io.Reader {
+				att := structs.AttFromConsensus(att)
+				return jsonMarshalReader(eventName, att)
+			}, nil
+		case *eth.AttestationElectra:
+			return func() io.Reader {
+				att := structs.AttElectraFromConsensus(att)
+				return jsonMarshalReader(eventName, att)
+			}, nil
+		default:
+			return nil, errors.Wrapf(errUnhandledEventData, "Unexpected type %T for the .Attestation field of AggregatedAttReceivedData", v.Attestation)
+		}
 	case *operation.UnAggregatedAttReceivedData:
 		switch att := v.Attestation.(type) {
 		case *eth.Attestation:

@@ -483,6 +498,16 @@ func (s *Server) lazyReaderForEvent(ctx context.Context, event *feed.Event, topi
 		default:
 			return nil, errors.Wrapf(errUnhandledEventData, "Unexpected type %T for the .Attestation field of UnAggregatedAttReceivedData", v.Attestation)
 		}
+	case *operation.SingleAttReceivedData:
+		switch att := v.Attestation.(type) {
+		case *eth.SingleAttestation:
+			return func() io.Reader {
+				att := structs.SingleAttFromConsensus(att)
+				return jsonMarshalReader(eventName, att)
+			}, nil
+		default:
+			return nil, errors.Wrapf(errUnhandledEventData, "Unexpected type %T for the .Attestation field of SingleAttReceivedData", v.Attestation)
+		}
 	case *operation.ExitReceivedData:
 		return func() io.Reader {
 			return jsonMarshalReader(eventName, structs.SignedExitFromConsensus(v.Exit))
@@ -110,6 +110,7 @@ func (tr *topicRequest) testHttpRequest(ctx context.Context, _ *testing.T) *http
|
||||
func operationEventsFixtures(t *testing.T) (*topicRequest, []*feed.Event) {
|
||||
topics, err := newTopicRequest([]string{
|
||||
AttestationTopic,
|
||||
SingleAttestationTopic,
|
||||
VoluntaryExitTopic,
|
||||
SyncCommitteeContributionTopic,
|
||||
BLSToExecutionChangeTopic,
|
||||
@@ -123,13 +124,13 @@ func operationEventsFixtures(t *testing.T) (*topicRequest, []*feed.Event) {
|
||||
vblob := blocks.NewVerifiedROBlob(ro)
|
||||
|
||||
return topics, []*feed.Event{
|
||||
&feed.Event{
|
||||
{
|
||||
Type: operation.UnaggregatedAttReceived,
|
||||
Data: &operation.UnAggregatedAttReceivedData{
|
||||
Attestation: util.HydrateAttestation(ð.Attestation{}),
|
||||
},
|
||||
},
|
||||
&feed.Event{
|
||||
{
|
||||
Type: operation.AggregatedAttReceived,
|
||||
Data: &operation.AggregatedAttReceivedData{
|
||||
Attestation: ð.AggregateAttestationAndProof{
|
||||
@@ -139,7 +140,13 @@ func operationEventsFixtures(t *testing.T) (*topicRequest, []*feed.Event) {
|
||||
},
|
||||
},
|
||||
},
|
||||
&feed.Event{
|
||||
{
|
||||
Type: operation.SingleAttReceived,
|
||||
Data: &operation.SingleAttReceivedData{
|
||||
Attestation: util.HydrateSingleAttestation(ð.SingleAttestation{}),
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: operation.ExitReceived,
|
||||
Data: &operation.ExitReceivedData{
|
||||
Exit: ð.SignedVoluntaryExit{
|
||||
@@ -151,7 +158,7 @@ func operationEventsFixtures(t *testing.T) (*topicRequest, []*feed.Event) {
|
||||
},
|
||||
},
|
||||
},
|
||||
&feed.Event{
|
||||
{
|
||||
Type: operation.SyncCommitteeContributionReceived,
|
||||
Data: &operation.SyncCommitteeContributionReceivedData{
|
||||
Contribution: ð.SignedContributionAndProof{
|
||||
@@ -170,7 +177,7 @@ func operationEventsFixtures(t *testing.T) (*topicRequest, []*feed.Event) {
|
||||
},
|
||||
},
|
||||
},
|
||||
&feed.Event{
|
||||
{
|
||||
Type: operation.BLSToExecutionChangeReceived,
|
||||
Data: &operation.BLSToExecutionChangeReceivedData{
|
||||
Change: ð.SignedBLSToExecutionChange{
|
||||
@@ -183,13 +190,13 @@ func operationEventsFixtures(t *testing.T) (*topicRequest, []*feed.Event) {
|
||||
},
|
||||
},
|
||||
},
|
||||
&feed.Event{
|
||||
{
|
||||
Type: operation.BlobSidecarReceived,
|
||||
Data: &operation.BlobSidecarReceivedData{
|
||||
Blob: &vblob,
|
||||
},
|
||||
},
|
||||
&feed.Event{
|
||||
{
|
||||
Type: operation.AttesterSlashingReceived,
|
||||
Data: &operation.AttesterSlashingReceivedData{
|
||||
AttesterSlashing: ð.AttesterSlashing{
|
||||
@@ -222,7 +229,7 @@ func operationEventsFixtures(t *testing.T) (*topicRequest, []*feed.Event) {
|
||||
},
|
||||
},
|
||||
},
|
||||
&feed.Event{
|
||||
{
|
||||
Type: operation.AttesterSlashingReceived,
|
||||
Data: &operation.AttesterSlashingReceivedData{
|
||||
AttesterSlashing: ð.AttesterSlashingElectra{
|
||||
@@ -255,7 +262,7 @@ func operationEventsFixtures(t *testing.T) (*topicRequest, []*feed.Event) {
|
||||
},
|
||||
},
|
||||
},
|
||||
&feed.Event{
|
||||
{
|
||||
Type: operation.ProposerSlashingReceived,
|
||||
Data: &operation.ProposerSlashingReceivedData{
|
||||
ProposerSlashing: ð.ProposerSlashing{
|
||||
@@ -367,7 +374,7 @@ func TestStreamEvents_OperationsEvents(t *testing.T) {
|
||||
b, err := blocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlock(ð.SignedBeaconBlock{}))
|
||||
require.NoError(t, err)
|
||||
events := []*feed.Event{
|
||||
&feed.Event{
|
||||
{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
Slot: 0,
|
||||
@@ -377,7 +384,7 @@ func TestStreamEvents_OperationsEvents(t *testing.T) {
|
||||
Optimistic: false,
|
||||
},
|
||||
},
|
||||
&feed.Event{
|
||||
{
|
||||
Type: statefeed.NewHead,
|
||||
Data: ðpb.EventHead{
|
||||
Slot: 0,
|
||||
@@ -389,7 +396,7 @@ func TestStreamEvents_OperationsEvents(t *testing.T) {
|
||||
ExecutionOptimistic: false,
|
||||
},
|
||||
},
|
||||
&feed.Event{
|
||||
{
|
||||
Type: statefeed.Reorg,
|
||||
Data: ðpb.EventChainReorg{
|
||||
Slot: 0,
|
||||
@@ -402,7 +409,7 @@ func TestStreamEvents_OperationsEvents(t *testing.T) {
|
||||
ExecutionOptimistic: false,
|
||||
},
|
||||
},
|
||||
&feed.Event{
|
||||
{
|
||||
Type: statefeed.FinalizedCheckpoint,
|
||||
Data: ðpb.EventFinalizedCheckpoint{
|
||||
Block: make([]byte, 32),
|
||||
@@ -525,7 +532,7 @@ func TestStreamEvents_OperationsEvents(t *testing.T) {
|
||||
request := topics.testHttpRequest(testSync.ctx, t)
|
||||
w := NewStreamingResponseWriterRecorder(testSync.ctx)
|
||||
events := []*feed.Event{
|
||||
&feed.Event{
|
||||
{
|
||||
Type: statefeed.PayloadAttributes,
|
||||
Data: payloadattribute.EventData{
|
||||
ProposerIndex: 0,
|
||||
@@ -577,7 +584,7 @@ func TestStuckReaderScenarios(t *testing.T) {
|
||||
|
||||
func wedgedWriterTestCase(t *testing.T, queueDepth func([]*feed.Event) int) {
|
||||
topics, events := operationEventsFixtures(t)
|
||||
require.Equal(t, 9, len(events))
|
||||
require.Equal(t, 10, len(events))
|
||||
|
||||
// set eventFeedDepth to a number lower than the events we intend to send to force the server to drop the reader.
|
||||
stn := mockChain.NewEventFeedWrapper()
|
||||
|
||||
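The fixture changes above register SingleAttestationTopic and add a tenth event (operation.SingleAttReceived), which is why wedgedWriterTestCase now expects 10 events. As a minimal sketch of how a consumer drains this feed, assuming a notifier exposing OperationFeed() as elsewhere in this diff (the subscription mechanics are a sketch, not the exact test wiring):

// subscribe a buffered channel to the operation feed and handle one event
ch := make(chan *feed.Event, 1)
sub := notifier.OperationFeed().Subscribe(ch)
defer sub.Unsubscribe()
ev := <-ch
if ev.Type == operation.SingleAttReceived {
	if data, ok := ev.Data.(*operation.SingleAttReceivedData); ok {
		_ = data.Attestation // e.g. forward to the HTTP event stream
	}
}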
File diff suppressed because it is too large
@@ -235,19 +235,10 @@ func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, indices []uint64
return make([]*blocks.VerifiedROBlob, 0), nil
}
if len(indices) == 0 {
m, err := p.BlobStorage.Indices(bytesutil.ToBytes32(root), b.Block().Slot())
if err != nil {
log.WithFields(log.Fields{
"blockRoot": hexutil.Encode(root),
}).Error(errors.Wrapf(err, "could not retrieve blob indices for root %#x", root))
return nil, &core.RpcError{Err: fmt.Errorf("could not retrieve blob indices for root %#x", root), Reason: core.Internal}
}
for k, v := range m {
if v {
if k >= len(commitments) {
return nil, &core.RpcError{Err: fmt.Errorf("blob index %d is more than blob kzg commitments :%dd", k, len(commitments)), Reason: core.BadRequest}
}
indices = append(indices, uint64(k))
sum := p.BlobStorage.Summary(bytesutil.ToBytes32(root))
for i := range commitments {
if sum.HasIndex(uint64(i)) {
indices = append(indices, uint64(i))
}
}
}
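The hunk above swaps the fallible BlobStorage.Indices call for BlobStorage.Summary, an in-memory view of which sidecar indices are stored for a root, so the empty-filter path no longer has an error branch; the same summary later drives missingBlobRequest in this diff. A minimal sketch of the complementary check, assuming sum behaves like the value returned by Summary:

// collect the commitment indices that are NOT yet on disk (mirror image of the loop above)
missing := make([]uint64, 0, len(commitments))
for i := range commitments {
	if !sum.HasIndex(uint64(i)) {
		missing = append(missing, uint64(i))
	}
}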
@@ -166,9 +166,8 @@ func TestGetBlob(t *testing.T) {
db := testDB.SetupDB(t)
denebBlock, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 123, 4)
require.NoError(t, db.SaveBlock(context.Background(), denebBlock))
_, bs := filesystem.NewEphemeralBlobStorageWithFs(t)
testSidecars, err := verification.BlobSidecarSliceNoop(blobs)
require.NoError(t, err)
_, bs := filesystem.NewEphemeralBlobStorageAndFs(t)
testSidecars := verification.FakeVerifySliceForTest(t, blobs)
for i := range testSidecars {
require.NoError(t, bs.Save(testSidecars[i]))
}
@@ -13,7 +13,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@@ -46,7 +45,7 @@ func (vs *Server) ProposeAttestation(ctx context.Context, att *ethpb.Attestation
ctx, span := trace.StartSpan(ctx, "AttesterServer.ProposeAttestation")
defer span.End()

resp, err := vs.proposeAtt(ctx, att, nil, att.GetData().CommitteeIndex)
resp, err := vs.proposeAtt(ctx, att, att.GetData().CommitteeIndex)
if err != nil {
return nil, err
}
@@ -74,21 +73,20 @@ func (vs *Server) ProposeAttestationElectra(ctx context.Context, singleAtt *ethp
ctx, span := trace.StartSpan(ctx, "AttesterServer.ProposeAttestationElectra")
defer span.End()

resp, err := vs.proposeAtt(ctx, singleAtt, singleAtt.GetCommitteeIndex())
if err != nil {
return nil, err
}

targetState, err := vs.AttestationStateFetcher.AttestationTargetState(ctx, singleAtt.Data.Target)
if err != nil {
return nil, status.Error(codes.Internal, "Could not get target state")
}
committeeIndex := singleAtt.GetCommitteeIndex()
committee, err := helpers.BeaconCommitteeFromState(ctx, targetState, singleAtt.Data.Slot, committeeIndex)
committee, err := helpers.BeaconCommitteeFromState(ctx, targetState, singleAtt.Data.Slot, singleAtt.GetCommitteeIndex())
if err != nil {
return nil, status.Error(codes.Internal, "Could not get committee")
}

resp, err := vs.proposeAtt(ctx, singleAtt, committee, committeeIndex)
if err != nil {
return nil, err
}

singleAttCopy := singleAtt.Copy()
att := singleAttCopy.ToAttestationElectra(committee)
if features.Get().EnableExperimentalAttestationPool {
@@ -158,7 +156,6 @@ func (vs *Server) SubscribeCommitteeSubnets(ctx context.Context, req *ethpb.Comm
func (vs *Server) proposeAtt(
ctx context.Context,
att ethpb.Att,
committee []primitives.ValidatorIndex, // required post-Electra
committeeIndex primitives.CommitteeIndex,
) (*ethpb.AttestResponse, error) {
if _, err := bls.SignatureFromBytes(att.GetSignature()); err != nil {
@@ -170,24 +167,23 @@ func (vs *Server) proposeAtt(
return nil, status.Errorf(codes.Internal, "Could not get attestation root: %v", err)
}

var singleAtt *ethpb.SingleAttestation
if att.Version() >= version.Electra {
var ok bool
singleAtt, ok = att.(*ethpb.SingleAttestation)
if !ok {
return nil, status.Errorf(codes.Internal, "Attestation has wrong type (expected %T, got %T)", &ethpb.SingleAttestation{}, att)
}
att = singleAtt.ToAttestationElectra(committee)
}

// Broadcast the unaggregated attestation on a feed to notify other services in the beacon node
// of a received unaggregated attestation.
vs.OperationNotifier.OperationFeed().Send(&feed.Event{
Type: operation.UnaggregatedAttReceived,
Data: &operation.UnAggregatedAttReceivedData{
Attestation: att,
},
})
if att.IsSingle() {
vs.OperationNotifier.OperationFeed().Send(&feed.Event{
Type: operation.SingleAttReceived,
Data: &operation.SingleAttReceivedData{
Attestation: att,
},
})
} else {
vs.OperationNotifier.OperationFeed().Send(&feed.Event{
Type: operation.UnaggregatedAttReceived,
Data: &operation.UnAggregatedAttReceivedData{
Attestation: att,
},
})
}

// Determine subnet to broadcast attestation to
wantedEpoch := slots.ToEpoch(att.GetData().Slot)
@@ -198,13 +194,7 @@ func (vs *Server) proposeAtt(
subnet := helpers.ComputeSubnetFromCommitteeAndSlot(uint64(len(vals)), committeeIndex, att.GetData().Slot)

// Broadcast the new attestation to the network.
var attToBroadcast ethpb.Att
if singleAtt != nil {
attToBroadcast = singleAtt
} else {
attToBroadcast = att
}
if err := vs.P2P.BroadcastAttestation(ctx, subnet, attToBroadcast); err != nil {
if err := vs.P2P.BroadcastAttestation(ctx, subnet, att); err != nil {
return nil, status.Errorf(codes.Internal, "Could not broadcast attestation: %v", err)
}
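With the committee parameter gone, proposeAtt no longer converts a SingleAttestation before broadcasting; it forwards the attestation in its wire form and picks the feed event type via att.IsSingle(). The routing reduces to this sketch (names as in the diff; a simplification of the method, not the full implementation):

ev := &feed.Event{
	Type: operation.UnaggregatedAttReceived,
	Data: &operation.UnAggregatedAttReceivedData{Attestation: att},
}
if att.IsSingle() { // post-Electra single attestations get their own event type
	ev = &feed.Event{
		Type: operation.SingleAttReceived,
		Data: &operation.SingleAttReceivedData{Attestation: att},
	}
}
vs.OperationNotifier.OperationFeed().Send(ev)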
@@ -71,9 +71,7 @@ func (b *BeaconState) ExitEpochAndUpdateChurn(exitBalance primitives.Gwei) (prim
b.earliestExitEpoch = earliestExitEpoch

b.markFieldAsDirty(types.ExitBalanceToConsume)
b.rebuildTrie[types.ExitBalanceToConsume] = true
b.markFieldAsDirty(types.EarliestExitEpoch)
b.rebuildTrie[types.EarliestExitEpoch] = true

return b.earliestExitEpoch, nil
}
@@ -23,13 +23,17 @@ func (b *BeaconState) AppendPendingConsolidation(val *ethpb.PendingConsolidation
b.lock.Lock()
defer b.lock.Unlock()

b.sharedFieldReferences[types.PendingConsolidations].MinusRef()
b.sharedFieldReferences[types.PendingConsolidations] = stateutil.NewRef(1)

b.pendingConsolidations = append(b.pendingConsolidations, val)
pendingConsolidations := b.pendingConsolidations
if b.sharedFieldReferences[types.PendingConsolidations].Refs() > 1 {
pendingConsolidations = make([]*ethpb.PendingConsolidation, 0, len(b.pendingConsolidations)+1)
pendingConsolidations = append(pendingConsolidations, b.pendingConsolidations...)
b.sharedFieldReferences[types.PendingConsolidations].MinusRef()
b.sharedFieldReferences[types.PendingConsolidations] = stateutil.NewRef(1)
}

b.pendingConsolidations = append(pendingConsolidations, val)
b.markFieldAsDirty(types.PendingConsolidations)
b.rebuildTrie[types.PendingConsolidations] = true

return nil
}
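This hunk, and the matching ones for pending deposits and pending partial withdrawals below, turn the append setters into copy-on-write: the backing slice is cloned only while another state still shares it, instead of unconditionally resetting the reference on every append. A minimal generic sketch of the idea, with Ref as a simplified stand-in for stateutil's reference counter (hypothetical names, illustration only):

// Ref mimics a shared-field reference counter.
type Ref struct{ n int }

func (r *Ref) Refs() int { return r.n }
func (r *Ref) MinusRef() { r.n-- }

// appendCoW appends v to xs, cloning xs first only if it is still shared.
func appendCoW[T any](ref *Ref, xs []T, v T) (*Ref, []T) {
	if ref.Refs() > 1 {
		cp := make([]T, 0, len(xs)+1)
		cp = append(cp, xs...) // private copy; sharers keep the original slice
		ref.MinusRef()         // drop our claim on the shared slice
		ref = &Ref{n: 1}       // fresh, unshared reference
		xs = cp
	}
	return ref, append(xs, v)
}

The TestAppendPendingConsolidation additions below verify exactly this behavior: a Copy() of the state appends independently without the two slices bleeding into each other.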
@@ -49,7 +53,6 @@ func (b *BeaconState) SetPendingConsolidations(val []*ethpb.PendingConsolidation
b.pendingConsolidations = val

b.markFieldAsDirty(types.PendingConsolidations)
b.rebuildTrie[types.PendingConsolidations] = true
return nil
}

@@ -66,7 +69,6 @@ func (b *BeaconState) SetEarliestConsolidationEpoch(epoch primitives.Epoch) erro
b.earliestConsolidationEpoch = epoch

b.markFieldAsDirty(types.EarliestConsolidationEpoch)
b.rebuildTrie[types.EarliestConsolidationEpoch] = true
return nil
}

@@ -83,6 +85,5 @@ func (b *BeaconState) SetConsolidationBalanceToConsume(balance primitives.Gwei)
b.consolidationBalanceToConsume = balance

b.markFieldAsDirty(types.ConsolidationBalanceToConsume)
b.rebuildTrie[types.ConsolidationBalanceToConsume] = true
return nil
}

@@ -20,6 +20,21 @@ func TestAppendPendingConsolidation(t *testing.T) {
require.NoError(t, err)
require.Equal(t, uint64(1), num)

pc := make([]*eth.PendingConsolidation, 0, 4)
require.NoError(t, s.SetPendingConsolidations(pc))
require.NoError(t, s.AppendPendingConsolidation(&eth.PendingConsolidation{SourceIndex: 1}))
s2 := s.Copy()
require.NoError(t, s2.AppendPendingConsolidation(&eth.PendingConsolidation{SourceIndex: 3}))
require.NoError(t, s.AppendPendingConsolidation(&eth.PendingConsolidation{SourceIndex: 2}))
pc, err = s.PendingConsolidations()
require.NoError(t, err)
require.Equal(t, primitives.ValidatorIndex(1), pc[0].SourceIndex)
require.Equal(t, primitives.ValidatorIndex(2), pc[1].SourceIndex)
pc, err = s2.PendingConsolidations()
require.NoError(t, err)
require.Equal(t, primitives.ValidatorIndex(1), pc[0].SourceIndex)
require.Equal(t, primitives.ValidatorIndex(3), pc[1].SourceIndex)

// Fails for versions older than electra
s, err = state_native.InitializeFromProtoDeneb(&eth.BeaconStateDeneb{})
require.NoError(t, err)

@@ -16,6 +16,5 @@ func (b *BeaconState) SetDepositRequestsStartIndex(index uint64) error {

b.depositRequestsStartIndex = index
b.markFieldAsDirty(types.DepositRequestsStartIndex)
b.rebuildTrie[types.DepositRequestsStartIndex] = true
return nil
}
@@ -23,13 +23,17 @@ func (b *BeaconState) AppendPendingDeposit(pd *ethpb.PendingDeposit) error {
b.lock.Lock()
defer b.lock.Unlock()

b.sharedFieldReferences[types.PendingDeposits].MinusRef()
b.sharedFieldReferences[types.PendingDeposits] = stateutil.NewRef(1)

b.pendingDeposits = append(b.pendingDeposits, pd)
pendingDeposits := b.pendingDeposits
if b.sharedFieldReferences[types.PendingDeposits].Refs() > 1 {
pendingDeposits = make([]*ethpb.PendingDeposit, 0, len(b.pendingDeposits)+1)
pendingDeposits = append(pendingDeposits, b.pendingDeposits...)
b.sharedFieldReferences[types.PendingDeposits].MinusRef()
b.sharedFieldReferences[types.PendingDeposits] = stateutil.NewRef(1)
}

b.pendingDeposits = append(pendingDeposits, pd)
b.markFieldAsDirty(types.PendingDeposits)
b.rebuildTrie[types.PendingDeposits] = true

return nil
}

@@ -49,7 +53,6 @@ func (b *BeaconState) SetPendingDeposits(val []*ethpb.PendingDeposit) error {
b.pendingDeposits = val

b.markFieldAsDirty(types.PendingDeposits)
b.rebuildTrie[types.PendingDeposits] = true
return nil
}

@@ -66,6 +69,5 @@ func (b *BeaconState) SetDepositBalanceToConsume(dbtc primitives.Gwei) error {
b.depositBalanceToConsume = dbtc

b.markFieldAsDirty(types.DepositBalanceToConsume)
b.rebuildTrie[types.DepositBalanceToConsume] = true
return nil
}

@@ -34,6 +34,21 @@ func TestAppendPendingDeposit(t *testing.T) {
require.Equal(t, primitives.Slot(1), pbd[0].Slot)
require.DeepEqual(t, sig, pbd[0].Signature)

ds := make([]*eth.PendingDeposit, 0, 4)
require.NoError(t, s.SetPendingDeposits(ds))
require.NoError(t, s.AppendPendingDeposit(&eth.PendingDeposit{Amount: 1}))
s2 := s.Copy()
require.NoError(t, s2.AppendPendingDeposit(&eth.PendingDeposit{Amount: 3}))
require.NoError(t, s.AppendPendingDeposit(&eth.PendingDeposit{Amount: 2}))
d, err := s.PendingDeposits()
require.NoError(t, err)
require.Equal(t, uint64(1), d[0].Amount)
require.Equal(t, uint64(2), d[1].Amount)
d, err = s2.PendingDeposits()
require.NoError(t, err)
require.Equal(t, uint64(1), d[0].Amount)
require.Equal(t, uint64(3), d[1].Amount)

// Fails for versions older than electra
s, err = state_native.InitializeFromProtoDeneb(&eth.BeaconStateDeneb{})
require.NoError(t, err)
@@ -22,7 +22,6 @@ func (b *BeaconState) SetPreviousParticipationBits(val []byte) error {

b.previousEpochParticipation = val
b.markFieldAsDirty(types.PreviousEpochParticipationBits)
b.rebuildTrie[types.PreviousEpochParticipationBits] = true
return nil
}

@@ -41,7 +40,6 @@ func (b *BeaconState) SetCurrentParticipationBits(val []byte) error {

b.currentEpochParticipation = val
b.markFieldAsDirty(types.CurrentEpochParticipationBits)
b.rebuildTrie[types.CurrentEpochParticipationBits] = true
return nil
}

@@ -126,7 +124,6 @@ func (b *BeaconState) ModifyPreviousParticipationBits(mutator func(val []byte) (
defer b.lock.Unlock()
b.previousEpochParticipation = participation
b.markFieldAsDirty(types.PreviousEpochParticipationBits)
b.rebuildTrie[types.PreviousEpochParticipationBits] = true
return nil
}

@@ -161,6 +158,5 @@ func (b *BeaconState) ModifyCurrentParticipationBits(mutator func(val []byte) ([
defer b.lock.Unlock()
b.currentEpochParticipation = participation
b.markFieldAsDirty(types.CurrentEpochParticipationBits)
b.rebuildTrie[types.CurrentEpochParticipationBits] = true
return nil
}
@@ -54,13 +54,17 @@ func (b *BeaconState) AppendPendingPartialWithdrawal(ppw *eth.PendingPartialWith
b.lock.Lock()
defer b.lock.Unlock()

b.sharedFieldReferences[types.PendingPartialWithdrawals].MinusRef()
b.sharedFieldReferences[types.PendingPartialWithdrawals] = stateutil.NewRef(1)

b.pendingPartialWithdrawals = append(b.pendingPartialWithdrawals, ppw)
pendingPartialWithdrawals := b.pendingPartialWithdrawals
if b.sharedFieldReferences[types.PendingPartialWithdrawals].Refs() > 1 {
pendingPartialWithdrawals = make([]*eth.PendingPartialWithdrawal, 0, len(b.pendingPartialWithdrawals)+1)
pendingPartialWithdrawals = append(pendingPartialWithdrawals, b.pendingPartialWithdrawals...)
b.sharedFieldReferences[types.PendingPartialWithdrawals].MinusRef()
b.sharedFieldReferences[types.PendingPartialWithdrawals] = stateutil.NewRef(1)
}

b.pendingPartialWithdrawals = append(pendingPartialWithdrawals, ppw)
b.markFieldAsDirty(types.PendingPartialWithdrawals)
b.rebuildTrie[types.PendingPartialWithdrawals] = true

return nil
}

@@ -81,8 +85,13 @@ func (b *BeaconState) DequeuePendingPartialWithdrawals(n uint64) error {
b.lock.Lock()
defer b.lock.Unlock()

b.sharedFieldReferences[types.PendingPartialWithdrawals].MinusRef()
b.sharedFieldReferences[types.PendingPartialWithdrawals] = stateutil.NewRef(1)
if b.sharedFieldReferences[types.PendingPartialWithdrawals].Refs() > 1 {
pendingPartialWithdrawals := make([]*eth.PendingPartialWithdrawal, len(b.pendingPartialWithdrawals))
copy(pendingPartialWithdrawals, b.pendingPartialWithdrawals)
b.pendingPartialWithdrawals = pendingPartialWithdrawals
b.sharedFieldReferences[types.PendingPartialWithdrawals].MinusRef()
b.sharedFieldReferences[types.PendingPartialWithdrawals] = stateutil.NewRef(1)
}

b.pendingPartialWithdrawals = b.pendingPartialWithdrawals[n:]
@@ -68,15 +68,16 @@ func TestDequeuePendingWithdrawals(t *testing.T) {
num, err := s.NumPendingPartialWithdrawals()
require.NoError(t, err)
require.Equal(t, uint64(3), num)
s2 := s.Copy()
require.NoError(t, s.DequeuePendingPartialWithdrawals(2))
num, err = s.NumPendingPartialWithdrawals()
require.NoError(t, err)
require.Equal(t, uint64(1), num)
num, err = s2.NumPendingPartialWithdrawals()
require.NoError(t, err)
require.Equal(t, uint64(3), num)

// 2 of 1 exceeds the limit and an error should be returned
num, err = s.NumPendingPartialWithdrawals()
require.NoError(t, err)
require.Equal(t, uint64(1), num)
require.ErrorContains(t, "cannot dequeue more withdrawals than are in the queue", s.DequeuePendingPartialWithdrawals(2))

// Removing all pending partial withdrawals should be OK.

@@ -111,6 +112,19 @@ func TestAppendPendingWithdrawals(t *testing.T) {
require.NoError(t, err)
require.Equal(t, uint64(4), num)

require.NoError(t, s.AppendPendingPartialWithdrawal(&eth.PendingPartialWithdrawal{Index: 1}))
s2 := s.Copy()
require.NoError(t, s2.AppendPendingPartialWithdrawal(&eth.PendingPartialWithdrawal{Index: 3}))
require.NoError(t, s.AppendPendingPartialWithdrawal(&eth.PendingPartialWithdrawal{Index: 2}))
w, err := s.PendingPartialWithdrawals()
require.NoError(t, err)
require.Equal(t, primitives.ValidatorIndex(1), w[4].Index)
require.Equal(t, primitives.ValidatorIndex(2), w[5].Index)
w, err = s2.PendingPartialWithdrawals()
require.NoError(t, err)
require.Equal(t, primitives.ValidatorIndex(1), w[4].Index)
require.Equal(t, primitives.ValidatorIndex(3), w[5].Index)

require.ErrorContains(t, "cannot append nil pending partial withdrawal", s.AppendPendingPartialWithdrawal(nil))

s, err = InitializeFromProtoDeneb(&eth.BeaconStateDeneb{})
@@ -820,7 +820,7 @@ func InitializeFromProtoUnsafeElectra(st *ethpb.BeaconStateElectra) (state.Beaco
b.sharedFieldReferences[types.Slashings] = stateutil.NewRef(1)
b.sharedFieldReferences[types.PreviousEpochParticipationBits] = stateutil.NewRef(1)
b.sharedFieldReferences[types.CurrentEpochParticipationBits] = stateutil.NewRef(1)
b.sharedFieldReferences[types.LatestExecutionPayloadHeaderDeneb] = stateutil.NewRef(1) // New in Electra.
b.sharedFieldReferences[types.LatestExecutionPayloadHeaderDeneb] = stateutil.NewRef(1)
b.sharedFieldReferences[types.HistoricalSummaries] = stateutil.NewRef(1)
b.sharedFieldReferences[types.PendingDeposits] = stateutil.NewRef(1) // New in Electra.
b.sharedFieldReferences[types.PendingPartialWithdrawals] = stateutil.NewRef(1) // New in Electra.
@@ -1487,8 +1487,7 @@ func (b *BeaconState) recomputeFieldTrie(index types.FieldIndex, elements interf
if fTrie.FieldReference().Refs() > 1 {
var newTrie *fieldtrie.FieldTrie
// We choose to only copy the validator
// trie as it is pretty expensive to regenerate
// in the event of late blocks.
// trie as it is pretty expensive to regenerate.
if index == types.Validators {
newTrie = fTrie.CopyTrie()
} else {
@@ -250,8 +250,7 @@ func (c *blobsTestCase) run(t *testing.T) {
}
}
for _, blobSidecars := range m {
v, err := verification.BlobSidecarSliceNoop(blobSidecars)
require.NoError(t, err)
v := verification.FakeVerifySliceForTest(t, blobSidecars)
for i := range v {
require.NoError(t, s.cfg.blobStorage.Save(v[i]))
}
@@ -1,4 +1,4 @@
load("@prysm//tools/go:def.bzl", "go_library")
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
name = "go_default_library",
@@ -6,16 +6,52 @@ go_library(
"api.go",
"file.go",
"log.go",
"weak-subjectivity.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/sync/checkpoint",
visibility = ["//visibility:public"],
deps = [
"//api/client:go_default_library",
"//api/client/beacon:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/params:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz/detect:go_default_library",
"//io/file:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

go_test(
name = "go_default_test",
srcs = [
"api_test.go",
"weak-subjectivity_test.go",
],
embed = [":go_default_library"],
deps = [
"//api/client:go_default_library",
"//api/client/beacon:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/blocks/testing:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/ssz/detect:go_default_library",
"//network/forks:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)
@@ -2,14 +2,27 @@ package checkpoint

import (
"context"
"fmt"
"path"

"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/api/client"
"github.com/prysmaticlabs/prysm/v5/api/client/beacon"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/encoding/ssz/detect"
"github.com/prysmaticlabs/prysm/v5/io/file"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/sirupsen/logrus"
)

var errCheckpointBlockMismatch = errors.New("mismatch between checkpoint sync state and block")

// APIInitializer manages initializing the beacon node using checkpoint sync, retrieving the checkpoint state and root
// from the remote beacon node api.
type APIInitializer struct {
@@ -37,9 +50,115 @@ func (dl *APIInitializer) Initialize(ctx context.Context, d db.Database) error {
if err != nil && !errors.Is(err, db.ErrNotFound) {
return errors.Wrap(err, "error while checking database for origin root")
}
od, err := beacon.DownloadFinalizedData(ctx, dl.c)
od, err := DownloadFinalizedData(ctx, dl.c)
if err != nil {
return errors.Wrap(err, "Error retrieving checkpoint origin state and block")
}
return d.SaveOrigin(ctx, od.StateBytes(), od.BlockBytes())
}

// OriginData represents the BeaconState and ReadOnlySignedBeaconBlock necessary to start an empty Beacon Node
// using Checkpoint Sync.
type OriginData struct {
sb []byte
bb []byte
st state.BeaconState
b interfaces.ReadOnlySignedBeaconBlock
vu *detect.VersionedUnmarshaler
br [32]byte
sr [32]byte
}

// SaveBlock saves the downloaded block to a unique file in the given path.
// For readability and collision avoidance, the file name includes: type, config name, slot and root
func (o *OriginData) SaveBlock(dir string) (string, error) {
blockPath := path.Join(dir, fname("block", o.vu, o.b.Block().Slot(), o.br))
return blockPath, file.WriteFile(blockPath, o.BlockBytes())
}

// SaveState saves the downloaded state to a unique file in the given path.
// For readability and collision avoidance, the file name includes: type, config name, slot and root
func (o *OriginData) SaveState(dir string) (string, error) {
statePath := path.Join(dir, fname("state", o.vu, o.st.Slot(), o.sr))
return statePath, file.WriteFile(statePath, o.StateBytes())
}

// StateBytes returns the ssz-encoded bytes of the downloaded BeaconState value.
func (o *OriginData) StateBytes() []byte {
return o.sb
}

// BlockBytes returns the ssz-encoded bytes of the downloaded ReadOnlySignedBeaconBlock value.
func (o *OriginData) BlockBytes() []byte {
return o.bb
}

func fname(prefix string, vu *detect.VersionedUnmarshaler, slot primitives.Slot, root [32]byte) string {
return fmt.Sprintf("%s_%s_%s_%d-%#x.ssz", prefix, vu.Config.ConfigName, version.String(vu.Fork), slot, root)
}
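For illustration (all values hypothetical): fname("state", vu, 8000000, root) on mainnet under the Deneb fork produces a name of the form state_mainnet_deneb_8000000-0x3a…f1.ssz, so saved files from different checkpoints, configs, or forks never collide.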
// DownloadFinalizedData downloads the most recently finalized state, and the block most recently applied to that state.
|
||||
// This pair can be used to initialize a new beacon node via checkpoint sync.
|
||||
func DownloadFinalizedData(ctx context.Context, client *beacon.Client) (*OriginData, error) {
|
||||
sb, err := client.GetState(ctx, beacon.IdFinalized)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
vu, err := detect.FromState(sb)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error detecting chain config for finalized state")
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"name": vu.Config.ConfigName,
|
||||
"fork": version.String(vu.Fork),
|
||||
}).Info("Detected supported config in remote finalized state")
|
||||
|
||||
s, err := vu.UnmarshalBeaconState(sb)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error unmarshaling finalized state to correct version")
|
||||
}
|
||||
|
||||
slot := s.LatestBlockHeader().Slot
|
||||
bb, err := client.GetBlock(ctx, beacon.IdFromSlot(slot))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error requesting block by slot = %d", slot)
|
||||
}
|
||||
b, err := vu.UnmarshalBeaconBlock(bb)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to unmarshal block to a supported type using the detected fork schedule")
|
||||
}
|
||||
br, err := b.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error computing hash_tree_root of retrieved block")
|
||||
}
|
||||
bodyRoot, err := b.Block().Body().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error computing hash_tree_root of retrieved block body")
|
||||
}
|
||||
|
||||
sbr := bytesutil.ToBytes32(s.LatestBlockHeader().BodyRoot)
|
||||
if sbr != bodyRoot {
|
||||
return nil, errors.Wrapf(errCheckpointBlockMismatch, "state body root = %#x, block body root = %#x", sbr, bodyRoot)
|
||||
}
|
||||
sr, err := s.HashTreeRoot(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to compute htr for finalized state at slot=%d", s.Slot())
|
||||
}
|
||||
|
||||
log.
|
||||
WithField("blockSlot", b.Block().Slot()).
|
||||
WithField("stateSlot", s.Slot()).
|
||||
WithField("stateRoot", hexutil.Encode(sr[:])).
|
||||
WithField("blockRoot", hexutil.Encode(br[:])).
|
||||
Info("Downloaded checkpoint sync state and block.")
|
||||
return &OriginData{
|
||||
st: s,
|
||||
b: b,
|
||||
sb: sb,
|
||||
bb: bb,
|
||||
vu: vu,
|
||||
br: br,
|
||||
sr: sr,
|
||||
}, nil
|
||||
}
|
||||
|
||||
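A minimal caller sketch for the function above, inside an error-returning function and assuming a reachable beacon API (endpoint and output directory are illustrative):

ctx := context.Background()
c, err := beacon.NewClient("http://localhost:3500")
if err != nil {
	return err
}
od, err := DownloadFinalizedData(ctx, c)
if err != nil {
	return err
}
// persist both halves of the checkpoint origin; database initialization via
// SaveOrigin uses od.StateBytes() and od.BlockBytes() directly instead
statePath, err := od.SaveState("/tmp/checkpoint")
if err != nil {
	return err
}
blockPath, err := od.SaveBlock("/tmp/checkpoint")
if err != nil {
	return err
}
log.Printf("wrote %s and %s", statePath, blockPath)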
127 beacon-chain/sync/checkpoint/api_test.go (new file)
@@ -0,0 +1,127 @@
package checkpoint

import (
"bytes"
"context"
"io"
"net/http"
"testing"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/api/client"
"github.com/prysmaticlabs/prysm/v5/api/client/beacon"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
blocktest "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks/testing"
"github.com/prysmaticlabs/prysm/v5/encoding/ssz/detect"
"github.com/prysmaticlabs/prysm/v5/network/forks"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"github.com/prysmaticlabs/prysm/v5/testing/util"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)

func TestDownloadFinalizedData(t *testing.T) {
ctx := context.Background()
cfg := params.MainnetConfig().Copy()

// avoid the altair zone because genesis tests are easier to set up
epoch := cfg.AltairForkEpoch - 1
// set up checkpoint state, using the epoch that will be computed as the ws checkpoint state based on the head state
slot, err := slots.EpochStart(epoch)
require.NoError(t, err)
st, err := util.NewBeaconState()
require.NoError(t, err)
fork, err := forks.ForkForEpochFromConfig(cfg, epoch)
require.NoError(t, err)
require.NoError(t, st.SetFork(fork))
require.NoError(t, st.SetSlot(slot))

// set up checkpoint block
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
b, err = blocktest.SetBlockParentRoot(b, cfg.ZeroHash)
require.NoError(t, err)
b, err = blocktest.SetBlockSlot(b, slot)
require.NoError(t, err)
b, err = blocktest.SetProposerIndex(b, 0)
require.NoError(t, err)

// set up state header pointing at checkpoint block - this is how the block is downloaded by root
header, err := b.Header()
require.NoError(t, err)
require.NoError(t, st.SetLatestBlockHeader(header.Header))

// order of operations can be confusing here:
// - when computing the state root, make sure block header is complete, EXCEPT the state root should be zero-value
// - before computing the block root (to match the request route), the block should include the state root
// *computed from the state with a header that does not have a state root set yet*
sr, err := st.HashTreeRoot(ctx)
require.NoError(t, err)

b, err = blocktest.SetBlockStateRoot(b, sr)
require.NoError(t, err)
mb, err := b.MarshalSSZ()
require.NoError(t, err)
br, err := b.Block().HashTreeRoot()
require.NoError(t, err)

ms, err := st.MarshalSSZ()
require.NoError(t, err)

trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case beacon.RenderGetStatePath(beacon.IdFinalized):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(ms))
case beacon.RenderGetBlockPath(beacon.IdFromSlot(b.Block().Slot())):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(mb))
default:
res.StatusCode = http.StatusInternalServerError
res.Body = io.NopCloser(bytes.NewBufferString(""))
}

return res, nil
}}
c, err := beacon.NewClient("http://localhost:3500", client.WithRoundTripper(trans))
require.NoError(t, err)
// sanity check before we go through checkpoint
// make sure we can download the state and unmarshal it with the VersionedUnmarshaler
sb, err := c.GetState(ctx, beacon.IdFinalized)
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(sb, ms))
vu, err := detect.FromState(sb)
require.NoError(t, err)
us, err := vu.UnmarshalBeaconState(sb)
require.NoError(t, err)
ushtr, err := us.HashTreeRoot(ctx)
require.NoError(t, err)
require.Equal(t, sr, ushtr)

expected := &OriginData{
sb: ms,
bb: mb,
br: br,
sr: sr,
}
od, err := DownloadFinalizedData(ctx, c)
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(expected.sb, od.sb))
require.Equal(t, true, bytes.Equal(expected.bb, od.bb))
require.Equal(t, expected.br, od.br)
require.Equal(t, expected.sr, od.sr)
}

type testRT struct {
rt func(*http.Request) (*http.Response, error)
}

func (rt *testRT) RoundTrip(req *http.Request) (*http.Response, error) {
if rt.rt != nil {
return rt.rt(req)
}
return nil, errors.New("RoundTripper not implemented")
}

var _ http.RoundTripper = &testRT{}
128 beacon-chain/sync/checkpoint/weak-subjectivity.go (new file)
@@ -0,0 +1,128 @@
package checkpoint

import (
"context"

"github.com/pkg/errors"
base "github.com/prysmaticlabs/prysm/v5/api/client"
"github.com/prysmaticlabs/prysm/v5/api/client/beacon"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/ssz/detect"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)

// ComputeWeakSubjectivityCheckpoint attempts to use the prysm weak_subjectivity api
// to obtain the current weak_subjectivity checkpoint.
// For non-prysm nodes, the same computation will be performed with extra steps,
// using the head state downloaded from the beacon node api.
func ComputeWeakSubjectivityCheckpoint(ctx context.Context, client *beacon.Client) (*beacon.WeakSubjectivityData, error) {
ws, err := client.GetWeakSubjectivity(ctx)
if err != nil {
// a 404/405 is expected if querying an endpoint that doesn't support the weak subjectivity checkpoint api
if !errors.Is(err, base.ErrNotOK) {
return nil, errors.Wrap(err, "unexpected API response for prysm-only weak subjectivity checkpoint API")
}
// fall back to vanilla Beacon Node API method
return computeBackwardsCompatible(ctx, client)
}
log.Printf("server weak subjectivity checkpoint response - epoch=%d, block_root=%#x, state_root=%#x", ws.Epoch, ws.BlockRoot, ws.StateRoot)
return ws, nil
}

// for clients that do not support the weak_subjectivity api method we gather the necessary data for a checkpoint sync by:
// - inspecting the remote server's head state and computing the weak subjectivity epoch locally
// - requesting the state at the first slot of the epoch
// - using hash_tree_root(state.latest_block_header) to compute the block the state integrates
// - requesting that block by its root
func computeBackwardsCompatible(ctx context.Context, client *beacon.Client) (*beacon.WeakSubjectivityData, error) {
log.Print("falling back to generic checkpoint derivation, weak_subjectivity API not supported by server")
epoch, err := getWeakSubjectivityEpochFromHead(ctx, client)
if err != nil {
return nil, errors.Wrap(err, "error computing weak subjectivity epoch via head state inspection")
}

// use first slot of the epoch for the state slot
slot, err := slots.EpochStart(epoch)
if err != nil {
return nil, errors.Wrapf(err, "error computing first slot of epoch=%d", epoch)
}

log.Printf("requesting checkpoint state at slot %d", slot)
// get the state at the first slot of the epoch
sb, err := client.GetState(ctx, beacon.IdFromSlot(slot))
if err != nil {
return nil, errors.Wrapf(err, "failed to request state by slot from api, slot=%d", slot)
}

// ConfigFork is used to unmarshal the BeaconState so we can read the block root in latest_block_header
vu, err := detect.FromState(sb)
if err != nil {
return nil, errors.Wrap(err, "error detecting chain config for beacon state")
}
log.Printf("detected supported config in checkpoint state, name=%s, fork=%s", vu.Config.ConfigName, version.String(vu.Fork))

s, err := vu.UnmarshalBeaconState(sb)
if err != nil {
return nil, errors.Wrap(err, "error using detected config fork to unmarshal state bytes")
}

// compute state and block roots
sr, err := s.HashTreeRoot(ctx)
if err != nil {
return nil, errors.Wrap(err, "error computing hash_tree_root of state")
}

h := s.LatestBlockHeader()
h.StateRoot = sr[:]
br, err := h.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error while computing block root using state data")
}

bb, err := client.GetBlock(ctx, beacon.IdFromRoot(br))
if err != nil {
return nil, errors.Wrapf(err, "error requesting block by root = %#x", br)
}
b, err := vu.UnmarshalBeaconBlock(bb)
if err != nil {
return nil, errors.Wrap(err, "unable to unmarshal block to a supported type using the detected fork schedule")
}
br, err = b.Block().HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "error computing hash_tree_root for block obtained via root")
}

return &beacon.WeakSubjectivityData{
Epoch: epoch,
BlockRoot: br,
StateRoot: sr,
}, nil
}

// this method downloads the head state, which can be used to find the correct chain config
// and use prysm's helper methods to compute the latest weak subjectivity epoch.
func getWeakSubjectivityEpochFromHead(ctx context.Context, client *beacon.Client) (primitives.Epoch, error) {
headBytes, err := client.GetState(ctx, beacon.IdHead)
if err != nil {
return 0, err
}
vu, err := detect.FromState(headBytes)
if err != nil {
return 0, errors.Wrap(err, "error detecting chain config for beacon state")
}
log.Printf("detected supported config in remote head state, name=%s, fork=%s", vu.Config.ConfigName, version.String(vu.Fork))
headState, err := vu.UnmarshalBeaconState(headBytes)
if err != nil {
return 0, errors.Wrap(err, "error unmarshaling state to correct version")
}

epoch, err := helpers.LatestWeakSubjectivityEpoch(ctx, headState, vu.Config)
if err != nil {
return 0, errors.Wrap(err, "error computing the weak subjectivity epoch from head state")
}

log.Printf("(computed client-side) weak subjectivity epoch = %d", epoch)
return epoch, nil
}
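A minimal caller sketch for the fallback-aware entry point above, assuming c is a *beacon.Client as constructed in the tests that follow:

ws, err := ComputeWeakSubjectivityCheckpoint(ctx, c)
if err != nil {
	return err
}
// ws identifies the checkpoint whether or not the server supported the
// prysm-only weak_subjectivity endpoint
log.Printf("weak subjectivity checkpoint: epoch=%d block=%#x state=%#x", ws.Epoch, ws.BlockRoot, ws.StateRoot)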
@@ -1,4 +1,4 @@
package beacon
package checkpoint

import (
"bytes"
@@ -11,6 +11,7 @@ import (

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/api/client"
"github.com/prysmaticlabs/prysm/v5/api/client/beacon"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
@@ -25,19 +26,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/time/slots"
)

type testRT struct {
rt func(*http.Request) (*http.Response, error)
}

func (rt *testRT) RoundTrip(req *http.Request) (*http.Response, error) {
if rt.rt != nil {
return rt.rt(req)
}
return nil, errors.New("RoundTripper not implemented")
}

var _ http.RoundTripper = &testRT{}

func marshalToEnvelope(val interface{}) ([]byte, error) {
raw, err := json.Marshal(val)
if err != nil {
@@ -63,35 +51,6 @@ func TestMarshalToEnvelope(t *testing.T) {
require.Equal(t, expected, string(encoded))
}

func TestFallbackVersionCheck(t *testing.T) {
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case getNodeVersionPath:
res.StatusCode = http.StatusOK
b := bytes.NewBuffer(nil)
d := struct {
Version string `json:"version"`
}{
Version: "Prysm/v2.0.5 (linux amd64)",
}
encoded, err := marshalToEnvelope(d)
require.NoError(t, err)
b.Write(encoded)
res.Body = io.NopCloser(b)
case getWeakSubjectivityPath:
res.StatusCode = http.StatusNotFound
}
return res, nil
}}

c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
require.NoError(t, err)
ctx := context.Background()
_, err = ComputeWeakSubjectivityCheckpoint(ctx, c)
require.ErrorIs(t, err, errUnsupportedPrysmCheckpointVersion)
}

func TestFname(t *testing.T) {
vu := &detect.VersionedUnmarshaler{
Config: params.MainnetConfig(),
@@ -160,7 +119,7 @@ func TestDownloadWeakSubjectivityCheckpoint(t *testing.T) {

wsSerialized, err := wst.MarshalSSZ()
require.NoError(t, err)
expectedWSD := WeakSubjectivityData{
expectedWSD := beacon.WeakSubjectivityData{
BlockRoot: bRoot,
StateRoot: wRoot,
Epoch: epoch,
@@ -169,7 +128,7 @@ func TestDownloadWeakSubjectivityCheckpoint(t *testing.T) {
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case getWeakSubjectivityPath:
case beacon.GetWeakSubjectivityPath:
res.StatusCode = http.StatusOK
cp := struct {
Epoch string `json:"epoch"`
@@ -188,10 +147,10 @@ func TestDownloadWeakSubjectivityCheckpoint(t *testing.T) {
rb, err := marshalToEnvelope(wsr)
require.NoError(t, err)
res.Body = io.NopCloser(bytes.NewBuffer(rb))
case renderGetStatePath(IdFromSlot(wSlot)):
case beacon.RenderGetStatePath(beacon.IdFromSlot(wSlot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(wsSerialized))
case renderGetBlockPath(IdFromRoot(bRoot)):
case beacon.RenderGetBlockPath(beacon.IdFromRoot(bRoot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serBlock))
}
@@ -199,7 +158,7 @@ func TestDownloadWeakSubjectivityCheckpoint(t *testing.T) {
return res, nil
}}

c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
c, err := beacon.NewClient("http://localhost:3500", client.WithRoundTripper(trans))
require.NoError(t, err)

wsd, err := ComputeWeakSubjectivityCheckpoint(ctx, c)
@@ -263,7 +222,7 @@ func TestDownloadBackwardsCompatibleCombined(t *testing.T) {
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case getNodeVersionPath:
case beacon.GetNodeVersionPath:
res.StatusCode = http.StatusOK
b := bytes.NewBuffer(nil)
d := struct {
@@ -275,15 +234,15 @@ func TestDownloadBackwardsCompatibleCombined(t *testing.T) {
require.NoError(t, err)
b.Write(encoded)
res.Body = io.NopCloser(b)
case getWeakSubjectivityPath:
case beacon.GetWeakSubjectivityPath:
res.StatusCode = http.StatusNotFound
case renderGetStatePath(IdHead):
case beacon.RenderGetStatePath(beacon.IdHead):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serialized))
case renderGetStatePath(IdFromSlot(wSlot)):
case beacon.RenderGetStatePath(beacon.IdFromSlot(wSlot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(wsSerialized))
case renderGetBlockPath(IdFromRoot(bRoot)):
case beacon.RenderGetBlockPath(beacon.IdFromRoot(bRoot)):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serBlock))
}
@@ -291,7 +250,7 @@ func TestDownloadBackwardsCompatibleCombined(t *testing.T) {
return res, nil
}}

c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
c, err := beacon.NewClient("http://localhost:3500", client.WithRoundTripper(trans))
require.NoError(t, err)

wsPub, err := ComputeWeakSubjectivityCheckpoint(ctx, c)
@@ -308,13 +267,13 @@ func TestGetWeakSubjectivityEpochFromHead(t *testing.T) {
require.NoError(t, err)
trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
if req.URL.Path == renderGetStatePath(IdHead) {
if req.URL.Path == beacon.RenderGetStatePath(beacon.IdHead) {
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(serialized))
}
return res, nil
}}
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
c, err := beacon.NewClient("http://localhost:3500", client.WithRoundTripper(trans))
require.NoError(t, err)
actualEpoch, err := getWeakSubjectivityEpochFromHead(context.Background(), c)
require.NoError(t, err)
@@ -386,96 +345,3 @@ func populateValidators(cfg *params.BeaconChainConfig, st state.BeaconState, val
}
return st.SetBalances(balances)
}

func TestDownloadFinalizedData(t *testing.T) {
ctx := context.Background()
cfg := params.MainnetConfig().Copy()

// avoid the altair zone because genesis tests are easier to set up
epoch := cfg.AltairForkEpoch - 1
// set up checkpoint state, using the epoch that will be computed as the ws checkpoint state based on the head state
slot, err := slots.EpochStart(epoch)
require.NoError(t, err)
st, err := util.NewBeaconState()
require.NoError(t, err)
fork, err := forkForEpoch(cfg, epoch)
require.NoError(t, err)
require.NoError(t, st.SetFork(fork))
require.NoError(t, st.SetSlot(slot))

// set up checkpoint block
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
b, err = blocktest.SetBlockParentRoot(b, cfg.ZeroHash)
require.NoError(t, err)
b, err = blocktest.SetBlockSlot(b, slot)
require.NoError(t, err)
b, err = blocktest.SetProposerIndex(b, 0)
require.NoError(t, err)

// set up state header pointing at checkpoint block - this is how the block is downloaded by root
header, err := b.Header()
require.NoError(t, err)
require.NoError(t, st.SetLatestBlockHeader(header.Header))

// order of operations can be confusing here:
// - when computing the state root, make sure block header is complete, EXCEPT the state root should be zero-value
// - before computing the block root (to match the request route), the block should include the state root
// *computed from the state with a header that does not have a state root set yet*
sr, err := st.HashTreeRoot(ctx)
require.NoError(t, err)

b, err = blocktest.SetBlockStateRoot(b, sr)
require.NoError(t, err)
mb, err := b.MarshalSSZ()
require.NoError(t, err)
br, err := b.Block().HashTreeRoot()
require.NoError(t, err)

ms, err := st.MarshalSSZ()
require.NoError(t, err)

trans := &testRT{rt: func(req *http.Request) (*http.Response, error) {
res := &http.Response{Request: req}
switch req.URL.Path {
case renderGetStatePath(IdFinalized):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(ms))
case renderGetBlockPath(IdFromSlot(b.Block().Slot())):
res.StatusCode = http.StatusOK
res.Body = io.NopCloser(bytes.NewBuffer(mb))
default:
res.StatusCode = http.StatusInternalServerError
res.Body = io.NopCloser(bytes.NewBufferString(""))
}

return res, nil
}}
c, err := NewClient("http://localhost:3500", client.WithRoundTripper(trans))
require.NoError(t, err)
// sanity check before we go through checkpoint
// make sure we can download the state and unmarshal it with the VersionedUnmarshaler
sb, err := c.GetState(ctx, IdFinalized)
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(sb, ms))
vu, err := detect.FromState(sb)
require.NoError(t, err)
us, err := vu.UnmarshalBeaconState(sb)
require.NoError(t, err)
ushtr, err := us.HashTreeRoot(ctx)
require.NoError(t, err)
require.Equal(t, sr, ushtr)

expected := &OriginData{
sb: ms,
bb: mb,
br: br,
sr: sr,
}
od, err := DownloadFinalizedData(ctx, c)
require.NoError(t, err)
require.Equal(t, true, bytes.Equal(expected.sb, od.sb))
require.Equal(t, true, bytes.Equal(expected.bb, od.bb))
require.Equal(t, expected.br, od.br)
require.Equal(t, expected.sr, od.sr)
}
@@ -11,7 +11,6 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/sync"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
@@ -70,14 +69,6 @@ func (s *Service) startBlocksQueue(ctx context.Context, highestSlot primitives.S
return nil, errors.Wrapf(err, "unable to initialize context version map using genesis validator root = %#x", vr)
}

summarizer, err := s.cfg.BlobStorage.WaitForSummarizer(ctx)
if err != nil {
// The summarizer is an optional optimization, we can continue without, only stop if there is a different error.
if !errors.Is(err, filesystem.ErrBlobStorageSummarizerUnavailable) {
return nil, err
}
summarizer = nil // This should already be nil, but we'll set it just to be safe.
}
cfg := &blocksQueueConfig{
p2p: s.cfg.P2P,
db: s.cfg.DB,
@@ -86,7 +77,7 @@ func (s *Service) startBlocksQueue(ctx context.Context, highestSlot primitives.S
ctxMap: ctxMap,
highestExpectedSlot: highestSlot,
mode: mode,
bs: summarizer,
bs: s.cfg.BlobStorage,
}
queue := newBlocksQueue(ctx, cfg)
if err := queue.start(); err != nil {
@@ -292,13 +292,10 @@ func missingBlobRequest(blk blocks.ROBlock, store *filesystem.BlobStorage) (p2pt
if len(cmts) == 0 {
return nil, nil
}
onDisk, err := store.Indices(r, blk.Block().Slot())
if err != nil {
return nil, errors.Wrapf(err, "error checking existing blobs for checkpoint sync block root %#x", r)
}
onDisk := store.Summary(r)
req := make(p2ptypes.BlobSidecarsByRootReq, 0, len(cmts))
for i := range cmts {
if onDisk[i] {
if onDisk.HasIndex(uint64(i)) {
continue
}
req = append(req, &eth.BlobIdentifier{BlockRoot: r[:], Index: uint64(i)})
@@ -464,7 +464,7 @@ func TestMissingBlobRequest(t *testing.T) {
setup: func(t *testing.T) (blocks.ROBlock, *filesystem.BlobStorage) {
bk, _ := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, 2)
bm, fs := filesystem.NewEphemeralBlobStorageWithMocker(t)
require.NoError(t, bm.CreateFakeIndices(bk.Root(), 1))
require.NoError(t, bm.CreateFakeIndices(bk.Root(), bk.Block().Slot(), 1))
return bk, fs
},
nReq: 1,
@@ -474,7 +474,7 @@ func TestMissingBlobRequest(t *testing.T) {
setup: func(t *testing.T) (blocks.ROBlock, *filesystem.BlobStorage) {
bk, _ := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 0, 2)
bm, fs := filesystem.NewEphemeralBlobStorageWithMocker(t)
require.NoError(t, bm.CreateFakeIndices(bk.Root(), 0, 1))
require.NoError(t, bm.CreateFakeIndices(bk.Root(), bk.Block().Slot(), 0, 1))
return bk, fs
},
nReq: 0,
@@ -9,6 +9,8 @@ import (
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/prysmaticlabs/prysm/v5/async"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/operation"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/features"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
@@ -202,6 +204,7 @@ func (s *Service) processUnaggregated(ctx context.Context, att ethpb.Att) {
		log.WithError(err).Debug("Could not retrieve active validator count")
		return
	}

	// Broadcast the signed attestation again once the node is able to process it.
	var attToBroadcast ethpb.Att
	if singleAtt != nil {
@@ -212,6 +215,24 @@ func (s *Service) processUnaggregated(ctx context.Context, att ethpb.Att) {
	if err := s.cfg.p2p.BroadcastAttestation(ctx, helpers.ComputeSubnetForAttestation(valCount, attToBroadcast), attToBroadcast); err != nil {
		log.WithError(err).Debug("Could not broadcast")
	}

	// Broadcast the unaggregated attestation on a feed to notify other services in the beacon node
	// of a received unaggregated attestation.
	if singleAtt != nil {
		s.cfg.attestationNotifier.OperationFeed().Send(&feed.Event{
			Type: operation.SingleAttReceived,
			Data: &operation.SingleAttReceivedData{
				Attestation: singleAtt,
			},
		})
	} else {
		s.cfg.attestationNotifier.OperationFeed().Send(&feed.Event{
			Type: operation.UnaggregatedAttReceived,
			Data: &operation.UnAggregatedAttReceivedData{
				Attestation: att,
			},
		})
	}
		}
	}
}

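With this change the operation feed distinguishes Electra-style single attestations from pre-Electra unaggregated ones. A minimal consumer sketch, assuming the operation.Notifier interface and the feed/event types used elsewhere in this diff; the package name and channel size are illustrative:

package attevents

import (
    "context"

    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/operation"
)

// consumeAttEvents drains attestation events until ctx is cancelled,
// branching on the two unaggregated event types used in this diff.
func consumeAttEvents(ctx context.Context, notifier operation.Notifier) {
    ch := make(chan *feed.Event, 16)
    sub := notifier.OperationFeed().Subscribe(ch)
    defer sub.Unsubscribe()
    for {
        select {
        case ev := <-ch:
            switch ev.Type {
            case operation.SingleAttReceived:
                // Electra single attestation; ev.Data is *operation.SingleAttReceivedData.
            case operation.UnaggregatedAttReceived:
                // Pre-Electra unaggregated attestation; ev.Data is *operation.UnAggregatedAttReceivedData.
            }
        case <-ctx.Done():
            return
        }
    }
}
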
@@ -2,6 +2,7 @@ package sync

import (
	"context"
	"sync"
	"testing"
	"time"

@@ -10,6 +11,8 @@ import (
	"github.com/prysmaticlabs/go-bitfield"
	"github.com/prysmaticlabs/prysm/v5/async/abool"
	mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/operation"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
	dbtest "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
@@ -105,15 +108,22 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAtt(t *testing.T) {
			Epoch: 0,
		},
	}

	done := make(chan *feed.Event, 1)
	defer close(done)
	opn := mock.NewEventFeedWrapper()
	sub := opn.Subscribe(done)
	defer sub.Unsubscribe()
	ctx, cancel := context.WithCancel(context.Background())
	r := &Service{
		ctx: ctx,
		cfg: &config{
			p2p:      p1,
			beaconDB: db,
			chain:    chain,
			clock:    startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
			attPool:  attestations.NewPool(),
			p2p:                 p1,
			beaconDB:            db,
			chain:               chain,
			clock:               startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
			attPool:             attestations.NewPool(),
			attestationNotifier: &mock.SimpleNotifier{Feed: opn},
		},
		blkRootToPendingAtts:             make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
		seenUnAggregatedAttestationCache: lruwrpr.New(10),
@@ -128,12 +138,28 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAtt(t *testing.T) {
	r.blkRootToPendingAtts[root] = []ethpb.SignedAggregateAttAndProof{&ethpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof}}
	require.NoError(t, r.processPendingAtts(context.Background()))

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			select {
			case received := <-done:
				// make sure an unaggregated att event was sent
				require.Equal(t, operation.UnaggregatedAttReceived, int(received.Type))
				return
			case <-ctx.Done():
				return
			}
		}
	}()
	atts, err := r.cfg.attPool.UnaggregatedAttestations()
	require.NoError(t, err)
	assert.Equal(t, 1, len(atts), "Did not save unaggregated att")
	assert.DeepEqual(t, att, atts[0], "Incorrect saved att")
	assert.Equal(t, 0, len(r.cfg.attPool.AggregatedAttestations()), "Did save aggregated att")
	require.LogsContain(t, hook, "Verified and saved pending attestations to pool")
	wg.Wait()
	cancel()
}

@@ -179,15 +205,21 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAttElectra(t *testing.T) {
			Epoch: 0,
		},
	}
	done := make(chan *feed.Event, 1)
	defer close(done)
	opn := mock.NewEventFeedWrapper()
	sub := opn.Subscribe(done)
	defer sub.Unsubscribe()
	ctx, cancel := context.WithCancel(context.Background())
	r := &Service{
		ctx: ctx,
		cfg: &config{
			p2p:      p1,
			beaconDB: db,
			chain:    chain,
			clock:    startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
			attPool:  attestations.NewPool(),
			p2p:                 p1,
			beaconDB:            db,
			chain:               chain,
			clock:               startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
			attPool:             attestations.NewPool(),
			attestationNotifier: &mock.SimpleNotifier{Feed: opn},
		},
		blkRootToPendingAtts:             make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
		seenUnAggregatedAttestationCache: lruwrpr.New(10),
@@ -201,13 +233,28 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAttElectra(t *testing.T) {

	r.blkRootToPendingAtts[root] = []ethpb.SignedAggregateAttAndProof{&ethpb.SignedAggregateAttestationAndProofSingle{Message: aggregateAndProof}}
	require.NoError(t, r.processPendingAtts(context.Background()))

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			select {
			case received := <-done:
				// make sure a single att event was sent
				require.Equal(t, operation.SingleAttReceived, int(received.Type))
				return
			case <-ctx.Done():
				return
			}
		}
	}()
	atts, err := r.cfg.attPool.UnaggregatedAttestations()
	require.NoError(t, err)
	require.Equal(t, 1, len(atts), "Did not save unaggregated att")
	assert.DeepEqual(t, att.ToAttestationElectra(committee), atts[0], "Incorrect saved att")
	assert.Equal(t, 0, len(r.cfg.attPool.AggregatedAttestations()), "Did save aggregated att")
	require.LogsContain(t, hook, "Verified and saved pending attestations to pool")
	wg.Wait()
	cancel()
}

@@ -304,11 +351,12 @@ func TestProcessPendingAtts_NoBroadcastWithBadSignature(t *testing.T) {
	r = &Service{
		ctx: ctx,
		cfg: &config{
			p2p:      p1,
			beaconDB: db,
			chain:    chain2,
			clock:    startup.NewClock(chain2.Genesis, chain2.ValidatorsRoot),
			attPool:  attestations.NewPool(),
			p2p:                 p1,
			beaconDB:            db,
			chain:               chain2,
			clock:               startup.NewClock(chain2.Genesis, chain2.ValidatorsRoot),
			attPool:             attestations.NewPool(),
			attestationNotifier: &mock.MockOperationNotifier{},
		},
		blkRootToPendingAtts:             make(map[[32]byte][]ethpb.SignedAggregateAttAndProof),
		seenUnAggregatedAttestationCache: lruwrpr.New(10),

@@ -7,6 +7,7 @@ import (
	libp2pcore "github.com/libp2p/go-libp2p/core"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/execution"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/sync/verify"
@@ -14,7 +15,6 @@ import (
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v5/runtime/version"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
@@ -181,29 +181,26 @@ func (s *Service) pendingBlobsRequestForBlock(root [32]byte, b interfaces.ReadOn
	if len(cc) == 0 {
		return nil, nil
	}
	return s.constructPendingBlobsRequest(root, len(cc), b.Block().Slot())
	return s.constructPendingBlobsRequest(root, len(cc))
}

// constructPendingBlobsRequest creates a request for BlobSidecars by root, considering blobs already in DB.
func (s *Service) constructPendingBlobsRequest(root [32]byte, commitments int, slot primitives.Slot) (types.BlobSidecarsByRootReq, error) {
func (s *Service) constructPendingBlobsRequest(root [32]byte, commitments int) (types.BlobSidecarsByRootReq, error) {
	if commitments == 0 {
		return nil, nil
	}
	stored, err := s.cfg.blobStorage.Indices(root, slot)
	if err != nil {
		return nil, err
	}
	summary := s.cfg.blobStorage.Summary(root)

	return requestsForMissingIndices(stored, commitments, root), nil
	return requestsForMissingIndices(summary, commitments, root), nil
}

// requestsForMissingIndices constructs a slice of BlobIdentifiers that are missing from
// local storage, based on a summary of which indices are locally stored and the highest
// expected index.
func requestsForMissingIndices(storedIndices []bool, commitments int, root [32]byte) []*eth.BlobIdentifier {
func requestsForMissingIndices(stored filesystem.BlobStorageSummary, commitments int, root [32]byte) []*eth.BlobIdentifier {
	var ids []*eth.BlobIdentifier
	for i := uint64(0); i < uint64(commitments); i++ {
		if !storedIndices[i] {
		if !stored.HasIndex(i) {
			ids = append(ids, &eth.BlobIdentifier{Index: i, BlockRoot: root[:]})
		}
	}

@@ -423,7 +423,7 @@ func TestConstructPendingBlobsRequest(t *testing.T) {
	// No unknown indices.
	root := [32]byte{1}
	count := 3
	actual, err := s.constructPendingBlobsRequest(root, count, 100)
	actual, err := s.constructPendingBlobsRequest(root, count)
	require.NoError(t, err)
	require.Equal(t, 3, len(actual))
	for i, id := range actual {
@@ -444,8 +444,7 @@ func TestConstructPendingBlobsRequest(t *testing.T) {
		util.GenerateTestDenebBlobSidecar(t, root, header, 0, bytesutil.PadTo([]byte{}, 48), make([][]byte, 0)),
		util.GenerateTestDenebBlobSidecar(t, root, header, 2, bytesutil.PadTo([]byte{}, 48), make([][]byte, 0)),
	}
	vscs, err := verification.BlobSidecarSliceNoop(blobSidecars)
	require.NoError(t, err)
	vscs := verification.FakeVerifySliceForTest(t, blobSidecars)
	for i := range vscs {
		require.NoError(t, bs.Save(vscs[i]))
	}
@@ -453,15 +452,13 @@ func TestConstructPendingBlobsRequest(t *testing.T) {
	expected := []*ethpb.BlobIdentifier{
		{Index: 1, BlockRoot: root[:]},
	}
	actual, err = s.constructPendingBlobsRequest(root, count, 100)
	actual, err = s.constructPendingBlobsRequest(root, count)
	require.NoError(t, err)
	require.Equal(t, expected[0].Index, actual[0].Index)
	require.DeepEqual(t, expected[0].BlockRoot, actual[0].BlockRoot)
}

func TestFilterUnknownIndices(t *testing.T) {
	haveIndices := []bool{true, true, true, false, false, false}

	blockRoot := [32]byte{}
	count := 5

@@ -470,7 +467,11 @@ func TestFilterUnknownIndices(t *testing.T) {
		{Index: 4, BlockRoot: blockRoot[:]},
	}

	actual := requestsForMissingIndices(haveIndices, count, blockRoot)
	sum, err := filesystem.NewBlobStorageSummary(
		params.BeaconConfig().DenebForkEpoch,
		[]bool{true, true, true, false, false, false})
	require.NoError(t, err)
	actual := requestsForMissingIndices(sum, count, blockRoot)
	require.Equal(t, len(expected), len(actual))
	require.Equal(t, expected[0].Index, actual[0].Index)
	require.DeepEqual(t, actual[0].BlockRoot, expected[0].BlockRoot)

@@ -27,14 +27,10 @@ func (s *Service) streamBlobBatch(ctx context.Context, batch blockBatch, wQuota
	defer span.End()
	for _, b := range batch.canonical() {
		root := b.Root()
		idxs, err := s.cfg.blobStorage.Indices(b.Root(), b.Block().Slot())
		if err != nil {
			s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
			return wQuota, errors.Wrapf(err, "could not retrieve sidecars for block root %#x", root)
		}
		for i, l := uint64(0), uint64(len(idxs)); i < l; i++ {
		idxs := s.cfg.blobStorage.Summary(root)
		for i := range idxs.MaxBlobsForEpoch() {
			// index not available, skip
			if !idxs[i] {
			if !idxs.HasIndex(i) {
				continue
			}
			// We won't check for file not found since the summary should normally prevent that from happening.
@@ -148,9 +144,14 @@ func BlobRPCMinValidSlot(current primitives.Slot) (primitives.Slot, error) {
	return slots.EpochStart(minStart)
}

// blobBatchLimit derives the ideal block batch size for serving blobs to a remote peer.
// We first compute the maximum number of blobs that may be served to the peer per period,
// then use the maximum blobs per block to determine the block batch size that satisfies
// this limit.
func blobBatchLimit(slot primitives.Slot) uint64 {
	maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
	return uint64(flags.Get().BlockBatchLimit / maxBlobsPerBlock)
	maxPossibleBlobs := flags.Get().BlobBatchLimit * flags.Get().BlobBatchLimitBurstFactor
	return uint64(maxPossibleBlobs / maxBlobsPerBlock)
}

func validateBlobsByRange(r *pb.BlobSidecarsByRangeRequest, current primitives.Slot) (rangeParams, error) {

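To make the arithmetic concrete, a sketch using the flag values from this diff's TestMain (BlobBatchLimit=32, BlobBatchLimitBurstFactor=2) and a Deneb maximum of 6 blobs per block; the constants are illustrative, the real values come from flags and the active config:

package main

import "fmt"

func main() {
    const blobBatchLimit = 32  // flags.Get().BlobBatchLimit in the test setup below
    const burstFactor = 2      // flags.Get().BlobBatchLimitBurstFactor
    const maxBlobsPerBlock = 6 // Deneb mainnet maximum blobs per block

    maxPossibleBlobs := blobBatchLimit * burstFactor // 64 blobs may be served per period
    fmt.Println(maxPossibleBlobs / maxBlobsPerBlock) // 10 blocks per batch
}
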
@@ -169,7 +169,7 @@ func SendBlobsByRangeRequest(ctx context.Context, tor blockchain.TemporalOracle,
	}
	defer closeStream(stream, log)

	maxBlobsPerBlock := uint64(params.BeaconConfig().MaxBlobsPerBlock(req.StartSlot))
	maxBlobsPerBlock := uint64(params.BeaconConfig().MaxBlobsPerBlock(req.StartSlot + primitives.Slot(req.Count)))
	max := params.BeaconConfig().MaxRequestBlobSidecars
	if slots.ToEpoch(req.StartSlot) >= params.BeaconConfig().ElectraForkEpoch {
		max = params.BeaconConfig().MaxRequestBlobSidecarsElectra

@@ -23,6 +23,7 @@ import (
	"github.com/prysmaticlabs/prysm/v5/testing/assert"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
	"github.com/prysmaticlabs/prysm/v5/testing/util"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
)

func TestSendRequest_SendBeaconBlocksByRangeRequest(t *testing.T) {
@@ -691,3 +692,122 @@ func TestSeqBlobValid(t *testing.T) {
		})
	}
}

func TestSendBlobsByRangeRequest(t *testing.T) {
	topic := fmt.Sprintf("%s/ssz_snappy", p2p.RPCBlobSidecarsByRangeTopicV1)
	ctx := context.Background()

	t.Run("single blob - Deneb", func(t *testing.T) {
		// Setup genesis such that we are currently in deneb.
		s := uint64(slots.UnsafeEpochStart(params.BeaconConfig().DenebForkEpoch)) * params.BeaconConfig().SecondsPerSlot
		clock := startup.NewClock(time.Now().Add(-time.Second*time.Duration(s)), [32]byte{})
		ctxByte, err := ContextByteVersionsForValRoot(clock.GenesisValidatorsRoot())
		require.NoError(t, err)
		// Setup peers
		p1 := p2ptest.NewTestP2P(t)
		p2 := p2ptest.NewTestP2P(t)
		p1.Connect(p2)
		// Set current slot to a deneb slot.
		slot := slots.UnsafeEpochStart(params.BeaconConfig().DenebForkEpoch + 1)
		// Create a simple handler that will return a valid response.
		p2.SetStreamHandler(topic, func(stream network.Stream) {
			defer func() {
				assert.NoError(t, stream.Close())
			}()

			req := &ethpb.BlobSidecarsByRangeRequest{}
			assert.NoError(t, p2.Encoding().DecodeWithMaxLength(stream, req))
			assert.Equal(t, slot, req.StartSlot)
			assert.Equal(t, uint64(1), req.Count)

			// Create a sequential set of blobs with the appropriate header information.
			var prevRoot [32]byte
			for i := req.StartSlot; i < req.StartSlot+primitives.Slot(req.Count); i++ {
				b := util.HydrateBlobSidecar(&ethpb.BlobSidecar{})
				b.SignedBlockHeader.Header.Slot = i
				b.SignedBlockHeader.Header.ParentRoot = prevRoot[:]
				ro, err := blocks.NewROBlob(b)
				require.NoError(t, err)
				vro := blocks.NewVerifiedROBlob(ro)
				prevRoot = vro.BlockRoot()
				assert.NoError(t, WriteBlobSidecarChunk(stream, clock, p2.Encoding(), vro))
			}
		})
		req := &ethpb.BlobSidecarsByRangeRequest{
			StartSlot: slot,
			Count:     1,
		}

		blobs, err := SendBlobsByRangeRequest(ctx, clock, p1, p2.PeerID(), ctxByte, req)
		assert.NoError(t, err)
		assert.Equal(t, 1, len(blobs))
	})

	t.Run("Deneb - Electra epoch boundary crossing", func(t *testing.T) {
		cfg := params.BeaconConfig()
		cfg.ElectraForkEpoch = cfg.DenebForkEpoch + 1
		undo, err := params.SetActiveWithUndo(cfg)
		require.NoError(t, err)
		defer func() {
			require.NoError(t, undo())
		}()
		// Setup genesis such that we are currently in deneb.
		s := uint64(slots.UnsafeEpochStart(params.BeaconConfig().DenebForkEpoch)) * params.BeaconConfig().SecondsPerSlot
		clock := startup.NewClock(time.Now().Add(-time.Second*time.Duration(s)), [32]byte{})
		ctxByte, err := ContextByteVersionsForValRoot(clock.GenesisValidatorsRoot())
		require.NoError(t, err)
		// Setup peers
		p1 := p2ptest.NewTestP2P(t)
		p2 := p2ptest.NewTestP2P(t)
		p1.Connect(p2)
		// Set current slot to the first slot of the last deneb epoch.
		slot := slots.UnsafeEpochStart(params.BeaconConfig().DenebForkEpoch)
		// Create a simple handler that will return a valid response.
		p2.SetStreamHandler(topic, func(stream network.Stream) {
			defer func() {
				assert.NoError(t, stream.Close())
			}()

			req := &ethpb.BlobSidecarsByRangeRequest{}
			assert.NoError(t, p2.Encoding().DecodeWithMaxLength(stream, req))
			assert.Equal(t, slot, req.StartSlot)
			assert.Equal(t, uint64(params.BeaconConfig().SlotsPerEpoch)*3, req.Count)

			// Create a sequential set of blobs with the appropriate header information.
			var prevRoot [32]byte
			for i := req.StartSlot; i < req.StartSlot+primitives.Slot(req.Count); i++ {
				maxBlobsForSlot := cfg.MaxBlobsPerBlock(i)
				parentRoot := prevRoot
				header := util.HydrateSignedBeaconHeader(&ethpb.SignedBeaconBlockHeader{})
				header.Header.Slot = i
				header.Header.ParentRoot = parentRoot[:]
				bRoot, err := header.Header.HashTreeRoot()
				require.NoError(t, err)
				prevRoot = bRoot
				// Send the maximum possible blobs per slot.
				for j := 0; j < maxBlobsForSlot; j++ {
					b := util.HydrateBlobSidecar(&ethpb.BlobSidecar{})
					b.SignedBlockHeader = header
					b.Index = uint64(j)
					ro, err := blocks.NewROBlob(b)
					require.NoError(t, err)
					vro := blocks.NewVerifiedROBlob(ro)
					assert.NoError(t, WriteBlobSidecarChunk(stream, clock, p2.Encoding(), vro))
				}
			}
		})
		req := &ethpb.BlobSidecarsByRangeRequest{
			StartSlot: slot,
			Count:     uint64(params.BeaconConfig().SlotsPerEpoch) * 3,
		}
		maxDenebBlobs := cfg.MaxBlobsPerBlockAtEpoch(cfg.DenebForkEpoch)
		maxElectraBlobs := cfg.MaxBlobsPerBlockAtEpoch(cfg.ElectraForkEpoch)
		totalDenebBlobs := primitives.Slot(maxDenebBlobs) * params.BeaconConfig().SlotsPerEpoch
		totalElectraBlobs := primitives.Slot(maxElectraBlobs) * 2 * params.BeaconConfig().SlotsPerEpoch
		totalExpectedBlobs := totalDenebBlobs + totalElectraBlobs

		blobs, err := SendBlobsByRangeRequest(ctx, clock, p1, p2.PeerID(), ctxByte, req)
		assert.NoError(t, err)
		assert.Equal(t, int(totalExpectedBlobs), len(blobs))
	})
}

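For a sense of scale in the boundary-crossing case: assuming mainnet parameters (SlotsPerEpoch=32, a Deneb maximum of 6 blobs per block and an Electra maximum of 9), one Deneb epoch plus two Electra epochs yields the expected total below; the test itself reads the maxima from the active config, so these constants are illustrative:

package main

import "fmt"

func main() {
    const slotsPerEpoch = 32          // mainnet
    const maxDeneb, maxElectra = 6, 9 // per-block maxima; illustrative, read from config in the test

    total := slotsPerEpoch*maxDeneb + 2*slotsPerEpoch*maxElectra
    fmt.Println(total) // 768 blobs expected across the three requested epochs
}
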
@@ -81,19 +81,20 @@ func (s *Service) reconstructAndBroadcastBlobs(ctx context.Context, block interf
	if s.cfg.blobStorage == nil {
		return
	}
	indices, err := s.cfg.blobStorage.Indices(blockRoot, block.Block().Slot())
	summary := s.cfg.blobStorage.Summary(blockRoot)
	cmts, err := block.Block().Body().BlobKzgCommitments()
	if err != nil {
		log.WithError(err).Error("Failed to retrieve indices for block")
		log.WithError(err).Error("Failed to read commitments from block")
		return
	}
	for _, index := range indices {
		if index {
	for i := range cmts {
		if summary.HasIndex(uint64(i)) {
			blobExistedInDBTotal.Inc()
		}
	}

	// Reconstruct blob sidecars from the EL
	blobSidecars, err := s.cfg.executionReconstructor.ReconstructBlobSidecars(ctx, block, blockRoot, indices)
	blobSidecars, err := s.cfg.executionReconstructor.ReconstructBlobSidecars(ctx, block, blockRoot, summary.HasIndex)
	if err != nil {
		log.WithError(err).Error("Failed to reconstruct blob sidecars")
		return
@@ -103,15 +104,12 @@ func (s *Service) reconstructAndBroadcastBlobs(ctx context.Context, block interf
	}

	// Refresh the summary as new blobs may have been added to the db
	indices, err = s.cfg.blobStorage.Indices(blockRoot, block.Block().Slot())
	if err != nil {
		log.WithError(err).Error("Failed to retrieve indices for block")
		return
	}
	summary = s.cfg.blobStorage.Summary(blockRoot)

	// Broadcast blob sidecars first, then save them to the db
	for _, sidecar := range blobSidecars {
		if sidecar.Index >= uint64(len(indices)) || indices[sidecar.Index] {
		// Don't broadcast the blob if it has appeared on disk.
		if summary.HasIndex(sidecar.Index) {
			continue
		}
		if err := s.cfg.p2p.BroadcastBlob(ctx, sidecar.Index, sidecar.BlobSidecar); err != nil {
@@ -120,8 +118,7 @@ func (s *Service) reconstructAndBroadcastBlobs(ctx context.Context, block interf
	}

	for _, sidecar := range blobSidecars {
		if sidecar.Index >= uint64(len(indices)) || indices[sidecar.Index] {
			blobExistedInDBTotal.Inc()
		if summary.HasIndex(sidecar.Index) {
			continue
		}
		if err := s.subscribeBlob(ctx, sidecar); err != nil {
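The changelog entry for this change calls out a mutating blob mask. A minimal sketch of the hazard with hypothetical types: a shared []bool mask can change underneath a long-running reconstruction, while a value-type summary (or a predicate over it) captured at call time cannot:

package main

import "fmt"

// summary is a hypothetical value type standing in for filesystem.BlobStorageSummary.
type summary struct{ mask [6]bool }

func (s summary) HasIndex(i uint64) bool { return i < uint64(len(s.mask)) && s.mask[i] }

func main() {
    shared := []bool{true, false, false, false, false, false} // mutable slice handed to a consumer
    snap := summary{mask: [6]bool{0: true}}                   // value copy taken at the same moment

    shared[1] = true // a later refresh (or another goroutine) flips the shared mask

    fmt.Println(shared[1])        // true: the slice view mutated underneath the consumer
    fmt.Println(snap.HasIndex(1)) // false: the snapshot still reflects the original state
}
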
@@ -17,7 +17,7 @@ func TestMain(m *testing.M) {
	flags.Init(&flags.GlobalFlags{
		BlockBatchLimit:            64,
		BlockBatchLimitBurstFactor: 10,
		BlobBatchLimit:             8,
		BlobBatchLimit:             32,
		BlobBatchLimitBurstFactor:  2,
	})
	defer func() {

@@ -22,7 +22,6 @@ import (
	"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
	"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v5/runtime/version"
	prysmTime "github.com/prysmaticlabs/prysm/v5/time"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
)
@@ -69,18 +68,12 @@ func (s *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, ms

	// Broadcast the aggregated attestation on a feed to notify other services in the beacon node
	// of a received aggregated attestation.
	// TODO: this will be extended to Electra in a later PR
	if m.Version() == version.Phase0 {
		phase0Att, ok := m.(*ethpb.SignedAggregateAttestationAndProof)
		if ok {
			s.cfg.attestationNotifier.OperationFeed().Send(&feed.Event{
				Type: operation.AggregatedAttReceived,
				Data: &operation.AggregatedAttReceivedData{
					Attestation: phase0Att.Message,
				},
			})
		}
	}
	s.cfg.attestationNotifier.OperationFeed().Send(&feed.Event{
		Type: operation.AggregatedAttReceived,
		Data: &operation.AggregatedAttReceivedData{
			Attestation: m.AggregateAttestationAndProof(),
		},
	})

	if err := helpers.ValidateSlotTargetEpoch(data); err != nil {
		return pubsub.ValidationReject, err

@@ -139,8 +139,9 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p
		return validationRes, err
	}

	var singleAtt *eth.SingleAttestation
	if att.Version() >= version.Electra {
		singleAtt, ok := att.(*eth.SingleAttestation)
		singleAtt, ok = att.(*eth.SingleAttestation)
		if !ok {
			return pubsub.ValidationIgnore, fmt.Errorf("attestation has wrong type (expected %T, got %T)", &eth.SingleAttestation{}, att)
		}
@@ -183,12 +184,21 @@ func (s *Service) validateCommitteeIndexBeaconAttestation(ctx context.Context, p

	// Broadcast the unaggregated attestation on a feed to notify other services in the beacon node
	// of a received unaggregated attestation.
	s.cfg.attestationNotifier.OperationFeed().Send(&feed.Event{
		Type: operation.UnaggregatedAttReceived,
		Data: &operation.UnAggregatedAttReceivedData{
			Attestation: att,
		},
	})
	if singleAtt != nil {
		s.cfg.attestationNotifier.OperationFeed().Send(&feed.Event{
			Type: operation.SingleAttReceived,
			Data: &operation.SingleAttReceivedData{
				Attestation: singleAtt,
			},
		})
	} else {
		s.cfg.attestationNotifier.OperationFeed().Send(&feed.Event{
			Type: operation.UnaggregatedAttReceived,
			Data: &operation.UnAggregatedAttReceivedData{
				Attestation: att,
			},
		})
	}

	s.setSeenCommitteeIndicesSlot(data.Slot, committeeIndex, att.GetAggregationBits())

@@ -8,6 +8,7 @@ go_library(
        "cache.go",
        "error.go",
        "fake.go",
        "filesystem.go",
        "initializer.go",
        "interface.go",
        "log.go",
@@ -40,6 +41,7 @@ go_library(
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@com_github_spf13_afero//:go_default_library",
    ],
)

@@ -1,6 +1,9 @@
package verification

import "github.com/pkg/errors"
import (
	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
)

var (
	// ErrFromFutureSlot means RequireSlotNotTooEarly failed.
@@ -35,6 +38,10 @@ var (

	// ErrMissingVerification indicates that the given verification function was never performed on the value.
	ErrMissingVerification = errors.New("verification was not performed for requirement")

	// errVerificationImplementationFault indicates that a code path yielding VerifiedROBlobs has an implementation
	// error, leading it to call VerifiedROBlobError with a nil error.
	errVerificationImplementationFault = errors.New("could not verify blob data or create a valid VerifiedROBlob")
)

// VerificationMultiError is a custom error that can be used to access individual verification failures.
@@ -68,3 +75,12 @@ func (ve VerificationMultiError) Failures() map[Requirement]error {
func newVerificationMultiError(r *results, err error) VerificationMultiError {
	return VerificationMultiError{r: r, err: err}
}

// VerifiedROBlobError can be used by methods that have a VerifiedROBlob return type but do not have permission to
// create a value of that type, in order to generate an error return value.
func VerifiedROBlobError(err error) (blocks.VerifiedROBlob, error) {
	if err == nil {
		return blocks.VerifiedROBlob{}, errVerificationImplementationFault
	}
	return blocks.VerifiedROBlob{}, err
}

@@ -6,22 +6,6 @@ import (
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
)

// BlobSidecarNoop is a FAKE verification function that simply launders a ROBlob->VerifiedROBlob.
// TODO: find all code that uses this method and replace it with full verification.
func BlobSidecarNoop(b blocks.ROBlob) (blocks.VerifiedROBlob, error) {
	return blocks.NewVerifiedROBlob(b), nil
}

// BlobSidecarSliceNoop is a FAKE verification function that simply launders a ROBlob->VerifiedROBlob.
// TODO: find all code that uses this method and replace it with full verification.
func BlobSidecarSliceNoop(b []blocks.ROBlob) ([]blocks.VerifiedROBlob, error) {
	vbs := make([]blocks.VerifiedROBlob, len(b))
	for i := range b {
		vbs[i] = blocks.NewVerifiedROBlob(b[i])
	}
	return vbs, nil
}

// FakeVerifyForTest can be used by tests that need a VerifiedROBlob but don't want to do all the
// expensive set up to perform full validation.
func FakeVerifyForTest(t *testing.T, b blocks.ROBlob) blocks.VerifiedROBlob {
@@ -35,7 +19,6 @@ func FakeVerifyForTest(t *testing.T, b blocks.ROBlob) blocks.VerifiedROBlob {
func FakeVerifySliceForTest(t *testing.T, b []blocks.ROBlob) []blocks.VerifiedROBlob {
	// log so that t is truly required
	t.Log("producing fake []VerifiedROBlob for a test")
	// tautological assertion that ensures this function can only be used in tests.
	vbs := make([]blocks.VerifiedROBlob, len(b))
	for i := range b {
		vbs[i] = blocks.NewVerifiedROBlob(b[i])

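Call sites migrate from the error-returning noop to the test-only helper, which requires a *testing.T and therefore cannot leak into production paths. A hedged sketch of the post-migration call-site shape, assuming the package import path matches the BUILD file above:

package verification_test

import (
    "testing"

    "github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
)

// TestFakeVerifySlice shows the call-site shape after the migration.
func TestFakeVerifySlice(t *testing.T) {
    var sidecars []blocks.ROBlob // normally produced by test utilities
    verified := verification.FakeVerifySliceForTest(t, sidecars)
    if len(verified) != len(sidecars) {
        t.Fatal("expected one VerifiedROBlob per input ROBlob")
    }
}
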
beacon-chain/verification/filesystem.go
Normal file
@@ -0,0 +1,23 @@
package verification

import (
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/spf13/afero"
)

// VerifiedROBlobFromDisk reads an SSZ-encoded BlobSidecar from the given path and launders it
// into a VerifiedROBlob for the given block root, trusting that sidecars were verified before
// they were written to disk.
func VerifiedROBlobFromDisk(fs afero.Fs, root [32]byte, path string) (blocks.VerifiedROBlob, error) {
	encoded, err := afero.ReadFile(fs, path)
	if err != nil {
		return VerifiedROBlobError(err)
	}
	s := &ethpb.BlobSidecar{}
	if err := s.UnmarshalSSZ(encoded); err != nil {
		return VerifiedROBlobError(err)
	}
	ro, err := blocks.NewROBlobWithRoot(s, root)
	if err != nil {
		return VerifiedROBlobError(err)
	}
	return blocks.NewVerifiedROBlob(ro), nil
}

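A hedged usage sketch for the new helper: serialize a test sidecar into an in-memory afero filesystem and read it back. The path is illustrative; the real on-disk layout is owned by the blob storage package, and the test utilities used here appear elsewhere in this diff:

package verification_test

import (
    "testing"

    "github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
    ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v5/testing/require"
    "github.com/prysmaticlabs/prysm/v5/testing/util"
    "github.com/spf13/afero"
)

func TestVerifiedROBlobFromDisk(t *testing.T) {
    fs := afero.NewMemMapFs()
    sc := util.HydrateBlobSidecar(&ethpb.BlobSidecar{})
    enc, err := sc.MarshalSSZ()
    require.NoError(t, err)
    // Illustrative path; the real layout is decided by the blob storage package.
    require.NoError(t, afero.WriteFile(fs, "blobs/0.ssz", enc, 0o600))

    root, err := sc.SignedBlockHeader.Header.HashTreeRoot()
    require.NoError(t, err)
    vb, err := verification.VerifiedROBlobFromDisk(fs, root, "blobs/0.ssz")
    require.NoError(t, err)
    require.Equal(t, root, vb.BlockRoot())
}
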
changelog/dB2510_processslotspan.md
Normal file
@@ -0,0 +1,3 @@
### Changed

- Removes the existing validator.processSlot span and adds a validator.processSlot span to slotCtx.
changelog/james-prysm_fix-block-api-electra.md
Normal file
@@ -0,0 +1,3 @@
### Fixed

- Fix the block API endpoint to handle blocks with the same structure but on different forks (e.g. Fulu and Electra).
changelog/james-prysm_nil-check-to-consensus.md
Normal file
@@ -0,0 +1,3 @@
### Fixed

- Add more nil checks to ToConsensus functions for added safety.
changelog/james-prysm_proposer-settings-readability.md
Normal file
@@ -0,0 +1,3 @@
### Changed

- Improve readability of the proposer settings loader.
changelog/kasey_blobs-by-epoch.md
Normal file
@@ -0,0 +1,2 @@
### Added
- New option to select an alternate blob storage layout. Rather than a flat directory with a subdir for each block root, a multi-level scheme is used to organize blobs by epoch/slot/root, enabling leaner syscalls, indexing and pruning.
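The entry above describes the layout only at a high level. A hypothetical sketch of such an epoch/slot/root path scheme; the real scheme lives in beacon-chain/db/filesystem and may differ:

package main

import "fmt"

// blobPath is a hypothetical illustration of a multi-level blob layout keyed
// by epoch, slot, block root and blob index; it is not Prysm's actual scheme.
func blobPath(epoch, slot uint64, root [32]byte, index uint64) string {
    return fmt.Sprintf("%d/%d/%#x/%d.ssz", epoch, slot, root, index)
}

func main() {
    fmt.Println(blobPath(10, 321, [32]byte{0xab}, 2))
    // 10/321/0xab00...00/2.ssz: pruning an epoch becomes a single directory removal
}
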
changelog/kasey_refactor-checkpoint-download.md
Normal file
@@ -0,0 +1,2 @@
### Changed
- DownloadFinalizedData has moved from the api/client package to beacon-chain/sync/checkpoint.
changelog/nisdas_fix_blobs_by_range.md
Normal file
@@ -0,0 +1,3 @@
### Fixed

- We now use the correct maximum value when serving blobs for Electra blocks.
changelog/nisdas_fix_mutating_blob_mask.md
Normal file
@@ -0,0 +1,4 @@
### Fixed

- We change how we track blob indexes during their reconstruction from the EL to prevent
  a mutating blob mask from causing invalid sidecars to be created.