Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 13:58:09 -05:00)

Compare commits: blob-verif ... attestatio

19 Commits
| Author | SHA1 | Date |
|---|---|---|
| | c3773ad796 | |
| | ce37952217 | |
| | 2119b0ec28 | |
| | 6c85587d14 | |
| | 6daa72634d | |
| | 07ee42660a | |
| | 4b5db8003b | |
| | 4b3c511a26 | |
| | 8902ad3a20 | |
| | 1123df7432 | |
| | a7edec9b98 | |
| | 10ccf1840f | |
| | d035be29cd | |
| | bba8dd6f1e | |
| | 8f5ae760ee | |
| | 5ba91a5216 | |
| | 06dcc5396c | |
| | 06115705ac | |
| | 617996fec9 | |
@@ -6,8 +6,6 @@ import (
    "fmt"
    "net"
    "net/http"
    "path"
    "strings"
    "time"

    "github.com/gorilla/mux"
@@ -179,24 +177,6 @@ func (g *Gateway) corsMiddleware(h http.Handler) http.Handler {
    return c.Handler(h)
}

const swaggerDir = "proto/prysm/v1alpha1/"

// SwaggerServer returns swagger specification files located under "/swagger/"
func SwaggerServer() http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        if !strings.HasSuffix(r.URL.Path, ".swagger.json") {
            log.Debugf("Not found: %s", r.URL.Path)
            http.NotFound(w, r)
            return
        }

        log.Debugf("Serving %s\n", r.URL.Path)
        p := strings.TrimPrefix(r.URL.Path, "/swagger/")
        p = path.Join(swaggerDir, p)
        http.ServeFile(w, r, p)
    }
}

// dial the gRPC server.
func (g *Gateway) dial(ctx context.Context, network, addr string) (*grpc.ClientConn, error) {
    switch network {
@@ -50,6 +50,7 @@ go_library(
        "//beacon-chain/core/time:go_default_library",
        "//beacon-chain/core/transition:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/db/filesystem:go_default_library",
        "//beacon-chain/db/filters:go_default_library",
        "//beacon-chain/db/kv:go_default_library",
        "//beacon-chain/execution:go_default_library",
@@ -530,3 +530,10 @@ func (s *Service) recoverStateSummary(ctx context.Context, blockRoot [32]byte) (
func (s *Service) BlockBeingSynced(root [32]byte) bool {
    return s.blockBeingSynced.isSyncing(root)
}

// RecentBlockSlot returns the block slot from the fork choice store.
func (s *Service) RecentBlockSlot(root [32]byte) (primitives.Slot, error) {
    s.cfg.ForkChoiceStore.RLock()
    defer s.cfg.ForkChoiceStore.RUnlock()
    return s.cfg.ForkChoiceStore.Slot(root)
}
@@ -30,6 +30,8 @@ var (
    ErrNotCheckpoint = errors.New("not a checkpoint in forkchoice")
)

var errMaxBlobsExceeded = errors.New("Expected commitments in block exceeds MAX_BLOBS_PER_BLOCK")

// An invalid block is the block that fails state transition based on the core protocol rules.
// The beacon node shall not be accepting nor building blocks that branch off from an invalid block.
// Some examples of invalid blocks are:
@@ -147,15 +147,3 @@ func logPayload(block interfaces.ReadOnlyBeaconBlock) error {
    log.WithFields(fields).Debug("Synced new payload")
    return nil
}

func logBlobSidecar(scs []*ethpb.DeprecatedBlobSidecar, startTime time.Time) {
    if len(scs) == 0 {
        return
    }
    log.WithFields(logrus.Fields{
        "count":          len(scs),
        "slot":           scs[0].Slot,
        "block":          hex.EncodeToString(scs[0].BlockRoot),
        "validationTime": time.Since(startTime),
    }).Debug("Synced new blob sidecars")
}
@@ -5,6 +5,7 @@ import (
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
    statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
@@ -164,6 +165,8 @@ func WithFinalizedStateAtStartUp(st state.BeaconState) Option {
    }
}

// WithClockSynchronizer sets the ClockSetter/ClockWaiter values to be used by services that need to block until
// the genesis timestamp is known (ClockWaiter) or which determine the genesis timestamp (ClockSetter).
func WithClockSynchronizer(gs *startup.ClockSynchronizer) Option {
    return func(s *Service) error {
        s.clockSetter = gs
@@ -172,9 +175,18 @@ func WithClockSynchronizer(gs *startup.ClockSynchronizer) Option {
    }
}

// WithSyncComplete sets a channel that is used to notify blockchain service that the node has synced to head.
func WithSyncComplete(c chan struct{}) Option {
    return func(s *Service) error {
        s.syncComplete = c
        return nil
    }
}

// WithBlobStorage sets the blob storage backend for the blockchain service.
func WithBlobStorage(b *filesystem.BlobStorage) Option {
    return func(s *Service) error {
        s.blobStorage = b
        return nil
    }
}
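For orientation, here is a minimal, hedged sketch of how the new `WithBlobStorage` option might be wired up when the blockchain service is constructed. The `dataDir` argument, the `buildChainService` helper, and the short option list are illustrative assumptions; only `NewBlobStorage`, `WithBlobStorage`, `WithSyncComplete`, and `WithClockSynchronizer` appear in this change, and a real node passes many more options.

```go
package example // illustrative wiring sketch, not part of this change

import (
	"context"
	"path"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
)

// buildChainService is a hypothetical helper showing the option plumbing.
func buildChainService(ctx context.Context, dataDir string) (*blockchain.Service, error) {
	// Blob sidecars are persisted as flat files under <dataDir>/blobs (path is an assumption).
	blobs, err := filesystem.NewBlobStorage(path.Join(dataDir, "blobs"))
	if err != nil {
		return nil, err
	}
	return blockchain.NewService(ctx,
		blockchain.WithBlobStorage(blobs),
		blockchain.WithSyncComplete(make(chan struct{})),
		blockchain.WithClockSynchronizer(startup.NewClockSynchronizer()),
	)
}
```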
@@ -2,6 +2,7 @@ package blockchain

import (
    "context"
    "sync"
    "testing"
    "time"
@@ -145,6 +146,61 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
    require.NoError(t, service.OnAttestation(ctx, att[0], 0))
}

func TestService_GetAttPreState_Concurrency(t *testing.T) {
    service, _ := minimalTestService(t)
    ctx := context.Background()

    s, err := util.NewBeaconState()
    require.NoError(t, err)
    ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
    err = s.SetFinalizedCheckpoint(&ethpb.Checkpoint{Root: ckRoot})
    require.NoError(t, err)
    val := &ethpb.Validator{PublicKey: bytesutil.PadTo([]byte("foo"), 48), WithdrawalCredentials: bytesutil.PadTo([]byte("bar"), fieldparams.RootLength)}
    err = s.SetValidators([]*ethpb.Validator{val})
    require.NoError(t, err)
    err = s.SetBalances([]uint64{0})
    require.NoError(t, err)
    r := [32]byte{'g'}
    require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, r))

    cp1 := &ethpb.Checkpoint{Epoch: 1, Root: ckRoot}
    require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'A'})))
    require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: ckRoot}))

    st, root, err := prepareForkchoiceState(ctx, 100, [32]byte(cp1.Root), [32]byte{}, [32]byte{'R'}, cp1, cp1)
    require.NoError(t, err)
    require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))

    var wg sync.WaitGroup
    errChan := make(chan error, 1000)

    for i := 0; i < 1000; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            cp1 := &ethpb.Checkpoint{Epoch: 1, Root: ckRoot}
            _, err := service.getAttPreState(ctx, cp1)
            if err != nil {
                errChan <- err
            }
        }()
    }

    go func() {
        wg.Wait()
        close(errChan)
    }()

    select {
    case <-time.After(10 * time.Second):
        t.Fatal("Test timed out")
    case err, ok := <-errChan:
        if ok && err != nil {
            require.ErrorContains(t, "not a checkpoint in forkchoice", err)
        }
    }
}

func TestStore_SaveCheckpointState(t *testing.T) {
    service, tr := minimalTestService(t)
    ctx := tr.ctx
@@ -6,17 +6,17 @@ import (
    "time"

    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/kzg"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/blocks"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
    statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
    coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
    forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/v4/config/features"
    fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
    "github.com/prysmaticlabs/prysm/v4/config/params"
    consensusblocks "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
@@ -353,11 +353,15 @@ func (s *Service) databaseDACheck(ctx context.Context, b consensusblocks.ROBlock
    if len(commitments) == 0 {
        return nil
    }
    sidecars, err := s.cfg.BeaconDB.BlobSidecarsByRoot(ctx, b.Root())
    missing, err := missingIndices(s.blobStorage, b.Root(), commitments)
    if err != nil {
        return errors.Wrap(err, "could not get blob sidecars")
        return err
    }
    return kzg.IsDataAvailable(commitments, sidecars)
    if len(missing) == 0 {
        return nil
    }
    // TODO: don't worry that this error isn't informative, it will be superseded by a detailed sidecar cache error.
    return errors.New("not all kzg commitments are available")
}

func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.BeaconState) error {
@@ -529,11 +533,43 @@ func (s *Service) runLateBlockTasks() {
    }
}

// missingIndices uses the expected commitments from the block to determine
// which BlobSidecar indices would need to be in the database for DA success.
// It returns a map where each key represents a missing BlobSidecar index.
// An empty map means we have all indices; a non-empty map can be used to compare incoming
// BlobSidecars against the set of known missing sidecars.
func missingIndices(bs *filesystem.BlobStorage, root [32]byte, expected [][]byte) (map[uint64]struct{}, error) {
    if len(expected) == 0 {
        return nil, nil
    }
    if len(expected) > fieldparams.MaxBlobsPerBlock {
        return nil, errMaxBlobsExceeded
    }
    indices, err := bs.Indices(root)
    if err != nil {
        return nil, err
    }
    missing := make(map[uint64]struct{}, len(expected))
    for i := range expected {
        ui := uint64(i)
        if len(expected[i]) > 0 {
            if !indices[i] {
                missing[ui] = struct{}{}
            }
        }
    }
    return missing, nil
}
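To make the contract concrete, here is a small hedged example of what `missingIndices` returns. It is an in-package fragment, not code from this change; `storage`, `root`, and the commitment variables are assumed to be in scope, and the values mirror the "3 commitments, 1 missing" test case added later in this comparison.

```go
// Suppose the block commits to blob indices 0, 1 and 2, but only 1.ssz exists
// on disk for this root, so storage.Indices(root) yields [false, true, false, ...].
expected := [][]byte{commitment0, commitment1, commitment2} // three 48-byte KZG commitments (assumed)
missing, err := missingIndices(storage, root, expected)
if err != nil {
	return err
}
// missing == map[uint64]struct{}{0: {}, 2: {}} — these indices are still needed for DA.
```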
// isDataAvailable blocks until all BlobSidecars committed to in the block are available,
// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
// The function will first check the database to see if all sidecars have been persisted. If any
// sidecars are missing, it will then read from the blobNotifier channel for the given root until the channel is
// closed, the context hits cancellation/timeout, or notifications have been received for all the missing sidecars.
func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
    if signed.Version() < version.Deneb {
        return nil
    }
    t := time.Now()

    block := signed.Block()
    if block == nil {
@@ -552,59 +588,35 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
    if err != nil {
        return errors.Wrap(err, "could not get KZG commitments")
    }
    // expected is the number of kzg commitments observed in the block.
    expected := len(kzgCommitments)
    if expected == 0 {
        return nil
    }

    // Read first from db in case we have the blobs
    sidecars, err := s.cfg.BeaconDB.BlobSidecarsByRoot(ctx, root)
    switch {
    case err == nil:
        if len(sidecars) >= expected {
            s.blobNotifiers.delete(root)
            if err := kzg.IsDataAvailable(kzgCommitments, sidecars); err != nil {
                log.WithField("root", fmt.Sprintf("%#x", root)).Warn("removing blob sidecars with invalid proofs")
                if err2 := s.cfg.BeaconDB.DeleteBlobSidecars(ctx, root); err2 != nil {
                    log.WithError(err2).Error("could not delete sidecars")
                }
                return err
            }
            logBlobSidecar(sidecars, t)
            return nil
        }
    case errors.Is(err, db.ErrNotFound):
        // If the blob sidecars haven't arrived yet, the subsequent code will wait for them.
        // Note: The system will not exit with an error in this scenario.
    default:
        log.WithError(err).Error("could not get blob sidecars from DB")
    // get a map of BlobSidecar indices that are not currently available.
    missing, err := missingIndices(s.blobStorage, root, kzgCommitments)
    if err != nil {
        return err
    }
    // If there are no missing indices, all BlobSidecars are available.
    if len(missing) == 0 {
        return nil
    }

    found := map[uint64]struct{}{}
    for _, sc := range sidecars {
        found[sc.Index] = struct{}{}
    }
    // The gossip handler for blobs writes the index of each verified blob referencing the given
    // root to the channel returned by blobNotifiers.forRoot.
    nc := s.blobNotifiers.forRoot(root)
    for {
        select {
        case idx := <-nc:
            found[idx] = struct{}{}
            if len(found) != expected {
            // Delete each index seen in the notification channel.
            delete(missing, idx)
            // Read from the channel until there are no more missing sidecars.
            if len(missing) > 0 {
                continue
            }
            // Once all sidecars have been observed, clean up the notification channel.
            s.blobNotifiers.delete(root)
            sidecars, err := s.cfg.BeaconDB.BlobSidecarsByRoot(ctx, root)
            if err != nil {
                return errors.Wrap(err, "could not get blob sidecars")
            }
            if err := kzg.IsDataAvailable(kzgCommitments, sidecars); err != nil {
                log.WithField("root", fmt.Sprintf("%#x", root)).Warn("removing blob sidecars with invalid proofs")
                if err2 := s.cfg.BeaconDB.DeleteBlobSidecars(ctx, root); err2 != nil {
                    log.WithError(err2).Error("could not delete sidecars")
                }
                return err
            }
            logBlobSidecar(sidecars, t)
            return nil
        case <-ctx.Done():
            return errors.Wrap(ctx.Err(), "context deadline waiting for blob sidecars")
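As a usage note (a hedged sketch, not taken from this diff): callers typically bound the wait with a deadline-carrying context so the notifier loop above cannot block block processing indefinitely. The timeout value and variable names below are illustrative assumptions; this fragment assumes it sits inside the blockchain package.

```go
// Illustrative only: bound the DA wait before importing the block.
daCtx, cancel := context.WithTimeout(ctx, 4*time.Second) // the 4s budget is an assumption
defer cancel()
if err := s.isDataAvailable(daCtx, blockRoot, signedBlock); err != nil {
	return errors.Wrap(err, "block is not available for import")
}
```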
@@ -18,6 +18,7 @@ import (
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
    testDB "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
    mockExecution "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing"
@@ -2115,3 +2116,99 @@ func Test_commitmentsToCheck(t *testing.T) {
        })
    }
}

func TestMissingIndices(t *testing.T) {
    cases := []struct {
        name     string
        expected [][]byte
        present  []uint64
        result   map[uint64]struct{}
        root     [32]byte
        err      error
    }{
        {
            name: "zero len",
        },
        {
            name:     "expected exceeds max",
            expected: fakeCommitments(fieldparams.MaxBlobsPerBlock + 1),
            err:      errMaxBlobsExceeded,
        },
        {
            name:     "first missing",
            expected: fakeCommitments(fieldparams.MaxBlobsPerBlock),
            present:  []uint64{1, 2, 3, 4, 5},
            result:   fakeResult([]uint64{0}),
        },
        {
            name:     "all missing",
            expected: fakeCommitments(fieldparams.MaxBlobsPerBlock),
            result:   fakeResult([]uint64{0, 1, 2, 3, 4, 5}),
        },
        {
            name:     "none missing",
            expected: fakeCommitments(fieldparams.MaxBlobsPerBlock),
            present:  []uint64{0, 1, 2, 3, 4, 5},
            result:   fakeResult([]uint64{}),
        },
        {
            name:     "one commitment, missing",
            expected: fakeCommitments(1),
            present:  []uint64{},
            result:   fakeResult([]uint64{0}),
        },
        {
            name:     "3 commitments, 1 missing",
            expected: fakeCommitments(3),
            present:  []uint64{1},
            result:   fakeResult([]uint64{0, 2}),
        },
        {
            name:     "3 commitments, none missing",
            expected: fakeCommitments(3),
            present:  []uint64{0, 1, 2},
            result:   fakeResult([]uint64{}),
        },
        {
            name:     "3 commitments, all missing",
            expected: fakeCommitments(3),
            present:  []uint64{},
            result:   fakeResult([]uint64{0, 1, 2}),
        },
    }

    for _, c := range cases {
        bm, bs := filesystem.NewEphemeralBlobStorageWithMocker(t)
        t.Run(c.name, func(t *testing.T) {
            require.NoError(t, bm.CreateFakeIndices(c.root, c.present))
            missing, err := missingIndices(bs, c.root, c.expected)
            if c.err != nil {
                require.ErrorIs(t, err, c.err)
                return
            }
            require.NoError(t, err)
            require.Equal(t, len(c.result), len(missing))
            for key := range c.result {
                m, ok := missing[key]
                require.Equal(t, true, ok)
                require.Equal(t, c.result[key], m)
            }
        })
    }
}

func fakeCommitments(n int) [][]byte {
    f := make([][]byte, n)
    for i := range f {
        f[i] = make([]byte, 48)
    }
    return f
}

func fakeResult(missing []uint64) map[uint64]struct{} {
    r := make(map[uint64]struct{}, len(missing))
    for i := range missing {
        r[missing[i]] = struct{}{}
    }
    return r
}
@@ -3,7 +3,7 @@ package blockchain
import (
    "context"

    ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
)

// SendNewBlobEvent sends a message to the BlobNotifier channel that the blob
@@ -13,11 +13,11 @@ func (s *Service) sendNewBlobEvent(root [32]byte, index uint64) {
}

// ReceiveBlob saves the blob to database and sends the new event
func (s *Service) ReceiveBlob(ctx context.Context, b *ethpb.DeprecatedBlobSidecar) error {
    if err := s.cfg.BeaconDB.SaveBlobSidecar(ctx, []*ethpb.DeprecatedBlobSidecar{b}); err != nil {
func (s *Service) ReceiveBlob(ctx context.Context, b blocks.VerifiedROBlob) error {
    if err := s.blobStorage.Save(b); err != nil {
        return err
    }

    s.sendNewBlobEvent([32]byte(b.BlockRoot), b.Index)
    s.sendNewBlobEvent(b.BlockRoot(), b.Index)
    return nil
}
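For illustration, a hedged sketch of how a caller that already holds a raw `*ethpb.BlobSidecar` might produce the `VerifiedROBlob` the new signature requires and hand it to the service. `verification.BlobSidecarNoop` is the no-op wrapper used by the tests elsewhere in this comparison, not a production verifier; `sidecar`, `root`, `ctx`, and `chainService` are assumed to be in scope.

```go
// Sketch only: wrap a raw sidecar and hand it to the BlobReceiver.
ro, err := blocks.NewROBlobWithRoot(sidecar, root)
if err != nil {
	return err
}
vb, err := verification.BlobSidecarNoop(ro) // no-op verification, as in the tests
if err != nil {
	return err
}
if err := chainService.ReceiveBlob(ctx, vb); err != nil {
	return err
}
```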
@@ -43,7 +43,7 @@ type BlockReceiver interface {
// BlobReceiver interface defines the methods of chain service for receiving new
// blobs
type BlobReceiver interface {
    ReceiveBlob(context.Context, *ethpb.DeprecatedBlobSidecar) error
    ReceiveBlob(context.Context, blocks.VerifiedROBlob) error
}

// SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
@@ -258,11 +258,6 @@ func (s *Service) HasBlock(ctx context.Context, root [32]byte) bool {
    return s.hasBlockInInitSyncOrDB(ctx, root)
}

// RecentBlockSlot returns block slot form fork choice store
func (s *Service) RecentBlockSlot(root [32]byte) (primitives.Slot, error) {
    return s.cfg.ForkChoiceStore.Slot(root)
}

// ReceiveAttesterSlashing receives an attester slashing and inserts it to forkchoice
func (s *Service) ReceiveAttesterSlashing(ctx context.Context, slashing *ethpb.AttesterSlashing) {
    s.cfg.ForkChoiceStore.Lock()
@@ -20,6 +20,7 @@ import (
    coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
    f "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice"
    forkchoicetypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/types"
@@ -63,6 +64,7 @@ type Service struct {
    syncComplete     chan struct{}
    blobNotifiers    *blobNotifierMap
    blockBeingSynced *currentlySyncingBlock
    blobStorage      *filesystem.BlobStorage
}

// config options for the service.
@@ -56,7 +56,7 @@ func (mb *mockBroadcaster) BroadcastSyncCommitteeMessage(_ context.Context, _ ui
    return nil
}

func (mb *mockBroadcaster) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.SignedBlobSidecar) error {
func (mb *mockBroadcaster) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.BlobSidecar) error {
    mb.broadcastCalled = true
    return nil
}

@@ -72,7 +72,7 @@ type ChainService struct {
    OptimisticRoots map[[32]byte]bool
    BlockSlot       primitives.Slot
    SyncingRoot     [32]byte
    Blobs           []*ethpb.DeprecatedBlobSidecar
    Blobs           []blocks.VerifiedROBlob
}

func (s *ChainService) Ancestor(ctx context.Context, root []byte, slot primitives.Slot) ([]byte, error) {
@@ -613,7 +613,7 @@ func (c *ChainService) BlockBeingSynced(root [32]byte) bool {
}

// ReceiveBlob implements the same method in the chain service
func (c *ChainService) ReceiveBlob(_ context.Context, b *ethpb.DeprecatedBlobSidecar) error {
func (c *ChainService) ReceiveBlob(_ context.Context, b blocks.VerifiedROBlob) error {
    c.Blobs = append(c.Blobs, b)
    return nil
}
@@ -1,6 +1,11 @@
package db

import "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/kv"
import (
    "errors"
    "os"

    "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/kv"
)

// ErrNotFound can be used to determine if an error from a method in the database package
// represents a "not found" error. These often require different handling than a low-level
@@ -19,3 +24,9 @@ var ErrNotFoundBackfillBlockRoot = kv.ErrNotFoundBackfillBlockRoot

// ErrNotFoundGenesisBlockRoot means no genesis block root was found, indicating the db was not initialized with genesis
var ErrNotFoundGenesisBlockRoot = kv.ErrNotFoundGenesisBlockRoot

// IsNotFound allows callers to treat errors from a flat-file database, where the file record is missing,
// as equivalent to db.ErrNotFound.
func IsNotFound(err error) bool {
    return errors.Is(err, ErrNotFound) || os.IsNotExist(err)
}
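A brief hedged example of the intent behind `db.IsNotFound`: callers that read blob files through the new filesystem backend can treat a missing file the same way they treated a missing key in the key-value store. This is an illustrative fragment; `blobStorage`, `root`, and `idx` are assumed to be in scope.

```go
// Sketch: fall back to waiting for gossip when the sidecar isn't on disk yet.
sc, err := blobStorage.Get(root, idx)
if err != nil {
	if db.IsNotFound(err) {
		// Not persisted yet; keep waiting for a notification instead of failing hard.
	} else {
		return err
	}
}
_ = sc
```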
@@ -2,27 +2,36 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = ["save_blob.go"],
    srcs = [
        "blob.go",
        "ephemeral.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem",
    visibility = ["//visibility:private"],
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/verification:go_default_library",
        "//config/fieldparams:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//io/file:go_default_library",
        "//proto/eth/v2:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/logging:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prysmaticlabs_fastssz//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@com_github_spf13_afero//:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["save_blob_test.go"],
    srcs = ["blob_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//beacon-chain/verification:go_default_library",
        "//config/fieldparams:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//io/file:go_default_library",
        "//proto/eth/v2:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
        "@com_github_prysmaticlabs_fastssz//:go_default_library",
        "@com_github_spf13_afero//:go_default_library",
    ],
)
beacon-chain/db/filesystem/blob.go (179 lines, new file)
@@ -0,0 +1,179 @@
package filesystem

import (
    "fmt"
    "os"
    "path"
    "strconv"
    "strings"

    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/verification"
    fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
    "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v4/io/file"
    ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v4/runtime/logging"
    log "github.com/sirupsen/logrus"
    "github.com/spf13/afero"
)

var (
    errIndexOutOfBounds = errors.New("blob index in file name > MaxBlobsPerBlock")
)

const (
    sszExt  = "ssz"
    partExt = "part"

    directoryPermissions = 0700
)

// NewBlobStorage creates a new instance of the BlobStorage object. Note that the implementation of BlobStorage may
// attempt to hold a file lock to guarantee exclusive control of the blob storage directory, so this should only be
// initialized once per beacon node.
func NewBlobStorage(base string) (*BlobStorage, error) {
    base = path.Clean(base)
    if err := file.MkdirAll(base); err != nil {
        return nil, err
    }
    fs := afero.NewBasePathFs(afero.NewOsFs(), base)
    return &BlobStorage{fs: fs}, nil
}

// BlobStorage is the concrete implementation of the filesystem backend for saving and retrieving BlobSidecars.
type BlobStorage struct {
    fs afero.Fs
}

// Save saves a single blob sidecar to storage.
func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
    fname := namerForSidecar(sidecar)
    sszPath := fname.path()
    exists, err := afero.Exists(bs.fs, sszPath)
    if err != nil {
        return err
    }
    if exists {
        log.WithFields(logging.BlobFields(sidecar.ROBlob)).Debug("ignoring a duplicate blob sidecar Save attempt")
        return nil
    }

    // Serialize the ethpb.BlobSidecar to binary data using SSZ.
    sidecarData, err := sidecar.MarshalSSZ()
    if err != nil {
        return errors.Wrap(err, "failed to serialize sidecar data")
    }
    if err := bs.fs.MkdirAll(fname.dir(), directoryPermissions); err != nil {
        return err
    }
    partPath := fname.partPath()
    // Create a partial file and write the serialized data to it.
    partialFile, err := bs.fs.Create(partPath)
    if err != nil {
        return errors.Wrap(err, "failed to create partial file")
    }

    _, err = partialFile.Write(sidecarData)
    if err != nil {
        closeErr := partialFile.Close()
        if closeErr != nil {
            return closeErr
        }
        return errors.Wrap(err, "failed to write to partial file")
    }
    err = partialFile.Close()
    if err != nil {
        return err
    }

    // Atomically rename the partial file to its final name.
    err = bs.fs.Rename(partPath, sszPath)
    if err != nil {
        return errors.Wrap(err, "failed to rename partial file to final name")
    }
    return nil
}

// Get retrieves a single BlobSidecar by its root and index.
// Since BlobStorage only writes blobs that have undergone full verification, the return
// value is always a VerifiedROBlob.
func (bs *BlobStorage) Get(root [32]byte, idx uint64) (blocks.VerifiedROBlob, error) {
    expected := blobNamer{root: root, index: idx}
    encoded, err := afero.ReadFile(bs.fs, expected.path())
    var v blocks.VerifiedROBlob
    if err != nil {
        return v, err
    }
    s := &ethpb.BlobSidecar{}
    if err := s.UnmarshalSSZ(encoded); err != nil {
        return v, err
    }
    ro, err := blocks.NewROBlobWithRoot(s, root)
    if err != nil {
        return blocks.VerifiedROBlob{}, err
    }
    return verification.BlobSidecarNoop(ro)
}

// Indices generates a bitmap representing which BlobSidecar.Index values are present on disk for a given root.
// This value can be compared to the commitments observed in a block to determine which indices need to be found
// on the network to confirm data availability.
func (bs *BlobStorage) Indices(root [32]byte) ([fieldparams.MaxBlobsPerBlock]bool, error) {
    var mask [fieldparams.MaxBlobsPerBlock]bool
    rootDir := blobNamer{root: root}.dir()
    entries, err := afero.ReadDir(bs.fs, rootDir)
    if err != nil {
        if os.IsNotExist(err) {
            return mask, nil
        }
        return mask, err
    }
    for i := range entries {
        if entries[i].IsDir() {
            continue
        }
        name := entries[i].Name()
        if !strings.HasSuffix(name, sszExt) {
            continue
        }
        parts := strings.Split(name, ".")
        if len(parts) != 2 {
            continue
        }
        u, err := strconv.ParseUint(parts[0], 10, 64)
        if err != nil {
            return mask, errors.Wrapf(err, "unexpected directory entry breaks listing, %s", parts[0])
        }
        if u >= fieldparams.MaxBlobsPerBlock {
            return mask, errIndexOutOfBounds
        }
        mask[u] = true
    }
    return mask, nil
}

type blobNamer struct {
    root  [32]byte
    index uint64
}

func namerForSidecar(sc blocks.VerifiedROBlob) blobNamer {
    return blobNamer{root: sc.BlockRoot(), index: sc.Index}
}

func (p blobNamer) dir() string {
    return fmt.Sprintf("%#x", p.root)
}

func (p blobNamer) fname(ext string) string {
    return path.Join(p.dir(), fmt.Sprintf("%d.%s", p.index, ext))
}

func (p blobNamer) partPath() string {
    return p.fname(partExt)
}

func (p blobNamer) path() string {
    return p.fname(sszExt)
}
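To summarize the API added by this file, here is a hedged usage sketch. The base path, the `vb` sidecar, and the `storeAndCheck` helper are assumptions; the on-disk layout it relies on (`<base>/0x<blockRoot>/<index>.ssz`, written through a `.part` file and an atomic rename) is the one implemented above.

```go
package example // illustrative sketch, not part of this change

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
)

func storeAndCheck(vb blocks.VerifiedROBlob) error {
	// One BlobStorage instance per beacon node; it owns the base directory.
	store, err := filesystem.NewBlobStorage("/var/lib/prysm/blobs") // path is illustrative
	if err != nil {
		return err
	}
	if err := store.Save(vb); err != nil { // writes 0x<root>/<index>.part, then renames to .ssz
		return err
	}
	got, err := store.Get(vb.BlockRoot(), vb.Index) // returns a VerifiedROBlob again
	if err != nil {
		return err
	}
	mask, err := store.Indices(vb.BlockRoot()) // e.g. [false true false false false false]
	if err != nil {
		return err
	}
	fmt.Println(got.Index, mask)
	return nil
}
```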
beacon-chain/db/filesystem/blob_test.go (100 lines, new file)
@@ -0,0 +1,100 @@
package filesystem

import (
    "bytes"
    "testing"

    ssz "github.com/prysmaticlabs/fastssz"
    "github.com/prysmaticlabs/prysm/v4/beacon-chain/verification"
    fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
    ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
    "github.com/spf13/afero"

    "github.com/prysmaticlabs/prysm/v4/testing/require"
    "github.com/prysmaticlabs/prysm/v4/testing/util"
)

func TestBlobStorage_SaveBlobData(t *testing.T) {
    _, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, fieldparams.MaxBlobsPerBlock)
    testSidecars, err := verification.BlobSidecarSliceNoop(sidecars)
    require.NoError(t, err)

    t.Run("no error for duplicate", func(t *testing.T) {
        fs, bs := NewEphemeralBlobStorageWithFs(t)
        existingSidecar := testSidecars[0]

        blobPath := namerForSidecar(existingSidecar).path()
        // Serialize the existing BlobSidecar to binary data.
        existingSidecarData, err := ssz.MarshalSSZ(existingSidecar)
        require.NoError(t, err)

        require.NoError(t, bs.Save(existingSidecar))
        // No error when attempting to write twice.
        require.NoError(t, bs.Save(existingSidecar))

        content, err := afero.ReadFile(fs, blobPath)
        require.NoError(t, err)
        require.Equal(t, true, bytes.Equal(existingSidecarData, content))

        // Deserialize the BlobSidecar from the saved file data.
        savedSidecar := &ethpb.BlobSidecar{}
        err = savedSidecar.UnmarshalSSZ(content)
        require.NoError(t, err)

        // Compare the original Sidecar and the saved Sidecar.
        require.DeepSSZEqual(t, existingSidecar.BlobSidecar, savedSidecar)
    })
    t.Run("indices", func(t *testing.T) {
        bs := NewEphemeralBlobStorage(t)
        sc := testSidecars[2]
        require.NoError(t, bs.Save(sc))
        actualSc, err := bs.Get(sc.BlockRoot(), sc.Index)
        require.NoError(t, err)
        expectedIdx := [fieldparams.MaxBlobsPerBlock]bool{false, false, true}
        actualIdx, err := bs.Indices(actualSc.BlockRoot())
        require.NoError(t, err)
        require.Equal(t, expectedIdx, actualIdx)
    })

    t.Run("round trip write then read", func(t *testing.T) {
        bs := NewEphemeralBlobStorage(t)
        err := bs.Save(testSidecars[0])
        require.NoError(t, err)

        expected := testSidecars[0]
        actual, err := bs.Get(expected.BlockRoot(), expected.Index)
        require.NoError(t, err)
        require.DeepSSZEqual(t, expected, actual)
    })
}

func TestBlobIndicesBounds(t *testing.T) {
    fs, bs := NewEphemeralBlobStorageWithFs(t)
    root := [32]byte{}

    okIdx := uint64(fieldparams.MaxBlobsPerBlock - 1)
    writeFakeSSZ(t, fs, root, okIdx)
    indices, err := bs.Indices(root)
    require.NoError(t, err)
    var expected [fieldparams.MaxBlobsPerBlock]bool
    expected[okIdx] = true
    for i := range expected {
        require.Equal(t, expected[i], indices[i])
    }

    oobIdx := uint64(fieldparams.MaxBlobsPerBlock)
    writeFakeSSZ(t, fs, root, oobIdx)
    _, err = bs.Indices(root)
    require.ErrorIs(t, err, errIndexOutOfBounds)
}

func writeFakeSSZ(t *testing.T, fs afero.Fs, root [32]byte, idx uint64) {
    namer := blobNamer{root: root, index: idx}
    require.NoError(t, fs.MkdirAll(namer.dir(), 0700))
    fh, err := fs.Create(namer.path())
    require.NoError(t, err)
    _, err = fh.Write([]byte("derp"))
    require.NoError(t, err)
    require.NoError(t, fh.Close())
}
beacon-chain/db/filesystem/ephemeral.go (53 lines, new file)
@@ -0,0 +1,53 @@
package filesystem

import (
    "testing"

    "github.com/spf13/afero"
)

// NewEphemeralBlobStorage should only be used for tests.
// The instance of BlobStorage returned is backed by an in-memory virtual filesystem,
// improving test performance and simplifying cleanup.
func NewEphemeralBlobStorage(_ testing.TB) *BlobStorage {
    return &BlobStorage{fs: afero.NewMemMapFs()}
}

// NewEphemeralBlobStorageWithFs can be used by tests that want access to the virtual filesystem
// in order to interact with it outside the parameters of the BlobStorage api.
func NewEphemeralBlobStorageWithFs(_ testing.TB) (afero.Fs, *BlobStorage) {
    fs := afero.NewMemMapFs()
    return fs, &BlobStorage{fs: fs}
}

type BlobMocker struct {
    fs afero.Fs
    bs *BlobStorage
}

// CreateFakeIndices creates empty blob sidecar files at the expected path for the given
// root and indices to influence the result of Indices().
func (bm *BlobMocker) CreateFakeIndices(root [32]byte, indices []uint64) error {
    for i := range indices {
        n := blobNamer{root: root, index: indices[i]}
        if err := bm.fs.MkdirAll(n.dir(), directoryPermissions); err != nil {
            return err
        }
        f, err := bm.fs.Create(n.path())
        if err != nil {
            return err
        }
        if err := f.Close(); err != nil {
            return err
        }
    }
    return nil
}

// NewEphemeralBlobStorageWithMocker returns a *BlobMocker value in addition to the BlobStorage value.
// BlobMocker encapsulates details like blob path construction to avoid leaking implementation details.
func NewEphemeralBlobStorageWithMocker(_ testing.TB) (*BlobMocker, *BlobStorage) {
    fs := afero.NewMemMapFs()
    bs := &BlobStorage{fs: fs}
    return &BlobMocker{fs: fs, bs: bs}, bs
}
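As a usage note (hedged; this test does not exist in the change), the mocker pairs naturally with `Indices` when a test only cares about which index files are present, not their contents:

```go
package filesystem_test // illustrative only, not part of this change

import (
	"testing"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
	"github.com/prysmaticlabs/prysm/v4/testing/require"
)

// TestIndicesWithMocker is a hypothetical example of the BlobMocker helpers.
func TestIndicesWithMocker(t *testing.T) {
	bm, bs := filesystem.NewEphemeralBlobStorageWithMocker(t)
	root := [32]byte{'r'}
	require.NoError(t, bm.CreateFakeIndices(root, []uint64{1, 3}))

	mask, err := bs.Indices(root)
	require.NoError(t, err)
	require.Equal(t, true, mask[1]) // fake file 1.ssz shows up in the bitmap
	require.Equal(t, true, mask[3])
	require.Equal(t, false, mask[0])
}
```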
@@ -1,98 +0,0 @@
package filesystem

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
    "os"
    "path"
    "path/filepath"

    "github.com/pkg/errors"
    ssz "github.com/prysmaticlabs/fastssz"
    "github.com/prysmaticlabs/prysm/v4/io/file"
    "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
)

type BlobStorage struct {
    baseDir string
}

// SaveBlobData saves blobs given a list of sidecars.
func (bs *BlobStorage) SaveBlobData(sidecars []*eth.BlobSidecar) error {
    if len(sidecars) == 0 {
        return errors.New("no blob data to save")
    }
    for _, sidecar := range sidecars {
        blobPath := bs.sidecarFileKey(sidecar)
        exists := file.Exists(blobPath)
        if exists {
            if err := checkDataIntegrity(sidecar, blobPath); err != nil {
                // This error should never happen, if it does then the
                // file has most likely been tampered with.
                return errors.Wrapf(err, "failed to save blob sidecar, tried to overwrite blob (%s) with different content", blobPath)
            }
            continue // Blob already exists, move to the next one
        }

        // Serialize the ethpb.BlobSidecar to binary data using SSZ.
        sidecarData, err := ssz.MarshalSSZ(sidecar)
        if err != nil {
            return errors.Wrap(err, "failed to serialize sidecar data")
        }

        // Create a partial file and write the serialized data to it.
        partialFilePath := blobPath + ".partial"
        partialFile, err := os.Create(filepath.Clean(partialFilePath))
        if err != nil {
            return errors.Wrap(err, "failed to create partial file")
        }

        _, err = partialFile.Write(sidecarData)
        if err != nil {
            closeErr := partialFile.Close()
            if closeErr != nil {
                return closeErr
            }
            return errors.Wrap(err, "failed to write to partial file")
        }
        err = partialFile.Close()
        if err != nil {
            return err
        }

        // Atomically rename the partial file to its final name.
        err = os.Rename(partialFilePath, blobPath)
        if err != nil {
            return errors.Wrap(err, "failed to rename partial file to final name")
        }
    }
    return nil
}

func (bs *BlobStorage) sidecarFileKey(sidecar *eth.BlobSidecar) string {
    return path.Join(bs.baseDir, fmt.Sprintf(
        "%d_%x_%d_%x.blob",
        sidecar.Slot,
        sidecar.BlockRoot,
        sidecar.Index,
        sidecar.KzgCommitment,
    ))
}

// checkDataIntegrity checks the data integrity by comparing the original ethpb.BlobSidecar.
func checkDataIntegrity(sidecar *eth.BlobSidecar, filePath string) error {
    sidecarData, err := ssz.MarshalSSZ(sidecar)
    if err != nil {
        return errors.Wrap(err, "failed to serialize sidecar data")
    }
    originalChecksum := sha256.Sum256(sidecarData)
    savedFileChecksum, err := file.HashFile(filePath)
    if err != nil {
        return errors.Wrap(err, "failed to calculate saved file checksum")
    }
    if hex.EncodeToString(originalChecksum[:]) != hex.EncodeToString(savedFileChecksum) {
        return errors.New("data integrity check failed")
    }
    return nil
}
@@ -1,191 +0,0 @@
package filesystem

import (
    "bytes"
    "crypto/rand"
    "crypto/sha256"
    "encoding/hex"
    "os"
    "path"
    "strings"
    "testing"

    ssz "github.com/prysmaticlabs/fastssz"
    fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
    "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
    "github.com/prysmaticlabs/prysm/v4/io/file"
    "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"

    "github.com/prysmaticlabs/prysm/v4/testing/require"
)

func TestBlobStorage_SaveBlobData(t *testing.T) {
    testSidecars := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
    t.Run("NoBlobData", func(t *testing.T) {
        tempDir := t.TempDir()
        bs := &BlobStorage{baseDir: tempDir}
        err := bs.SaveBlobData([]*eth.BlobSidecar{})
        require.ErrorContains(t, "no blob data to save", err)
    })

    t.Run("BlobExists", func(t *testing.T) {
        tempDir := t.TempDir()
        bs := &BlobStorage{baseDir: tempDir}
        existingSidecar := testSidecars[0]

        blobPath := bs.sidecarFileKey(existingSidecar)
        // Serialize the existing BlobSidecar to binary data.
        existingSidecarData, err := ssz.MarshalSSZ(existingSidecar)
        require.NoError(t, err)

        err = os.MkdirAll(path.Dir(blobPath), os.ModePerm)
        require.NoError(t, err)

        // Write the serialized data to the blob file.
        err = os.WriteFile(blobPath, existingSidecarData, os.ModePerm)
        require.NoError(t, err)

        err = bs.SaveBlobData([]*eth.BlobSidecar{existingSidecar})
        require.NoError(t, err)

        content, err := os.ReadFile(blobPath)
        require.NoError(t, err)

        // Deserialize the BlobSidecar from the saved file data.
        var savedSidecar ssz.Unmarshaler
        savedSidecar = &eth.BlobSidecar{}
        err = savedSidecar.UnmarshalSSZ(content)
        require.NoError(t, err)

        // Compare the original Sidecar and the saved Sidecar.
        require.DeepSSZEqual(t, existingSidecar, savedSidecar)
    })

    t.Run("SaveBlobDataNoErrors", func(t *testing.T) {
        tempDir := t.TempDir()
        bs := &BlobStorage{baseDir: tempDir}
        err := bs.SaveBlobData(testSidecars)
        require.NoError(t, err)

        // Check the number of files in the directory.
        files, err := os.ReadDir(tempDir)
        require.NoError(t, err)
        require.Equal(t, len(testSidecars), len(files))

        for _, f := range files {
            content, err := os.ReadFile(path.Join(tempDir, f.Name()))
            require.NoError(t, err)

            // Deserialize the BlobSidecar from the saved file data.
            var savedSidecar ssz.Unmarshaler
            savedSidecar = &eth.BlobSidecar{}
            err = savedSidecar.UnmarshalSSZ(content)
            require.NoError(t, err)

            // Find the corresponding test sidecar based on the file name.
            sidecar := findTestSidecarsByFileName(t, testSidecars, f.Name())
            require.NotNil(t, sidecar)
            // Compare the original Sidecar and the saved Sidecar.
            require.DeepSSZEqual(t, sidecar, savedSidecar)
        }
    })

    t.Run("OverwriteBlobWithDifferentContent", func(t *testing.T) {
        tempDir := t.TempDir()
        bs := &BlobStorage{baseDir: tempDir}
        originalSidecar := []*eth.BlobSidecar{testSidecars[0]}
        // Save the original sidecar
        err := bs.SaveBlobData(originalSidecar)
        require.NoError(t, err)

        // Modify the blob data
        modifiedSidecar := originalSidecar
        modifiedSidecar[0].Blob = []byte("Modified Blob Data")

        err = bs.SaveBlobData(modifiedSidecar)
        require.ErrorContains(t, "failed to save blob sidecar, tried to overwrite blob", err)
    })
}

func findTestSidecarsByFileName(t *testing.T, testSidecars []*eth.BlobSidecar, fileName string) *eth.BlobSidecar {
    parts := strings.SplitN(fileName, ".", 2)
    require.Equal(t, 2, len(parts))
    // parts[0] contains the substring before the first period
    components := strings.Split(parts[0], "_")
    if len(components) == 4 {
        blockRoot, err := hex.DecodeString(components[1])
        require.NoError(t, err)
        kzgCommitment, err := hex.DecodeString(components[3])
        require.NoError(t, err)
        for _, sidecar := range testSidecars {
            if bytes.Equal(sidecar.BlockRoot, blockRoot) && bytes.Equal(sidecar.KzgCommitment, kzgCommitment) {
                return sidecar
            }
        }
    }
    return nil
}

func TestCheckDataIntegrity(t *testing.T) {
    testSidecars := generateBlobSidecars(t, fieldparams.MaxBlobsPerBlock)
    originalData, err := ssz.MarshalSSZ(testSidecars[0])
    require.NoError(t, err)
    originalChecksum := sha256.Sum256(originalData)

    tempDir := t.TempDir()
    tempfile, err := os.CreateTemp(tempDir, "testfile")
    require.NoError(t, err)
    _, err = tempfile.Write(originalData)
    require.NoError(t, err)

    err = checkDataIntegrity(testSidecars[0], tempfile.Name())
    require.NoError(t, err)

    // Modify the data in the file to simulate data corruption
    corruptedData := []byte("corrupted data")
    err = os.WriteFile(tempfile.Name(), corruptedData, os.ModePerm)
    require.NoError(t, err)

    // Test data integrity check with corrupted data
    err = checkDataIntegrity(testSidecars[0], tempfile.Name())
    require.ErrorContains(t, "data integrity check failed", err)

    // Modify the calculated checksum to be incorrect
    wrongChecksum := hex.EncodeToString(originalChecksum[:]) + "12345"
    err = os.WriteFile(tempfile.Name(), []byte(wrongChecksum), os.ModePerm)
    require.NoError(t, err)

    checksum, err := file.HashFile(tempfile.Name())
    require.NoError(t, err)
    require.NotEqual(t, wrongChecksum, hex.EncodeToString(checksum))
}

func generateBlobSidecars(t *testing.T, n uint64) []*eth.BlobSidecar {
    blobSidecars := make([]*eth.BlobSidecar, n)
    for i := uint64(0); i < n; i++ {
        blobSidecars[i] = generateBlobSidecar(t, i)
    }
    return blobSidecars
}

func generateBlobSidecar(t *testing.T, index uint64) *eth.BlobSidecar {
    blob := make([]byte, 131072)
    _, err := rand.Read(blob)
    require.NoError(t, err)
    kzgCommitment := make([]byte, 48)
    _, err = rand.Read(kzgCommitment)
    require.NoError(t, err)
    kzgProof := make([]byte, 48)
    _, err = rand.Read(kzgProof)
    require.NoError(t, err)
    return &eth.BlobSidecar{
        BlockRoot:       bytesutil.PadTo([]byte{'a'}, 32),
        Index:           index,
        Slot:            100,
        BlockParentRoot: bytesutil.PadTo([]byte{'b'}, 32),
        ProposerIndex:   101,
        Blob:            blob,
        KzgCommitment:   kzgCommitment,
        KzgProof:        kzgProof,
    }
}
@@ -11,7 +11,7 @@ import (

var maxEpochsToPersistBlobs = params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest

// ConfigureBlobRetentionEpoch sets the for blob retention based on command-line context. It sets the local config `maxEpochsToPersistBlobs`.
// ConfigureBlobRetentionEpoch sets the epoch for blob retention based on command-line context. It sets the local config `maxEpochsToPersistBlobs`.
// If the flag is not set, the spec default `MinEpochsForBlobsSidecarsRequest` is used.
// An error if the input epoch is smaller than the spec default value.
func ConfigureBlobRetentionEpoch(cliCtx *cli.Context) error {
@@ -475,7 +475,7 @@ func (f *ForkChoice) CommonAncestor(ctx context.Context, r1 [32]byte, r2 [32]byt
// `blocks`. This slice must be ordered from child to parent. It includes all
// blocks **except** the first one (that is the one with the highest slot
// number). All blocks are assumed to be a strict chain
// where blocks[i].Parent = blocks[i+1]. Also we assume that the parent of the
// where blocks[i].Parent = blocks[i+1]. Also, we assume that the parent of the
// last block in this list is already included in forkchoice store.
func (f *ForkChoice) InsertChain(ctx context.Context, chain []*forkchoicetypes.BlockAndCheckpoints) error {
    if len(chain) == 0 {
@@ -780,9 +780,9 @@ func TestForkChoiceIsViableForCheckpoint(t *testing.T) {
    require.NoError(t, err)
    require.Equal(t, true, viable)

    st, broot, err := prepareForkchoiceState(ctx, 1, [32]byte{'b'}, root, [32]byte{'B'}, 0, 0)
    st, bRoot, err := prepareForkchoiceState(ctx, 1, [32]byte{'b'}, root, [32]byte{'B'}, 0, 0)
    require.NoError(t, err)
    require.NoError(t, f.InsertNode(ctx, st, broot))
    require.NoError(t, f.InsertNode(ctx, st, bRoot))

    // Epoch start
    viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root})
@@ -794,55 +794,55 @@
    require.Equal(t, false, viable)

    // No Children but impossible checkpoint
    viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot})
    viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: bRoot})
    require.NoError(t, err)
    require.Equal(t, false, viable)

    viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot, Epoch: 1})
    viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: bRoot, Epoch: 1})
    require.NoError(t, err)
    require.Equal(t, true, viable)

    st, croot, err := prepareForkchoiceState(ctx, 2, [32]byte{'c'}, broot, [32]byte{'C'}, 0, 0)
    st, cRoot, err := prepareForkchoiceState(ctx, 2, [32]byte{'c'}, bRoot, [32]byte{'C'}, 0, 0)
    require.NoError(t, err)
    require.NoError(t, f.InsertNode(ctx, st, croot))
    require.NoError(t, f.InsertNode(ctx, st, cRoot))

    // Children in same epoch
    viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot})
    viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: bRoot})
    require.NoError(t, err)
    require.Equal(t, false, viable)

    viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot, Epoch: 1})
    viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: bRoot, Epoch: 1})
    require.NoError(t, err)
    require.Equal(t, false, viable)

    st, droot, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch, [32]byte{'d'}, broot, [32]byte{'D'}, 0, 0)
    st, dRoot, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch, [32]byte{'d'}, bRoot, [32]byte{'D'}, 0, 0)
    require.NoError(t, err)
    require.NoError(t, f.InsertNode(ctx, st, droot))
    require.NoError(t, f.InsertNode(ctx, st, dRoot))

    // Children in next epoch but boundary
    viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot})
    viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: bRoot})
    require.NoError(t, err)
    require.Equal(t, false, viable)

    viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot, Epoch: 1})
    viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: bRoot, Epoch: 1})
    require.NoError(t, err)
    require.Equal(t, false, viable)

    // Boundary block
    viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: droot, Epoch: 1})
    viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: dRoot, Epoch: 1})
    require.NoError(t, err)
    require.Equal(t, true, viable)

    viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: droot, Epoch: 0})
    viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: dRoot, Epoch: 0})
    require.NoError(t, err)
    require.Equal(t, false, viable)

    // Children in next epoch
    st, eroot, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'e'}, broot, [32]byte{'E'}, 0, 0)
    st, eRoot, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'e'}, bRoot, [32]byte{'E'}, 0, 0)
    require.NoError(t, err)
    require.NoError(t, f.InsertNode(ctx, st, eroot))
    require.NoError(t, f.InsertNode(ctx, st, eRoot))

    viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: broot, Epoch: 1})
    viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: bRoot, Epoch: 1})
    require.NoError(t, err)
    require.Equal(t, true, viable)
}
@@ -86,7 +86,7 @@ func TestNode_UpdateBestDescendant_NonViableChild(t *testing.T) {
func TestNode_UpdateBestDescendant_ViableChild(t *testing.T) {
    f := setup(1, 1)
    ctx := context.Background()
    // Input child is best descendant
    // Input child is the best descendant
    state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
    require.NoError(t, err)
    require.NoError(t, f.InsertNode(ctx, state, blkRoot))
@@ -99,7 +99,7 @@ func TestNode_UpdateBestDescendant_HigherWeightChild(t *testing.T) {
func TestNode_UpdateBestDescendant_HigherWeightChild(t *testing.T) {
    f := setup(1, 1)
    ctx := context.Background()
    // Input child is best descendant
    // Input child is the best descendant
    state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
    require.NoError(t, err)
    require.NoError(t, f.InsertNode(ctx, state, blkRoot))
@@ -119,7 +119,7 @@ func TestNode_UpdateBestDescendant_LowerWeightChild(t *testing.T) {
func TestNode_UpdateBestDescendant_LowerWeightChild(t *testing.T) {
    f := setup(1, 1)
    ctx := context.Background()
    // Input child is best descendant
    // Input child is the best descendant
    state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
    require.NoError(t, err)
    require.NoError(t, f.InsertNode(ctx, state, blkRoot))

@@ -156,9 +156,9 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
    // Ancestors have the added weights of their children. Genesis is a special exception at 0 weight,
    require.Equal(t, f.store.treeRootNode.weight, uint64(0))

    // Proposer boost score with this tests parameters is 8
    // Proposer boost score with these test parameters is 8
    // Each of the nodes received one attestation accounting for 10.
    // Node D is the only one with a proposer boost still applied:
    // Node D is the only one with proposer boost still applied:
    //
    // (1: 48) -> (2: 38) -> (3: 10)
    // \--------------->(4: 18)
@@ -28,7 +28,7 @@ const orphanLateBlockProposingEarly = 2
// 3- The beacon node is serving a validator that will propose during the next
// slot.
//
// This function only applies an heuristic to decide if the beacon will update
// This function only applies a heuristic to decide if the beacon will update
// the engine's view of head with the parent block or the incoming block. It
// does not guarantee an attempted reorg. This will only be decided later at
// proposal time by calling GetProposerHead.

@@ -61,7 +61,7 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
}

// insert registers a new block node to the fork choice store's node list.
// It then updates the new node's parent with best child and descendant node.
// It then updates the new node's parent with the best child and descendant node.
func (s *Store) insert(ctx context.Context,
    slot primitives.Slot,
    root, parentRoot, payloadHash [fieldparams.RootLength]byte,

@@ -82,7 +82,7 @@ func TestStore_Head_Itself(t *testing.T) {
    require.NoError(t, err)
    require.NoError(t, f.InsertNode(context.Background(), state, blkRoot))

    // Since the justified node does not have a best descendant so the best node
    // Since the justified node does not have a best descendant, the best node
    // is itself.
    f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 0, Root: indexToHash(1)}
    h, err := f.store.head(context.Background())
@@ -213,7 +213,7 @@ func TestStore_Prune_ReturnEarly(t *testing.T) {
    require.Equal(t, nodeCount, f.NodeCount())
}

// This unit tests starts with a simple branch like this
// This unit test starts with a simple branch like this
//
// - 1
// /

@@ -298,7 +298,7 @@ func TestStore_PullTips_Heuristics(t *testing.T) {
    require.NoError(tt, err)
    require.NoError(tt, f.InsertNode(ctx, st, root))
    // Check that the justification point is not the parent's.
    // This tests that the heuristics in pullTips did not apply and
    // This test checks that the heuristics in pullTips did not apply and
    // the test continues to compute a bogus unrealized
    // justification
    require.Equal(tt, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
@@ -315,7 +315,7 @@ func TestStore_PullTips_Heuristics(t *testing.T) {
    require.NoError(tt, err)
    require.NoError(tt, f.InsertNode(ctx, st, root))
    // Check that the justification point is not the parent's.
    // This tests that the heuristics in pullTips did not apply and
    // This test checks that the heuristics in pullTips did not apply and
    // the test continues to compute a bogus unrealized
    // justification
    require.Equal(tt, primitives.Epoch(1), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
@@ -331,7 +331,7 @@ func TestStore_PullTips_Heuristics(t *testing.T) {
    require.NoError(tt, err)
    require.NoError(tt, f.InsertNode(ctx, st, root))
    // Check that the justification point is not the parent's.
    // This tests that the heuristics in pullTips did not apply and
    // This test checks that the heuristics in pullTips did not apply and
    // the test continues to compute a bogus unrealized
    // justification
    require.Equal(tt, primitives.Epoch(2), f.store.nodeByRoot[[32]byte{'h'}].unrealizedJustifiedEpoch)
@@ -24,6 +24,7 @@ go_library(
|
||||
"//beacon-chain/cache/depositcache:go_default_library",
|
||||
"//beacon-chain/cache/depositsnapshot:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/db/kv:go_default_library",
|
||||
"//beacon-chain/db/slasherkv:go_default_library",
|
||||
"//beacon-chain/deterministic-genesis:go_default_library",
|
||||
@@ -85,6 +86,7 @@ go_test(
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/builder:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/execution:go_default_library",
|
||||
"//beacon-chain/execution/testing:go_default_library",
|
||||
"//beacon-chain/monitor:go_default_library",
|
||||
|
||||
@@ -40,7 +40,7 @@ func configureHistoricalSlasher(cliCtx *cli.Context) error {
return err
}
cmdConfig := cmd.Get()
// Allow up to 4096 attestations at a time to be requested from the beacon nde.
// Allow up to 4096 attestations at a time to be requested from the beacon node.
cmdConfig.MaxRPCPageSize = int(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().MaxAttestations)) // lint:ignore uintcast -- Page size should not exceed int64 with these constants.
cmd.Init(cmdConfig)
log.Warnf(

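For orientation: with mainnet presets this evaluates to SlotsPerEpoch * MaxAttestations = 32 * 128 = 4096, the figure quoted in the comment above.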
@@ -26,6 +26,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositcache"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache/depositsnapshot"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/kv"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/slasherkv"
|
||||
interopcoldstart "github.com/prysmaticlabs/prysm/v4/beacon-chain/deterministic-genesis"
|
||||
@@ -112,6 +113,7 @@ type BeaconNode struct {
|
||||
forkChoicer forkchoice.ForkChoicer
|
||||
clockWaiter startup.ClockWaiter
|
||||
initialSyncComplete chan struct{}
|
||||
BlobStorage *filesystem.BlobStorage
|
||||
}
|
||||
|
||||
// New creates a new node instance, sets up configuration options, and registers
|
||||
@@ -200,6 +202,7 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Debugln("Starting DB")
|
||||
if err := beacon.startDB(cliCtx, depositAddress); err != nil {
|
||||
return nil, err
|
||||
@@ -639,6 +642,7 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
|
||||
blockchain.WithProposerIdsCache(b.proposerIdsCache),
|
||||
blockchain.WithClockSynchronizer(gs),
|
||||
blockchain.WithSyncComplete(syncComplete),
|
||||
blockchain.WithBlobStorage(b.BlobStorage),
|
||||
)
|
||||
|
||||
blockchainService, err := blockchain.NewService(b.ctx, opts...)
|
||||
@@ -717,6 +721,7 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}) erro
|
||||
regularsync.WithClockWaiter(b.clockWaiter),
|
||||
regularsync.WithInitialSyncComplete(initialSyncComplete),
|
||||
regularsync.WithStateNotifier(b),
|
||||
regularsync.WithBlobStorage(b.BlobStorage),
|
||||
)
|
||||
return b.services.RegisterService(rs)
|
||||
}
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/builder"
|
||||
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
|
||||
mockExecution "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/monitor"
|
||||
@@ -70,7 +71,8 @@ func TestNodeStart_Ok(t *testing.T) {
|
||||
ctx := cli.NewContext(&app, set, nil)
|
||||
node, err := New(ctx, WithBlockchainFlagOptions([]blockchain.Option{}),
|
||||
WithBuilderFlagOptions([]builder.Option{}),
|
||||
WithExecutionChainOptions([]execution.Option{}))
|
||||
WithExecutionChainOptions([]execution.Option{}),
|
||||
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)))
|
||||
require.NoError(t, err)
|
||||
node.services = &runtime.ServiceRegistry{}
|
||||
go func() {
|
||||
@@ -148,11 +150,12 @@ func TestClearDB(t *testing.T) {
|
||||
set.String("suggested-fee-recipient", "0x6e35733c5af9B61374A128e6F85f553aF09ff89A", "fee recipient")
|
||||
require.NoError(t, set.Set("suggested-fee-recipient", "0x6e35733c5af9B61374A128e6F85f553aF09ff89A"))
|
||||
context := cli.NewContext(&app, set, nil)
|
||||
_, err = New(context, WithExecutionChainOptions([]execution.Option{
|
||||
execution.WithHttpEndpoint(endpoint),
|
||||
}))
|
||||
options := []Option{
|
||||
WithExecutionChainOptions([]execution.Option{execution.WithHttpEndpoint(endpoint)}),
|
||||
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)),
|
||||
}
|
||||
_, err = New(context, options...)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.LogsContain(t, hook, "Removing database")
|
||||
}
|
||||
|
||||
|
||||
@@ -3,6 +3,7 @@ package node
|
||||
import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/builder"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
|
||||
)
|
||||
|
||||
@@ -32,3 +33,11 @@ func WithBuilderFlagOptions(opts []builder.Option) Option {
return nil
}
}

// WithBlobStorage sets the BlobStorage backend for the BeaconNode
func WithBlobStorage(bs *filesystem.BlobStorage) Option {
return func(bn *BeaconNode) error {
bn.BlobStorage = bs
return nil
}
}
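A minimal usage sketch of the new functional option, mirroring how the tests above wire it up; the helper name, endpoint variable, and cli context are assumptions, not part of this change:

package example

import (
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/node"
	"github.com/urfave/cli/v2"
)

// newNodeWithBlobStorage injects the blob storage backend into the BeaconNode
// via the option instead of constructing it inside the node.
func newNodeWithBlobStorage(cliCtx *cli.Context, endpoint string, bs *filesystem.BlobStorage) (*node.BeaconNode, error) {
	return node.New(cliCtx,
		node.WithExecutionChainOptions([]execution.Option{execution.WithHttpEndpoint(endpoint)}),
		node.WithBlobStorage(bs),
	)
}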
|
||||
|
||||
@@ -54,7 +54,7 @@ func (s *Service) Broadcast(ctx context.Context, msg proto.Message) error {

// BroadcastAttestation broadcasts an attestation to the p2p network, the message is assumed to be
// broadcasted to the current fork.
func (s *Service) BroadcastAttestation(ctx context.Context, subnet uint64, att *ethpb.Attestation) error {
func (s *Service) BroadcastAttestation(ctx context.Context, subnet uint64, att *ethpb.Attestation, localAttestation bool) error {
if att == nil {
return errors.New("attempted to broadcast nil attestation")
}
@@ -68,7 +68,7 @@ func (s *Service) BroadcastAttestation(ctx context.Context, subnet uint64, att *
}

// Non-blocking broadcast, with attempts to discover a subnet peer if none available.
go s.broadcastAttestation(ctx, subnet, att, forkDigest)
go s.broadcastAttestation(ctx, subnet, att, forkDigest, localAttestation)

return nil
}
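An illustrative caller-side sketch of the new signature; the helper name is hypothetical, and the beacon API handler further below passes true in the same way for attestations submitted directly to this node:

package example

import (
	"context"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

// broadcastLocalAttestation flags an attestation produced by a validator attached
// to this node so the p2p layer can log which peers it was sent to.
func broadcastLocalAttestation(ctx context.Context, b p2p.Broadcaster, subnet uint64, att *ethpb.Attestation) error {
	return b.BroadcastAttestation(ctx, subnet, att, true /* localAttestation */)
}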
|
||||
@@ -94,7 +94,7 @@ func (s *Service) BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) broadcastAttestation(ctx context.Context, subnet uint64, att *ethpb.Attestation, forkDigest [4]byte) {
|
||||
func (s *Service) broadcastAttestation(ctx context.Context, subnet uint64, att *ethpb.Attestation, forkDigest [4]byte, localAttestation bool) {
|
||||
ctx, span := trace.StartSpan(ctx, "p2p.broadcastAttestation")
|
||||
defer span.End()
|
||||
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
|
||||
@@ -141,6 +141,18 @@ func (s *Service) broadcastAttestation(ctx context.Context, subnet uint64, att *
|
||||
return
|
||||
}
|
||||
|
||||
if localAttestation {
|
||||
peers := s.pubsub.ListPeers(attestationToTopic(subnet, forkDigest) + s.Encoding().ProtocolSuffix())
|
||||
for _, p := range peers {
|
||||
rawAgent, err := s.host.Peerstore().Get(p, "AgentVersion")
|
||||
agent, ok := rawAgent.(string)
|
||||
if err != nil || !ok {
|
||||
agent = "unknown"
|
||||
}
|
||||
log.Infof("Broadcasting attestation to peer with id %s and agent %s", p.String(), agent)
|
||||
}
|
||||
}
|
||||
|
||||
if err := s.broadcastObject(ctx, att, attestationToTopic(subnet, forkDigest)); err != nil {
|
||||
log.WithError(err).Error("Failed to broadcast attestation")
|
||||
tracing.AnnotateError(span, err)
|
||||
@@ -204,7 +216,7 @@ func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMs
|
||||
|
||||
// BroadcastBlob broadcasts a blob to the p2p network, the message is assumed to be
|
||||
// broadcasted to the current fork and to the input subnet.
|
||||
func (s *Service) BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.SignedBlobSidecar) error {
|
||||
func (s *Service) BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.BlobSidecar) error {
|
||||
ctx, span := trace.StartSpan(ctx, "p2p.BroadcastBlob")
|
||||
defer span.End()
|
||||
if blob == nil {
|
||||
@@ -223,7 +235,7 @@ func (s *Service) BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) broadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.SignedBlobSidecar, forkDigest [4]byte) {
|
||||
func (s *Service) broadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.BlobSidecar, forkDigest [4]byte) {
|
||||
_, span := trace.StartSpan(ctx, "p2p.broadcastBlob")
|
||||
defer span.End()
|
||||
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
|
||||
|
||||
@@ -469,18 +469,18 @@ func TestService_BroadcastBlob(t *testing.T) {
|
||||
}),
|
||||
}
|
||||
|
||||
blobSidecar := ðpb.SignedBlobSidecar{
|
||||
Message: ðpb.DeprecatedBlobSidecar{
|
||||
BlockRoot: bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength),
|
||||
Index: 1,
|
||||
Slot: 2,
|
||||
BlockParentRoot: bytesutil.PadTo([]byte{'B'}, fieldparams.RootLength),
|
||||
ProposerIndex: 3,
|
||||
Blob: bytesutil.PadTo([]byte{'C'}, fieldparams.BlobLength),
|
||||
KzgCommitment: bytesutil.PadTo([]byte{'D'}, fieldparams.BLSPubkeyLength),
|
||||
KzgProof: bytesutil.PadTo([]byte{'E'}, fieldparams.BLSPubkeyLength),
|
||||
},
|
||||
Signature: bytesutil.PadTo([]byte{'F'}, fieldparams.BLSSignatureLength),
|
||||
header := util.HydrateSignedBeaconHeader(ðpb.SignedBeaconBlockHeader{})
|
||||
commitmentInclusionProof := make([][]byte, 17)
|
||||
for i := range commitmentInclusionProof {
|
||||
commitmentInclusionProof[i] = bytesutil.PadTo([]byte{}, 32)
|
||||
}
|
||||
blobSidecar := ðpb.BlobSidecar{
|
||||
Index: 1,
|
||||
Blob: bytesutil.PadTo([]byte{'C'}, fieldparams.BlobLength),
|
||||
KzgCommitment: bytesutil.PadTo([]byte{'D'}, fieldparams.BLSPubkeyLength),
|
||||
KzgProof: bytesutil.PadTo([]byte{'E'}, fieldparams.BLSPubkeyLength),
|
||||
SignedBlockHeader: header,
|
||||
CommitmentInclusionProof: commitmentInclusionProof,
|
||||
}
|
||||
subnet := uint64(0)
|
||||
|
||||
@@ -508,7 +508,7 @@ func TestService_BroadcastBlob(t *testing.T) {
|
||||
incomingMessage, err := sub.Next(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
result := ðpb.SignedBlobSidecar{}
|
||||
result := ðpb.BlobSidecar{}
|
||||
require.NoError(t, p.Encoding().DecodeGossip(incomingMessage.Data, result))
|
||||
require.DeepEqual(t, result, blobSidecar)
|
||||
}(t)
|
||||
|
||||
@@ -21,7 +21,7 @@ var gossipTopicMappings = map[string]proto.Message{
|
||||
SyncContributionAndProofSubnetTopicFormat: ðpb.SignedContributionAndProof{},
|
||||
SyncCommitteeSubnetTopicFormat: ðpb.SyncCommitteeMessage{},
|
||||
BlsToExecutionChangeSubnetTopicFormat: ðpb.SignedBLSToExecutionChange{},
|
||||
BlobSubnetTopicFormat: ðpb.SignedBlobSidecar{},
|
||||
BlobSubnetTopicFormat: ðpb.BlobSidecar{},
|
||||
}
|
||||
|
||||
// GossipTopicMappings is a function to return the assigned data type
|
||||
|
||||
@@ -33,9 +33,9 @@ type P2P interface {
|
||||
// Broadcaster broadcasts messages to peers over the p2p pubsub protocol.
|
||||
type Broadcaster interface {
|
||||
Broadcast(context.Context, proto.Message) error
|
||||
BroadcastAttestation(ctx context.Context, subnet uint64, att *ethpb.Attestation) error
|
||||
BroadcastAttestation(ctx context.Context, subnet uint64, att *ethpb.Attestation, localAttestation bool) error
|
||||
BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage) error
|
||||
BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.SignedBlobSidecar) error
|
||||
BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.BlobSidecar) error
|
||||
}
|
||||
|
||||
// SetStreamHandler configures p2p to handle streams of a certain topic ID.
|
||||
|
||||
@@ -144,7 +144,7 @@ func (_ *FakeP2P) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *
|
||||
}
|
||||
|
||||
// BroadcastBlob -- fake.
|
||||
func (_ *FakeP2P) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.SignedBlobSidecar) error {
|
||||
func (_ *FakeP2P) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.BlobSidecar) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -43,7 +43,7 @@ func (m *MockBroadcaster) BroadcastSyncCommitteeMessage(_ context.Context, _ uin
|
||||
}
|
||||
|
||||
// BroadcastBlob broadcasts a blob for mock.
|
||||
func (m *MockBroadcaster) BroadcastBlob(context.Context, uint64, *ethpb.SignedBlobSidecar) error {
|
||||
func (m *MockBroadcaster) BroadcastBlob(context.Context, uint64, *ethpb.BlobSidecar) error {
|
||||
m.BroadcastCalled.Store(true)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -178,7 +178,7 @@ func (p *TestP2P) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *
|
||||
}
|
||||
|
||||
// BroadcastBlob broadcasts a blob for mock.
|
||||
func (p *TestP2P) BroadcastBlob(context.Context, uint64, *ethpb.SignedBlobSidecar) error {
|
||||
func (p *TestP2P) BroadcastBlob(context.Context, uint64, *ethpb.BlobSidecar) error {
|
||||
p.BroadcastCalled.Store(true)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -31,6 +31,7 @@ go_library(
|
||||
"//beacon-chain/rpc/eth/config:go_default_library",
|
||||
"//beacon-chain/rpc/eth/debug:go_default_library",
|
||||
"//beacon-chain/rpc/eth/events:go_default_library",
|
||||
"//beacon-chain/rpc/eth/light-client:go_default_library",
|
||||
"//beacon-chain/rpc/eth/node:go_default_library",
|
||||
"//beacon-chain/rpc/eth/rewards:go_default_library",
|
||||
"//beacon-chain/rpc/eth/validator:go_default_library",
|
||||
@@ -74,6 +75,7 @@ go_test(
|
||||
deps = [
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/execution/testing:go_default_library",
|
||||
"//beacon-chain/startup:go_default_library",
|
||||
"//beacon-chain/sync/initial-sync/testing:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
|
||||
@@ -141,7 +141,7 @@ func (s *Server) SubmitAttestations(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
subnet := corehelpers.ComputeSubnetFromCommitteeAndSlot(uint64(len(vals)), att.Data.CommitteeIndex, att.Data.Slot)
|
||||
|
||||
if err = s.Broadcaster.BroadcastAttestation(ctx, subnet, att); err != nil {
|
||||
if err = s.Broadcaster.BroadcastAttestation(ctx, subnet, att, true); err != nil {
|
||||
log.WithError(err).Errorf("could not broadcast attestation at index %d", i)
|
||||
}
|
||||
|
||||
|
||||
@@ -1,7 +1,9 @@
|
||||
package beacon
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -49,7 +51,32 @@ func (s *Server) GetValidators(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
isFinalized := s.FinalizationFetcher.IsFinalized(ctx, blockRoot)
|
||||
|
||||
rawIds := r.URL.Query()["id"]
|
||||
var req GetValidatorsRequest
|
||||
if r.Method == http.MethodPost {
|
||||
err = json.NewDecoder(r.Body).Decode(&req)
|
||||
switch {
|
||||
case err == io.EOF:
|
||||
http2.HandleError(w, "No data submitted", http.StatusBadRequest)
|
||||
return
|
||||
case err != nil:
|
||||
http2.HandleError(w, "Could not decode request body: "+err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
var statuses []string
|
||||
var rawIds []string
|
||||
if r.Method == http.MethodGet {
|
||||
rawIds = r.URL.Query()["id"]
|
||||
statuses = r.URL.Query()["status"]
|
||||
} else {
|
||||
rawIds = req.Ids
|
||||
statuses = req.Statuses
|
||||
}
|
||||
for i, ss := range statuses {
|
||||
statuses[i] = strings.ToLower(ss)
|
||||
}
|
||||
|
||||
ids, ok := decodeIds(w, st, rawIds, true /* ignore unknown */)
|
||||
if !ok {
|
||||
return
|
||||
@@ -72,11 +99,6 @@ func (s *Server) GetValidators(w http.ResponseWriter, r *http.Request) {
|
||||
epoch := slots.ToEpoch(st.Slot())
|
||||
allBalances := st.Balances()
|
||||
|
||||
statuses := r.URL.Query()["status"]
|
||||
for i, ss := range statuses {
|
||||
statuses[i] = strings.ToLower(ss)
|
||||
}
|
||||
|
||||
// Exit early if no matching validators were found or we don't want to further filter validators by status.
|
||||
if len(readOnlyVals) == 0 || len(statuses) == 0 {
|
||||
containers := make([]*ValidatorContainer, len(readOnlyVals))
|
||||
@@ -234,7 +256,21 @@ func (bs *Server) GetValidatorBalances(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
isFinalized := bs.FinalizationFetcher.IsFinalized(ctx, blockRoot)
|
||||
|
||||
rawIds := r.URL.Query()["id"]
|
||||
var rawIds []string
|
||||
if r.Method == http.MethodGet {
|
||||
rawIds = r.URL.Query()["id"]
|
||||
} else {
|
||||
err = json.NewDecoder(r.Body).Decode(&rawIds)
|
||||
switch {
|
||||
case err == io.EOF:
|
||||
http2.HandleError(w, "No data submitted", http.StatusBadRequest)
|
||||
return
|
||||
case err != nil:
|
||||
http2.HandleError(w, "Could not decode request body: "+err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
ids, ok := decodeIds(w, st, rawIds, true /* ignore unknown */)
|
||||
if !ok {
|
||||
return
|
||||
|
||||
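In other words, a POST body for this endpoint is a bare JSON array of validator ids (indices or hex-encoded pubkeys), e.g. ["0", "1"], rather than the {"ids": ..., "statuses": ...} object accepted by the validators endpoint; the POST test added further below exercises exactly this form.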
@@ -6,6 +6,7 @@ import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
@@ -26,8 +27,12 @@ import (
|
||||
)
|
||||
|
||||
func TestGetValidators(t *testing.T) {
|
||||
const exitedValIndex = 3
|
||||
var st state.BeaconState
|
||||
st, _ = util.DeterministicGenesisState(t, 8192)
|
||||
st, _ = util.DeterministicGenesisState(t, 4)
|
||||
vals := st.Validators()
|
||||
vals[exitedValIndex].ExitEpoch = 0
|
||||
require.NoError(t, st.SetValidators(vals))
|
||||
|
||||
t.Run("get all", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
@@ -49,7 +54,7 @@ func TestGetValidators(t *testing.T) {
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &GetValidatorsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 8192, len(resp.Data))
|
||||
require.Equal(t, 4, len(resp.Data))
|
||||
val := resp.Data[0]
|
||||
assert.Equal(t, "0", val.Index)
|
||||
assert.Equal(t, "32000000000", val.Balance)
|
||||
@@ -77,7 +82,7 @@ func TestGetValidators(t *testing.T) {
|
||||
|
||||
request := httptest.NewRequest(
|
||||
http.MethodGet,
|
||||
"http://example.com/eth/v1/beacon/states/{state_id}/validators?id=15&id=26",
|
||||
"http://example.com/eth/v1/beacon/states/{state_id}/validators?id=0&id=1",
|
||||
nil,
|
||||
)
|
||||
request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
|
||||
@@ -89,8 +94,8 @@ func TestGetValidators(t *testing.T) {
|
||||
resp := &GetValidatorsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 2, len(resp.Data))
|
||||
assert.Equal(t, "15", resp.Data[0].Index)
|
||||
assert.Equal(t, "26", resp.Data[1].Index)
|
||||
assert.Equal(t, "0", resp.Data[0].Index)
|
||||
assert.Equal(t, "1", resp.Data[1].Index)
|
||||
})
|
||||
t.Run("get by pubkey", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
@@ -103,8 +108,8 @@ func TestGetValidators(t *testing.T) {
|
||||
FinalizationFetcher: chainService,
|
||||
}
|
||||
|
||||
pubkey1 := st.PubkeyAtIndex(primitives.ValidatorIndex(20))
|
||||
pubkey2 := st.PubkeyAtIndex(primitives.ValidatorIndex(66))
|
||||
pubkey1 := st.PubkeyAtIndex(primitives.ValidatorIndex(0))
|
||||
pubkey2 := st.PubkeyAtIndex(primitives.ValidatorIndex(1))
|
||||
hexPubkey1 := hexutil.Encode(pubkey1[:])
|
||||
hexPubkey2 := hexutil.Encode(pubkey2[:])
|
||||
request := httptest.NewRequest(
|
||||
@@ -121,8 +126,8 @@ func TestGetValidators(t *testing.T) {
|
||||
resp := &GetValidatorsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 2, len(resp.Data))
|
||||
assert.Equal(t, "20", resp.Data[0].Index)
|
||||
assert.Equal(t, "66", resp.Data[1].Index)
|
||||
assert.Equal(t, "0", resp.Data[0].Index)
|
||||
assert.Equal(t, "1", resp.Data[1].Index)
|
||||
})
|
||||
t.Run("get by both index and pubkey", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
@@ -135,11 +140,11 @@ func TestGetValidators(t *testing.T) {
|
||||
FinalizationFetcher: chainService,
|
||||
}
|
||||
|
||||
pubkey := st.PubkeyAtIndex(primitives.ValidatorIndex(20))
|
||||
pubkey := st.PubkeyAtIndex(primitives.ValidatorIndex(0))
|
||||
hexPubkey := hexutil.Encode(pubkey[:])
|
||||
request := httptest.NewRequest(
|
||||
http.MethodGet,
|
||||
fmt.Sprintf("http://example.com/eth/v1/beacon/states/{state_id}/validators?id=%s&id=60", hexPubkey),
|
||||
fmt.Sprintf("http://example.com/eth/v1/beacon/states/{state_id}/validators?id=%s&id=1", hexPubkey),
|
||||
nil,
|
||||
)
|
||||
request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
|
||||
@@ -151,8 +156,8 @@ func TestGetValidators(t *testing.T) {
|
||||
resp := &GetValidatorsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 2, len(resp.Data))
|
||||
assert.Equal(t, "20", resp.Data[0].Index)
|
||||
assert.Equal(t, "60", resp.Data[1].Index)
|
||||
assert.Equal(t, "0", resp.Data[0].Index)
|
||||
assert.Equal(t, "1", resp.Data[1].Index)
|
||||
})
|
||||
t.Run("state ID required", func(t *testing.T) {
|
||||
s := Server{
|
||||
@@ -275,11 +280,139 @@ func TestGetValidators(t *testing.T) {
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
assert.Equal(t, true, resp.Finalized)
|
||||
})
|
||||
t.Run("POST", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
FinalizationFetcher: chainService,
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
req := &GetValidatorsRequest{
|
||||
Ids: []string{"0", strconv.Itoa(exitedValIndex)},
|
||||
Statuses: []string{"exited"},
|
||||
}
|
||||
b, err := json.Marshal(req)
|
||||
require.NoError(t, err)
|
||||
_, err = body.Write(b)
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(
|
||||
http.MethodPost,
|
||||
"http://example.com/eth/v1/beacon/states/{state_id}/validators",
|
||||
&body,
|
||||
)
|
||||
request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetValidators(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &GetValidatorsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 1, len(resp.Data))
|
||||
assert.Equal(t, "3", resp.Data[0].Index)
|
||||
})
|
||||
t.Run("POST nil values", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
FinalizationFetcher: chainService,
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
req := &GetValidatorsRequest{
|
||||
Ids: nil,
|
||||
Statuses: nil,
|
||||
}
|
||||
b, err := json.Marshal(req)
|
||||
require.NoError(t, err)
|
||||
_, err = body.Write(b)
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(
|
||||
http.MethodPost,
|
||||
"http://example.com/eth/v1/beacon/states/{state_id}/validators",
|
||||
&body,
|
||||
)
|
||||
request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetValidators(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &GetValidatorsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 4, len(resp.Data))
|
||||
})
|
||||
t.Run("POST empty", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
FinalizationFetcher: chainService,
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(
|
||||
http.MethodPost,
|
||||
"http://example.com/eth/v1/beacon/states/{state_id}/validators",
|
||||
nil,
|
||||
)
|
||||
request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetValidators(writer, request)
|
||||
assert.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
e := &http2.DefaultErrorJson{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusBadRequest, e.Code)
|
||||
assert.StringContains(t, "No data submitted", e.Message)
|
||||
})
|
||||
t.Run("POST invalid", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
FinalizationFetcher: chainService,
|
||||
}
|
||||
|
||||
body := bytes.Buffer{}
|
||||
_, err := body.WriteString("foo")
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(
|
||||
http.MethodPost,
|
||||
"http://example.com/eth/v1/beacon/states/{state_id}/validators",
|
||||
&body,
|
||||
)
|
||||
request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetValidators(writer, request)
|
||||
assert.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
e := &http2.DefaultErrorJson{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusBadRequest, e.Code)
|
||||
assert.StringContains(t, "Could not decode request body", e.Message)
|
||||
})
|
||||
}
|
||||
|
||||
func TestListValidators_FilterByStatus(t *testing.T) {
|
||||
func TestGetValidators_FilterByStatus(t *testing.T) {
|
||||
var st state.BeaconState
|
||||
st, _ = util.DeterministicGenesisState(t, 8192)
|
||||
st, _ = util.DeterministicGenesisState(t, 1)
|
||||
|
||||
farFutureEpoch := params.BeaconConfig().FarFutureEpoch
|
||||
validators := []*eth.Validator{
|
||||
@@ -366,7 +499,7 @@ func TestListValidators_FilterByStatus(t *testing.T) {
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &GetValidatorsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
assert.Equal(t, 8192+2, len(resp.Data))
|
||||
assert.Equal(t, 3, len(resp.Data))
|
||||
for _, vc := range resp.Data {
|
||||
assert.Equal(
|
||||
t,
|
||||
@@ -397,7 +530,7 @@ func TestListValidators_FilterByStatus(t *testing.T) {
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &GetValidatorsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
assert.Equal(t, 8192+1, len(resp.Data))
|
||||
assert.Equal(t, 2, len(resp.Data))
|
||||
for _, vc := range resp.Data {
|
||||
require.Equal(
|
||||
t,
|
||||
@@ -506,7 +639,7 @@ func TestListValidators_FilterByStatus(t *testing.T) {
|
||||
|
||||
func TestGetValidator(t *testing.T) {
|
||||
var st state.BeaconState
|
||||
st, _ = util.DeterministicGenesisState(t, 8192)
|
||||
st, _ = util.DeterministicGenesisState(t, 2)
|
||||
|
||||
t.Run("get by index", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
@@ -520,7 +653,7 @@ func TestGetValidator(t *testing.T) {
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/beacon/states/{state_id}/validators/{validator_id}", nil)
|
||||
request = mux.SetURLVars(request, map[string]string{"state_id": "head", "validator_id": "15"})
|
||||
request = mux.SetURLVars(request, map[string]string{"state_id": "head", "validator_id": "0"})
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
@@ -528,12 +661,12 @@ func TestGetValidator(t *testing.T) {
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &GetValidatorResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
assert.Equal(t, "15", resp.Data.Index)
|
||||
assert.Equal(t, "0", resp.Data.Index)
|
||||
assert.Equal(t, "32000000000", resp.Data.Balance)
|
||||
assert.Equal(t, "active_ongoing", resp.Data.Status)
|
||||
require.NotNil(t, resp.Data.Validator)
|
||||
assert.Equal(t, "0x872c61b4a7f8510ec809e5b023f5fdda2105d024c470ddbbeca4bc74e8280af0d178d749853e8f6a841083ac1b4db98f", resp.Data.Validator.Pubkey)
|
||||
assert.Equal(t, "0x00b24fc624e56a5ed42a9639691e27e34b783c7237030367bd17cbef65fa6ccf", resp.Data.Validator.WithdrawalCredentials)
|
||||
assert.Equal(t, "0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c", resp.Data.Validator.Pubkey)
|
||||
assert.Equal(t, "0x00ec7ef7780c9d151597924036262dd28dc60e1228f4da6fecf9d402cb3f3594", resp.Data.Validator.WithdrawalCredentials)
|
||||
assert.Equal(t, "32000000000", resp.Data.Validator.EffectiveBalance)
|
||||
assert.Equal(t, false, resp.Data.Validator.Slashed)
|
||||
assert.Equal(t, "0", resp.Data.Validator.ActivationEligibilityEpoch)
|
||||
@@ -552,7 +685,7 @@ func TestGetValidator(t *testing.T) {
|
||||
FinalizationFetcher: chainService,
|
||||
}
|
||||
|
||||
pubKey := st.PubkeyAtIndex(primitives.ValidatorIndex(20))
|
||||
pubKey := st.PubkeyAtIndex(primitives.ValidatorIndex(0))
|
||||
hexPubkey := hexutil.Encode(pubKey[:])
|
||||
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/beacon/states/{state_id}/validators/{validator_id}", nil)
|
||||
request = mux.SetURLVars(request, map[string]string{"state_id": "head", "validator_id": hexPubkey})
|
||||
@@ -563,7 +696,7 @@ func TestGetValidator(t *testing.T) {
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &GetValidatorResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
assert.Equal(t, "20", resp.Data.Index)
|
||||
assert.Equal(t, "0", resp.Data.Index)
|
||||
})
|
||||
t.Run("state ID required", func(t *testing.T) {
|
||||
s := Server{
|
||||
@@ -657,7 +790,7 @@ func TestGetValidator(t *testing.T) {
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/beacon/states/{state_id}/validators/{validator_id}", nil)
|
||||
request = mux.SetURLVars(request, map[string]string{"state_id": "head", "validator_id": "15"})
|
||||
request = mux.SetURLVars(request, map[string]string{"state_id": "head", "validator_id": "0"})
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
@@ -685,7 +818,7 @@ func TestGetValidator(t *testing.T) {
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/beacon/states/{state_id}/validators/{validator_id}", nil)
|
||||
request = mux.SetURLVars(request, map[string]string{"state_id": "head", "validator_id": "15"})
|
||||
request = mux.SetURLVars(request, map[string]string{"state_id": "head", "validator_id": "0"})
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
@@ -699,7 +832,7 @@ func TestGetValidator(t *testing.T) {
|
||||
|
||||
func TestGetValidatorBalances(t *testing.T) {
|
||||
var st state.BeaconState
|
||||
count := uint64(8192)
|
||||
count := uint64(4)
|
||||
st, _ = util.DeterministicGenesisState(t, count)
|
||||
balances := make([]uint64, count)
|
||||
for i := uint64(0); i < count; i++ {
|
||||
@@ -727,10 +860,10 @@ func TestGetValidatorBalances(t *testing.T) {
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &GetValidatorBalancesResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 8192, len(resp.Data))
|
||||
val := resp.Data[123]
|
||||
assert.Equal(t, "123", val.Index)
|
||||
assert.Equal(t, "123", val.Balance)
|
||||
require.Equal(t, 4, len(resp.Data))
|
||||
val := resp.Data[3]
|
||||
assert.Equal(t, "3", val.Index)
|
||||
assert.Equal(t, "3", val.Balance)
|
||||
})
|
||||
t.Run("get by index", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
@@ -745,7 +878,7 @@ func TestGetValidatorBalances(t *testing.T) {
|
||||
|
||||
request := httptest.NewRequest(
|
||||
http.MethodGet,
|
||||
"http://example.com/eth/v1/beacon/states/{state_id}/validator_balances?id=15&id=26",
|
||||
"http://example.com/eth/v1/beacon/states/{state_id}/validator_balances?id=0&id=1",
|
||||
nil,
|
||||
)
|
||||
request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
|
||||
@@ -757,8 +890,8 @@ func TestGetValidatorBalances(t *testing.T) {
|
||||
resp := &GetValidatorBalancesResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 2, len(resp.Data))
|
||||
assert.Equal(t, "15", resp.Data[0].Index)
|
||||
assert.Equal(t, "26", resp.Data[1].Index)
|
||||
assert.Equal(t, "0", resp.Data[0].Index)
|
||||
assert.Equal(t, "1", resp.Data[1].Index)
|
||||
})
|
||||
t.Run("get by pubkey", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
@@ -770,8 +903,8 @@ func TestGetValidatorBalances(t *testing.T) {
|
||||
OptimisticModeFetcher: chainService,
|
||||
FinalizationFetcher: chainService,
|
||||
}
|
||||
pubkey1 := st.PubkeyAtIndex(primitives.ValidatorIndex(20))
|
||||
pubkey2 := st.PubkeyAtIndex(primitives.ValidatorIndex(66))
|
||||
pubkey1 := st.PubkeyAtIndex(primitives.ValidatorIndex(0))
|
||||
pubkey2 := st.PubkeyAtIndex(primitives.ValidatorIndex(1))
|
||||
hexPubkey1 := hexutil.Encode(pubkey1[:])
|
||||
hexPubkey2 := hexutil.Encode(pubkey2[:])
|
||||
|
||||
@@ -789,8 +922,8 @@ func TestGetValidatorBalances(t *testing.T) {
|
||||
resp := &GetValidatorBalancesResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 2, len(resp.Data))
|
||||
assert.Equal(t, "20", resp.Data[0].Index)
|
||||
assert.Equal(t, "66", resp.Data[1].Index)
|
||||
assert.Equal(t, "0", resp.Data[0].Index)
|
||||
assert.Equal(t, "1", resp.Data[1].Index)
|
||||
})
|
||||
t.Run("get by both index and pubkey", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
@@ -803,11 +936,11 @@ func TestGetValidatorBalances(t *testing.T) {
|
||||
FinalizationFetcher: chainService,
|
||||
}
|
||||
|
||||
pubkey := st.PubkeyAtIndex(primitives.ValidatorIndex(20))
|
||||
pubkey := st.PubkeyAtIndex(primitives.ValidatorIndex(0))
|
||||
hexPubkey := hexutil.Encode(pubkey[:])
|
||||
request := httptest.NewRequest(
|
||||
http.MethodGet,
|
||||
fmt.Sprintf("http://example.com/eth/v1/beacon/states/{state_id}/validators?id=%s&id=60", hexPubkey),
|
||||
fmt.Sprintf("http://example.com/eth/v1/beacon/states/{state_id}/validators?id=%s&id=1", hexPubkey),
|
||||
nil,
|
||||
)
|
||||
request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
|
||||
@@ -819,8 +952,8 @@ func TestGetValidatorBalances(t *testing.T) {
|
||||
resp := &GetValidatorsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 2, len(resp.Data))
|
||||
assert.Equal(t, "20", resp.Data[0].Index)
|
||||
assert.Equal(t, "60", resp.Data[1].Index)
|
||||
assert.Equal(t, "0", resp.Data[0].Index)
|
||||
assert.Equal(t, "1", resp.Data[1].Index)
|
||||
})
|
||||
t.Run("unknown pubkey is ignored", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
@@ -906,7 +1039,7 @@ func TestGetValidatorBalances(t *testing.T) {
|
||||
|
||||
request := httptest.NewRequest(
|
||||
http.MethodGet,
|
||||
"http://example.com/eth/v1/beacon/states/{state_id}/validator_balances?id=15",
|
||||
"http://example.com/eth/v1/beacon/states/{state_id}/validator_balances?id=0",
|
||||
nil,
|
||||
)
|
||||
request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
|
||||
@@ -938,7 +1071,7 @@ func TestGetValidatorBalances(t *testing.T) {
|
||||
|
||||
request := httptest.NewRequest(
|
||||
http.MethodGet,
|
||||
"http://example.com/eth/v1/beacon/states/{state_id}/validator_balances?id=15",
|
||||
"http://example.com/eth/v1/beacon/states/{state_id}/validator_balances?id=0",
|
||||
nil,
|
||||
)
|
||||
request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
|
||||
@@ -951,4 +1084,96 @@ func TestGetValidatorBalances(t *testing.T) {
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
assert.Equal(t, true, resp.Finalized)
|
||||
})
|
||||
t.Run("POST", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
FinalizationFetcher: chainService,
|
||||
}
|
||||
|
||||
pubkey1 := st.PubkeyAtIndex(primitives.ValidatorIndex(0))
|
||||
pubkey2 := st.PubkeyAtIndex(primitives.ValidatorIndex(1))
|
||||
hexPubkey1 := hexutil.Encode(pubkey1[:])
|
||||
hexPubkey2 := hexutil.Encode(pubkey2[:])
|
||||
var body bytes.Buffer
|
||||
_, err := body.WriteString(fmt.Sprintf("[\"%s\",\"%s\"]", hexPubkey1, hexPubkey2))
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(
|
||||
http.MethodPost,
|
||||
"http://example.com/eth/v1/beacon/states/{state_id}/validator_balances",
|
||||
&body,
|
||||
)
|
||||
request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetValidatorBalances(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &GetValidatorBalancesResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 2, len(resp.Data))
|
||||
assert.Equal(t, "0", resp.Data[0].Index)
|
||||
assert.Equal(t, "1", resp.Data[1].Index)
|
||||
})
|
||||
t.Run("POST empty", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
FinalizationFetcher: chainService,
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(
|
||||
http.MethodPost,
|
||||
"http://example.com/eth/v1/beacon/states/{state_id}/validator_balances",
|
||||
nil,
|
||||
)
|
||||
request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetValidatorBalances(writer, request)
|
||||
assert.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
e := &http2.DefaultErrorJson{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusBadRequest, e.Code)
|
||||
assert.StringContains(t, "No data submitted", e.Message)
|
||||
})
|
||||
t.Run("POST invalid", func(t *testing.T) {
|
||||
chainService := &chainMock.ChainService{}
|
||||
s := Server{
|
||||
Stater: &testutil.MockStater{
|
||||
BeaconState: st,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
FinalizationFetcher: chainService,
|
||||
}
|
||||
|
||||
body := bytes.Buffer{}
|
||||
_, err := body.WriteString("foo")
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(
|
||||
http.MethodPost,
|
||||
"http://example.com/eth/v1/beacon/states/{state_id}/validator_balances",
|
||||
&body,
|
||||
)
|
||||
request = mux.SetURLVars(request, map[string]string{"state_id": "head"})
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetValidatorBalances(writer, request)
|
||||
assert.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
e := &http2.DefaultErrorJson{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusBadRequest, e.Code)
|
||||
assert.StringContains(t, "Could not decode request body", e.Message)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -76,6 +76,11 @@ type GetBlockHeaderResponse struct {
Data *shared.SignedBeaconBlockHeaderContainer `json:"data"`
}

type GetValidatorsRequest struct {
Ids []string `json:"ids"`
Statuses []string `json:"statuses"`
}
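A client-side sketch of the new POST form of the endpoint (illustrative only; localhost:3500 is an assumed default gateway address, and the path comes from the tests above):

package example

import (
	"bytes"
	"encoding/json"
	"net/http"
)

// queryValidators posts ids/statuses as a JSON body instead of ?id=...&status=...
// query parameters, matching the handler's POST branch.
func queryValidators(ids, statuses []string) (*http.Response, error) {
	body, err := json.Marshal(map[string][]string{"ids": ids, "statuses": statuses})
	if err != nil {
		return nil, err
	}
	return http.Post(
		"http://localhost:3500/eth/v1/beacon/states/head/validators",
		"application/json",
		bytes.NewReader(body),
	)
}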
|
||||
|
||||
type GetValidatorsResponse struct {
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
Finalized bool `json:"finalized"`
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
@@ -171,7 +172,7 @@ func TestGetSpec(t *testing.T) {
|
||||
data, ok := resp.Data.(map[string]interface{})
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
assert.Equal(t, 112, len(data))
|
||||
assert.Equal(t, 113, len(data))
|
||||
for k, v := range data {
|
||||
switch k {
|
||||
case "CONFIG_NAME":
|
||||
@@ -414,6 +415,8 @@ func TestGetSpec(t *testing.T) {
|
||||
assert.Equal(t, "160", v)
|
||||
case "MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT":
|
||||
assert.Equal(t, "8", v)
|
||||
case "MAX_REQUEST_LIGHT_CLIENT_UPDATES":
|
||||
assert.Equal(t, "128", v)
|
||||
case "SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY":
|
||||
default:
|
||||
t.Errorf("Incorrect key: %s", k)
|
||||
|
||||
beacon-chain/rpc/eth/light-client/BUILD.bazel (new file, 55 lines)
@@ -0,0 +1,55 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"handlers.go",
|
||||
"helpers.go",
|
||||
"server.go",
|
||||
"structs.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/light-client",
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/rpc/eth/shared:go_default_library",
|
||||
"//beacon-chain/rpc/lookup:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//network/http:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/eth/v2:go_default_library",
|
||||
"//proto/migration:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_gorilla_mux//:go_default_library",
|
||||
"@com_github_wealdtech_go_bytesutil//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["handlers_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/rpc/testutil:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_gorilla_mux//:go_default_library",
|
||||
],
|
||||
)
|
||||
beacon-chain/rpc/eth/light-client/handlers.go (new file, 406 lines)
@@ -0,0 +1,406 @@
|
||||
package lightclient
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"net/http"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/wealdtech/go-bytesutil"
|
||||
"go.opencensus.io/trace"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
http2 "github.com/prysmaticlabs/prysm/v4/network/http"
|
||||
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||
)
|
||||
|
||||
// GetLightClientBootstrap - implements https://github.com/ethereum/beacon-APIs/blob/263f4ed6c263c967f13279c7a9f5629b51c5fc55/apis/beacon/light_client/bootstrap.yaml
|
||||
func (s *Server) GetLightClientBootstrap(w http.ResponseWriter, req *http.Request) {
|
||||
// Prepare
|
||||
ctx, span := trace.StartSpan(req.Context(), "beacon.GetLightClientBootstrap")
|
||||
defer span.End()
|
||||
|
||||
// Get the block
|
||||
blockRootParam, err := hexutil.Decode(mux.Vars(req)["block_root"])
|
||||
if err != nil {
|
||||
http2.HandleError(w, "invalid block root: "+err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
blockRoot := bytesutil.ToBytes32(blockRootParam)
|
||||
blk, err := s.Blocker.Block(ctx, blockRoot[:])
|
||||
if !shared.WriteBlockFetchError(w, blk, err) {
|
||||
return
|
||||
}
|
||||
|
||||
// Get the state
|
||||
state, err := s.Stater.StateBySlot(ctx, blk.Block().Slot())
|
||||
if err != nil {
|
||||
http2.HandleError(w, "could not get state: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
bootstrap, err := createLightClientBootstrap(ctx, state)
|
||||
if err != nil {
|
||||
http2.HandleError(w, "could not get light client bootstrap: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
response := &LightClientBootstrapResponse{
|
||||
Version: version.String(blk.Version()),
|
||||
Data: bootstrap,
|
||||
}
|
||||
|
||||
http2.WriteJson(w, response)
|
||||
}
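A minimal client sketch for the handler above; the route and localhost:3500 address are assumptions taken from the standard beacon-API path in the spec linked in the comment:

package example

import (
	"fmt"
	"net/http"
)

// getLightClientBootstrap fetches bootstrap data for a block root given as a
// 0x-prefixed hex string.
func getLightClientBootstrap(blockRoot string) (*http.Response, error) {
	return http.Get(fmt.Sprintf("http://localhost:3500/eth/v1/beacon/light_client/bootstrap/%s", blockRoot))
}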
|
||||
|
||||
// GetLightClientUpdatesByRange - implements https://github.com/ethereum/beacon-APIs/blob/263f4ed6c263c967f13279c7a9f5629b51c5fc55/apis/beacon/light_client/updates.yaml
|
||||
func (s *Server) GetLightClientUpdatesByRange(w http.ResponseWriter, req *http.Request) {
|
||||
// Prepare
|
||||
ctx, span := trace.StartSpan(req.Context(), "beacon.GetLightClientUpdatesByRange")
|
||||
defer span.End()
|
||||
|
||||
// Determine slots per period
|
||||
config := params.BeaconConfig()
|
||||
slotsPerPeriod := uint64(config.EpochsPerSyncCommitteePeriod) * uint64(config.SlotsPerEpoch)
|
||||
|
||||
// Adjust count based on configuration
|
||||
gotCount, _, count := shared.UintFromQuery(w, req, "count")
|
||||
if !gotCount {
|
||||
return
|
||||
} else if count == 0 {
|
||||
http2.HandleError(w, fmt.Sprintf("got invalid 'count' query variable '%d': count must be greater than 0", count), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// Determine the start and end periods
|
||||
gotStartPeriod, _, startPeriod := shared.UintFromQuery(w, req, "start_period")
|
||||
if !gotStartPeriod {
|
||||
return
|
||||
}
|
||||
|
||||
if count > config.MaxRequestLightClientUpdates {
|
||||
count = config.MaxRequestLightClientUpdates
|
||||
}
|
||||
|
||||
// max possible slot is current head
|
||||
headState, err := s.HeadFetcher.HeadState(ctx)
|
||||
if err != nil {
|
||||
http2.HandleError(w, "could not get head state: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
maxSlot := uint64(headState.Slot())
|
||||
|
||||
// min possible slot is Altair fork period
|
||||
minSlot := uint64(config.AltairForkEpoch) * uint64(config.SlotsPerEpoch)
|
||||
|
||||
// Adjust startPeriod, the end of start period must be later than Altair fork epoch, otherwise, can not get the sync committee votes
|
||||
startPeriodEndSlot := (startPeriod+1)*slotsPerPeriod - 1
|
||||
if startPeriodEndSlot < minSlot {
|
||||
startPeriod = minSlot / slotsPerPeriod
|
||||
}
|
||||
|
||||
// Get the initial endPeriod, then we will adjust
|
||||
endPeriod := startPeriod + count - 1
|
||||
|
||||
// Adjust endPeriod, the end of end period must be earlier than current head slot
|
||||
endPeriodEndSlot := (endPeriod+1)*slotsPerPeriod - 1
|
||||
if endPeriodEndSlot > maxSlot {
|
||||
endPeriod = maxSlot / slotsPerPeriod
|
||||
}
|
||||
|
||||
// Populate updates
|
||||
var updates []*LightClientUpdateWithVersion
|
||||
for period := startPeriod; period <= endPeriod; period++ {
|
||||
// Get the last known state of the period,
|
||||
// 1. We wish the block has a parent in the same period if possible
|
||||
// 2. We wish the block has a state in the same period
|
||||
lastSlotInPeriod := period*slotsPerPeriod + slotsPerPeriod - 1
|
||||
if lastSlotInPeriod > maxSlot {
|
||||
lastSlotInPeriod = maxSlot
|
||||
}
|
||||
firstSlotInPeriod := period * slotsPerPeriod
|
||||
|
||||
// Let's not use the first slot in the period, otherwise the attested header will be in previous period
|
||||
firstSlotInPeriod++
|
||||
|
||||
var state state.BeaconState
|
||||
var block interfaces.ReadOnlySignedBeaconBlock
|
||||
for slot := lastSlotInPeriod; slot >= firstSlotInPeriod; slot-- {
|
||||
state, err = s.Stater.StateBySlot(ctx, types.Slot(slot))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Get the block
|
||||
latestBlockHeader := state.LatestBlockHeader()
|
||||
latestStateRoot, err := state.HashTreeRoot(ctx)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
latestBlockHeader.StateRoot = latestStateRoot[:]
|
||||
blockRoot, err := latestBlockHeader.HashTreeRoot()
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
block, err = s.Blocker.Block(ctx, blockRoot[:])
|
||||
if err != nil || block == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
syncAggregate, err := block.Block().Body().SyncAggregate()
|
||||
if err != nil || syncAggregate == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if syncAggregate.SyncCommitteeBits.Count()*3 < config.SyncCommitteeSize*2 {
|
||||
// Not enough votes
|
||||
continue
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
|
||||
if block == nil {
|
||||
// No valid block found for the period
|
||||
continue
|
||||
}
|
||||
|
||||
// Get attested state
|
||||
attestedRoot := block.Block().ParentRoot()
|
||||
attestedBlock, err := s.Blocker.Block(ctx, attestedRoot[:])
|
||||
if err != nil || attestedBlock == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
attestedSlot := attestedBlock.Block().Slot()
|
||||
attestedState, err := s.Stater.StateBySlot(ctx, attestedSlot)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Get finalized block
|
||||
var finalizedBlock interfaces.ReadOnlySignedBeaconBlock
|
||||
finalizedCheckPoint := attestedState.FinalizedCheckpoint()
|
||||
if finalizedCheckPoint != nil {
|
||||
finalizedRoot := bytesutil.ToBytes32(finalizedCheckPoint.Root)
|
||||
finalizedBlock, err = s.Blocker.Block(ctx, finalizedRoot[:])
|
||||
if err != nil {
|
||||
finalizedBlock = nil
|
||||
}
|
||||
}
|
||||
|
||||
update, err := createLightClientUpdate(
|
||||
ctx,
|
||||
state,
|
||||
block,
|
||||
attestedState,
|
||||
finalizedBlock,
|
||||
)
|
||||
|
||||
if err == nil {
|
||||
updates = append(updates, &LightClientUpdateWithVersion{
|
||||
Version: version.String(attestedState.Version()),
|
||||
Data: update,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if len(updates) == 0 {
|
||||
http2.HandleError(w, "no updates found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
response := &LightClientUpdatesByRangeResponse{
|
||||
Updates: updates,
|
||||
}
|
||||
|
||||
http2.WriteJson(w, response)
|
||||
}
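For orientation, with mainnet values (EpochsPerSyncCommitteePeriod = 256, SlotsPerEpoch = 32) slotsPerPeriod is 8192, so a request with start_period=100 and count=2 covers slots 819200 through 835583; endPeriod is clamped earlier if that last slot lies beyond the current head, and startPeriod is bumped forward if its period ends before the Altair fork.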

// GetLightClientFinalityUpdate - implements https://github.com/ethereum/beacon-APIs/blob/263f4ed6c263c967f13279c7a9f5629b51c5fc55/apis/beacon/light_client/finality_update.yaml
func (s *Server) GetLightClientFinalityUpdate(w http.ResponseWriter, req *http.Request) {
// Prepare
ctx, span := trace.StartSpan(req.Context(), "beacon.GetLightClientFinalityUpdate")
defer span.End()

// Finality update needs super majority of sync committee signatures
minSyncCommitteeParticipants := float64(params.BeaconConfig().MinSyncCommitteeParticipants)
minSignatures := uint64(math.Ceil(minSyncCommitteeParticipants * 2 / 3))

block, err := s.getLightClientEventBlock(ctx, minSignatures)
if !shared.WriteBlockFetchError(w, block, err) {
return
}

state, err := s.Stater.StateBySlot(ctx, block.Block().Slot())
if err != nil {
http2.HandleError(w, "could not get state: "+err.Error(), http.StatusInternalServerError)
return
}

// Get attested state
attestedRoot := block.Block().ParentRoot()
attestedBlock, err := s.Blocker.Block(ctx, attestedRoot[:])
if err != nil || attestedBlock == nil {
http2.HandleError(w, "could not get attested block: "+err.Error(), http.StatusInternalServerError)
return
}

attestedSlot := attestedBlock.Block().Slot()
attestedState, err := s.Stater.StateBySlot(ctx, attestedSlot)
if err != nil {
http2.HandleError(w, "could not get attested state: "+err.Error(), http.StatusInternalServerError)
return
}

// Get finalized block
var finalizedBlock interfaces.ReadOnlySignedBeaconBlock
finalizedCheckPoint := attestedState.FinalizedCheckpoint()
if finalizedCheckPoint != nil {
finalizedRoot := bytesutil.ToBytes32(finalizedCheckPoint.Root)
finalizedBlock, err = s.Blocker.Block(ctx, finalizedRoot[:])
if err != nil {
finalizedBlock = nil
}
}

update, err := newLightClientFinalityUpdateFromBeaconState(
ctx,
state,
block,
attestedState,
finalizedBlock,
)
if err != nil {
http2.HandleError(w, "could not get light client finality update: "+err.Error(), http.StatusInternalServerError)
return
}

response := &LightClientUpdateWithVersion{
Version: version.String(attestedState.Version()),
Data: update,
}

http2.WriteJson(w, response)
}

// GetLightClientOptimisticUpdate - implements https://github.com/ethereum/beacon-APIs/blob/263f4ed6c263c967f13279c7a9f5629b51c5fc55/apis/beacon/light_client/optimistic_update.yaml
func (s *Server) GetLightClientOptimisticUpdate(w http.ResponseWriter, req *http.Request) {
// Prepare
ctx, span := trace.StartSpan(req.Context(), "beacon.GetLightClientOptimisticUpdate")
defer span.End()

minSignatures := params.BeaconConfig().MinSyncCommitteeParticipants

block, err := s.getLightClientEventBlock(ctx, minSignatures)
if !shared.WriteBlockFetchError(w, block, err) {
return
}

state, err := s.Stater.StateBySlot(ctx, block.Block().Slot())
if err != nil {
http2.HandleError(w, "could not get state: "+err.Error(), http.StatusInternalServerError)
return
}

// Get attested state
attestedRoot := block.Block().ParentRoot()
attestedBlock, err := s.Blocker.Block(ctx, attestedRoot[:])
if err != nil {
http2.HandleError(w, "could not get attested block: "+err.Error(), http.StatusInternalServerError)
return
}
if attestedBlock == nil {
http2.HandleError(w, "attested block is nil", http.StatusInternalServerError)
return
}

attestedSlot := attestedBlock.Block().Slot()
attestedState, err := s.Stater.StateBySlot(ctx, attestedSlot)
if err != nil {
http2.HandleError(w, "could not get attested state: "+err.Error(), http.StatusInternalServerError)
return
}

update, err := newLightClientOptimisticUpdateFromBeaconState(
ctx,
state,
block,
attestedState,
)
if err != nil {
http2.HandleError(w, "could not get light client optimistic update: "+err.Error(), http.StatusInternalServerError)
return
}

response := &LightClientUpdateWithVersion{
Version: version.String(attestedState.Version()),
Data: update,
}

http2.WriteJson(w, response)
}

// getLightClientEventBlock - returns the block that should be used for light client events, which satisfies the minimum number of signatures from sync committee
func (s *Server) getLightClientEventBlock(ctx context.Context, minSignaturesRequired uint64) (interfaces.ReadOnlySignedBeaconBlock, error) {
// Get the current state
state, err := s.HeadFetcher.HeadState(ctx)
if err != nil {
return nil, fmt.Errorf("could not get head state %v", err)
}

// Get the block
latestBlockHeader := *state.LatestBlockHeader()
stateRoot, err := state.HashTreeRoot(ctx)
if err != nil {
return nil, fmt.Errorf("could not get state root %v", err)
}
latestBlockHeader.StateRoot = stateRoot[:]
latestBlockHeaderRoot, err := latestBlockHeader.HashTreeRoot()
if err != nil {
return nil, fmt.Errorf("could not get latest block header root %v", err)
}

block, err := s.Blocker.Block(ctx, latestBlockHeaderRoot[:])
if err != nil {
return nil, fmt.Errorf("could not get latest block %v", err)
}
if block == nil {
return nil, fmt.Errorf("latest block is nil")
}

// Loop through the blocks until we find a block that satisfies minSignaturesRequired requirement
var numOfSyncCommitteeSignatures uint64
if syncAggregate, err := block.Block().Body().SyncAggregate(); err == nil && syncAggregate != nil {
numOfSyncCommitteeSignatures = syncAggregate.SyncCommitteeBits.Count()
}

for numOfSyncCommitteeSignatures < minSignaturesRequired {
// Get the parent block
parentRoot := block.Block().ParentRoot()
block, err = s.Blocker.Block(ctx, parentRoot[:])
if err != nil {
return nil, fmt.Errorf("could not get parent block %v", err)
}
if block == nil {
return nil, fmt.Errorf("parent block is nil")
}

// Get the number of sync committee signatures
numOfSyncCommitteeSignatures = 0
if syncAggregate, err := block.Block().Body().SyncAggregate(); err == nil && syncAggregate != nil {
numOfSyncCommitteeSignatures = syncAggregate.SyncCommitteeBits.Count()
}
}

return block, nil
}

beacon-chain/rpc/eth/light-client/handlers_test.go (new file, 1012 lines; diff suppressed because it is too large)

beacon-chain/rpc/eth/light-client/helpers.go (new file, 309 lines)
@@ -0,0 +1,309 @@
package lightclient

import (
"context"
"fmt"
"strconv"

"github.com/ethereum/go-ethereum/common/hexutil"

"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
v1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
v2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
"github.com/prysmaticlabs/prysm/v4/proto/migration"
"github.com/prysmaticlabs/prysm/v4/time/slots"
)

// createLightClientBootstrap - implements https://github.com/ethereum/consensus-specs/blob/3d235740e5f1e641d3b160c8688f26e7dc5a1894/specs/altair/light-client/full-node.md#create_light_client_bootstrap
// def create_light_client_bootstrap(state: BeaconState) -> LightClientBootstrap:
//
// assert compute_epoch_at_slot(state.slot) >= ALTAIR_FORK_EPOCH
// assert state.slot == state.latest_block_header.slot
//
// return LightClientBootstrap(
// header=BeaconBlockHeader(
// slot=state.latest_block_header.slot,
// proposer_index=state.latest_block_header.proposer_index,
// parent_root=state.latest_block_header.parent_root,
// state_root=hash_tree_root(state),
// body_root=state.latest_block_header.body_root,
// ),
// current_sync_committee=state.current_sync_committee,
// current_sync_committee_branch=compute_merkle_proof_for_state(state, CURRENT_SYNC_COMMITTEE_INDEX)
// )
func createLightClientBootstrap(ctx context.Context, state state.BeaconState) (*LightClientBootstrap, error) {
// assert compute_epoch_at_slot(state.slot) >= ALTAIR_FORK_EPOCH
if slots.ToEpoch(state.Slot()) < params.BeaconConfig().AltairForkEpoch {
return nil, fmt.Errorf("light client bootstrap is not supported before Altair, invalid slot %d", state.Slot())
}

// assert state.slot == state.latest_block_header.slot
latestBlockHeader := state.LatestBlockHeader()
if state.Slot() != latestBlockHeader.Slot {
return nil, fmt.Errorf("state slot %d not equal to latest block header slot %d", state.Slot(), latestBlockHeader.Slot)
}

// Prepare data
currentSyncCommittee, err := state.CurrentSyncCommittee()
if err != nil {
return nil, fmt.Errorf("could not get current sync committee: %s", err.Error())
}

committee := shared.SyncCommitteeFromConsensus(currentSyncCommittee)

currentSyncCommitteeProof, err := state.CurrentSyncCommitteeProof(ctx)
if err != nil {
return nil, fmt.Errorf("could not get current sync committee proof: %s", err.Error())
}

branch := make([]string, fieldparams.NextSyncCommitteeBranchDepth)
for i, proof := range currentSyncCommitteeProof {
branch[i] = hexutil.Encode(proof)
}

header := shared.BeaconBlockHeaderFromConsensus(latestBlockHeader)
if header == nil {
return nil, fmt.Errorf("could not get beacon block header")
}

// Above shared util function won't calculate state root, so we need to do it manually
stateRoot, err := state.HashTreeRoot(ctx)
if err != nil {
return nil, fmt.Errorf("could not get state root: %s", err.Error())
}
header.StateRoot = hexutil.Encode(stateRoot[:])

// Return result
result := &LightClientBootstrap{
Header: header,
CurrentSyncCommittee: committee,
CurrentSyncCommitteeBranch: branch,
}

return result, nil
}
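
For reference, the bootstrap branch length comes from floorlog2 of the spec's generalized indices (CURRENT_SYNC_COMMITTEE_INDEX is 54 and FINALIZED_ROOT_INDEX is 105 in the Altair light-client spec). A small sketch of that helper, stated as an assumption rather than as Prysm's own code:

package main

import (
	"fmt"
	"math/bits"
)

// floorLog2 returns the Merkle branch depth for a generalized index.
func floorLog2(gindex uint64) int {
	return bits.Len64(gindex) - 1
}

func main() {
	fmt.Println(floorLog2(54))  // 5: current/next sync committee branch length
	fmt.Println(floorLog2(105)) // 6: finality branch length
}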

// createLightClientUpdate - implements https://github.com/ethereum/consensus-specs/blob/d70dcd9926a4bbe987f1b4e65c3e05bd029fcfb8/specs/altair/light-client/full-node.md#create_light_client_update
// def create_light_client_update(state: BeaconState,
//
// block: SignedBeaconBlock,
// attested_state: BeaconState,
// finalized_block: Optional[SignedBeaconBlock]) -> LightClientUpdate:
// assert compute_epoch_at_slot(attested_state.slot) >= ALTAIR_FORK_EPOCH
// assert sum(block.message.body.sync_aggregate.sync_committee_bits) >= MIN_SYNC_COMMITTEE_PARTICIPANTS
//
// assert state.slot == state.latest_block_header.slot
// header = state.latest_block_header.copy()
// header.state_root = hash_tree_root(state)
// assert hash_tree_root(header) == hash_tree_root(block.message)
// update_signature_period = compute_sync_committee_period(compute_epoch_at_slot(block.message.slot))
//
// assert attested_state.slot == attested_state.latest_block_header.slot
// attested_header = attested_state.latest_block_header.copy()
// attested_header.state_root = hash_tree_root(attested_state)
// assert hash_tree_root(attested_header) == block.message.parent_root
// update_attested_period = compute_sync_committee_period(compute_epoch_at_slot(attested_header.slot))
//
// # `next_sync_committee` is only useful if the message is signed by the current sync committee
// if update_attested_period == update_signature_period:
// next_sync_committee = attested_state.next_sync_committee
// next_sync_committee_branch = compute_merkle_proof_for_state(attested_state, NEXT_SYNC_COMMITTEE_INDEX)
// else:
// next_sync_committee = SyncCommittee()
// next_sync_committee_branch = [Bytes32() for _ in range(floorlog2(NEXT_SYNC_COMMITTEE_INDEX))]
//
// # Indicate finality whenever possible
// if finalized_block is not None:
// if finalized_block.message.slot != GENESIS_SLOT:
// finalized_header = BeaconBlockHeader(
// slot=finalized_block.message.slot,
// proposer_index=finalized_block.message.proposer_index,
// parent_root=finalized_block.message.parent_root,
// state_root=finalized_block.message.state_root,
// body_root=hash_tree_root(finalized_block.message.body),
// )
// assert hash_tree_root(finalized_header) == attested_state.finalized_checkpoint.root
// else:
// assert attested_state.finalized_checkpoint.root == Bytes32()
// finalized_header = BeaconBlockHeader()
// finality_branch = compute_merkle_proof_for_state(attested_state, FINALIZED_ROOT_INDEX)
// else:
// finalized_header = BeaconBlockHeader()
// finality_branch = [Bytes32() for _ in range(floorlog2(FINALIZED_ROOT_INDEX))]
//
// return LightClientUpdate(
// attested_header=attested_header,
// next_sync_committee=next_sync_committee,
// next_sync_committee_branch=next_sync_committee_branch,
// finalized_header=finalized_header,
// finality_branch=finality_branch,
// sync_aggregate=block.message.body.sync_aggregate,
// signature_slot=block.message.slot,
// )
func createLightClientUpdate(
ctx context.Context,
state state.BeaconState,
block interfaces.ReadOnlySignedBeaconBlock,
attestedState state.BeaconState,
finalizedBlock interfaces.ReadOnlySignedBeaconBlock) (*LightClientUpdate, error) {
result, err := blockchain.NewLightClientFinalityUpdateFromBeaconState(ctx, state, block, attestedState, finalizedBlock)
if err != nil {
return nil, err
}

// Generate next sync committee and proof
var nextSyncCommittee *v2.SyncCommittee
var nextSyncCommitteeBranch [][]byte

// update_signature_period = compute_sync_committee_period(compute_epoch_at_slot(block.message.slot))
updateSignaturePeriod := slots.ToEpoch(block.Block().Slot())

// update_attested_period = compute_sync_committee_period(compute_epoch_at_slot(attested_header.slot))
updateAttestedPeriod := slots.ToEpoch(result.AttestedHeader.Slot)

if updateAttestedPeriod == updateSignaturePeriod {
tempNextSyncCommittee, err := attestedState.NextSyncCommittee()
if err != nil {
return nil, fmt.Errorf("could not get next sync committee: %s", err.Error())
}

nextSyncCommittee = &v2.SyncCommittee{
Pubkeys: tempNextSyncCommittee.Pubkeys,
AggregatePubkey: tempNextSyncCommittee.AggregatePubkey,
}

nextSyncCommitteeBranch, err = attestedState.NextSyncCommitteeProof(ctx)
if err != nil {
return nil, fmt.Errorf("could not get next sync committee proof: %s", err.Error())
}
} else {
syncCommitteeSize := params.BeaconConfig().SyncCommitteeSize
pubKeys := make([][]byte, syncCommitteeSize)
for i := uint64(0); i < syncCommitteeSize; i++ {
pubKeys[i] = make([]byte, fieldparams.BLSPubkeyLength)
}
nextSyncCommittee = &v2.SyncCommittee{
Pubkeys: pubKeys,
AggregatePubkey: make([]byte, fieldparams.BLSPubkeyLength),
}

nextSyncCommitteeBranch = make([][]byte, fieldparams.NextSyncCommitteeBranchDepth)
for i := 0; i < fieldparams.NextSyncCommitteeBranchDepth; i++ {
nextSyncCommitteeBranch[i] = make([]byte, fieldparams.RootLength)
}
}

result.NextSyncCommittee = nextSyncCommittee
result.NextSyncCommitteeBranch = nextSyncCommitteeBranch
return newLightClientUpdateToJSON(result), nil
}
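
The update_signature_period and update_attested_period values referenced in the spec comments above are sync-committee periods derived from epochs. A minimal sketch, assuming the mainnet preset of 256 epochs per period (illustrative only, not Prysm's helper):

package main

import "fmt"

const epochsPerSyncCommitteePeriod = 256 // mainnet preset, assumed here

// syncCommitteePeriod mirrors compute_sync_committee_period(epoch).
func syncCommitteePeriod(epoch uint64) uint64 {
	return epoch / epochsPerSyncCommitteePeriod
}

func main() {
	fmt.Println(syncCommitteePeriod(255)) // 0
	fmt.Println(syncCommitteePeriod(256)) // 1
}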

func newLightClientFinalityUpdateFromBeaconState(
ctx context.Context,
state state.BeaconState,
block interfaces.ReadOnlySignedBeaconBlock,
attestedState state.BeaconState,
finalizedBlock interfaces.ReadOnlySignedBeaconBlock) (*LightClientUpdate, error) {
result, err := blockchain.NewLightClientFinalityUpdateFromBeaconState(ctx, state, block, attestedState, finalizedBlock)
if err != nil {
return nil, err
}

return newLightClientUpdateToJSON(result), nil
}

func newLightClientOptimisticUpdateFromBeaconState(
ctx context.Context,
state state.BeaconState,
block interfaces.ReadOnlySignedBeaconBlock,
attestedState state.BeaconState) (*LightClientUpdate, error) {
result, err := blockchain.NewLightClientOptimisticUpdateFromBeaconState(ctx, state, block, attestedState)
if err != nil {
return nil, err
}

return newLightClientUpdateToJSON(result), nil
}

func NewLightClientBootstrapFromJSON(bootstrapJSON *LightClientBootstrap) (*v2.LightClientBootstrap, error) {
bootstrap := &v2.LightClientBootstrap{}

var err error

v1Alpha1Header, err := bootstrapJSON.Header.ToConsensus()
if err != nil {
return nil, err
}
bootstrap.Header = migration.V1Alpha1HeaderToV1(v1Alpha1Header)

currentSyncCommittee, err := bootstrapJSON.CurrentSyncCommittee.ToConsensus()
if err != nil {
return nil, err
}
bootstrap.CurrentSyncCommittee = migration.V1Alpha1SyncCommitteeToV2(currentSyncCommittee)

if bootstrap.CurrentSyncCommitteeBranch, err = branchFromJSON(bootstrapJSON.CurrentSyncCommitteeBranch); err != nil {
return nil, err
}
return bootstrap, nil
}

func branchFromJSON(branch []string) ([][]byte, error) {
var branchBytes [][]byte
for _, root := range branch {
branch, err := hexutil.Decode(root)
if err != nil {
return nil, err
}
branchBytes = append(branchBytes, branch)
}
return branchBytes, nil
}

func branchToJSON(branchBytes [][]byte) []string {
if branchBytes == nil {
return nil
}
branch := make([]string, len(branchBytes))
for i, root := range branchBytes {
branch[i] = hexutil.Encode(root)
}
return branch
}
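
The two helpers above carry a Merkle branch between raw bytes and 0x-prefixed hex strings. A minimal round-trip sketch using the same go-ethereum hexutil package (zero roots used purely as placeholders):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
	root := make([]byte, 32)            // one zero root, as an example
	encoded := hexutil.Encode(root)     // "0x0000...0000"
	decoded, err := hexutil.Decode(encoded)
	fmt.Println(len(decoded), err == nil, encoded[:6]) // 32 true 0x0000
}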

func syncAggregateToJSON(input *v1.SyncAggregate) *shared.SyncAggregate {
if input == nil {
return nil
}
return &shared.SyncAggregate{
SyncCommitteeBits: hexutil.Encode(input.SyncCommitteeBits),
SyncCommitteeSignature: hexutil.Encode(input.SyncCommitteeSignature),
}
}

func newLightClientUpdateToJSON(input *v2.LightClientUpdate) *LightClientUpdate {
if input == nil {
return nil
}

var nextSyncCommittee *shared.SyncCommittee
if input.NextSyncCommittee != nil {
nextSyncCommittee = shared.SyncCommitteeFromConsensus(migration.V2SyncCommitteeToV1Alpha1(input.NextSyncCommittee))
}

return &LightClientUpdate{
AttestedHeader: shared.BeaconBlockHeaderFromConsensus(migration.V1HeaderToV1Alpha1(input.AttestedHeader)),
NextSyncCommittee: nextSyncCommittee,
NextSyncCommitteeBranch: branchToJSON(input.NextSyncCommitteeBranch),
FinalizedHeader: shared.BeaconBlockHeaderFromConsensus(migration.V1HeaderToV1Alpha1(input.FinalizedHeader)),
FinalityBranch: branchToJSON(input.FinalityBranch),
SyncAggregate: syncAggregateToJSON(input.SyncAggregate),
SignatureSlot: strconv.FormatUint(uint64(input.SignatureSlot), 10),
}
}

beacon-chain/rpc/eth/light-client/server.go (new file, 12 lines)
@@ -0,0 +1,12 @@
package lightclient

import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/lookup"
)

type Server struct {
Blocker lookup.Blocker
Stater lookup.Stater
HeadFetcher blockchain.HeadFetcher
}

beacon-chain/rpc/eth/light-client/structs.go (new file, 35 lines)
@@ -0,0 +1,35 @@
package lightclient

import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
)

type LightClientBootstrapResponse struct {
Version string `json:"version"`
Data *LightClientBootstrap `json:"data"`
}

type LightClientBootstrap struct {
Header *shared.BeaconBlockHeader `json:"header"`
CurrentSyncCommittee *shared.SyncCommittee `json:"current_sync_committee"`
CurrentSyncCommitteeBranch []string `json:"current_sync_committee_branch"`
}

type LightClientUpdate struct {
AttestedHeader *shared.BeaconBlockHeader `json:"attested_header"`
NextSyncCommittee *shared.SyncCommittee `json:"next_sync_committee"`
FinalizedHeader *shared.BeaconBlockHeader `json:"finalized_header"`
SyncAggregate *shared.SyncAggregate `json:"sync_aggregate"`
NextSyncCommitteeBranch []string `json:"next_sync_committee_branch"`
FinalityBranch []string `json:"finality_branch"`
SignatureSlot string `json:"signature_slot"`
}

type LightClientUpdateWithVersion struct {
Version string `json:"version"`
Data *LightClientUpdate `json:"data"`
}

type LightClientUpdatesByRangeResponse struct {
Updates []*LightClientUpdateWithVersion `json:"updates"`
}
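
For reference, a hedged sketch of the wire format these structs produce. It uses a local mirror with the same json tags; the version string and the nil data value are placeholders, not real output:

package main

import (
	"encoding/json"
	"fmt"
)

type updateWithVersion struct {
	Version string `json:"version"`
	Data    any    `json:"data"`
}

type updatesByRangeResponse struct {
	Updates []*updateWithVersion `json:"updates"`
}

func main() {
	resp := updatesByRangeResponse{Updates: []*updateWithVersion{{Version: "capella"}}}
	out, _ := json.Marshal(resp)
	fmt.Println(string(out)) // {"updates":[{"version":"capella","data":null}]}
}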

@@ -7,6 +7,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"

fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/consensus-types/validator"

@@ -238,6 +239,11 @@ type SyncCommitteeMessage struct {
Signature string `json:"signature"`
}

type SyncCommittee struct {
Pubkeys []string `json:"pubkeys"`
AggregatePubkey string `json:"aggregate_pubkey"`
}

func (s *SignedValidatorRegistration) ToConsensus() (*eth.SignedValidatorRegistrationV1, error) {
msg, err := s.Message.ToConsensus()
if err != nil {

@@ -624,6 +630,37 @@ func (m *SyncCommitteeMessage) ToConsensus() (*eth.SyncCommitteeMessage, error)
}, nil
}

func SyncCommitteeFromConsensus(sc *eth.SyncCommittee) *SyncCommittee {
var sPubKeys []string
for _, p := range sc.Pubkeys {
sPubKeys = append(sPubKeys, hexutil.Encode(p))
}

return &SyncCommittee{
Pubkeys: sPubKeys,
AggregatePubkey: hexutil.Encode(sc.AggregatePubkey),
}
}

func (sc *SyncCommittee) ToConsensus() (*eth.SyncCommittee, error) {
var pubKeys [][]byte
for _, p := range sc.Pubkeys {
pubKey, err := hexutil.Decode(p)
if err != nil {
return nil, NewDecodeError(err, "Pubkeys")
}
pubKeys = append(pubKeys, pubKey)
}
aggPubKey, err := hexutil.Decode(sc.AggregatePubkey)
if err != nil {
return nil, NewDecodeError(err, "AggregatePubkey")
}
return &eth.SyncCommittee{
Pubkeys: pubKeys,
AggregatePubkey: aggPubKey,
}, nil
}

// SyncDetails contains information about node sync status.
type SyncDetails struct {
HeadSlot string `json:"head_slot"`

@@ -8,6 +8,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"

fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
bytesutil2 "github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"

@@ -182,6 +182,7 @@ go_test(
"assignments_test.go",
"attester_test.go",
"blocks_test.go",
"construct_generic_block_test.go",
"exit_test.go",
"proposer_altair_test.go",
"proposer_attestations_test.go",

@@ -77,7 +77,7 @@ func (vs *Server) ProposeAttestation(ctx context.Context, att *ethpb.Attestation
subnet := helpers.ComputeSubnetFromCommitteeAndSlot(uint64(len(vals)), att.Data.CommitteeIndex, att.Data.Slot)

// Broadcast the new attestation to the network.
if err := vs.P2P.BroadcastAttestation(ctx, subnet, att); err != nil {
if err := vs.P2P.BroadcastAttestation(ctx, subnet, att, true); err != nil {
return nil, status.Errorf(codes.Internal, "Could not broadcast attestation: %v", err)
}

@@ -5,8 +5,8 @@ import (

"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
"github.com/stretchr/testify/require"
)

func TestConstructGenericBeaconBlock(t *testing.T) {

@@ -15,7 +15,7 @@ func TestConstructGenericBeaconBlock(t *testing.T) {
// Test when sBlk or sBlk.Block() is nil
t.Run("NilBlock", func(t *testing.T) {
_, err := vs.constructGenericBeaconBlock(nil, nil, nil)
require.ErrorContains(t, err, "block cannot be nil")
require.ErrorContains(t, "block cannot be nil", err)
})

// Test for Deneb version

@@ -26,12 +26,12 @@ func TestConstructGenericBeaconBlock(t *testing.T) {
r1, err := b.Block().HashTreeRoot()
require.NoError(t, err)
scs := []*ethpb.DeprecatedBlobSidecar{
util.GenerateTestDenebBlobSidecar(r1, eb, 0, []byte{}),
util.GenerateTestDenebBlobSidecar(r1, eb, 1, []byte{}),
util.GenerateTestDenebBlobSidecar(r1, eb, 2, []byte{}),
util.GenerateTestDenebBlobSidecar(r1, eb, 3, []byte{}),
util.GenerateTestDenebBlobSidecar(r1, eb, 4, []byte{}),
util.GenerateTestDenebBlobSidecar(r1, eb, 5, []byte{}),
util.GenerateTestDeprecatedBlobSidecar(r1, eb, 0, []byte{}),
util.GenerateTestDeprecatedBlobSidecar(r1, eb, 1, []byte{}),
util.GenerateTestDeprecatedBlobSidecar(r1, eb, 2, []byte{}),
util.GenerateTestDeprecatedBlobSidecar(r1, eb, 3, []byte{}),
util.GenerateTestDeprecatedBlobSidecar(r1, eb, 4, []byte{}),
util.GenerateTestDeprecatedBlobSidecar(r1, eb, 5, []byte{}),
}
result, err := vs.constructGenericBeaconBlock(b, nil, scs)
require.NoError(t, err)

@@ -47,9 +47,6 @@ const (
// blindBlobsBundle holds the KZG commitments and other relevant sidecar data for a builder's beacon block.
var blindBlobsBundle *enginev1.BlindedBlobsBundle

// fullBlobsBundle holds the KZG commitments and other relevant sidecar data for a local beacon block.
var fullBlobsBundle *enginev1.BlobsBundle

// GetBeaconBlock is called by a proposer during its assigned slot to request a block to sign
// by passing in the slot and the signed randao reveal of the slot.
func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (*ethpb.GenericBeaconBlock, error) {

@@ -122,8 +119,7 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
}
sBlk.SetStateRoot(sr)

fullBlobs, err := blobsBundleToSidecars(fullBlobsBundle, sBlk.Block())
fullBlobsBundle = nil // Reset full blobs bundle after use.
fullBlobs, err := blobsBundleToSidecars(bundleCache.get(req.Slot), sBlk.Block())
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not convert blobs bundle to sidecar: %v", err)
}

@@ -261,9 +257,7 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
"blockRoot": hex.EncodeToString(sc.Message.BlockRoot),
"index": sc.Message.Index,
}).Debug("Broadcasting blob sidecar")
if err := vs.P2P.BroadcastBlob(ctx, sc.Message.Index, sc); err != nil {
log.WithError(err).Errorf("Could not broadcast blob sidecar index %d / %d", i, len(scs))
}
// TODO: Broadcast sidecar will be fixed in #13189
sidecars[i] = sc.Message
}
if len(scs) > 0 {

@@ -285,6 +285,7 @@ func matchingWithdrawalsRoot(local, builder interfaces.ExecutionData) (bool, err
// It delegates to setExecution for the actual work.
func setLocalExecution(blk interfaces.SignedBeaconBlock, execution interfaces.ExecutionData) error {
var kzgCommitments [][]byte
fullBlobsBundle := bundleCache.get(blk.Block().Slot())
if fullBlobsBundle != nil {
kzgCommitments = fullBlobsBundle.KzgCommitments
}

@@ -270,6 +270,7 @@ func TestServer_setExecutionData(t *testing.T) {
params.SetupTestConfigCleanup(t)

blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockDeneb())
blk.SetSlot(1)
require.NoError(t, err)
vs.BlockBuilder = &builderTest.MockBuilderService{
HasConfigured: false,

@@ -288,7 +289,8 @@ func TestServer_setExecutionData(t *testing.T) {
localPayload, _, err := vs.getLocalPayload(ctx, blk.Block(), capellaTransitionState)
require.NoError(t, err)
require.Equal(t, uint64(4), localPayload.BlockNumber())
require.DeepEqual(t, fullBlobsBundle, blobsBundle)
cachedBundle := bundleCache.get(blk.Block().Slot())
require.DeepEqual(t, cachedBundle, blobsBundle)
})
t.Run("Can get builder payload and blobs in Deneb", func(t *testing.T) {
cfg := params.BeaconConfig().Copy()

@@ -1,12 +1,59 @@
package validator

import (
"sync"

"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
)

var bundleCache = &blobsBundleCache{}

// BlobsBundleCache holds the KZG commitments and other relevant sidecar data for a local beacon block.
type blobsBundleCache struct {
sync.Mutex
slot primitives.Slot
bundle *enginev1.BlobsBundle
}

// add adds a blobs bundle to the cache.
// same slot overwrites the previous bundle.
func (c *blobsBundleCache) add(slot primitives.Slot, bundle *enginev1.BlobsBundle) {
c.Lock()
defer c.Unlock()

if slot >= c.slot {
c.bundle = bundle
c.slot = slot
}
}

// get gets a blobs bundle from the cache.
func (c *blobsBundleCache) get(slot primitives.Slot) *enginev1.BlobsBundle {
c.Lock()
defer c.Unlock()

if c.slot == slot {
return c.bundle
}

return nil
}

// prune acquires the lock before pruning.
func (c *blobsBundleCache) prune(minSlot primitives.Slot) {
c.Lock()
defer c.Unlock()

if minSlot > c.slot {
c.slot = 0
c.bundle = nil
}
}
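
The cache above holds at most one bundle: add keeps only the newest slot, get hits only on an exact slot match, and prune clears anything strictly older. A simplified, self-contained sketch of those semantics (plain types for illustration, not the production cache):

package main

import (
	"fmt"
	"sync"
)

type slotCache struct {
	sync.Mutex
	slot  uint64
	value string
}

func (c *slotCache) add(slot uint64, v string) {
	c.Lock()
	defer c.Unlock()
	if slot >= c.slot { // same or newer slot wins
		c.slot, c.value = slot, v
	}
}

func (c *slotCache) get(slot uint64) (string, bool) {
	c.Lock()
	defer c.Unlock()
	if c.slot == slot {
		return c.value, true
	}
	return "", false
}

func main() {
	c := &slotCache{}
	c.add(10, "bundle-a")
	c.add(9, "bundle-b") // ignored: older slot
	v, ok := c.get(10)
	fmt.Println(v, ok) // bundle-a true
	_, ok = c.get(9)
	fmt.Println(ok) // false: only the exact cached slot hits
}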

// converts a blobs bundle to a sidecar format.
func blobsBundleToSidecars(bundle *enginev1.BlobsBundle, blk interfaces.ReadOnlyBeaconBlock) ([]*ethpb.DeprecatedBlobSidecar, error) {
if blk.Version() < version.Deneb {

@@ -4,6 +4,7 @@ import (
"testing"

"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
"github.com/prysmaticlabs/prysm/v4/testing/require"

@@ -73,3 +74,34 @@ func Test_blindBlobsBundleToSidecars(t *testing.T) {
require.DeepEqual(t, sidecars[i].KzgCommitment, kcs[i])
}
}

func TestAdd(t *testing.T) {
slot := primitives.Slot(1)
bundle := &enginev1.BlobsBundle{KzgCommitments: [][]byte{{'a'}}}
bundleCache.add(slot, bundle)
require.Equal(t, bundleCache.bundle, bundle)

slot = primitives.Slot(2)
bundle = &enginev1.BlobsBundle{KzgCommitments: [][]byte{{'b'}}}
bundleCache.add(slot, bundle)
require.Equal(t, bundleCache.bundle, bundle)
}

func TestGet(t *testing.T) {
slot := primitives.Slot(3)
bundle := &enginev1.BlobsBundle{KzgCommitments: [][]byte{{'a'}}}
bundleCache.add(slot, bundle)
require.Equal(t, bundleCache.get(slot), bundle)
}

func TestPrune(t *testing.T) {
slot1 := primitives.Slot(4)
bundle1 := &enginev1.BlobsBundle{KzgCommitments: [][]byte{{'a'}}}

bundleCache.add(slot1, bundle1)
bundleCache.prune(slot1 + 1)

if bundleCache.get(slot1) != nil {
t.Errorf("Prune did not remove the bundle at slot1")
}
}

@@ -79,11 +79,10 @@ func (vs *Server) getLocalPayload(ctx context.Context, blk interfaces.ReadOnlyBe
var pid [8]byte
copy(pid[:], payloadId[:])
payloadIDCacheHit.Inc()
var payload interfaces.ExecutionData
var overrideBuilder bool
payload, fullBlobsBundle, overrideBuilder, err = vs.ExecutionEngineCaller.GetPayload(ctx, pid, slot)
payload, bundle, overrideBuilder, err := vs.ExecutionEngineCaller.GetPayload(ctx, pid, slot)
switch {
case err == nil:
bundleCache.add(slot, bundle)
warnIfFeeRecipientDiffers(payload, feeRecipient)
return payload, overrideBuilder, nil
case errors.Is(err, context.DeadlineExceeded):

@@ -178,12 +177,11 @@ func (vs *Server) getLocalPayload(ctx context.Context, blk interfaces.ReadOnlyBe
if payloadID == nil {
return nil, false, fmt.Errorf("nil payload with block hash: %#x", parentHash)
}
var payload interfaces.ExecutionData
var overrideBuilder bool
payload, fullBlobsBundle, overrideBuilder, err = vs.ExecutionEngineCaller.GetPayload(ctx, *payloadID, slot)
payload, bundle, overrideBuilder, err := vs.ExecutionEngineCaller.GetPayload(ctx, *payloadID, slot)
if err != nil {
return nil, false, err
}
bundleCache.add(slot, bundle)
warnIfFeeRecipientDiffers(payload, feeRecipient)
return payload, overrideBuilder, nil
}

@@ -32,6 +32,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/network/forks"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/emptypb"

@@ -200,3 +201,25 @@ func (vs *Server) WaitForChainStart(_ *emptypb.Empty, stream ethpb.BeaconNodeVal
}
return stream.Send(res)
}

// PruneBlobsBundleCacheRoutine prunes the blobs bundle cache at 6s mark of the slot.
func (vs *Server) PruneBlobsBundleCacheRoutine() {
go func() {
clock, err := vs.ClockWaiter.WaitForClock(vs.Ctx)
if err != nil {
log.WithError(err).Error("PruneBlobsBundleCacheRoutine failed to receive genesis data")
return
}

pruneInterval := time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot/2)
ticker := slots.NewSlotTickerWithIntervals(clock.GenesisTime(), []time.Duration{pruneInterval})
for {
select {
case <-vs.Ctx.Done():
return
case slotInterval := <-ticker.C():
bundleCache.prune(slotInterval.Slot)
}
}
}()
}

@@ -15,6 +15,13 @@ import (
grpcopentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing"
grpcprometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"go.opencensus.io/plugin/ocgrpc"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/reflection"

"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/builder"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"

@@ -37,6 +44,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/config"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/debug"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/events"
lightclient "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/light-client"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/node"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/rewards"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/validator"

@@ -57,12 +65,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
ethpbservice "github.com/prysmaticlabs/prysm/v4/proto/eth/service"
ethpbv1alpha1 "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/sirupsen/logrus"
"go.opencensus.io/plugin/ocgrpc"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/reflection"
)

const attestationBufferSize = 100

@@ -469,14 +471,24 @@ func (s *Service) Start() {
s.cfg.Router.HandleFunc("/eth/v1/beacon/headers/{block_id}", beaconChainServerV1.GetBlockHeader).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/beacon/genesis", beaconChainServerV1.GetGenesis).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/beacon/states/{state_id}/finality_checkpoints", beaconChainServerV1.GetFinalityCheckpoints).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/beacon/states/{state_id}/validators", beaconChainServerV1.GetValidators).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/beacon/states/{state_id}/validators", beaconChainServerV1.GetValidators).Methods(http.MethodGet, http.MethodPost)
s.cfg.Router.HandleFunc("/eth/v1/beacon/states/{state_id}/validators/{validator_id}", beaconChainServerV1.GetValidator).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/beacon/states/{state_id}/validator_balances", beaconChainServerV1.GetValidatorBalances).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/beacon/states/{state_id}/validator_balances", beaconChainServerV1.GetValidatorBalances).Methods(http.MethodGet, http.MethodPost)

s.cfg.Router.HandleFunc("/eth/v1/config/deposit_contract", config.GetDepositContract).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/config/fork_schedule", config.GetForkSchedule).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/config/spec", config.GetSpec).Methods(http.MethodGet)

lightClientServer := &lightclient.Server{
Blocker: blocker,
Stater: stater,
HeadFetcher: s.cfg.HeadFetcher,
}
s.cfg.Router.HandleFunc("/eth/v1/beacon/light_client/bootstrap/{block_root}", lightClientServer.GetLightClientBootstrap).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/beacon/light_client/updates", lightClientServer.GetLightClientUpdatesByRange).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/beacon/light_client/finality_update", lightClientServer.GetLightClientFinalityUpdate).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/beacon/light_client/optimistic_update", lightClientServer.GetLightClientOptimisticUpdate).Methods(http.MethodGet)

ethpbv1alpha1.RegisterNodeServer(s.grpcServer, nodeServer)
ethpbv1alpha1.RegisterHealthServer(s.grpcServer, nodeServer)
ethpbv1alpha1.RegisterBeaconChainServer(s.grpcServer, beaconChainServer)

@@ -519,6 +531,8 @@ func (s *Service) Start() {
// Register reflection service on gRPC server.
reflection.Register(s.grpcServer)

validatorServer.PruneBlobsBundleCacheRoutine()

go func() {
if s.listener != nil {
if err := s.grpcServer.Serve(s.listener); err != nil {

@@ -10,6 +10,7 @@ import (
"github.com/gorilla/mux"
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
mockExecution "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"

@@ -37,6 +38,7 @@ func TestLifecycle_OK(t *testing.T) {
ExecutionChainService: &mockExecution.Chain{},
StateNotifier: chainService.StateNotifier(),
Router: mux.NewRouter(),
ClockWaiter: startup.NewClockSynchronizer(),
})

rpcService.Start()

@@ -78,6 +80,7 @@ func TestRPC_InsecureEndpoint(t *testing.T) {
ExecutionChainService: &mockExecution.Chain{},
StateNotifier: chainService.StateNotifier(),
Router: mux.NewRouter(),
ClockWaiter: startup.NewClockSynchronizer(),
})

rpcService.Start()

@@ -1,42 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
name = "go_default_library",
srcs = [
"field_trie.go",
"field_trie_helpers.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/fieldtrie",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/state/state-native/custom-types:go_default_library",
"//beacon-chain/state/state-native/types:go_default_library",
"//beacon-chain/state/stateutil:go_default_library",
"//math:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

go_test(
name = "go_default_test",
srcs = [
"field_trie_test.go",
"helpers_test.go",
],
data = glob(["testdata/**"]),
embed = [":go_default_library"],
deps = [
"//beacon-chain/state/state-native/custom-types:go_default_library",
"//beacon-chain/state/state-native/types:go_default_library",
"//beacon-chain/state/stateutil:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
],
)

@@ -36,6 +36,8 @@ go_library(
"spec_parameters.go",
"ssz.go",
"state_trie.go",
"field_trie.go",
"field_trie_helpers.go",
"types.go",
] + select({
"//config:mainnet": ["beacon_state_mainnet.go"],

@@ -46,7 +48,6 @@ go_library(
deps = [
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/fieldtrie:go_default_library",
"//beacon-chain/state/state-native/custom-types:go_default_library",
"//beacon-chain/state/state-native/types:go_default_library",
"//beacon-chain/state/stateutil:go_default_library",

@@ -80,6 +81,7 @@ go_library(
go_test(
name = "go_default_test",
srcs = [
"field_trie_test.go",
"getters_attestation_test.go",
"getters_block_test.go",
"getters_checkpoint_test.go",

@@ -88,6 +90,7 @@ go_test(
"getters_validator_test.go",
"getters_withdrawal_test.go",
"hasher_test.go",
"helpers_test.go",
"mvslice_fuzz_test.go",
"proofs_test.go",
"readonly_validator_test.go",

@@ -108,6 +111,7 @@ go_test(
deps = [
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native/custom-types:go_default_library",
"//beacon-chain/state/state-native/types:go_default_library",
"//beacon-chain/state/stateutil:go_default_library",
"//beacon-chain/state/testing:go_default_library",

@@ -7,7 +7,6 @@ import (
"sync"

"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/fieldtrie"
customtypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native/custom-types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stateutil"

@@ -64,7 +63,7 @@ type BeaconState struct {
lock sync.RWMutex
dirtyFields map[types.FieldIndex]bool
dirtyIndices map[types.FieldIndex][]uint64
stateFieldLeaves map[types.FieldIndex]*fieldtrie.FieldTrie
stateFieldLeaves map[types.FieldIndex]*FieldTrie
rebuildTrie map[types.FieldIndex]bool
valMapHandler *stateutil.ValidatorMapHandler
merkleLayers [][][]byte

@@ -7,7 +7,6 @@ import (
"sync"

"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/fieldtrie"
customtypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native/custom-types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stateutil"

@@ -64,7 +63,7 @@ type BeaconState struct {
lock sync.RWMutex
dirtyFields map[types.FieldIndex]bool
dirtyIndices map[types.FieldIndex][]uint64
stateFieldLeaves map[types.FieldIndex]*fieldtrie.FieldTrie
stateFieldLeaves map[types.FieldIndex]*FieldTrie
rebuildTrie map[types.FieldIndex]bool
valMapHandler *stateutil.ValidatorMapHandler
merkleLayers [][][]byte

@@ -1,4 +1,4 @@
package fieldtrie
package state_native

import (
"reflect"

@@ -7,6 +7,7 @@ import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stateutil"
multi_value_slice "github.com/prysmaticlabs/prysm/v4/container/multi-value-slice"
pmath "github.com/prysmaticlabs/prysm/v4/math"
)

@@ -51,12 +52,22 @@ func NewFieldTrie(field types.FieldIndex, fieldInfo types.DataType, elements int
if err := validateElements(field, fieldInfo, elements, length); err != nil {
return nil, err
}
type temp[O multi_value_slice.Identifiable] interface {
Len(obj O) int
State() *BeaconState
}
switch fieldInfo {
case types.BasicArray:
fl, err := stateutil.ReturnTrieLayer(fieldRoots, length)
if err != nil {
return nil, err
}
numOfElems := 0
if val, ok := elements.(temp[*BeaconState]); ok {
numOfElems = val.Len(val.State())
} else {
numOfElems = reflect.Indirect(reflect.ValueOf(elements)).Len()
}
return &FieldTrie{
fieldLayers: fl,
field: field,

@@ -64,9 +75,15 @@ func NewFieldTrie(field types.FieldIndex, fieldInfo types.DataType, elements int
reference: stateutil.NewRef(1),
RWMutex: new(sync.RWMutex),
length: length,
numOfElems: reflect.Indirect(reflect.ValueOf(elements)).Len(),
numOfElems: numOfElems,
}, nil
case types.CompositeArray, types.CompressedArray:
numOfElems := 0
if val, ok := elements.(temp[*BeaconState]); ok {
numOfElems = val.Len(val.State())
} else {
numOfElems = reflect.Indirect(reflect.ValueOf(elements)).Len()
}
return &FieldTrie{
fieldLayers: stateutil.ReturnTrieLayerVariable(fieldRoots, length),
field: field,

@@ -74,7 +91,7 @@ func NewFieldTrie(field types.FieldIndex, fieldInfo types.DataType, elements int
reference: stateutil.NewRef(1),
RWMutex: new(sync.RWMutex),
length: length,
numOfElems: reflect.Indirect(reflect.ValueOf(elements)).Len(),
numOfElems: numOfElems,
}, nil
default:
return nil, errors.Errorf("unrecognized data type in field map: %v", reflect.TypeOf(fieldInfo).Name())

@@ -100,20 +117,32 @@ func (f *FieldTrie) RecomputeTrie(indices []uint64, elements interface{}) ([32]b
if err := f.validateIndices(indices); err != nil {
return [32]byte{}, err
}
type temp[O multi_value_slice.Identifiable] interface {
Len(obj O) int
State() *BeaconState
}
switch f.dataType {
case types.BasicArray:
fieldRoot, f.fieldLayers, err = stateutil.RecomputeFromLayer(fieldRoots, indices, f.fieldLayers)
if err != nil {
return [32]byte{}, err
}
f.numOfElems = reflect.Indirect(reflect.ValueOf(elements)).Len()
if val, ok := elements.(temp[*BeaconState]); ok {
f.numOfElems = val.Len(val.State())
} else {
f.numOfElems = reflect.Indirect(reflect.ValueOf(elements)).Len()
}
return fieldRoot, nil
case types.CompositeArray:
fieldRoot, f.fieldLayers, err = stateutil.RecomputeFromLayerVariable(fieldRoots, indices, f.fieldLayers)
if err != nil {
return [32]byte{}, err
}
f.numOfElems = reflect.Indirect(reflect.ValueOf(elements)).Len()
if val, ok := elements.(temp[*BeaconState]); ok {
f.numOfElems = val.Len(val.State())
} else {
f.numOfElems = reflect.Indirect(reflect.ValueOf(elements)).Len()
}
return stateutil.AddInMixin(fieldRoot, uint64(len(f.fieldLayers[0])))
case types.CompressedArray:
numOfElems, err := f.field.ElemsInChunk()

@@ -142,7 +171,11 @@ func (f *FieldTrie) RecomputeTrie(indices []uint64, elements interface{}) ([32]b
if err != nil {
return [32]byte{}, err
}
f.numOfElems = reflect.Indirect(reflect.ValueOf(elements)).Len()
if val, ok := elements.(temp[*BeaconState]); ok {
f.numOfElems = val.Len(val.State())
} else {
f.numOfElems = reflect.Indirect(reflect.ValueOf(elements)).Len()
}
return stateutil.AddInMixin(fieldRoot, uint64(f.numOfElems))
default:
return [32]byte{}, errors.Errorf("unrecognized data type in field map: %v", reflect.TypeOf(f.dataType).Name())

@@ -1,4 +1,4 @@
package fieldtrie
package state_native

import (
"encoding/binary"

@@ -9,6 +9,7 @@ import (
customtypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native/custom-types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native/types"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stateutil"
multi_value_slice "github.com/prysmaticlabs/prysm/v4/container/multi-value-slice"
pmath "github.com/prysmaticlabs/prysm/v4/math"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

@@ -53,6 +54,17 @@ func validateElements(field types.FieldIndex, fieldInfo types.DataType, elements
}
length *= comLength
}
type temp[O multi_value_slice.Identifiable] interface {
Len(obj O) int
State() *BeaconState
}
if val, ok := elements.(temp[*BeaconState]); ok {
totalLen := val.Len(val.State())
if uint64(totalLen) > length {
return errors.Errorf("elements length is larger than expected for field %s: %d > %d", field.String(), totalLen, length)
}
return nil
}
val := reflect.Indirect(reflect.ValueOf(elements))
if uint64(val.Len()) > length {
return errors.Errorf("elements length is larger than expected for field %s: %d > %d", field.String(), val.Len(), length)

@@ -63,12 +75,8 @@ func validateElements(field types.FieldIndex, fieldInfo types.DataType, elements
// fieldConverters converts the corresponding field and the provided elements to the appropriate roots.
func fieldConverters(field types.FieldIndex, indices []uint64, elements interface{}, convertAll bool) ([][32]byte, error) {
switch field {
case types.BlockRoots:
return convert32ByteArrays[customtypes.BlockRoots](indices, elements, convertAll)
case types.StateRoots:
return convert32ByteArrays[customtypes.StateRoots](indices, elements, convertAll)
case types.RandaoMixes:
return convert32ByteArrays[customtypes.RandaoMixes](indices, elements, convertAll)
case types.BlockRoots, types.StateRoots, types.RandaoMixes:
return convertRoots(indices, elements, convertAll)
case types.Eth1DataVotes:
return convertEth1DataVotes(indices, elements, convertAll)
case types.Validators:

@@ -82,6 +90,21 @@ func fieldConverters(field types.FieldIndex, indices []uint64, elements interfac
}
}

func convertRoots(indices []uint64, elements interface{}, convertAll bool) ([][32]byte, error) {
switch castedType := elements.(type) {
case customtypes.BlockRoots:
return handle32ByteArrays(castedType, indices, convertAll)
case customtypes.StateRoots:
return handle32ByteArrays(castedType, indices, convertAll)
case customtypes.RandaoMixes:
return handle32ByteArrays(castedType, indices, convertAll)
case MultiValueSliceComposite[[32]byte, *BeaconState]:
return handle32ByteMVslice(castedType, indices, convertAll)
default:
return nil, errors.Errorf("non-existent type provided %T", castedType)
|
||||
}
|
||||
}
|
||||
|
||||
func convert32ByteArrays[T ~[][32]byte](indices []uint64, elements interface{}, convertAll bool) ([][32]byte, error) {
	val, ok := elements.(T)
	if !ok {
@@ -116,11 +139,14 @@ func convertAttestations(indices []uint64, elements interface{}, convertAll bool
	}

func convertBalances(indices []uint64, elements interface{}, convertAll bool) ([][32]byte, error) {
	val, ok := elements.([]uint64)
	if !ok {
	switch casted := elements.(type) {
	case []uint64:
		return handleBalanceSlice(casted, indices, convertAll)
	case MultiValueSliceComposite[uint64, *BeaconState]:
		return handleBalanceMVSlice(casted, indices, convertAll)
	default:
		return nil, errors.Errorf("Wanted type of %T but got %T", []uint64{}, elements)
	}
	return handleBalanceSlice(val, indices, convertAll)
}

// handle32ByteArrays computes and returns 32 byte arrays in a slice of root format.
@@ -150,6 +176,39 @@ func handle32ByteArrays(val [][32]byte, indices []uint64, convertAll bool) ([][3
	return roots, nil
}

func handle32ByteMVslice(mv MultiValueSliceComposite[[32]byte, *BeaconState],
	indices []uint64, convertAll bool) ([][32]byte, error) {
	length := len(indices)
	if convertAll {
		length = mv.Len(mv.State())
	}
	roots := make([][32]byte, 0, length)
	rootCreator := func(input [32]byte) {
		roots = append(roots, input)
	}
	if convertAll {
		val := mv.Value(mv.State())
		for i := range val {
			rootCreator(val[i])
		}
		return roots, nil
	}
	totalLen := mv.Len(mv.State())
	if totalLen > 0 {
		for _, idx := range indices {
			if idx > uint64(totalLen)-1 {
				return nil, fmt.Errorf("index %d greater than number of byte arrays %d", idx, totalLen)
			}
			val, err := mv.At(mv.State(), idx)
			if err != nil {
				return nil, err
			}
			rootCreator(val)
		}
	}
	return roots, nil
}

// handleValidatorSlice returns the validator indices in a slice of root format.
func handleValidatorSlice(val []*ethpb.Validator, indices []uint64, convertAll bool) ([][32]byte, error) {
	length := len(indices)
@@ -294,3 +353,48 @@ func handleBalanceSlice(val, indices []uint64, convertAll bool) ([][32]byte, err
	}
	return [][32]byte{}, nil
}

func handleBalanceMVSlice(mv MultiValueSliceComposite[uint64, *BeaconState], indices []uint64, convertAll bool) ([][32]byte, error) {
	if convertAll {
		val := mv.Value(mv.State())
		return stateutil.PackUint64IntoChunks(val)
	}
	totalLen := mv.Len(mv.State())
	if totalLen > 0 {
		numOfElems, err := types.Balances.ElemsInChunk()
		if err != nil {
			return nil, err
		}
		iNumOfElems, err := pmath.Int(numOfElems)
		if err != nil {
			return nil, err
		}
		var roots [][32]byte
		for _, idx := range indices {
			// We split the indexes into their relevant groups. Balances
			// are compressed according to 4 values -> 1 chunk.
			startIdx := idx / numOfElems
			startGroup := startIdx * numOfElems
			var chunk [32]byte
			sizeOfElem := len(chunk) / iNumOfElems
			for i, j := 0, startGroup; j < startGroup+numOfElems; i, j = i+sizeOfElem, j+1 {
				wantedVal := uint64(0)
				// Chunks are added in sets of 4; when a set falls at the edge of the
				// array, the rest of the chunk has to be zeroed out. Example: 41 indexes,
				// and 41 % 4 = 1, so there are 3 indexes which do not exist yet but still
				// have to contribute to the root. Those 3 indexes are given a 'zero' value.
				if j < uint64(totalLen) {
					val, err := mv.At(mv.State(), j)
					if err != nil {
						return nil, err
					}
					wantedVal = val
				}
				binary.LittleEndian.PutUint64(chunk[i:i+sizeOfElem], wantedVal)
			}
			roots = append(roots, chunk)
		}
		return roots, nil
	}
	return [][32]byte{}, nil
}

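The chunk arithmetic in handleBalanceMVSlice is easier to follow with concrete numbers. Below is a minimal, self-contained sketch of the same packing, four little-endian balances per 32-byte chunk; packBalances is a hypothetical helper written for illustration, not the stateutil implementation. With 41 balances the last chunk holds one real value and three zeroed slots, which is exactly the edge case the comment describes.

package main

import (
	"encoding/binary"
	"fmt"
)

// packBalances packs uint64 balances little-endian, four per 32-byte chunk,
// zero-padding the tail the same way handleBalanceMVSlice does for missing indices.
func packBalances(balances []uint64) [][32]byte {
	const perChunk = 4
	var chunks [][32]byte
	for start := 0; start < len(balances); start += perChunk {
		var chunk [32]byte
		for i := 0; i < perChunk; i++ {
			v := uint64(0)
			if start+i < len(balances) {
				v = balances[start+i]
			}
			binary.LittleEndian.PutUint64(chunk[i*8:(i+1)*8], v)
		}
		chunks = append(chunks, chunk)
	}
	return chunks
}

func main() {
	balances := make([]uint64, 41)
	for i := range balances {
		balances[i] = uint64(i + 1)
	}
	chunks := packBalances(balances)
	fmt.Println(len(chunks))     // 11 chunks: 10 full, 1 holding a single balance
	fmt.Println(chunks[10][:16]) // balance 41 followed by zero padding
}

Because balances are hashed in these fixed chunks, a single dirty balance only marks its enclosing chunk for recomputation, which is why the field is registered as a compressed array in the field-trie tests further down.
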
@@ -1,9 +1,9 @@
|
||||
package fieldtrie_test
|
||||
package state_native_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/fieldtrie"
|
||||
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
|
||||
customtypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native/custom-types"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native/types"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stateutil"
|
||||
@@ -23,7 +23,7 @@ func TestFieldTrie_NewTrie(t *testing.T) {
|
||||
blockRoots[i] = [32]byte(r)
|
||||
}
|
||||
|
||||
trie, err := fieldtrie.NewFieldTrie(types.BlockRoots, types.BasicArray, customtypes.BlockRoots(blockRoots), uint64(params.BeaconConfig().SlotsPerHistoricalRoot))
|
||||
trie, err := state_native.NewFieldTrie(types.BlockRoots, types.BasicArray, customtypes.BlockRoots(blockRoots), uint64(params.BeaconConfig().SlotsPerHistoricalRoot))
|
||||
require.NoError(t, err)
|
||||
root, err := stateutil.RootsArrayHashTreeRoot(newState.BlockRoots(), uint64(params.BeaconConfig().SlotsPerHistoricalRoot))
|
||||
require.NoError(t, err)
|
||||
@@ -33,15 +33,15 @@ func TestFieldTrie_NewTrie(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestFieldTrie_NewTrie_NilElements(t *testing.T) {
|
||||
trie, err := fieldtrie.NewFieldTrie(types.BlockRoots, types.BasicArray, nil, 8234)
|
||||
trie, err := state_native.NewFieldTrie(types.BlockRoots, types.BasicArray, nil, 8234)
|
||||
require.NoError(t, err)
|
||||
_, err = trie.TrieRoot()
|
||||
require.ErrorIs(t, err, fieldtrie.ErrEmptyFieldTrie)
|
||||
require.ErrorIs(t, err, state_native.ErrEmptyFieldTrie)
|
||||
}
|
||||
|
||||
func TestFieldTrie_RecomputeTrie(t *testing.T) {
|
||||
newState, _ := util.DeterministicGenesisState(t, 32)
|
||||
trie, err := fieldtrie.NewFieldTrie(types.Validators, types.CompositeArray, newState.Validators(), params.BeaconConfig().ValidatorRegistryLimit)
|
||||
trie, err := state_native.NewFieldTrie(types.Validators, types.CompositeArray, newState.Validators(), params.BeaconConfig().ValidatorRegistryLimit)
|
||||
require.NoError(t, err)
|
||||
|
||||
oldroot, err := trie.TrieRoot()
|
||||
@@ -72,7 +72,7 @@ func TestFieldTrie_RecomputeTrie(t *testing.T) {
|
||||
|
||||
func TestFieldTrie_RecomputeTrie_CompressedArray(t *testing.T) {
|
||||
newState, _ := util.DeterministicGenesisState(t, 32)
|
||||
trie, err := fieldtrie.NewFieldTrie(types.Balances, types.CompressedArray, newState.Balances(), stateutil.ValidatorLimitForBalancesChunks())
|
||||
trie, err := state_native.NewFieldTrie(types.Balances, types.CompressedArray, newState.Balances(), stateutil.ValidatorLimitForBalancesChunks())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, trie.Length(), stateutil.ValidatorLimitForBalancesChunks())
|
||||
changedIdx := []uint64{4, 8}
|
||||
@@ -89,7 +89,7 @@ func TestFieldTrie_RecomputeTrie_CompressedArray(t *testing.T) {
|
||||
|
||||
func TestNewFieldTrie_UnknownType(t *testing.T) {
|
||||
newState, _ := util.DeterministicGenesisState(t, 32)
|
||||
_, err := fieldtrie.NewFieldTrie(types.Balances, 4, newState.Balances(), 32)
|
||||
_, err := state_native.NewFieldTrie(types.Balances, 4, newState.Balances(), 32)
|
||||
require.ErrorContains(t, "unrecognized data type", err)
|
||||
}
|
||||
|
||||
@@ -101,7 +101,7 @@ func TestFieldTrie_CopyTrieImmutable(t *testing.T) {
|
||||
randaoMixes[i] = [32]byte(r)
|
||||
}
|
||||
|
||||
trie, err := fieldtrie.NewFieldTrie(types.RandaoMixes, types.BasicArray, customtypes.RandaoMixes(randaoMixes), uint64(params.BeaconConfig().EpochsPerHistoricalVector))
|
||||
trie, err := state_native.NewFieldTrie(types.RandaoMixes, types.BasicArray, customtypes.RandaoMixes(randaoMixes), uint64(params.BeaconConfig().EpochsPerHistoricalVector))
|
||||
require.NoError(t, err)
|
||||
|
||||
newTrie := trie.CopyTrie()
|
||||
@@ -127,7 +127,7 @@ func TestFieldTrie_CopyTrieImmutable(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestFieldTrie_CopyAndTransferEmpty(t *testing.T) {
|
||||
trie, err := fieldtrie.NewFieldTrie(types.RandaoMixes, types.BasicArray, nil, uint64(params.BeaconConfig().EpochsPerHistoricalVector))
|
||||
trie, err := state_native.NewFieldTrie(types.RandaoMixes, types.BasicArray, nil, uint64(params.BeaconConfig().EpochsPerHistoricalVector))
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepEqual(t, trie, trie.CopyTrie())
|
||||
@@ -137,14 +137,14 @@ func TestFieldTrie_CopyAndTransferEmpty(t *testing.T) {
|
||||
func TestFieldTrie_TransferTrie(t *testing.T) {
|
||||
newState, _ := util.DeterministicGenesisState(t, 32)
|
||||
maxLength := (params.BeaconConfig().ValidatorRegistryLimit*8 + 31) / 32
|
||||
trie, err := fieldtrie.NewFieldTrie(types.Balances, types.CompressedArray, newState.Balances(), maxLength)
|
||||
trie, err := state_native.NewFieldTrie(types.Balances, types.CompressedArray, newState.Balances(), maxLength)
|
||||
require.NoError(t, err)
|
||||
oldRoot, err := trie.TrieRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
newTrie := trie.TransferTrie()
|
||||
root, err := trie.TrieRoot()
|
||||
require.ErrorIs(t, err, fieldtrie.ErrEmptyFieldTrie)
|
||||
require.ErrorIs(t, err, state_native.ErrEmptyFieldTrie)
|
||||
require.Equal(t, root, [32]byte{})
|
||||
require.NotNil(t, newTrie)
|
||||
newRoot, err := newTrie.TrieRoot()
|
||||
@@ -165,7 +165,7 @@ func FuzzFieldTrie(f *testing.F) {
|
||||
for i := 32; i < len(data); i += 32 {
|
||||
roots = append(roots, data[i-32:i])
|
||||
}
|
||||
trie, err := fieldtrie.NewFieldTrie(types.FieldIndex(idx), types.DataType(typ), roots, slotsPerHistRoot)
|
||||
trie, err := state_native.NewFieldTrie(types.FieldIndex(idx), types.DataType(typ), roots, slotsPerHistRoot)
|
||||
if err != nil {
|
||||
return // invalid inputs
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package fieldtrie
|
||||
package state_native
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/fieldtrie"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native/types"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/runtime/version"
|
||||
@@ -56,7 +55,7 @@ func (b *BeaconState) CurrentSyncCommitteeProof(ctx context.Context) ([][]byte,
|
||||
if err := b.recomputeDirtyFields(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return fieldtrie.ProofFromMerkleLayers(b.merkleLayers, types.CurrentSyncCommittee.RealPosition()), nil
|
||||
return ProofFromMerkleLayers(b.merkleLayers, types.CurrentSyncCommittee.RealPosition()), nil
|
||||
}
|
||||
|
||||
// NextSyncCommitteeProof from the state's Merkle trie representation.
|
||||
@@ -74,7 +73,7 @@ func (b *BeaconState) NextSyncCommitteeProof(ctx context.Context) ([][]byte, err
|
||||
if err := b.recomputeDirtyFields(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return fieldtrie.ProofFromMerkleLayers(b.merkleLayers, types.NextSyncCommittee.RealPosition()), nil
|
||||
return ProofFromMerkleLayers(b.merkleLayers, types.NextSyncCommittee.RealPosition()), nil
|
||||
}
|
||||
|
||||
// FinalizedRootProof crafts a Merkle proof for the finalized root
|
||||
@@ -102,7 +101,7 @@ func (b *BeaconState) FinalizedRootProof(ctx context.Context) ([][]byte, error)
|
||||
epochRoot := bytesutil.ToBytes32(epochBuf)
|
||||
proof := make([][]byte, 0)
|
||||
proof = append(proof, epochRoot[:])
|
||||
branch := fieldtrie.ProofFromMerkleLayers(b.merkleLayers, types.FinalizedCheckpoint.RealPosition())
|
||||
branch := ProofFromMerkleLayers(b.merkleLayers, types.FinalizedCheckpoint.RealPosition())
|
||||
proof = append(proof, branch...)
|
||||
return proof, nil
|
||||
}
|
||||
|
||||
@@ -7,13 +7,13 @@ import (
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/fieldtrie"
|
||||
customtypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native/custom-types"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native/types"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stateutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/features"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
multi_value_slice "github.com/prysmaticlabs/prysm/v4/container/multi-value-slice"
|
||||
"github.com/prysmaticlabs/prysm/v4/container/slice"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/ssz"
|
||||
@@ -166,7 +166,7 @@ func InitializeFromProtoUnsafePhase0(st *ethpb.BeaconState) (state.BeaconState,
|
||||
|
||||
dirtyFields: make(map[types.FieldIndex]bool, fieldCount),
|
||||
dirtyIndices: make(map[types.FieldIndex][]uint64, fieldCount),
|
||||
stateFieldLeaves: make(map[types.FieldIndex]*fieldtrie.FieldTrie, fieldCount),
|
||||
stateFieldLeaves: make(map[types.FieldIndex]*FieldTrie, fieldCount),
|
||||
rebuildTrie: make(map[types.FieldIndex]bool, fieldCount),
|
||||
valMapHandler: stateutil.NewValMapHandler(st.Validators),
|
||||
}
|
||||
@@ -207,7 +207,7 @@ func InitializeFromProtoUnsafePhase0(st *ethpb.BeaconState) (state.BeaconState,
|
||||
b.dirtyFields[f] = true
|
||||
b.rebuildTrie[f] = true
|
||||
b.dirtyIndices[f] = []uint64{}
|
||||
trie, err := fieldtrie.NewFieldTrie(f, types.BasicArray, nil, 0)
|
||||
trie, err := NewFieldTrie(f, types.BasicArray, nil, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -272,7 +272,7 @@ func InitializeFromProtoUnsafeAltair(st *ethpb.BeaconStateAltair) (state.BeaconS
|
||||
|
||||
dirtyFields: make(map[types.FieldIndex]bool, fieldCount),
|
||||
dirtyIndices: make(map[types.FieldIndex][]uint64, fieldCount),
|
||||
stateFieldLeaves: make(map[types.FieldIndex]*fieldtrie.FieldTrie, fieldCount),
|
||||
stateFieldLeaves: make(map[types.FieldIndex]*FieldTrie, fieldCount),
|
||||
rebuildTrie: make(map[types.FieldIndex]bool, fieldCount),
|
||||
valMapHandler: stateutil.NewValMapHandler(st.Validators),
|
||||
}
|
||||
@@ -315,7 +315,7 @@ func InitializeFromProtoUnsafeAltair(st *ethpb.BeaconStateAltair) (state.BeaconS
|
||||
b.dirtyFields[f] = true
|
||||
b.rebuildTrie[f] = true
|
||||
b.dirtyIndices[f] = []uint64{}
|
||||
trie, err := fieldtrie.NewFieldTrie(f, types.BasicArray, nil, 0)
|
||||
trie, err := NewFieldTrie(f, types.BasicArray, nil, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -382,7 +382,7 @@ func InitializeFromProtoUnsafeBellatrix(st *ethpb.BeaconStateBellatrix) (state.B
|
||||
|
||||
dirtyFields: make(map[types.FieldIndex]bool, fieldCount),
|
||||
dirtyIndices: make(map[types.FieldIndex][]uint64, fieldCount),
|
||||
stateFieldLeaves: make(map[types.FieldIndex]*fieldtrie.FieldTrie, fieldCount),
|
||||
stateFieldLeaves: make(map[types.FieldIndex]*FieldTrie, fieldCount),
|
||||
rebuildTrie: make(map[types.FieldIndex]bool, fieldCount),
|
||||
valMapHandler: stateutil.NewValMapHandler(st.Validators),
|
||||
}
|
||||
@@ -425,7 +425,7 @@ func InitializeFromProtoUnsafeBellatrix(st *ethpb.BeaconStateBellatrix) (state.B
|
||||
b.dirtyFields[f] = true
|
||||
b.rebuildTrie[f] = true
|
||||
b.dirtyIndices[f] = []uint64{}
|
||||
trie, err := fieldtrie.NewFieldTrie(f, types.BasicArray, nil, 0)
|
||||
trie, err := NewFieldTrie(f, types.BasicArray, nil, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -496,7 +496,7 @@ func InitializeFromProtoUnsafeCapella(st *ethpb.BeaconStateCapella) (state.Beaco
|
||||
|
||||
dirtyFields: make(map[types.FieldIndex]bool, fieldCount),
|
||||
dirtyIndices: make(map[types.FieldIndex][]uint64, fieldCount),
|
||||
stateFieldLeaves: make(map[types.FieldIndex]*fieldtrie.FieldTrie, fieldCount),
|
||||
stateFieldLeaves: make(map[types.FieldIndex]*FieldTrie, fieldCount),
|
||||
rebuildTrie: make(map[types.FieldIndex]bool, fieldCount),
|
||||
valMapHandler: stateutil.NewValMapHandler(st.Validators),
|
||||
}
|
||||
@@ -539,7 +539,7 @@ func InitializeFromProtoUnsafeCapella(st *ethpb.BeaconStateCapella) (state.Beaco
|
||||
b.dirtyFields[f] = true
|
||||
b.rebuildTrie[f] = true
|
||||
b.dirtyIndices[f] = []uint64{}
|
||||
trie, err := fieldtrie.NewFieldTrie(f, types.BasicArray, nil, 0)
|
||||
trie, err := NewFieldTrie(f, types.BasicArray, nil, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -609,7 +609,7 @@ func InitializeFromProtoUnsafeDeneb(st *ethpb.BeaconStateDeneb) (state.BeaconSta
|
||||
|
||||
dirtyFields: make(map[types.FieldIndex]bool, fieldCount),
|
||||
dirtyIndices: make(map[types.FieldIndex][]uint64, fieldCount),
|
||||
stateFieldLeaves: make(map[types.FieldIndex]*fieldtrie.FieldTrie, fieldCount),
|
||||
stateFieldLeaves: make(map[types.FieldIndex]*FieldTrie, fieldCount),
|
||||
rebuildTrie: make(map[types.FieldIndex]bool, fieldCount),
|
||||
valMapHandler: stateutil.NewValMapHandler(st.Validators),
|
||||
}
|
||||
@@ -652,7 +652,7 @@ func InitializeFromProtoUnsafeDeneb(st *ethpb.BeaconStateDeneb) (state.BeaconSta
|
||||
b.dirtyFields[f] = true
|
||||
b.rebuildTrie[f] = true
|
||||
b.dirtyIndices[f] = []uint64{}
|
||||
trie, err := fieldtrie.NewFieldTrie(f, types.BasicArray, nil, 0)
|
||||
trie, err := NewFieldTrie(f, types.BasicArray, nil, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -755,7 +755,7 @@ func (b *BeaconState) Copy() state.BeaconState {
|
||||
dirtyFields: make(map[types.FieldIndex]bool, fieldCount),
|
||||
dirtyIndices: make(map[types.FieldIndex][]uint64, fieldCount),
|
||||
rebuildTrie: make(map[types.FieldIndex]bool, fieldCount),
|
||||
stateFieldLeaves: make(map[types.FieldIndex]*fieldtrie.FieldTrie, fieldCount),
|
||||
stateFieldLeaves: make(map[types.FieldIndex]*FieldTrie, fieldCount),
|
||||
|
||||
// Share the reference to validator index map.
|
||||
valMapHandler: b.valMapHandler,
|
||||
@@ -1117,7 +1117,7 @@ func (b *BeaconState) recomputeFieldTrie(index types.FieldIndex, elements interf
|
||||
}
|
||||
|
||||
func (b *BeaconState) resetFieldTrie(index types.FieldIndex, elements interface{}, length uint64) error {
|
||||
fTrie, err := fieldtrie.NewFieldTrie(index, fieldMap[index], elements, length)
|
||||
fTrie, err := NewFieldTrie(index, fieldMap[index], elements, length)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1178,7 +1178,10 @@ func finalizerCleanup(b *BeaconState) {
|
||||
func (b *BeaconState) blockRootsRootSelector(field types.FieldIndex) ([32]byte, error) {
|
||||
if b.rebuildTrie[field] {
|
||||
if features.Get().EnableExperimentalState {
|
||||
err := b.resetFieldTrie(field, customtypes.BlockRoots(b.blockRootsMultiValue.Value(b)), fieldparams.BlockRootsLength)
|
||||
err := b.resetFieldTrie(field, mv32Byte[[32]byte, *BeaconState]{
|
||||
b,
|
||||
b.blockRootsMultiValue,
|
||||
}, fieldparams.BlockRootsLength)
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
@@ -1192,7 +1195,10 @@ func (b *BeaconState) blockRootsRootSelector(field types.FieldIndex) ([32]byte,
|
||||
return b.stateFieldLeaves[field].TrieRoot()
|
||||
}
|
||||
if features.Get().EnableExperimentalState {
|
||||
return b.recomputeFieldTrie(field, customtypes.BlockRoots(b.blockRootsMultiValue.Value(b)))
|
||||
return b.recomputeFieldTrie(field, mv32Byte[[32]byte, *BeaconState]{
|
||||
b,
|
||||
b.blockRootsMultiValue,
|
||||
})
|
||||
} else {
|
||||
return b.recomputeFieldTrie(field, b.blockRoots)
|
||||
}
|
||||
@@ -1201,7 +1207,10 @@ func (b *BeaconState) blockRootsRootSelector(field types.FieldIndex) ([32]byte,
|
||||
func (b *BeaconState) stateRootsRootSelector(field types.FieldIndex) ([32]byte, error) {
|
||||
if b.rebuildTrie[field] {
|
||||
if features.Get().EnableExperimentalState {
|
||||
err := b.resetFieldTrie(field, customtypes.StateRoots(b.stateRootsMultiValue.Value(b)), fieldparams.StateRootsLength)
|
||||
err := b.resetFieldTrie(field, mv32Byte[[32]byte, *BeaconState]{
|
||||
b,
|
||||
b.stateRootsMultiValue,
|
||||
}, fieldparams.StateRootsLength)
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
@@ -1215,7 +1224,10 @@ func (b *BeaconState) stateRootsRootSelector(field types.FieldIndex) ([32]byte,
|
||||
return b.stateFieldLeaves[field].TrieRoot()
|
||||
}
|
||||
if features.Get().EnableExperimentalState {
|
||||
return b.recomputeFieldTrie(field, customtypes.StateRoots(b.stateRootsMultiValue.Value(b)))
|
||||
return b.recomputeFieldTrie(field, mv32Byte[[32]byte, *BeaconState]{
|
||||
b,
|
||||
b.stateRootsMultiValue,
|
||||
})
|
||||
} else {
|
||||
return b.recomputeFieldTrie(field, b.stateRoots)
|
||||
}
|
||||
@@ -1247,7 +1259,10 @@ func (b *BeaconState) validatorsRootSelector(field types.FieldIndex) ([32]byte,
|
||||
func (b *BeaconState) balancesRootSelector(field types.FieldIndex) ([32]byte, error) {
|
||||
if b.rebuildTrie[field] {
|
||||
if features.Get().EnableExperimentalState {
|
||||
err := b.resetFieldTrie(field, b.balancesMultiValue.Value(b), stateutil.ValidatorLimitForBalancesChunks())
|
||||
err := b.resetFieldTrie(field, mv32Byte[uint64, *BeaconState]{
|
||||
b,
|
||||
b.balancesMultiValue,
|
||||
}, stateutil.ValidatorLimitForBalancesChunks())
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
@@ -1261,7 +1276,10 @@ func (b *BeaconState) balancesRootSelector(field types.FieldIndex) ([32]byte, er
|
||||
return b.stateFieldLeaves[field].TrieRoot()
|
||||
}
|
||||
if features.Get().EnableExperimentalState {
|
||||
return b.recomputeFieldTrie(field, b.balancesMultiValue.Value(b))
|
||||
return b.recomputeFieldTrie(field, mv32Byte[uint64, *BeaconState]{
|
||||
b,
|
||||
b.balancesMultiValue,
|
||||
})
|
||||
} else {
|
||||
return b.recomputeFieldTrie(field, b.balances)
|
||||
}
|
||||
@@ -1270,7 +1288,10 @@ func (b *BeaconState) balancesRootSelector(field types.FieldIndex) ([32]byte, er
|
||||
func (b *BeaconState) randaoMixesRootSelector(field types.FieldIndex) ([32]byte, error) {
|
||||
if b.rebuildTrie[field] {
|
||||
if features.Get().EnableExperimentalState {
|
||||
err := b.resetFieldTrie(field, customtypes.RandaoMixes(b.randaoMixesMultiValue.Value(b)), fieldparams.RandaoMixesLength)
|
||||
err := b.resetFieldTrie(field, mv32Byte[[32]byte, *BeaconState]{
|
||||
b,
|
||||
b.randaoMixesMultiValue,
|
||||
}, fieldparams.RandaoMixesLength)
|
||||
if err != nil {
|
||||
return [32]byte{}, err
|
||||
}
|
||||
@@ -1284,8 +1305,25 @@ func (b *BeaconState) randaoMixesRootSelector(field types.FieldIndex) ([32]byte,
		return b.stateFieldLeaves[field].TrieRoot()
	}
	if features.Get().EnableExperimentalState {
		return b.recomputeFieldTrie(field, customtypes.RandaoMixes(b.randaoMixesMultiValue.Value(b)))
		return b.recomputeFieldTrie(field, mv32Byte[[32]byte, *BeaconState]{
			b,
			b.randaoMixesMultiValue,
		})
	} else {
		return b.recomputeFieldTrie(field, b.randaoMixes)
	}
}

type MultiValueSliceComposite[V comparable, O multi_value_slice.Identifiable] interface {
	State() *BeaconState
	multi_value_slice.MultiValueSlice[V, O]
}

type mv32Byte[V comparable, O multi_value_slice.Identifiable] struct {
	*BeaconState
	multi_value_slice.MultiValueSlice[V, O]
}

func (m mv32Byte[V, O]) State() *BeaconState {
	return m.BeaconState
}

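mv32Byte is a small adapter: embedding the owning state satisfies State(), and embedding the multi-value slice promotes the methods that satisfy the slice half of MultiValueSliceComposite, so the field converters can reach both through one value. The sketch below reproduces that embedding trick with hypothetical stand-ins (owner, valueSlice, sharedRoots, adapter) rather than the real BeaconState and multi-value-slice package.

package main

import "fmt"

// owner stands in for *BeaconState.
type owner struct{ name string }

// valueSlice stands in for multi_value_slice.MultiValueSlice: values are
// resolved against a particular owner.
type valueSlice interface {
	Len(o *owner) int
	At(o *owner, i uint64) ([32]byte, error)
}

// composite is the analogue of MultiValueSliceComposite: slice access plus the owner.
type composite interface {
	State() *owner
	valueSlice
}

// sharedRoots is a trivial valueSlice implementation backed by a plain slice.
type sharedRoots [][32]byte

func (s sharedRoots) Len(_ *owner) int { return len(s) }
func (s sharedRoots) At(_ *owner, i uint64) ([32]byte, error) {
	if i >= uint64(len(s)) {
		return [32]byte{}, fmt.Errorf("index %d out of range", i)
	}
	return s[i], nil
}

// adapter mirrors mv32Byte: embedding supplies both halves of the interface.
type adapter struct {
	*owner
	valueSlice
}

func (a adapter) State() *owner { return a.owner }

func main() {
	st := &owner{name: "head"}
	var c composite = adapter{st, sharedRoots{{0x01}, {0x02}}}
	fmt.Println(c.Len(c.State())) // 2
}

Despite the name, the element type is a parameter, which is how the balances selector can reuse the same wrapper as mv32Byte[uint64, *BeaconState].
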
@@ -250,9 +250,11 @@ func newMockHistory(t *testing.T, hist []mockHistorySpec, current primitives.Slo
|
||||
b, err = blocktest.SetBlockParentRoot(b, pr)
|
||||
require.NoError(t, err)
|
||||
|
||||
// now do process_block
|
||||
s, err = transition.ProcessBlockForStateRoot(ctx, s, b)
|
||||
require.NoError(t, err)
|
||||
// now do process_block only if block slot is greater than latest header slot
|
||||
if b.Block().Slot() > s.LatestBlockHeader().Slot {
|
||||
s, err = transition.ProcessBlockForStateRoot(ctx, s, b)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
sr, err := s.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -93,6 +93,10 @@ func (rs *stateReplayer) ReplayBlocks(ctx context.Context) (state.BeaconState, e
|
||||
msg := fmt.Sprintf("error subtracting state.slot %d from replay target slot %d", s.Slot(), rs.target)
|
||||
return nil, errors.Wrap(err, msg)
|
||||
}
|
||||
if diff == 0 {
|
||||
return s, nil
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"startSlot": s.Slot(),
|
||||
"endSlot": rs.target,
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
func headerFromBlock(b interfaces.ReadOnlySignedBeaconBlock) (*ethpb.BeaconBlockHeader, error) {
|
||||
@@ -27,6 +28,17 @@ func headerFromBlock(b interfaces.ReadOnlySignedBeaconBlock) (*ethpb.BeaconBlock
|
||||
}, nil
|
||||
}
|
||||
|
||||
func TestReplayBlocks_ZeroDiff(t *testing.T) {
|
||||
logHook := logTest.NewGlobal()
|
||||
ctx := context.Background()
|
||||
specs := []mockHistorySpec{{slot: 0}}
|
||||
hist := newMockHistory(t, specs, 0)
|
||||
ch := NewCanonicalHistory(hist, hist, hist)
|
||||
_, err := ch.ReplayerForSlot(0).ReplayBlocks(ctx)
|
||||
require.NoError(t, err)
|
||||
require.LogsDoNotContain(t, logHook, "Replaying canonical blocks from most recent state")
|
||||
}
|
||||
|
||||
func TestReplayBlocks(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
var zero, one, two, three, four, five primitives.Slot = 50, 51, 150, 151, 152, 200
|
||||
|
||||
@@ -75,6 +75,7 @@ go_library(
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/core/transition/interop:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/db/filters:go_default_library",
|
||||
"//beacon-chain/execution:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
@@ -90,6 +91,7 @@ go_library(
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//beacon-chain/sync/verify:go_default_library",
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//cache/lru:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
@@ -196,6 +198,7 @@ go_test(
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/db/kv:go_default_library",
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
"//beacon-chain/execution:go_default_library",
|
||||
@@ -214,6 +217,7 @@ go_test(
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//beacon-chain/sync/initial-sync/testing:go_default_library",
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//cache/lru:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
|
||||
@@ -13,12 +13,16 @@ import (
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
|
||||
db "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
|
||||
p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/verification"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
leakybucket "github.com/prysmaticlabs/prysm/v4/container/leaky-bucket"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
@@ -48,12 +52,12 @@ type blobsTestCase struct {
|
||||
}
|
||||
|
||||
type testHandler func(s *Service) rpcHandler
|
||||
type expectedDefiner func(t *testing.T, scs []*ethpb.DeprecatedBlobSidecar, req interface{}) []*expectedBlobChunk
|
||||
type requestFromSidecars func([]*ethpb.DeprecatedBlobSidecar) interface{}
|
||||
type expectedDefiner func(t *testing.T, scs []blocks.ROBlob, req interface{}) []*expectedBlobChunk
|
||||
type requestFromSidecars func([]blocks.ROBlob) interface{}
|
||||
type oldestSlotCallback func(t *testing.T) types.Slot
|
||||
type expectedRequirer func(*testing.T, *Service, []*expectedBlobChunk) func(network.Stream)
|
||||
|
||||
func generateTestBlockWithSidecars(t *testing.T, parent [32]byte, slot types.Slot, nblobs int) (*ethpb.SignedBeaconBlockDeneb, []*ethpb.DeprecatedBlobSidecar) {
|
||||
func generateTestBlockWithSidecars(t *testing.T, parent [32]byte, slot types.Slot, nblobs int) (*ethpb.SignedBeaconBlockDeneb, []blocks.ROBlob) {
|
||||
// Start service with 160 as allowed blocks capacity (and almost zero capacity recovery).
|
||||
stateRoot := bytesutil.PadTo([]byte("stateRoot"), fieldparams.RootLength)
|
||||
receiptsRoot := bytesutil.PadTo([]byte("receiptsRoot"), fieldparams.RootLength)
|
||||
@@ -103,32 +107,45 @@ func generateTestBlockWithSidecars(t *testing.T, parent [32]byte, slot types.Slo
|
||||
root, err := block.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
sidecars := make([]*ethpb.DeprecatedBlobSidecar, len(commitments))
|
||||
sbb, err := blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
sidecars := make([]blocks.ROBlob, len(commitments))
|
||||
for i, c := range block.Block.Body.BlobKzgCommitments {
|
||||
sidecars[i] = generateTestSidecar(root, block, i, c)
|
||||
sidecars[i] = generateTestSidecar(t, root, sbb, i, c)
|
||||
}
|
||||
return block, sidecars
|
||||
}
|
||||
|
||||
func generateTestSidecar(root [32]byte, block *ethpb.SignedBeaconBlockDeneb, index int, commitment []byte) *ethpb.DeprecatedBlobSidecar {
|
||||
func generateTestSidecar(t *testing.T, root [32]byte, block interfaces.ReadOnlySignedBeaconBlock, index int, commitment []byte) blocks.ROBlob {
|
||||
header, err := block.Header()
|
||||
require.NoError(t, err)
|
||||
blob := make([]byte, fieldparams.BlobSize)
|
||||
binary.LittleEndian.PutUint64(blob, uint64(index))
|
||||
sc := ðpb.DeprecatedBlobSidecar{
|
||||
BlockRoot: root[:],
|
||||
Index: uint64(index),
|
||||
Slot: block.Block.Slot,
|
||||
BlockParentRoot: block.Block.ParentRoot,
|
||||
ProposerIndex: block.Block.ProposerIndex,
|
||||
Blob: blob,
|
||||
KzgCommitment: commitment,
|
||||
KzgProof: commitment,
|
||||
pb := ðpb.BlobSidecar{
|
||||
Index: uint64(index),
|
||||
Blob: blob,
|
||||
KzgCommitment: commitment,
|
||||
KzgProof: commitment,
|
||||
SignedBlockHeader: header,
|
||||
}
|
||||
pb.CommitmentInclusionProof = fakeEmptyProof(t, block, pb)
|
||||
|
||||
sc, err := blocks.NewROBlobWithRoot(pb, root)
|
||||
require.NoError(t, err)
|
||||
return sc
|
||||
}
|
||||
|
||||
func fakeEmptyProof(_ *testing.T, _ interfaces.ReadOnlySignedBeaconBlock, _ *ethpb.BlobSidecar) [][]byte {
|
||||
r := make([][]byte, fieldparams.KzgCommitmentInclusionProofDepth)
|
||||
for i := range r {
|
||||
r[i] = make([]byte, fieldparams.RootLength)
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
type expectedBlobChunk struct {
|
||||
code uint8
|
||||
sidecar *ethpb.DeprecatedBlobSidecar
|
||||
sidecar *blocks.ROBlob
|
||||
message string
|
||||
}
|
||||
|
||||
@@ -146,17 +163,19 @@ func (r *expectedBlobChunk) requireExpected(t *testing.T, s *Service, stream net
|
||||
require.NoError(t, err)
|
||||
|
||||
valRoot := s.cfg.chain.GenesisValidatorsRoot()
|
||||
ctxBytes, err := forks.ForkDigestFromEpoch(slots.ToEpoch(r.sidecar.GetSlot()), valRoot[:])
|
||||
ctxBytes, err := forks.ForkDigestFromEpoch(slots.ToEpoch(r.sidecar.Slot()), valRoot[:])
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, ctxBytes, bytesutil.ToBytes4(c))
|
||||
|
||||
sc := ðpb.DeprecatedBlobSidecar{}
|
||||
sc := ðpb.BlobSidecar{}
|
||||
require.NoError(t, encoding.DecodeWithMaxLength(stream, sc))
|
||||
require.Equal(t, bytesutil.ToBytes32(sc.BlockRoot), bytesutil.ToBytes32(r.sidecar.BlockRoot))
|
||||
require.Equal(t, sc.Index, r.sidecar.Index)
|
||||
rob, err := blocks.NewROBlob(sc)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, rob.BlockRoot(), r.sidecar.BlockRoot())
|
||||
require.Equal(t, rob.Index, r.sidecar.Index)
|
||||
}
|
||||
|
||||
func (c *blobsTestCase) setup(t *testing.T) (*Service, []*ethpb.DeprecatedBlobSidecar, func()) {
|
||||
func (c *blobsTestCase) setup(t *testing.T) (*Service, []blocks.ROBlob, func()) {
|
||||
cfg := params.BeaconConfig()
|
||||
repositionFutureEpochs(cfg)
|
||||
undo, err := params.SetActiveWithUndo(cfg)
|
||||
@@ -174,7 +193,7 @@ func (c *blobsTestCase) setup(t *testing.T) (*Service, []*ethpb.DeprecatedBlobSi
|
||||
}
|
||||
d := db.SetupDB(t)
|
||||
|
||||
sidecars := make([]*ethpb.DeprecatedBlobSidecar, 0)
|
||||
sidecars := make([]blocks.ROBlob, 0)
|
||||
oldest := c.oldestSlot(t)
|
||||
var parentRoot [32]byte
|
||||
for i := 0; i < c.nblocks; i++ {
|
||||
@@ -198,7 +217,7 @@ func (c *blobsTestCase) setup(t *testing.T) (*Service, []*ethpb.DeprecatedBlobSi
|
||||
|
||||
client := p2ptest.NewTestP2P(t)
|
||||
s := &Service{
|
||||
cfg: &config{p2p: client, chain: c.chain, clock: clock, beaconDB: d},
|
||||
cfg: &config{p2p: client, chain: c.chain, clock: clock, beaconDB: d, blobStorage: filesystem.NewEphemeralBlobStorage(t)},
|
||||
rateLimiter: newRateLimiter(client),
|
||||
}
|
||||
|
||||
@@ -223,17 +242,22 @@ func (c *blobsTestCase) run(t *testing.T) {
|
||||
defer cleanup()
|
||||
req := c.requestFromSidecars(sidecars)
|
||||
expect := c.defineExpected(t, sidecars, req)
|
||||
m := map[types.Slot][]*ethpb.DeprecatedBlobSidecar{}
|
||||
for _, sc := range expect {
|
||||
m := map[types.Slot][]blocks.ROBlob{}
|
||||
for i := range expect {
|
||||
sc := expect[i]
|
||||
// If define expected omits a sidecar from an expected result, we don't need to save it.
|
||||
// This can happen in particular when there are no expected results, because the nth part of the
|
||||
// response is an error (or none at all when the whole request is invalid).
|
||||
if sc.sidecar != nil {
|
||||
m[sc.sidecar.Slot] = append(m[sc.sidecar.Slot], sc.sidecar)
|
||||
m[sc.sidecar.Slot()] = append(m[sc.sidecar.Slot()], *sc.sidecar)
|
||||
}
|
||||
}
|
||||
for _, blobSidecars := range m {
|
||||
require.NoError(t, s.cfg.beaconDB.SaveBlobSidecar(context.Background(), blobSidecars))
|
||||
v, err := verification.BlobSidecarSliceNoop(blobSidecars)
|
||||
require.NoError(t, err)
|
||||
for i := range v {
|
||||
require.NoError(t, s.cfg.blobStorage.Save(v[i]))
|
||||
}
|
||||
}
|
||||
if c.total != nil {
|
||||
require.Equal(t, *c.total, len(expect))
|
||||
@@ -293,7 +317,7 @@ func TestTestcaseSetup_BlocksAndBlobs(t *testing.T) {
|
||||
require.Equal(t, maxed, len(sidecars))
|
||||
require.Equal(t, maxed, len(expect))
|
||||
for _, sc := range sidecars {
|
||||
blk, err := s.cfg.beaconDB.Block(ctx, bytesutil.ToBytes32(sc.BlockRoot))
|
||||
blk, err := s.cfg.beaconDB.Block(ctx, sc.BlockRoot())
|
||||
require.NoError(t, err)
|
||||
var found *int
|
||||
comms, err := blk.Block().Body().BlobKzgCommitments()
|
||||
|
||||
@@ -38,7 +38,7 @@ func (s *Service) decodePubsubMessage(msg *pubsub.Message) (ssz.Unmarshaler, err
|
||||
case strings.Contains(topic, p2p.GossipSyncCommitteeMessage) && !strings.Contains(topic, p2p.SyncContributionAndProofSubnetTopicFormat):
|
||||
topic = p2p.GossipTypeMapping[reflect.TypeOf(ðpb.SyncCommitteeMessage{})]
|
||||
case strings.Contains(topic, p2p.GossipBlobSidecarMessage):
|
||||
topic = p2p.GossipTypeMapping[reflect.TypeOf(ðpb.SignedBlobSidecar{})]
|
||||
topic = p2p.GossipTypeMapping[reflect.TypeOf(ðpb.BlobSidecar{})]
|
||||
}
|
||||
|
||||
base := p2p.GossipTopicMappings(topic, 0)
|
||||
|
||||
@@ -22,12 +22,14 @@ go_library(
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/p2p/peers/scorers:go_default_library",
|
||||
"//beacon-chain/p2p/types:go_default_library",
|
||||
"//beacon-chain/startup:go_default_library",
|
||||
"//beacon-chain/sync:go_default_library",
|
||||
"//beacon-chain/sync/verify:go_default_library",
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types:go_default_library",
|
||||
@@ -113,6 +115,7 @@ go_test(
|
||||
"//async/abool:go_default_library",
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
"//beacon-chain/p2p:go_default_library",
|
||||
"//beacon-chain/p2p/peers:go_default_library",
|
||||
|
||||
@@ -19,6 +19,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
consensus_types "github.com/prysmaticlabs/prysm/v4/consensus-types"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
blocks2 "github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
@@ -382,12 +383,12 @@ func lowestSlotNeedsBlob(retentionStart primitives.Slot, bwb []blocks2.BlockWith
|
||||
return nil
|
||||
}
|
||||
|
||||
func sortBlobs(blobs []*p2ppb.DeprecatedBlobSidecar) []*p2ppb.DeprecatedBlobSidecar {
|
||||
func sortBlobs(blobs []blocks.ROBlob) []blocks.ROBlob {
|
||||
sort.Slice(blobs, func(i, j int) bool {
|
||||
if blobs[i].Slot == blobs[j].Slot {
|
||||
if blobs[i].Slot() == blobs[j].Slot() {
|
||||
return blobs[i].Index < blobs[j].Index
|
||||
}
|
||||
return blobs[i].Slot < blobs[j].Slot
|
||||
return blobs[i].Slot() < blobs[j].Slot()
|
||||
})
|
||||
|
||||
return blobs
|
||||
@@ -396,7 +397,7 @@ func sortBlobs(blobs []*p2ppb.DeprecatedBlobSidecar) []*p2ppb.DeprecatedBlobSide
|
||||
var errBlobVerification = errors.New("peer unable to serve aligned BlobSidecarsByRange and BeaconBlockSidecarsByRange responses")
|
||||
var errMissingBlobsForBlockCommitments = errors.Wrap(errBlobVerification, "blobs unavailable for processing block with kzg commitments")
|
||||
|
||||
func verifyAndPopulateBlobs(bwb []blocks2.BlockWithVerifiedBlobs, blobs []*p2ppb.DeprecatedBlobSidecar, blobWindowStart primitives.Slot) ([]blocks2.BlockWithVerifiedBlobs, error) {
|
||||
func verifyAndPopulateBlobs(bwb []blocks2.BlockWithVerifiedBlobs, blobs []blocks.ROBlob, blobWindowStart primitives.Slot) ([]blocks2.BlockWithVerifiedBlobs, error) {
|
||||
// Assumes bwb has already been sorted by sortedBlockWithVerifiedBlobSlice.
|
||||
blobs = sortBlobs(blobs)
|
||||
blobi := 0
|
||||
@@ -419,7 +420,7 @@ func verifyAndPopulateBlobs(bwb []blocks2.BlockWithVerifiedBlobs, blobs []*p2ppb
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
bb.Blobs = make([]*p2ppb.DeprecatedBlobSidecar, len(commits))
|
||||
bb.Blobs = make([]blocks.ROBlob, len(commits))
|
||||
for ci := range commits {
|
||||
// There are more expected commitments in this block, but we've run out of blobs from the response
|
||||
// (out-of-bound error guard).
|
||||
@@ -502,7 +503,7 @@ func (f *blocksFetcher) requestBlocks(
|
||||
return prysmsync.SendBeaconBlocksByRangeRequest(ctx, f.chain, f.p2p, pid, req, nil)
|
||||
}
|
||||
|
||||
func (f *blocksFetcher) requestBlobs(ctx context.Context, req *p2ppb.BlobSidecarsByRangeRequest, pid peer.ID) ([]*p2ppb.DeprecatedBlobSidecar, error) {
|
||||
func (f *blocksFetcher) requestBlobs(ctx context.Context, req *p2ppb.BlobSidecarsByRangeRequest, pid peer.ID) ([]blocks.ROBlob, error) {
|
||||
if ctx.Err() != nil {
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
|
||||
@@ -962,7 +962,7 @@ func TestTimeToWait(t *testing.T) {
|
||||
|
||||
func TestSortBlobs(t *testing.T) {
|
||||
_, blobs := util.ExtendBlocksPlusBlobs(t, []blocks.ROBlock{}, 10)
|
||||
shuffled := make([]*ethpb.DeprecatedBlobSidecar, len(blobs))
|
||||
shuffled := make([]blocks.ROBlob, len(blobs))
|
||||
for i := range blobs {
|
||||
shuffled[i] = blobs[i]
|
||||
}
|
||||
@@ -974,10 +974,10 @@ func TestSortBlobs(t *testing.T) {
|
||||
for i := range blobs {
|
||||
expect := blobs[i]
|
||||
actual := sorted[i]
|
||||
require.Equal(t, expect.Slot, actual.Slot)
|
||||
require.Equal(t, expect.Slot(), actual.Slot())
|
||||
require.Equal(t, expect.Index, actual.Index)
|
||||
require.Equal(t, bytesutil.ToBytes48(expect.KzgCommitment), bytesutil.ToBytes48(actual.KzgCommitment))
|
||||
require.Equal(t, bytesutil.ToBytes32(expect.BlockRoot), bytesutil.ToBytes32(actual.BlockRoot))
|
||||
require.Equal(t, expect.BlockRoot(), actual.BlockRoot())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1047,7 +1047,7 @@ func TestBlobRequest(t *testing.T) {
|
||||
require.Equal(t, len(allAfter), int(req.Count))
|
||||
}
|
||||
|
||||
func testSequenceBlockWithBlob(t *testing.T, nblocks int) ([]blocks.BlockWithVerifiedBlobs, []*ethpb.DeprecatedBlobSidecar) {
|
||||
func testSequenceBlockWithBlob(t *testing.T, nblocks int) ([]blocks.BlockWithVerifiedBlobs, []blocks.ROBlob) {
|
||||
blks, blobs := util.ExtendBlocksPlusBlobs(t, []blocks.ROBlock{}, nblocks)
|
||||
sbbs := make([]interfaces.ReadOnlySignedBeaconBlock, len(blks))
|
||||
for i := range blks {
|
||||
@@ -1073,19 +1073,20 @@ func TestVerifyAndPopulateBlobs(t *testing.T) {
|
||||
|
||||
bwb, blobs = testSequenceBlockWithBlob(t, 10)
|
||||
// Misalign the slots of the blobs for the first block to simulate them being missing from the response.
|
||||
offByOne := blobs[0].Slot
|
||||
offByOne := blobs[0].Slot()
|
||||
for i := range blobs {
|
||||
if blobs[i].Slot == offByOne {
|
||||
blobs[i].Slot = offByOne + 1
|
||||
if blobs[i].Slot() == offByOne {
|
||||
blobs[i].SignedBlockHeader.Header.Slot = offByOne + 1
|
||||
}
|
||||
}
|
||||
_, err = verifyAndPopulateBlobs(bwb, blobs, firstBlockSlot)
|
||||
require.ErrorContains(t, "BlockSlot in BlobSidecar does not match the expected slot", err)
|
||||
require.ErrorIs(t, err, verify.ErrBlobBlockMisaligned)
|
||||
|
||||
bwb, blobs = testSequenceBlockWithBlob(t, 10)
|
||||
blobs[lastBlobIdx].BlockRoot = blobs[0].BlockRoot
|
||||
blobs[lastBlobIdx], err = blocks.NewROBlobWithRoot(blobs[lastBlobIdx].BlobSidecar, blobs[0].BlockRoot())
|
||||
require.NoError(t, err)
|
||||
_, err = verifyAndPopulateBlobs(bwb, blobs, firstBlockSlot)
|
||||
require.ErrorIs(t, err, verify.ErrMismatchedBlobBlockRoot)
|
||||
require.ErrorIs(t, err, verify.ErrBlobBlockMisaligned)
|
||||
|
||||
bwb, blobs = testSequenceBlockWithBlob(t, 10)
|
||||
blobs[lastBlobIdx].Index = 100
|
||||
@@ -1093,18 +1094,24 @@ func TestVerifyAndPopulateBlobs(t *testing.T) {
|
||||
require.ErrorIs(t, err, verify.ErrIncorrectBlobIndex)
|
||||
|
||||
bwb, blobs = testSequenceBlockWithBlob(t, 10)
|
||||
blobs[lastBlobIdx].ProposerIndex = 100
|
||||
blobs[lastBlobIdx].SignedBlockHeader.Header.ProposerIndex = 100
|
||||
blobs[lastBlobIdx], err = blocks.NewROBlob(blobs[lastBlobIdx].BlobSidecar)
|
||||
require.NoError(t, err)
|
||||
_, err = verifyAndPopulateBlobs(bwb, blobs, firstBlockSlot)
|
||||
require.ErrorIs(t, err, verify.ErrMismatchedProposerIndex)
|
||||
require.ErrorIs(t, err, verify.ErrBlobBlockMisaligned)
|
||||
|
||||
bwb, blobs = testSequenceBlockWithBlob(t, 10)
|
||||
blobs[lastBlobIdx].BlockParentRoot = blobs[0].BlockParentRoot
|
||||
blobs[lastBlobIdx].SignedBlockHeader.Header.ParentRoot = blobs[0].SignedBlockHeader.Header.ParentRoot
|
||||
blobs[lastBlobIdx], err = blocks.NewROBlob(blobs[lastBlobIdx].BlobSidecar)
|
||||
require.NoError(t, err)
|
||||
_, err = verifyAndPopulateBlobs(bwb, blobs, firstBlockSlot)
|
||||
require.ErrorIs(t, err, verify.ErrMismatchedBlobBlockRoot)
|
||||
require.ErrorIs(t, err, verify.ErrBlobBlockMisaligned)
|
||||
|
||||
var emptyKzg [48]byte
|
||||
bwb, blobs = testSequenceBlockWithBlob(t, 10)
|
||||
blobs[lastBlobIdx].KzgCommitment = emptyKzg[:]
|
||||
blobs[lastBlobIdx], err = blocks.NewROBlob(blobs[lastBlobIdx].BlobSidecar)
|
||||
require.NoError(t, err)
|
||||
_, err = verifyAndPopulateBlobs(bwb, blobs, firstBlockSlot)
|
||||
require.ErrorIs(t, err, verify.ErrMismatchedBlobCommitments)
|
||||
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/verification"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
@@ -163,8 +164,15 @@ func (s *Service) processFetchedDataRegSync(
|
||||
blksWithoutParentCount := 0
|
||||
for _, b := range data.bwb {
|
||||
if len(b.Blobs) > 0 {
|
||||
if err := s.cfg.DB.SaveBlobSidecar(ctx, b.Blobs); err != nil {
|
||||
log.WithError(err).Warn("Failed to save blob sidecar")
|
||||
verified, err := verification.BlobSidecarSliceNoop(b.Blobs)
|
||||
if err != nil {
|
||||
log.WithField("root", b.Block.Root()).WithError(err).Error("blobs failed verification")
|
||||
continue
|
||||
}
|
||||
for i := range verified {
|
||||
if err := s.cfg.BlobStorage.Save(verified[i]); err != nil {
|
||||
log.WithError(err).Warn("Failed to save blob sidecar")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -326,8 +334,14 @@ func (s *Service) processBatchedBlocks(ctx context.Context, genesis time.Time,
|
||||
if len(bb.Blobs) == 0 {
|
||||
continue
|
||||
}
|
||||
if err := s.cfg.DB.SaveBlobSidecar(ctx, bb.Blobs); err != nil {
|
||||
return errors.Wrapf(err, "failed to save blobs for block %#x", bb.Block.Root())
|
||||
verified, err := verification.BlobSidecarSliceNoop(bb.Blobs)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "blobs for root %#x failed verification", bb.Block.Root())
|
||||
}
|
||||
for i := range verified {
|
||||
if err := s.cfg.BlobStorage.Save(verified[i]); err != nil {
|
||||
return errors.Wrapf(err, "failed to save blobs for block %#x", bb.Block.Root())
|
||||
}
|
||||
}
|
||||
blobCount += len(bb.Blobs)
|
||||
}
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
blockfeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/block"
|
||||
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
|
||||
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
|
||||
@@ -41,6 +42,7 @@ type Config struct {
|
||||
BlockNotifier blockfeed.Notifier
|
||||
ClockWaiter startup.ClockWaiter
|
||||
InitialSyncComplete chan struct{}
|
||||
BlobStorage *filesystem.BlobStorage
|
||||
}
|
||||
|
||||
// Service service.
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"github.com/paulbellamy/ratecounter"
|
||||
"github.com/prysmaticlabs/prysm/v4/async/abool"
|
||||
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
|
||||
dbtest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
|
||||
p2pt "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
|
||||
@@ -382,6 +383,7 @@ func TestService_Resync(t *testing.T) {
|
||||
P2P: p,
|
||||
Chain: mc,
|
||||
StateNotifier: mc.StateNotifier(),
|
||||
BlobStorage: filesystem.NewEphemeralBlobStorage(t),
|
||||
})
|
||||
assert.NotNil(t, s)
|
||||
assert.Equal(t, primitives.Slot(0), s.cfg.Chain.HeadSlot())
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation"
|
||||
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/blstoexec"
|
||||
@@ -152,3 +153,11 @@ func WithStateNotifier(n statefeed.Notifier) Option {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithBlobStorage gives the sync package direct access to BlobStorage.
|
||||
func WithBlobStorage(b *filesystem.BlobStorage) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.blobStorage = b
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -154,7 +154,7 @@ func (s *Service) processAttestations(ctx context.Context, attestations []*ethpb
|
||||
continue
|
||||
}
|
||||
// Broadcasting the signed attestation again once a node is able to process it.
|
||||
if err := s.cfg.p2p.BroadcastAttestation(ctx, helpers.ComputeSubnetForAttestation(valCount, signedAtt.Message.Aggregate), signedAtt.Message.Aggregate); err != nil {
|
||||
if err := s.cfg.p2p.BroadcastAttestation(ctx, helpers.ComputeSubnetForAttestation(valCount, signedAtt.Message.Aggregate), signedAtt.Message.Aggregate, false); err != nil {
|
||||
log.WithError(err).Debug("Could not broadcast")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,10 +7,11 @@ import (
|
||||
libp2pcore "github.com/libp2p/go-libp2p/core"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/execution"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/types"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/verify"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/verification"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
|
||||
@@ -180,39 +181,37 @@ func (s *Service) sendAndSaveBlobSidecars(ctx context.Context, request types.Blo
|
||||
log.WithFields(blobFields(sidecar)).Debug("Received blob sidecar RPC")
|
||||
}
|
||||
|
||||
return s.cfg.beaconDB.SaveBlobSidecar(ctx, sidecars)
|
||||
vscs, err := verification.BlobSidecarSliceNoop(sidecars)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for i := range vscs {
|
||||
if err := s.cfg.blobStorage.Save(vscs[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// constructPendingBlobsRequest creates a request for BlobSidecars by root, considering blobs already in DB.
|
||||
func (s *Service) constructPendingBlobsRequest(ctx context.Context, blockRoot [32]byte, count int) (types.BlobSidecarsByRootReq, error) {
|
||||
knownBlobs, err := s.cfg.beaconDB.BlobSidecarsByRoot(ctx, blockRoot)
|
||||
if err != nil && !errors.Is(err, db.ErrNotFound) {
|
||||
func (s *Service) constructPendingBlobsRequest(ctx context.Context, root [32]byte, commitments int) (types.BlobSidecarsByRootReq, error) {
|
||||
stored, err := s.cfg.blobStorage.Indices(root)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
knownIndices := indexSetFromBlobs(knownBlobs)
|
||||
requestedIndices := filterUnknownIndices(knownIndices, count, blockRoot)
|
||||
|
||||
return requestedIndices, nil
|
||||
return requestsForMissingIndices(stored, commitments, root), nil
|
||||
}
|
||||
|
||||
// Helper function to create a set of known indices.
|
||||
func indexSetFromBlobs(blobs []*eth.DeprecatedBlobSidecar) map[uint64]struct{} {
|
||||
indices := make(map[uint64]struct{})
|
||||
for _, blob := range blobs {
|
||||
indices[blob.Index] = struct{}{}
|
||||
}
|
||||
return indices
|
||||
}
|
||||
|
||||
// Helper function to filter out known indices.
|
||||
func filterUnknownIndices(knownIndices map[uint64]struct{}, count int, blockRoot [32]byte) []*eth.BlobIdentifier {
|
||||
// requestsForMissingIndices constructs a slice of BlobIdentifiers that are missing from
|
||||
// local storage, based on a mapping that represents which indices are locally stored,
|
||||
// and the highest expected index.
|
||||
func requestsForMissingIndices(storedIndices [fieldparams.MaxBlobsPerBlock]bool, commitments int, root [32]byte) []*eth.BlobIdentifier {
|
||||
var ids []*eth.BlobIdentifier
|
||||
for i := uint64(0); i < uint64(count); i++ {
|
||||
if _, exists := knownIndices[i]; exists {
|
||||
continue
|
||||
for i := uint64(0); i < uint64(commitments); i++ {
|
||||
if !storedIndices[i] {
|
||||
ids = append(ids, ð.BlobIdentifier{Index: i, BlockRoot: root[:]})
|
||||
}
|
||||
ids = append(ids, ð.BlobIdentifier{Index: i, BlockRoot: blockRoot[:]})
|
||||
}
|
||||
return ids
|
||||
}
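A short usage sketch may help here: given the fixed-size presence array returned by the blob store's Indices call and the number of commitments in the block, the helper requests only the gaps. The driver below is hypothetical and uses plain types instead of the Prysm ones, but the selection logic is the same as requestsForMissingIndices.

package main

import "fmt"

// blobIdentifier mirrors the (index, block root) pair requested from peers.
type blobIdentifier struct {
	index uint64
	root  [32]byte
}

// missingIndices reproduces the logic above with plain types: every index below
// the commitment count that is not already stored gets requested.
func missingIndices(stored [6]bool, commitments int, root [32]byte) []blobIdentifier {
	var ids []blobIdentifier
	for i := uint64(0); i < uint64(commitments); i++ {
		if !stored[i] {
			ids = append(ids, blobIdentifier{index: i, root: root})
		}
	}
	return ids
}

func main() {
	// Sidecars 0 and 2 are already on disk; the block committed to 4 blobs.
	stored := [6]bool{true, false, true, false, false, false}
	for _, id := range missingIndices(stored, 4, [32]byte{0xab}) {
		fmt.Println(id.index) // 1, then 3
	}
}

Note that the loop indexes the fixed-size array directly, so it relies on the commitment count having been validated against MAX_BLOBS_PER_BLOCK before this point.
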
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
gcache "github.com/patrickmn/go-cache"
|
||||
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/filesystem"
|
||||
db "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
|
||||
mockExecution "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
|
||||
@@ -22,6 +23,7 @@ import (
|
||||
p2ptest "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
|
||||
p2pTypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/types"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
|
||||
"github.com/prysmaticlabs/prysm/v4/beacon-chain/verification"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
|
||||
@@ -364,7 +366,7 @@ func TestRecentBeaconBlocksRPCHandler_HandleZeroBlocks(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestRequestPendingBlobs(t *testing.T) {
|
||||
s := &Service{}
|
||||
s := &Service{cfg: &config{blobStorage: filesystem.NewEphemeralBlobStorage(t)}}
|
t.Run("old block should not fail", func(t *testing.T) {
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
@@ -393,10 +395,11 @@ func TestRequestPendingBlobs(t *testing.T) {
p1.Peers().SetChainState(p2.PeerID(), &ethpb.Status{FinalizedEpoch: 1})
s := &Service{
cfg: &config{
p2p: p1,
chain: chain,
clock: startup.NewClock(time.Unix(0, 0), [32]byte{}),
beaconDB: db.SetupDB(t),
p2p: p1,
chain: chain,
clock: startup.NewClock(time.Unix(0, 0), [32]byte{}),
beaconDB: db.SetupDB(t),
blobStorage: filesystem.NewEphemeralBlobStorage(t),
},
}
b := util.NewBeaconBlockDeneb()
@@ -409,7 +412,8 @@ func TestRequestPendingBlobs(t *testing.T) {

func TestConstructPendingBlobsRequest(t *testing.T) {
d := db.SetupDB(t)
s := &Service{cfg: &config{beaconDB: d}}
bs := filesystem.NewEphemeralBlobStorage(t)
s := &Service{cfg: &config{beaconDB: d, blobStorage: bs}}
ctx := context.Background()

// No unknown indices.
@@ -424,11 +428,23 @@ func TestConstructPendingBlobsRequest(t *testing.T) {
}

// Has indices.
blobSidecars := []*ethpb.DeprecatedBlobSidecar{
{Index: 0, BlockRoot: root[:]},
{Index: 2, BlockRoot: root[:]},
header := &ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
ParentRoot: bytesutil.PadTo([]byte{}, 32),
StateRoot: bytesutil.PadTo([]byte{}, 32),
BodyRoot: bytesutil.PadTo([]byte{}, 32),
},
Signature: bytesutil.PadTo([]byte{}, 96),
}
blobSidecars := []blocks.ROBlob{
util.GenerateTestDenebBlobSidecar(t, root, header, 0, bytesutil.PadTo([]byte{}, 48)),
util.GenerateTestDenebBlobSidecar(t, root, header, 2, bytesutil.PadTo([]byte{}, 48)),
}
vscs, err := verification.BlobSidecarSliceNoop(blobSidecars)
require.NoError(t, err)
for i := range vscs {
require.NoError(t, bs.Save(vscs[i]))
}
require.NoError(t, d.SaveBlobSidecar(ctx, blobSidecars))

expected := []*eth.BlobIdentifier{
{Index: 1, BlockRoot: root[:]},
@@ -439,29 +455,8 @@ func TestConstructPendingBlobsRequest(t *testing.T) {
require.DeepEqual(t, expected[0].BlockRoot, actual[0].BlockRoot)
}

func TestIndexSetFromBlobs(t *testing.T) {
blobs := []*ethpb.DeprecatedBlobSidecar{
{Index: 0},
{Index: 1},
{Index: 2},
}

expected := map[uint64]struct{}{
0: {},
1: {},
2: {},
}

actual := indexSetFromBlobs(blobs)
require.DeepEqual(t, expected, actual)
}

func TestFilterUnknownIndices(t *testing.T) {
knownIndices := map[uint64]struct{}{
0: {},
1: {},
2: {},
}
haveIndices := [fieldparams.MaxBlobsPerBlock]bool{true, true, true, false, false, false}

blockRoot := [32]byte{}
count := 5
@@ -471,7 +466,7 @@ func TestFilterUnknownIndices(t *testing.T) {
{Index: 4, BlockRoot: blockRoot[:]},
}

actual := filterUnknownIndices(knownIndices, count, blockRoot)
actual := requestsForMissingIndices(haveIndices, count, blockRoot)
require.Equal(t, len(expected), len(actual))
require.Equal(t, expected[0].Index, actual[0].Index)
require.DeepEqual(t, actual[0].BlockRoot, expected[0].BlockRoot)

@@ -7,7 +7,6 @@ import (

libp2pcore "github.com/libp2p/go-libp2p/core"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
p2ptypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v4/cmd/beacon-chain/flags"
@@ -29,15 +28,22 @@ func (s *Service) streamBlobBatch(ctx context.Context, batch blockBatch, wQuota
defer span.End()
for _, b := range batch.canonical() {
root := b.Root()
scs, err := s.cfg.beaconDB.BlobSidecarsByRoot(ctx, b.Root())
if errors.Is(err, db.ErrNotFound) {
continue
}
idxs, err := s.cfg.blobStorage.Indices(b.Root())
if err != nil {
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
return wQuota, errors.Wrapf(err, "could not retrieve sidecars for block root %#x", root)
}
for _, sc := range scs {
for i, l := uint64(0), uint64(len(idxs)); i < l; i++ {
// index not available, skip
if !idxs[i] {
continue
}
// We won't check for file not found since the .Indices method should normally prevent that from happening.
sc, err := s.cfg.blobStorage.Get(b.Root(), i)
if err != nil {
s.writeErrorResponseToStream(responseCodeServerError, p2ptypes.ErrGeneric.Error(), stream)
return wQuota, errors.Wrapf(err, "could not retrieve sidecar: index %d, block root %#x", i, root)
}
SetStreamWriteDeadline(stream, defaultWriteDuration)
if chunkErr := WriteBlobSidecarChunk(stream, s.cfg.chain, s.cfg.p2p.Encoding(), sc); chunkErr != nil {
log.WithError(chunkErr).Debug("Could not send a chunked response")

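The hunk above swaps the per-root database read (`BlobSidecarsByRoot`) for per-index reads from the blob filesystem store (`Indices` followed by `Get`). Below is a minimal sketch of that loop shape using stand-in types; the interface, array size, and helper names are assumptions inferred from the calls visible in the diff, not the actual `filesystem.BlobStorage` API.

```go
package blobsketch

import "fmt"

// blobStore is a hypothetical, trimmed-down view of the two storage calls
// used by streamBlobBatch above; the real filesystem package may differ.
type blobStore interface {
	// Indices reports which sidecar indices exist on disk for a block root.
	Indices(root [32]byte) ([6]bool, error) // 6 assumed as MAX_BLOBS_PER_BLOCK
	// Get loads a single stored sidecar by block root and index.
	Get(root [32]byte, idx uint64) ([]byte, error)
}

// streamBlobsForRoot mirrors the loop in the diff: consult the index bitmap
// first, then fetch and send only the sidecars that are actually present.
func streamBlobsForRoot(store blobStore, root [32]byte, send func([]byte) error) error {
	idxs, err := store.Indices(root)
	if err != nil {
		return fmt.Errorf("could not read indices for root %#x: %w", root, err)
	}
	for i := uint64(0); i < uint64(len(idxs)); i++ {
		if !idxs[i] {
			continue // index not available, skip
		}
		sc, err := store.Get(root, i)
		if err != nil {
			return fmt.Errorf("could not retrieve sidecar %d for root %#x: %w", i, root, err)
		}
		if err := send(sc); err != nil {
			return err
		}
	}
	return nil
}
```

One consequence of this shape is that a block with no stored sidecars simply produces zero writes, so the earlier `db.ErrNotFound` special case is no longer needed.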
@@ -6,8 +6,8 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/time/slots"
@@ -24,40 +24,41 @@ func (c *blobsTestCase) defaultOldestSlotByRange(t *testing.T) types.Slot {
return oldestSlot
}

func blobRangeRequestFromSidecars(scs []*ethpb.DeprecatedBlobSidecar) interface{} {
func blobRangeRequestFromSidecars(scs []blocks.ROBlob) interface{} {
maxBlobs := fieldparams.MaxBlobsPerBlock
count := uint64(len(scs) / maxBlobs)
return &ethpb.BlobSidecarsByRangeRequest{
StartSlot: scs[0].Slot,
StartSlot: scs[0].Slot(),
Count: count,
}
}

func (c *blobsTestCase) filterExpectedByRange(t *testing.T, scs []*ethpb.DeprecatedBlobSidecar, req interface{}) []*expectedBlobChunk {
func (c *blobsTestCase) filterExpectedByRange(t *testing.T, scs []blocks.ROBlob, req interface{}) []*expectedBlobChunk {
var expect []*expectedBlobChunk
blockOffset := 0
lastRoot := bytesutil.ToBytes32(scs[0].BlockRoot)
lastRoot := scs[0].BlockRoot()
rreq, ok := req.(*ethpb.BlobSidecarsByRangeRequest)
require.Equal(t, true, ok)
var writes uint64
for _, sc := range scs {
root := bytesutil.ToBytes32(sc.BlockRoot)
for i := range scs {
sc := scs[i]
root := sc.BlockRoot()
if root != lastRoot {
blockOffset += 1
}
lastRoot = root

if sc.Slot < c.oldestSlot(t) {
if sc.Slot() < c.oldestSlot(t) {
continue
}
if sc.Slot < rreq.StartSlot || sc.Slot > rreq.StartSlot+types.Slot(rreq.Count)-1 {
if sc.Slot() < rreq.StartSlot || sc.Slot() > rreq.StartSlot+types.Slot(rreq.Count)-1 {
continue
}
if writes == params.BeaconNetworkConfig().MaxRequestBlobSidecars {
continue
}
expect = append(expect, &expectedBlobChunk{
sidecar: sc,
sidecar: &sc,
code: responseCodeSuccess,
message: "",
})
@@ -107,9 +108,9 @@ func TestBlobByRangeOK(t *testing.T) {
{
name: "10 slots before window, 10 slots after, count = 20",
nblocks: 10,
requestFromSidecars: func(scs []*ethpb.DeprecatedBlobSidecar) interface{} {
requestFromSidecars: func(scs []blocks.ROBlob) interface{} {
return &ethpb.BlobSidecarsByRangeRequest{
StartSlot: scs[0].Slot - 10,
StartSlot: scs[0].Slot() - 10,
Count: 20,
}
},
@@ -117,9 +118,9 @@ func TestBlobByRangeOK(t *testing.T) {
{
name: "request before window, empty response",
nblocks: 10,
requestFromSidecars: func(scs []*ethpb.DeprecatedBlobSidecar) interface{} {
requestFromSidecars: func(scs []blocks.ROBlob) interface{} {
return &ethpb.BlobSidecarsByRangeRequest{
StartSlot: scs[0].Slot - 10,
StartSlot: scs[0].Slot() - 10,
Count: 10,
}
},
@@ -128,9 +129,9 @@ func TestBlobByRangeOK(t *testing.T) {
{
name: "10 blocks * 4 blobs = 40",
nblocks: 10,
requestFromSidecars: func(scs []*ethpb.DeprecatedBlobSidecar) interface{} {
requestFromSidecars: func(scs []blocks.ROBlob) interface{} {
return &ethpb.BlobSidecarsByRangeRequest{
StartSlot: scs[0].Slot - 10,
StartSlot: scs[0].Slot() - 10,
Count: 20,
}
},
@@ -139,9 +140,9 @@ func TestBlobByRangeOK(t *testing.T) {
{
name: "when request count > MAX_REQUEST_BLOCKS_DENEB, MAX_REQUEST_BLOBS_SIDECARS sidecars in response",
nblocks: int(params.BeaconNetworkConfig().MaxRequestBlocksDeneb) + 10,
requestFromSidecars: func(scs []*ethpb.DeprecatedBlobSidecar) interface{} {
requestFromSidecars: func(scs []blocks.ROBlob) interface{} {
return &ethpb.BlobSidecarsByRangeRequest{
StartSlot: scs[0].Slot,
StartSlot: scs[0].Slot(),
Count: params.BeaconNetworkConfig().MaxRequestBlocksDeneb + 1,
}
},

@@ -15,7 +15,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/monitoring/tracing"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"go.opencensus.io/trace"
)
@@ -66,10 +65,6 @@ func (s *Service) blobSidecarByRootRPCHandler(ctx context.Context, msg interface
}
minReqEpoch := blobMinReqEpoch(s.cfg.chain.FinalizedCheckpt().Epoch, slots.ToEpoch(s.cfg.clock.CurrentSlot()))

buff := struct {
root [32]byte
scs []*eth.DeprecatedBlobSidecar
}{}
for i := range blobIdents {
if err := ctx.Err(); err != nil {
closeStream(stream, log)
@@ -82,31 +77,20 @@ func (s *Service) blobSidecarByRootRPCHandler(ctx context.Context, msg interface
}
s.rateLimiter.add(stream, 1)
root, idx := bytesutil.ToBytes32(blobIdents[i].BlockRoot), blobIdents[i].Index
if root != buff.root {
scs, err := s.cfg.beaconDB.BlobSidecarsByRoot(ctx, root)
buff.root, buff.scs = root, scs
if err != nil {
if errors.Is(err, db.ErrNotFound) {
// In case db error path gave us a non-nil value, make sure that other indices for the problem root
// are not processed when we reenter the outer loop.
buff.scs = nil
log.WithError(err).Debugf("BlobSidecar not found in db, root=%x, index=%d", root, idx)
continue
}
log.WithError(err).Errorf("unexpected db error retrieving BlobSidecar, root=%x, index=%d", root, idx)
s.writeErrorResponseToStream(responseCodeServerError, types.ErrGeneric.Error(), stream)
return err
sc, err := s.cfg.blobStorage.Get(root, idx)
if err != nil {
if db.IsNotFound(err) {
log.WithError(err).Debugf("BlobSidecar not found in db, root=%x, index=%d", root, idx)
continue
}
log.WithError(err).Errorf("unexpected db error retrieving BlobSidecar, root=%x, index=%d", root, idx)
s.writeErrorResponseToStream(responseCodeServerError, types.ErrGeneric.Error(), stream)
return err
}

if idx >= uint64(len(buff.scs)) {
continue
}
sc := buff.scs[idx]

// If any root in the request content references a block earlier than minimum_request_epoch,
// peers MAY respond with error code 3: ResourceUnavailable or not include the blob in the response.
if slots.ToEpoch(sc.Slot) < minReqEpoch {
if slots.ToEpoch(sc.Slot()) < minReqEpoch {
s.writeErrorResponseToStream(responseCodeResourceUnavailable, types.ErrBlobLTMinRequest.Error(), stream)
log.WithError(types.ErrBlobLTMinRequest).
Debugf("requested blob for block %#x before minimum_request_epoch", blobIdents[i].BlockRoot)

@@ -10,6 +10,7 @@ import (
p2pTypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/types"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
@@ -23,15 +24,16 @@ func (c *blobsTestCase) defaultOldestSlotByRoot(t *testing.T) types.Slot {
return oldest
}

func blobRootRequestFromSidecars(scs []*ethpb.DeprecatedBlobSidecar) interface{} {
func blobRootRequestFromSidecars(scs []blocks.ROBlob) interface{} {
req := make(p2pTypes.BlobSidecarsByRootReq, 0)
for _, sc := range scs {
req = append(req, &ethpb.BlobIdentifier{BlockRoot: sc.BlockRoot, Index: sc.Index})
for i := range scs {
sc := scs[i]
req = append(req, &ethpb.BlobIdentifier{BlockRoot: sc.BlockRootSlice(), Index: sc.Index})
}
return &req
}

func (c *blobsTestCase) filterExpectedByRoot(t *testing.T, scs []*ethpb.DeprecatedBlobSidecar, r interface{}) []*expectedBlobChunk {
func (c *blobsTestCase) filterExpectedByRoot(t *testing.T, scs []blocks.ROBlob, r interface{}) []*expectedBlobChunk {
rp, ok := r.(*p2pTypes.BlobSidecarsByRootReq)
if !ok {
panic("unexpected request type in filterExpectedByRoot")
@@ -49,12 +51,13 @@ func (c *blobsTestCase) filterExpectedByRoot(t *testing.T, scs []*ethpb.Deprecat
if len(scs) == 0 {
return expect
}
lastRoot := bytesutil.ToBytes32(scs[0].BlockRoot)
lastRoot := scs[0].BlockRoot()
rootToOffset := make(map[[32]byte]int)
rootToOffset[lastRoot] = 0
scMap := make(map[[32]byte]map[uint64]*ethpb.DeprecatedBlobSidecar)
for _, sc := range scs {
root := bytesutil.ToBytes32(sc.BlockRoot)
scMap := make(map[[32]byte]map[uint64]blocks.ROBlob)
for i := range scs {
sc := scs[i]
root := sc.BlockRoot()
if root != lastRoot {
blockOffset += 1
rootToOffset[root] = blockOffset
@@ -62,11 +65,12 @@ func (c *blobsTestCase) filterExpectedByRoot(t *testing.T, scs []*ethpb.Deprecat
lastRoot = root
_, ok := scMap[root]
if !ok {
scMap[root] = make(map[uint64]*ethpb.DeprecatedBlobSidecar)
scMap[root] = make(map[uint64]blocks.ROBlob)
}
scMap[root][sc.Index] = sc
}
for _, scid := range req {
for i := range req {
scid := req[i]
rootMap, ok := scMap[bytesutil.ToBytes32(scid.BlockRoot)]
if !ok {
panic(fmt.Sprintf("test setup failure, no fixture with root %#x", scid.BlockRoot))
@@ -76,7 +80,7 @@ func (c *blobsTestCase) filterExpectedByRoot(t *testing.T, scs []*ethpb.Deprecat
panic(fmt.Sprintf("test setup failure, no fixture at index %d with root %#x", scid.Index, scid.BlockRoot))
}
// Skip sidecars that are supposed to be missing.
root := bytesutil.ToBytes32(sc.BlockRoot)
root := sc.BlockRoot()
if c.missing[rootToOffset[root]] {
continue
}
@@ -86,14 +90,14 @@ func (c *blobsTestCase) filterExpectedByRoot(t *testing.T, scs []*ethpb.Deprecat
// will set streamTerminated = true and skip everything else in the test case.
if c.expired[rootToOffset[root]] {
return append(expect, &expectedBlobChunk{
sidecar: sc,
sidecar: &sc,
code: responseCodeResourceUnavailable,
message: p2pTypes.ErrBlobLTMinRequest.Error(),
})
}

expect = append(expect, &expectedBlobChunk{
sidecar: sc,
sidecar: &sc,
code: responseCodeSuccess,
message: "",
})
@@ -148,7 +152,7 @@ func readChunkEncodedBlobsLowMax(t *testing.T, s *Service, expect []*expectedBlo
encoding := s.cfg.p2p.Encoding()
ctxMap, err := ContextByteVersionsForValRoot(s.cfg.clock.GenesisValidatorsRoot())
require.NoError(t, err)
vf := func(sidecar *ethpb.DeprecatedBlobSidecar) error {
vf := func(sidecar blocks.ROBlob) error {
return nil
}
return func(stream network.Stream) {
@@ -161,7 +165,7 @@ func readChunkEncodedBlobsAsStreamReader(t *testing.T, s *Service, expect []*exp
encoding := s.cfg.p2p.Encoding()
ctxMap, err := ContextByteVersionsForValRoot(s.cfg.clock.GenesisValidatorsRoot())
require.NoError(t, err)
vf := func(sidecar *ethpb.DeprecatedBlobSidecar) error {
vf := func(sidecar blocks.ROBlob) error {
return nil
}
return func(stream network.Stream) {
@@ -170,9 +174,9 @@ func readChunkEncodedBlobsAsStreamReader(t *testing.T, s *Service, expect []*exp
require.Equal(t, len(expect), len(scs))
for i, sc := range scs {
esc := expect[i].sidecar
require.Equal(t, esc.Slot, sc.Slot)
require.Equal(t, esc.Slot(), sc.Slot())
require.Equal(t, esc.Index, sc.Index)
require.Equal(t, bytesutil.ToBytes32(esc.BlockRoot), bytesutil.ToBytes32(sc.BlockRoot))
require.Equal(t, esc.BlockRoot(), sc.BlockRoot())
}
}
}

@@ -9,10 +9,10 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/encoder"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/types"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/network/forks"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/time/slots"
)
@@ -159,12 +159,12 @@ func extractBlockDataType(digest []byte, tor blockchain.TemporalOracle) (interfa

// WriteBlobSidecarChunk writes blob chunk object to stream.
// response_chunk ::= <result> | <context-bytes> | <encoding-dependent-header> | <encoded-payload>
func WriteBlobSidecarChunk(stream libp2pcore.Stream, tor blockchain.TemporalOracle, encoding encoder.NetworkEncoding, sidecar *ethpb.DeprecatedBlobSidecar) error {
func WriteBlobSidecarChunk(stream libp2pcore.Stream, tor blockchain.TemporalOracle, encoding encoder.NetworkEncoding, sidecar blocks.VerifiedROBlob) error {
if _, err := stream.Write([]byte{responseCodeSuccess}); err != nil {
return err
}
valRoot := tor.GenesisValidatorsRoot()
ctxBytes, err := forks.ForkDigestFromEpoch(slots.ToEpoch(sidecar.GetSlot()), valRoot[:])
ctxBytes, err := forks.ForkDigestFromEpoch(slots.ToEpoch(sidecar.Slot()), valRoot[:])
if err != nil {
return err
}

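As a reading aid for the `response_chunk ::= <result> | <context-bytes> | <encoding-dependent-header> | <encoded-payload>` grammar in the hunk above, here is a rough, hypothetical sketch of the framing order. The helper name and the plain `io.Writer` are assumptions; in the real code the network encoder emits the length prefix and compression framing rather than accepting a pre-encoded payload.

```go
package chunksketch

import "io"

// writeBlobChunkFrame lays out one response chunk in the order described by
// the grammar above: a single result byte, the 4-byte fork-digest context,
// then the encoded sidecar payload.
func writeBlobChunkFrame(w io.Writer, forkDigest [4]byte, encodedSidecar []byte) error {
	// <result>: 0x00 signals success (responseCodeSuccess in the diff).
	if _, err := w.Write([]byte{0x00}); err != nil {
		return err
	}
	// <context-bytes>: fork digest derived from the sidecar's slot epoch.
	if _, err := w.Write(forkDigest[:]); err != nil {
		return err
	}
	// <encoding-dependent-header> | <encoded-payload>: the real implementation
	// delegates this to encoder.NetworkEncoding; here the caller passes the
	// already-encoded bytes.
	_, err := w.Write(encodedSidecar)
	return err
}
```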
@@ -14,9 +14,11 @@ import (
p2ptypes "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/types"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
pb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/time/slots"
@@ -144,7 +146,7 @@ func SendBeaconBlocksByRootRequest(
return blocks, nil
}

func SendBlobsByRangeRequest(ctx context.Context, tor blockchain.TemporalOracle, p2pApi p2p.SenderEncoder, pid peer.ID, ctxMap ContextByteVersions, req *pb.BlobSidecarsByRangeRequest) ([]*pb.DeprecatedBlobSidecar, error) {
func SendBlobsByRangeRequest(ctx context.Context, tor blockchain.TemporalOracle, p2pApi p2p.SenderEncoder, pid peer.ID, ctxMap ContextByteVersions, req *pb.BlobSidecarsByRangeRequest) ([]blocks.ROBlob, error) {
topic, err := p2p.TopicFromMessage(p2p.BlobSidecarsByRangeName, slots.ToEpoch(tor.CurrentSlot()))
if err != nil {
return nil, err
@@ -166,7 +168,7 @@ func SendBlobsByRangeRequest(ctx context.Context, tor blockchain.TemporalOracle,
func SendBlobSidecarByRoot(
ctx context.Context, tor blockchain.TemporalOracle, p2pApi p2p.P2P, pid peer.ID,
ctxMap ContextByteVersions, req *p2ptypes.BlobSidecarsByRootReq,
) ([]*pb.DeprecatedBlobSidecar, error) {
) ([]blocks.ROBlob, error) {
if uint64(len(*req)) > params.BeaconNetworkConfig().MaxRequestBlobSidecars {
return nil, errors.Wrapf(p2ptypes.ErrMaxBlobReqExceeded, "length=%d", len(*req))
}
@@ -189,16 +191,16 @@ func SendBlobSidecarByRoot(
return readChunkEncodedBlobs(stream, p2pApi.Encoding(), ctxMap, blobValidatorFromRootReq(req), max)
}

type blobResponseValidation func(*pb.DeprecatedBlobSidecar) error
type blobResponseValidation func(blocks.ROBlob) error

func blobValidatorFromRootReq(req *p2ptypes.BlobSidecarsByRootReq) blobResponseValidation {
roots := make(map[[32]byte]bool)
for _, sc := range *req {
roots[bytesutil.ToBytes32(sc.BlockRoot)] = true
}
return func(sc *pb.DeprecatedBlobSidecar) error {
if requested := roots[bytesutil.ToBytes32(sc.BlockRoot)]; !requested {
return errors.Wrapf(errUnrequestedRoot, "root=%#x", sc.BlockRoot)
return func(sc blocks.ROBlob) error {
if requested := roots[sc.BlockRoot()]; !requested {
return errors.Wrapf(errUnrequestedRoot, "root=%#x", sc.BlockRoot())
}
return nil
}
@@ -206,16 +208,16 @@ func blobValidatorFromRootReq(req *p2ptypes.BlobSidecarsByRootReq) blobResponseV

func blobValidatorFromRangeReq(req *pb.BlobSidecarsByRangeRequest) blobResponseValidation {
end := req.StartSlot + primitives.Slot(req.Count)
return func(sc *pb.DeprecatedBlobSidecar) error {
if sc.Slot < req.StartSlot || sc.Slot >= end {
return errors.Wrapf(errBlobResponseOutOfBounds, "req start,end:%d,%d, resp:%d", req.StartSlot, end, sc.Slot)
return func(sc blocks.ROBlob) error {
if sc.Slot() < req.StartSlot || sc.Slot() >= end {
return errors.Wrapf(errBlobResponseOutOfBounds, "req start,end:%d,%d, resp:%d", req.StartSlot, end, sc.Slot())
}
return nil
}
}

func readChunkEncodedBlobs(stream network.Stream, encoding encoder.NetworkEncoding, ctxMap ContextByteVersions, vf blobResponseValidation, max uint64) ([]*pb.DeprecatedBlobSidecar, error) {
sidecars := make([]*pb.DeprecatedBlobSidecar, 0)
func readChunkEncodedBlobs(stream network.Stream, encoding encoder.NetworkEncoding, ctxMap ContextByteVersions, vf blobResponseValidation, max uint64) ([]blocks.ROBlob, error) {
sidecars := make([]blocks.ROBlob, 0)
// Attempt an extra read beyond max to check if the peer is violating the spec by
// sending more than MAX_REQUEST_BLOB_SIDECARS, or more blobs than requested.
for i := uint64(0); i < max+1; i++ {
@@ -238,7 +240,9 @@ func readChunkEncodedBlobs(stream network.Stream, encoding encoder.NetworkEncodi
return sidecars, nil
}

func readChunkedBlobSidecar(stream network.Stream, encoding encoder.NetworkEncoding, ctxMap ContextByteVersions, vf blobResponseValidation) (*pb.DeprecatedBlobSidecar, error) {
func readChunkedBlobSidecar(stream network.Stream, encoding encoder.NetworkEncoding, ctxMap ContextByteVersions, vf blobResponseValidation) (blocks.ROBlob, error) {
var b blocks.ROBlob
pb := &ethpb.BlobSidecar{}
decode := encoding.DecodeWithMaxLength
var (
code uint8
@@ -246,31 +250,35 @@ func readChunkedBlobSidecar(stream network.Stream, encoding encoder.NetworkEncod
)
code, msg, err := ReadStatusCode(stream, encoding)
if err != nil {
return nil, err
return b, err
}
if code != 0 {
return nil, errors.Wrap(errBlobChunkedReadFailure, msg)
return b, errors.Wrap(errBlobChunkedReadFailure, msg)
}
ctxb, err := readContextFromStream(stream)
if err != nil {
return nil, errors.Wrap(err, "error reading chunk context bytes from stream")
return b, errors.Wrap(err, "error reading chunk context bytes from stream")
}

v, found := ctxMap[bytesutil.ToBytes4(ctxb)]
if !found {
return nil, errors.Wrapf(errBlobUnmarshal, fmt.Sprintf("unrecognized fork digest %#x", ctxb))
return b, errors.Wrapf(errBlobUnmarshal, fmt.Sprintf("unrecognized fork digest %#x", ctxb))
}
// Only deneb is supported at this time, because we lack a fork-spanning interface/union type for blobs.
if v != version.Deneb {
return nil, fmt.Errorf("unexpected context bytes for deneb BlobSidecar, ctx=%#x, v=%s", ctxb, version.String(v))
return b, fmt.Errorf("unexpected context bytes for deneb BlobSidecar, ctx=%#x, v=%s", ctxb, version.String(v))
}
sc := &pb.DeprecatedBlobSidecar{}
if err := decode(stream, sc); err != nil {
return nil, errors.Wrap(err, "failed to decode the protobuf-encoded BlobSidecar message from RPC chunk stream")
}
if err := vf(sc); err != nil {
return nil, errors.Wrap(err, "validation failure decoding blob RPC response")
if err := decode(stream, pb); err != nil {
return b, errors.Wrap(err, "failed to decode the protobuf-encoded BlobSidecar message from RPC chunk stream")
}

return sc, nil
rob, err := blocks.NewROBlob(pb)
if err != nil {
return b, errors.Wrap(err, "unexpected error initializing ROBlob")
}
if err := vf(rob); err != nil {
return b, errors.Wrap(err, "validation failure decoding blob RPC response")
}

return rob, nil
}

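The `readChunkEncodedBlobs` loop above deliberately attempts `max+1` reads so that a peer sending more chunks than the limit allows is detected rather than silently truncated. A self-contained sketch of that pattern with stand-in types follows; the `read` callback and the use of `io.EOF` as the end-of-stream signal are assumptions for illustration, not the sentinels used in the real code.

```go
package readsketch

import (
	"errors"
	"io"
)

var errTooManyChunks = errors.New("peer sent more chunks than the request allows")

// readAtMost issues up to max+1 reads: a clean EOF ends the stream, while any
// value returned on the (max+1)-th read means the peer exceeded the limit.
func readAtMost(read func() ([]byte, error), max uint64) ([][]byte, error) {
	out := make([][]byte, 0, max)
	for i := uint64(0); i < max+1; i++ {
		chunk, err := read()
		if errors.Is(err, io.EOF) {
			break // peer closed the stream after its last chunk
		}
		if err != nil {
			return nil, err
		}
		if i == max {
			// The extra read succeeded, so the peer violated the limit.
			return nil, errTooManyChunks
		}
		out = append(out, chunk)
	}
	return out, nil
}
```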
Some files were not shown because too many files have changed in this diff.