Deneb DB methods (#12379)

Committed by james-prysm
Parent: d6961f0e28
Commit: b1adeeb83c
@@ -54,6 +54,11 @@ type ReadOnlyDatabase interface {
	// Fee recipients operations.
	FeeRecipientByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (common.Address, error)
	RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)

	// Blob operations.
	BlobSidecarsByRoot(ctx context.Context, beaconBlockRoot [32]byte, indices ...uint64) ([]*ethpb.BlobSidecar, error)
	BlobSidecarsBySlot(ctx context.Context, slot primitives.Slot, indices ...uint64) ([]*ethpb.BlobSidecar, error)

	// origin checkpoint sync support
	OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
	BackfillBlockRoot(ctx context.Context) ([32]byte, error)

@@ -89,6 +94,10 @@ type NoHeadAccessDatabase interface {
	SaveFeeRecipientsByValidatorIDs(ctx context.Context, ids []primitives.ValidatorIndex, addrs []common.Address) error
	SaveRegistrationsByValidatorIDs(ctx context.Context, ids []primitives.ValidatorIndex, regs []*ethpb.ValidatorRegistrationV1) error

	// Blob operations.
	SaveBlobSidecar(ctx context.Context, sidecars []*ethpb.BlobSidecar) error
	DeleteBlobSidecar(ctx context.Context, beaconBlockRoot [32]byte) error

	CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error
}
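The read-side methods above pair with SaveBlobSidecar and DeleteBlobSidecar on NoHeadAccessDatabase. As a rough illustration of how a caller is expected to use them, here is a minimal sketch; it assumes the interfaces live in the beacon-chain/db/iface package as elsewhere in Prysm, and the helper name and error handling are invented for the example, not part of this change.

// Illustrative only: a hypothetical helper reading blobs through the new methods.
package example

import (
	"context"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/iface"
	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)

func blobsForBlock(ctx context.Context, db iface.ReadOnlyDatabase, root [32]byte) ([]*ethpb.BlobSidecar, error) {
	// No indices: return every sidecar stored for this block root.
	all, err := db.BlobSidecarsByRoot(ctx, root)
	if err != nil {
		return nil, err
	}
	// With indices: only the requested entries come back, and an out-of-range
	// index surfaces as an error (a wrapped ErrNotFound in the kv layer below).
	if _, err := db.BlobSidecarsByRoot(ctx, root, 0, 3); err != nil {
		return nil, err
	}
	return all, nil
}

The variadic indices argument is the only filtering knob: omit it to get everything stored under the root or slot, or pass explicit indices and expect an error if any of them is out of range.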
@@ -5,6 +5,7 @@ go_library(
    srcs = [
        "archived_point.go",
        "backup.go",
        "blob.go",
        "blocks.go",
        "checkpoint.go",
        "deposit_contract.go",
@@ -74,6 +75,7 @@ go_test(
    srcs = [
        "archived_point_test.go",
        "backup_test.go",
        "blob_test.go",
        "blocks_test.go",
        "checkpoint_test.go",
        "deposit_contract_test.go",
@@ -110,6 +112,7 @@ go_test(
        "//proto/prysm/v1alpha1:go_default_library",
        "//proto/testing:go_default_library",
        "//testing/assert:go_default_library",
        "//testing/assertions:go_default_library",
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
        "@com_github_ethereum_go_ethereum//common:go_default_library",
beacon-chain/db/kv/blob.go (new file, 252 lines)
@@ -0,0 +1,252 @@
package kv

import (
	"bytes"
	"context"
	"fmt"
	"sort"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v4/config/params"
	types "github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
	ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
	bolt "go.etcd.io/bbolt"
	"go.opencensus.io/trace"
)

// SaveBlobSidecar saves the blobs for a given epoch in the sidecar bucket. When we receive a blob:
//
// 1. Convert slot using a modulo operator to [0, maxSlots] where maxSlots = MAX_BLOB_EPOCHS*SLOTS_PER_EPOCH
//
// 2. Compute key for blob as bytes(slot_to_rotating_buffer(blob.slot)) ++ bytes(blob.slot) ++ blob.block_root
//
// 3. Begin the save algorithm: If the incoming blob has a slot bigger than the saved slot at the spot
// in the rotating keys buffer, we overwrite all elements for that slot.
func (s *Store) SaveBlobSidecar(ctx context.Context, scs []*ethpb.BlobSidecar) error {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBlobSidecar")
	defer span.End()

	sortSideCars(scs)
	if err := s.verifySideCars(scs); err != nil {
		return err
	}

	slot := scs[0].Slot
	return s.db.Update(func(tx *bolt.Tx) error {
		encodedBlobSidecar, err := encode(ctx, &ethpb.BlobSidecars{Sidecars: scs})
		if err != nil {
			return err
		}
		bkt := tx.Bucket(blobsBucket)
		c := bkt.Cursor()
		newKey := blobSidecarKey(scs[0])
		rotatingBufferPrefix := newKey[0:8]
		var replacingKey []byte
		for k, _ := c.Seek(rotatingBufferPrefix); bytes.HasPrefix(k, rotatingBufferPrefix); k, _ = c.Next() {
			if len(k) != 0 {
				replacingKey = k
				oldSlotBytes := replacingKey[8:16]
				oldSlot := bytesutil.BytesToSlotBigEndian(oldSlotBytes)
				if oldSlot >= slot {
					return fmt.Errorf("attempted to save blob with slot %d but already have older blob with slot %d", slot, oldSlot)
				}
				break
			}
		}
		// If there is no element stored at blob.slot % MAX_SLOTS_TO_PERSIST_BLOBS, then we simply
		// store the blob by key and exit early.
		if len(replacingKey) != 0 {
			if err := bkt.Delete(replacingKey); err != nil {
				log.WithError(err).Warnf("Could not delete blob with key %#x", replacingKey)
			}
		}
		return bkt.Put(newKey, encodedBlobSidecar)
	})
}

// verifySideCars ensures that all sidecars have the same slot, parent root, block root, and proposer index.
// It also ensures that indices are sequential and start at 0 and no more than MAX_BLOB_EPOCHS.
func (s *Store) verifySideCars(scs []*ethpb.BlobSidecar) error {
	if len(scs) == 0 {
		return errors.New("nil or empty blob sidecars")
	}
	if uint64(len(scs)) > params.BeaconConfig().MaxBlobsPerBlock {
		return fmt.Errorf("too many sidecars: %d > %d", len(scs), params.BeaconConfig().MaxBlobsPerBlock)
	}

	sl := scs[0].Slot
	pr := scs[0].BlockParentRoot
	r := scs[0].BlockRoot
	p := scs[0].ProposerIndex

	for i, sc := range scs {
		if sc.Slot != sl {
			return fmt.Errorf("sidecar slot mismatch: %d != %d", sc.Slot, sl)
		}
		if !bytes.Equal(sc.BlockParentRoot, pr) {
			return fmt.Errorf("sidecar parent root mismatch: %x != %x", sc.BlockParentRoot, pr)
		}
		if !bytes.Equal(sc.BlockRoot, r) {
			return fmt.Errorf("sidecar root mismatch: %x != %x", sc.BlockRoot, r)
		}
		if sc.ProposerIndex != p {
			return fmt.Errorf("sidecar proposer index mismatch: %d != %d", sc.ProposerIndex, p)
		}
		if sc.Index != uint64(i) {
			return fmt.Errorf("sidecar index mismatch: %d != %d", sc.Index, i)
		}
	}
	return nil
}

// sortSideCars sorts the sidecars by their index.
func sortSideCars(scs []*ethpb.BlobSidecar) {
	sort.Slice(scs, func(i, j int) bool {
		return scs[i].Index < scs[j].Index
	})
}

// BlobSidecarsByRoot retrieves the blobs for the given beacon block root.
// If the `indices` argument is omitted, all blobs for the root will be returned.
// Otherwise, the result will be filtered to only include the specified indices.
// An error will result if an invalid index is specified.
// The bucket size is bounded by 131072 entries. That's the most blobs a node will keep before rotating it out.
func (s *Store) BlobSidecarsByRoot(ctx context.Context, root [32]byte, indices ...uint64) ([]*ethpb.BlobSidecar, error) {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.BlobSidecarsByRoot")
	defer span.End()

	var enc []byte
	if err := s.db.View(func(tx *bolt.Tx) error {
		c := tx.Bucket(blobsBucket).Cursor()
		// Bucket size is bounded and bolt cursors are fast. Moreover, a thin caching layer can be added.
		for k, v := c.First(); k != nil; k, v = c.Next() {
			if bytes.HasSuffix(k, root[:]) {
				enc = v
				break
			}
		}
		return nil
	}); err != nil {
		return nil, err
	}
	if enc == nil {
		return nil, ErrNotFound
	}
	sc := &ethpb.BlobSidecars{}
	if err := decode(ctx, enc, sc); err != nil {
		return nil, err
	}

	return filterForIndices(sc, indices...)
}

func filterForIndices(sc *ethpb.BlobSidecars, indices ...uint64) ([]*ethpb.BlobSidecar, error) {
	if len(indices) == 0 {
		return sc.Sidecars, nil
	}
	// This loop assumes that the BlobSidecars value stores the complete set of blobs for a block
	// in ascending order from eg 0..3, without gaps. This allows us to assume the indices argument
	// maps 1:1 with indices in the BlobSidecars storage object.
	maxIdx := uint64(len(sc.Sidecars)) - 1
	sidecars := make([]*ethpb.BlobSidecar, len(indices))
	for i, idx := range indices {
		if idx > maxIdx {
			return nil, errors.Wrapf(ErrNotFound, "BlobSidecars missing index: index %d", idx)
		}
		sidecars[i] = sc.Sidecars[idx]
	}
	return sidecars, nil
}

// BlobSidecarsBySlot retrieves BlobSidecars for the given slot.
// If the `indices` argument is omitted, all blobs for the root will be returned.
// Otherwise, the result will be filtered to only include the specified indices.
// An error will result if an invalid index is specified.
// The bucket size is bounded by 131072 entries. That's the most blobs a node will keep before rotating it out.
func (s *Store) BlobSidecarsBySlot(ctx context.Context, slot types.Slot, indices ...uint64) ([]*ethpb.BlobSidecar, error) {
	ctx, span := trace.StartSpan(ctx, "BeaconDB.BlobSidecarsBySlot")
	defer span.End()

	var enc []byte
	sk := slotKey(slot)
	if err := s.db.View(func(tx *bolt.Tx) error {
		c := tx.Bucket(blobsBucket).Cursor()
		// Bucket size is bounded and bolt cursors are fast. Moreover, a thin caching layer can be added.
		for k, v := c.Seek(sk); bytes.HasPrefix(k, sk); k, _ = c.Next() {
			slotInKey := bytesutil.BytesToSlotBigEndian(k[8:16])
			if slotInKey == slot {
				enc = v
				break
			}
		}
		return nil
	}); err != nil {
		return nil, err
	}
	if enc == nil {
		return nil, ErrNotFound
	}
	sc := &ethpb.BlobSidecars{}
	if err := decode(ctx, enc, sc); err != nil {
		return nil, err
	}

	return filterForIndices(sc, indices...)
}

// DeleteBlobSidecar returns true if the blobs are in the db.
func (s *Store) DeleteBlobSidecar(ctx context.Context, beaconBlockRoot [32]byte) error {
	_, span := trace.StartSpan(ctx, "BeaconDB.DeleteBlobSidecar")
	defer span.End()
	return s.db.Update(func(tx *bolt.Tx) error {
		bkt := tx.Bucket(blobsBucket)
		c := bkt.Cursor()
		for k, _ := c.First(); k != nil; k, _ = c.Next() {
			if bytes.HasSuffix(k, beaconBlockRoot[:]) {
				if err := bkt.Delete(k); err != nil {
					return err
				}
			}
		}
		return nil
	})
}

// We define a blob sidecar key as: bytes(slot_to_rotating_buffer(blob.slot)) ++ bytes(blob.slot) ++ blob.block_root
// where slot_to_rotating_buffer(slot) = slot % MAX_SLOTS_TO_PERSIST_BLOBS.
func blobSidecarKey(blob *ethpb.BlobSidecar) []byte {
	key := slotKey(blob.Slot)
	key = append(key, bytesutil.SlotToBytesBigEndian(blob.Slot)...)
	key = append(key, blob.BlockRoot...)
	return key
}

func slotKey(slot types.Slot) []byte {
	slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
	maxEpochsToPersistBlobs := params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest
	maxSlotsToPersistBlobs := types.Slot(maxEpochsToPersistBlobs.Mul(uint64(slotsPerEpoch)))
	return bytesutil.SlotToBytesBigEndian(slot.ModSlot(maxSlotsToPersistBlobs))
}

func checkEpochsForBlobSidecarsRequestBucket(db *bolt.DB) error {
	if err := db.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket(epochsForBlobSidecarsRequestBucket)
		k := []byte("epoch-key")
		v := b.Get(k)
		if v == nil {
			if err := b.Put(k, bytesutil.Uint64ToBytesBigEndian(uint64(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest))); err != nil {
				return err
			}
			return nil
		}
		e := bytesutil.BytesToUint64BigEndian(v)
		if e != uint64(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest) {
			return fmt.Errorf("epochs for blobs request value in DB %d does not match config value %d", e, params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest)
		}
		return nil
	}); err != nil {
		return err
	}
	return nil
}
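A quick worked example may make the key scheme above more concrete. slotKey contributes an 8-byte big-endian rotating-buffer position, SlotToBytesBigEndian contributes the 8-byte actual slot, and the 32-byte block root completes a 48-byte key; with the mainnet retention window of 4096 epochs times 32 slots per epoch, the buffer holds 131072 positions, which is where the bound quoted in the comments comes from. The sketch below mirrors that layout with hard-coded mainnet numbers instead of reading params, so it is illustrative only.

// Sketch only: mirrors the blobSidecarKey layout under mainnet assumptions
// (MinEpochsForBlobsSidecarsRequest = 4096, SlotsPerEpoch = 32).
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	const maxSlotsToPersistBlobs = 4096 * 32 // 131072-slot rotating buffer

	slot := uint64(131073)
	rotating := slot % maxSlotsToPersistBlobs // 1: same buffer position as slot 1

	blockRoot := make([]byte, 32) // placeholder root
	key := make([]byte, 16, 16+len(blockRoot))
	binary.BigEndian.PutUint64(key[0:8], rotating) // prefix scanned by SaveBlobSidecar's cursor
	binary.BigEndian.PutUint64(key[8:16], slot)    // compared against the incumbent's slot
	key = append(key, blockRoot...)

	fmt.Printf("key is %d bytes; rotating prefix %x, slot %d\n", len(key), key[0:8], slot)
}

Because slot 131073 maps to position 1, saving it evicts whatever sidecars were stored for slot 1, which is exactly the rotation behavior SaveBlobSidecar implements.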
beacon-chain/db/kv/blob_test.go (new file, 316 lines)
@@ -0,0 +1,316 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v4/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/assertions"
|
||||
"github.com/prysmaticlabs/prysm/v4/testing/require"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
func equalBlobSlices(expect []*ethpb.BlobSidecar, got []*ethpb.BlobSidecar) error {
|
||||
if len(expect) != len(got) {
|
||||
return fmt.Errorf("mismatched lengths, expect=%d, got=%d", len(expect), len(got))
|
||||
}
|
||||
for i := 0; i < len(expect); i++ {
|
||||
es := expect[i]
|
||||
gs := got[i]
|
||||
var e string
|
||||
assertions.DeepEqual(assertions.SprintfAssertionLoggerFn(&e), es, gs)
|
||||
if e != "" {
|
||||
return errors.New(e)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestStore_BlobSidecars(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
t.Run("empty", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
scs := generateBlobSidecars(t, 0)
|
||||
require.ErrorContains(t, "nil or empty blob sidecars", db.SaveBlobSidecar(ctx, scs))
|
||||
})
|
||||
t.Run("empty by root", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
got, err := db.BlobSidecarsByRoot(ctx, [32]byte{})
|
||||
require.ErrorIs(t, ErrNotFound, err)
|
||||
require.Equal(t, 0, len(got))
|
||||
})
|
||||
t.Run("empty by slot", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
got, err := db.BlobSidecarsBySlot(ctx, 1)
|
||||
require.ErrorIs(t, ErrNotFound, err)
|
||||
require.Equal(t, 0, len(got))
|
||||
})
|
||||
t.Run("save and retrieve by root (one)", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
scs := generateBlobSidecars(t, 1)
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
|
||||
require.Equal(t, 1, len(scs))
|
||||
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, equalBlobSlices(scs, got))
|
||||
})
|
||||
t.Run("save and retrieve by root (max)", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
scs := generateBlobSidecars(t, params.BeaconConfig().MaxBlobsPerBlock)
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
|
||||
require.Equal(t, int(params.BeaconConfig().MaxBlobsPerBlock), len(scs))
|
||||
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, equalBlobSlices(scs, got))
|
||||
})
|
||||
t.Run("save and retrieve valid subset by root", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
scs := generateBlobSidecars(t, params.BeaconConfig().MaxBlobsPerBlock)
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
|
||||
require.Equal(t, int(params.BeaconConfig().MaxBlobsPerBlock), len(scs))
|
||||
|
||||
// we'll request indices 0 and 3, so make a slice with those indices for comparison
|
||||
expect := make([]*ethpb.BlobSidecar, 2)
|
||||
expect[0] = scs[0]
|
||||
expect[1] = scs[3]
|
||||
|
||||
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot), 0, 3)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, equalBlobSlices(expect, got))
|
||||
require.Equal(t, uint64(0), got[0].Index)
|
||||
require.Equal(t, uint64(3), got[1].Index)
|
||||
})
|
||||
t.Run("error for invalid index when retrieving by root", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
scs := generateBlobSidecars(t, params.BeaconConfig().MaxBlobsPerBlock)
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
|
||||
require.Equal(t, int(params.BeaconConfig().MaxBlobsPerBlock), len(scs))
|
||||
|
||||
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot), uint64(len(scs)))
|
||||
require.ErrorIs(t, err, ErrNotFound)
|
||||
require.Equal(t, 0, len(got))
|
||||
})
|
||||
t.Run("save and retrieve by slot (one)", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
scs := generateBlobSidecars(t, 1)
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
|
||||
require.Equal(t, 1, len(scs))
|
||||
got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, equalBlobSlices(scs, got))
|
||||
})
|
||||
t.Run("save and retrieve by slot (max)", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
scs := generateBlobSidecars(t, params.BeaconConfig().MaxBlobsPerBlock)
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
|
||||
require.Equal(t, int(params.BeaconConfig().MaxBlobsPerBlock), len(scs))
|
||||
got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, equalBlobSlices(scs, got))
|
||||
})
|
||||
t.Run("save and retrieve valid subset by slot", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
scs := generateBlobSidecars(t, params.BeaconConfig().MaxBlobsPerBlock)
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
|
||||
require.Equal(t, int(params.BeaconConfig().MaxBlobsPerBlock), len(scs))
|
||||
|
||||
// we'll request indices 0 and 3, so make a slice with those indices for comparison
|
||||
expect := make([]*ethpb.BlobSidecar, 2)
|
||||
expect[0] = scs[0]
|
||||
expect[1] = scs[3]
|
||||
|
||||
got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot, 0, 3)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, equalBlobSlices(expect, got))
|
||||
|
||||
require.Equal(t, uint64(0), got[0].Index)
|
||||
require.Equal(t, uint64(3), got[1].Index)
|
||||
})
|
||||
t.Run("error for invalid index when retrieving by slot", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
scs := generateBlobSidecars(t, params.BeaconConfig().MaxBlobsPerBlock)
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
|
||||
require.Equal(t, int(params.BeaconConfig().MaxBlobsPerBlock), len(scs))
|
||||
|
||||
got, err := db.BlobSidecarsBySlot(ctx, scs[0].Slot, uint64(len(scs)))
|
||||
require.ErrorIs(t, err, ErrNotFound)
|
||||
require.Equal(t, 0, len(got))
|
||||
})
|
||||
t.Run("delete works", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
scs := generateBlobSidecars(t, params.BeaconConfig().MaxBlobsPerBlock)
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
|
||||
require.Equal(t, int(params.BeaconConfig().MaxBlobsPerBlock), len(scs))
|
||||
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, equalBlobSlices(scs, got))
|
||||
require.NoError(t, db.DeleteBlobSidecar(ctx, bytesutil.ToBytes32(scs[0].BlockRoot)))
|
||||
got, err = db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
|
||||
require.ErrorIs(t, ErrNotFound, err)
|
||||
require.Equal(t, 0, len(got))
|
||||
})
|
||||
t.Run("saving a blob with older slot", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
scs := generateBlobSidecars(t, params.BeaconConfig().MaxBlobsPerBlock)
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
|
||||
require.Equal(t, int(params.BeaconConfig().MaxBlobsPerBlock), len(scs))
|
||||
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(scs[0].BlockRoot))
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, equalBlobSlices(scs, got))
|
||||
require.ErrorContains(t, "but already have older blob with slot", db.SaveBlobSidecar(ctx, scs))
|
||||
})
|
||||
t.Run("saving a new blob for rotation", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
scs := generateBlobSidecars(t, params.BeaconConfig().MaxBlobsPerBlock)
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, scs))
|
||||
require.Equal(t, int(params.BeaconConfig().MaxBlobsPerBlock), len(scs))
|
||||
oldBlockRoot := scs[0].BlockRoot
|
||||
got, err := db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(oldBlockRoot))
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, equalBlobSlices(scs, got))
|
||||
|
||||
newScs := generateBlobSidecars(t, params.BeaconConfig().MaxBlobsPerBlock)
|
||||
newRetentionSlot := primitives.Slot(params.BeaconNetworkConfig().MinEpochsForBlobsSidecarsRequest.Mul(uint64(params.BeaconConfig().SlotsPerEpoch)))
|
||||
for _, sc := range newScs {
|
||||
sc.Slot = sc.Slot + newRetentionSlot
|
||||
}
|
||||
require.NoError(t, db.SaveBlobSidecar(ctx, newScs))
|
||||
|
||||
_, err = db.BlobSidecarsBySlot(ctx, 100)
|
||||
require.ErrorIs(t, ErrNotFound, err)
|
||||
|
||||
got, err = db.BlobSidecarsByRoot(ctx, bytesutil.ToBytes32(newScs[0].BlockRoot))
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, equalBlobSlices(newScs, got))
|
||||
})
|
||||
}
|
||||
|
||||
func generateBlobSidecars(t *testing.T, n uint64) []*ethpb.BlobSidecar {
|
||||
blobSidecars := make([]*ethpb.BlobSidecar, n)
|
||||
for i := uint64(0); i < n; i++ {
|
||||
blobSidecars[i] = generateBlobSidecar(t, i)
|
||||
}
|
||||
return blobSidecars
|
||||
}
|
||||
|
||||
func generateBlobSidecar(t *testing.T, index uint64) *ethpb.BlobSidecar {
|
||||
blob := make([]byte, 131072)
|
||||
_, err := rand.Read(blob)
|
||||
require.NoError(t, err)
|
||||
kzgCommitment := make([]byte, 48)
|
||||
_, err = rand.Read(kzgCommitment)
|
||||
require.NoError(t, err)
|
||||
kzgProof := make([]byte, 48)
|
||||
_, err = rand.Read(kzgProof)
|
||||
require.NoError(t, err)
|
||||
|
||||
return ðpb.BlobSidecar{
|
||||
BlockRoot: bytesutil.PadTo([]byte{'a'}, 32),
|
||||
Index: index,
|
||||
Slot: 100,
|
||||
BlockParentRoot: bytesutil.PadTo([]byte{'b'}, 32),
|
||||
ProposerIndex: 101,
|
||||
Blob: blob,
|
||||
KzgCommitment: kzgCommitment,
|
||||
KzgProof: kzgProof,
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_verifySideCars(t *testing.T) {
|
||||
s := setupDB(t)
|
||||
tests := []struct {
|
||||
name string
|
||||
scs []*ethpb.BlobSidecar
|
||||
error string
|
||||
}{
|
||||
{name: "empty", scs: []*ethpb.BlobSidecar{}, error: "nil or empty blob sidecars"},
|
||||
{name: "too many sidecars", scs: generateBlobSidecars(t, params.BeaconConfig().MaxBlobsPerBlock+1), error: "too many sidecars: 5 > 4"},
|
||||
{name: "invalid slot", scs: []*ethpb.BlobSidecar{{Slot: 1}, {Slot: 2}}, error: "sidecar slot mismatch: 2 != 1"},
|
||||
{name: "invalid proposer index", scs: []*ethpb.BlobSidecar{{ProposerIndex: 1}, {ProposerIndex: 2}}, error: "sidecar proposer index mismatch: 2 != 1"},
|
||||
{name: "invalid root", scs: []*ethpb.BlobSidecar{{BlockRoot: []byte{1}}, {BlockRoot: []byte{2}}}, error: "sidecar root mismatch: 02 != 01"},
|
||||
{name: "invalid parent root", scs: []*ethpb.BlobSidecar{{BlockParentRoot: []byte{1}}, {BlockParentRoot: []byte{2}}}, error: "sidecar parent root mismatch: 02 != 01"},
|
||||
{name: "invalid side index", scs: []*ethpb.BlobSidecar{{Index: 0}, {Index: 0}}, error: "sidecar index mismatch: 0 != 1"},
|
||||
{name: "happy path", scs: []*ethpb.BlobSidecar{{Index: 0}, {Index: 1}}, error: ""},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := s.verifySideCars(tt.scs)
|
||||
if tt.error != "" {
|
||||
require.Equal(t, tt.error, err.Error())
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_sortSidecars(t *testing.T) {
|
||||
scs := []*ethpb.BlobSidecar{
|
||||
{Index: 6},
|
||||
{Index: 4},
|
||||
{Index: 2},
|
||||
{Index: 1},
|
||||
{Index: 3},
|
||||
{Index: 5},
|
||||
{},
|
||||
}
|
||||
sortSideCars(scs)
|
||||
for i := 0; i < len(scs)-1; i++ {
|
||||
require.Equal(t, uint64(i), scs[i].Index)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkStore_BlobSidecarsByRoot(b *testing.B) {
|
||||
s := setupDB(b)
|
||||
ctx := context.Background()
|
||||
require.NoError(b, s.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{
|
||||
{BlockRoot: bytesutil.PadTo([]byte{'a'}, 32), Slot: 0},
|
||||
}))
|
||||
|
||||
err := s.db.Update(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket(blobsBucket)
|
||||
for i := 1; i < 131071; i++ {
|
||||
r := make([]byte, 32)
|
||||
_, err := rand.Read(r)
|
||||
require.NoError(b, err)
|
||||
scs := []*ethpb.BlobSidecar{
|
||||
{BlockRoot: r, Slot: primitives.Slot(i)},
|
||||
}
|
||||
k := blobSidecarKey(scs[0])
|
||||
encodedBlobSidecar, err := encode(ctx, ðpb.BlobSidecars{Sidecars: scs})
|
||||
require.NoError(b, err)
|
||||
require.NoError(b, bkt.Put(k, encodedBlobSidecar))
|
||||
}
|
||||
return nil
|
||||
})
|
||||
require.NoError(b, err)
|
||||
|
||||
require.NoError(b, s.SaveBlobSidecar(ctx, []*ethpb.BlobSidecar{
|
||||
{BlockRoot: bytesutil.PadTo([]byte{'b'}, 32), Slot: 131071},
|
||||
}))
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := s.BlobSidecarsByRoot(ctx, [32]byte{'b'})
|
||||
require.NoError(b, err)
|
||||
}
|
||||
}
|
||||
|
||||
func Test_checkEpochsForBlobSidecarsRequestBucket(t *testing.T) {
|
||||
dbStore := setupDB(t)
|
||||
|
||||
require.NoError(t, checkEpochsForBlobSidecarsRequestBucket(dbStore.db)) // First write
|
||||
require.NoError(t, checkEpochsForBlobSidecarsRequestBucket(dbStore.db)) // First check
|
||||
|
||||
nConfig := params.BeaconNetworkConfig()
|
||||
nConfig.MinEpochsForBlobsSidecarsRequest = 42069
|
||||
params.OverrideBeaconNetworkConfig(nConfig)
|
||||
require.ErrorContains(t, "epochs for blobs request value in DB 4096 does not match config value 42069", checkEpochsForBlobSidecarsRequestBucket(dbStore.db))
|
||||
}
|
||||
@@ -818,6 +818,16 @@ func unmarshalBlock(_ context.Context, enc []byte) (interfaces.ReadOnlySignedBea
		if err := rawBlock.UnmarshalSSZ(enc[len(capellaBlindKey):]); err != nil {
			return nil, errors.Wrap(err, "could not unmarshal blinded Capella block")
		}
	case hasDenebKey(enc):
		rawBlock = &ethpb.SignedBeaconBlockDeneb{}
		if err := rawBlock.UnmarshalSSZ(enc[len(denebKey):]); err != nil {
			return nil, errors.Wrap(err, "could not unmarshal Deneb block")
		}
	case hasDenebBlindKey(enc):
		rawBlock = &ethpb.SignedBlindedBeaconBlockDeneb{}
		if err := rawBlock.UnmarshalSSZ(enc[len(denebBlindKey):]); err != nil {
			return nil, errors.Wrap(err, "could not unmarshal blinded Deneb block")
		}
	default:
		// Marshal block bytes to phase 0 beacon block.
		rawBlock = &ethpb.SignedBeaconBlock{}
@@ -854,6 +864,8 @@ func marshalBlockFull(
		return nil, err
	}
	switch blk.Version() {
	case version.Deneb:
		return snappy.Encode(nil, append(denebKey, encodedBlock...)), nil
	case version.Capella:
		return snappy.Encode(nil, append(capellaKey, encodedBlock...)), nil
	case version.Bellatrix:
@@ -888,6 +900,8 @@ func marshalBlockBlinded(
		return nil, errors.Wrap(err, "could not marshal blinded block")
	}
	switch blk.Version() {
	case version.Deneb:
		return snappy.Encode(nil, append(denebBlindKey, encodedBlock...)), nil
	case version.Capella:
		return snappy.Encode(nil, append(capellaBlindKey, encodedBlock...)), nil
	case version.Bellatrix:
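All three hunks above follow the same framing convention: the SSZ bytes of a block are prefixed with a per-fork key and then snappy-compressed, and the reader reverses the process and dispatches on the prefix (the hasDenebKey and hasDenebBlindKey helpers appear further down). A small self-contained sketch of that round trip, using the same snappy package the diff already imports; the payload bytes and the local denebKey variable are placeholders rather than a real encoded block.

// Sketch of the fork-prefix framing; payload and key are stand-ins.
package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy"
)

var denebKey = []byte("deneb")

func main() {
	sszBytes := []byte{0x01, 0x02, 0x03} // placeholder for a marshalled Deneb block
	stored := snappy.Encode(nil, append(denebKey, sszBytes...))

	// On read: decompress, then dispatch on the prefix before SSZ-unmarshalling.
	dec, err := snappy.Decode(nil, stored)
	if err != nil {
		panic(err)
	}
	if len(dec) > len(denebKey) && bytes.Equal(dec[:len(denebKey)], denebKey) {
		fmt.Printf("deneb payload: %x\n", dec[len(denebKey):])
	}
}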
@@ -90,6 +90,40 @@ var blockTests = []struct {
|
||||
return blocks.NewSignedBeaconBlock(b)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "deneb",
|
||||
newBlock: func(slot primitives.Slot, root []byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
|
||||
b := util.NewBeaconBlockDeneb()
|
||||
b.Block.Slot = slot
|
||||
if root != nil {
|
||||
b.Block.ParentRoot = root
|
||||
b.Block.Body.BlobKzgCommitments = [][]byte{
|
||||
bytesutil.PadTo([]byte{0x01}, 48),
|
||||
bytesutil.PadTo([]byte{0x02}, 48),
|
||||
bytesutil.PadTo([]byte{0x03}, 48),
|
||||
bytesutil.PadTo([]byte{0x04}, 48),
|
||||
}
|
||||
}
|
||||
return blocks.NewSignedBeaconBlock(b)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "deneb blind",
|
||||
newBlock: func(slot primitives.Slot, root []byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
|
||||
b := util.NewBlindedBeaconBlockDeneb()
|
||||
b.Block.Slot = slot
|
||||
if root != nil {
|
||||
b.Block.ParentRoot = root
|
||||
b.Block.Body.BlobKzgCommitments = [][]byte{
|
||||
bytesutil.PadTo([]byte{0x05}, 48),
|
||||
bytesutil.PadTo([]byte{0x06}, 48),
|
||||
bytesutil.PadTo([]byte{0x07}, 48),
|
||||
bytesutil.PadTo([]byte{0x08}, 48),
|
||||
}
|
||||
}
|
||||
return blocks.NewSignedBeaconBlock(b)
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func TestStore_SaveBackfillBlockRoot(t *testing.T) {
|
||||
@@ -359,6 +393,10 @@ func TestStore_BlocksCRUD_NoCache(t *testing.T) {
|
||||
wanted, err = blk.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
if _, err := blk.PbDenebBlock(); err == nil {
|
||||
wanted, err = blk.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
wantedPb, err := wanted.Proto()
|
||||
require.NoError(t, err)
|
||||
retrievedPb, err := retrievedBlock.Proto()
|
||||
@@ -582,6 +620,10 @@ func TestStore_SaveBlock_CanGetHighestAt(t *testing.T) {
|
||||
wanted, err = wanted.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
if _, err := block1.PbDenebBlock(); err == nil {
|
||||
wanted, err = wanted.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
wantedPb, err := wanted.Proto()
|
||||
require.NoError(t, err)
|
||||
bPb, err := b.Proto()
|
||||
@@ -604,6 +646,10 @@ func TestStore_SaveBlock_CanGetHighestAt(t *testing.T) {
|
||||
wanted2, err = block2.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
if _, err := block2.PbDenebBlock(); err == nil {
|
||||
wanted2, err = block2.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
wanted2Pb, err := wanted2.Proto()
|
||||
require.NoError(t, err)
|
||||
bPb, err = b.Proto()
|
||||
@@ -626,6 +672,10 @@ func TestStore_SaveBlock_CanGetHighestAt(t *testing.T) {
|
||||
wanted, err = wanted.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
if _, err := block3.PbDenebBlock(); err == nil {
|
||||
wanted, err = wanted.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
wantedPb, err = wanted.Proto()
|
||||
require.NoError(t, err)
|
||||
bPb, err = b.Proto()
|
||||
@@ -666,6 +716,10 @@ func TestStore_GenesisBlock_CanGetHighestAt(t *testing.T) {
|
||||
wanted, err = block1.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
if _, err := block1.PbDenebBlock(); err == nil {
|
||||
wanted, err = block1.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
wantedPb, err := wanted.Proto()
|
||||
require.NoError(t, err)
|
||||
bPb, err := b.Proto()
|
||||
@@ -687,6 +741,10 @@ func TestStore_GenesisBlock_CanGetHighestAt(t *testing.T) {
|
||||
wanted, err = genesisBlock.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
if _, err := genesisBlock.PbDenebBlock(); err == nil {
|
||||
wanted, err = genesisBlock.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
wantedPb, err = wanted.Proto()
|
||||
require.NoError(t, err)
|
||||
bPb, err = b.Proto()
|
||||
@@ -708,6 +766,10 @@ func TestStore_GenesisBlock_CanGetHighestAt(t *testing.T) {
|
||||
wanted, err = genesisBlock.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
if _, err := genesisBlock.PbDenebBlock(); err == nil {
|
||||
wanted, err = genesisBlock.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
wantedPb, err = wanted.Proto()
|
||||
require.NoError(t, err)
|
||||
bPb, err = b.Proto()
|
||||
@@ -808,6 +870,10 @@ func TestStore_BlocksBySlot_BlockRootsBySlot(t *testing.T) {
|
||||
wanted, err = b1.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
if _, err := b1.PbDenebBlock(); err == nil {
|
||||
wanted, err = b1.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
retrieved0Pb, err := retrievedBlocks[0].Proto()
|
||||
require.NoError(t, err)
|
||||
wantedPb, err := wanted.Proto()
|
||||
@@ -828,6 +894,10 @@ func TestStore_BlocksBySlot_BlockRootsBySlot(t *testing.T) {
|
||||
wanted, err = b2.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
if _, err := b2.PbDenebBlock(); err == nil {
|
||||
wanted, err = b2.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
retrieved0Pb, err = retrievedBlocks[0].Proto()
|
||||
require.NoError(t, err)
|
||||
wantedPb, err = wanted.Proto()
|
||||
@@ -842,6 +912,10 @@ func TestStore_BlocksBySlot_BlockRootsBySlot(t *testing.T) {
|
||||
wanted, err = b3.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
if _, err := b3.PbDenebBlock(); err == nil {
|
||||
wanted, err = b3.ToBlinded()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
retrieved1Pb, err := retrievedBlocks[1].Proto()
|
||||
require.NoError(t, err)
|
||||
wantedPb, err = wanted.Proto()
|
||||
|
||||
@@ -37,3 +37,17 @@ func hasCapellaBlindKey(enc []byte) bool {
	}
	return bytes.Equal(enc[:len(capellaBlindKey)], capellaBlindKey)
}

func hasDenebKey(enc []byte) bool {
	if len(denebKey) >= len(enc) {
		return false
	}
	return bytes.Equal(enc[:len(denebKey)], denebKey)
}

func hasDenebBlindKey(enc []byte) bool {
	if len(denebBlindKey) >= len(enc) {
		return false
	}
	return bytes.Equal(enc[:len(denebBlindKey)], denebBlindKey)
}
@@ -129,6 +129,9 @@ var Buckets = [][]byte{

	feeRecipientBucket,
	registrationBucket,

	blobsBucket,
	epochsForBlobSidecarsRequestBucket,
}

// NewKVStore initializes a new boltDB key-value store at the directory
@@ -199,6 +202,11 @@ func NewKVStore(ctx context.Context, dirPath string) (*Store, error) {
	if err := kv.setupBlockStorageType(ctx); err != nil {
		return nil, err
	}

	if err := checkEpochsForBlobSidecarsRequestBucket(boltDB); err != nil {
		return nil, errors.Wrap(err, "failed to check epochs for blob sidecars request bucket")
	}

	return kv, nil
}
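The reason NewKVStore refuses to start when the stored retention value disagrees with the config is that the retention window is baked into every blob key: slotKey reduces the slot modulo MinEpochsForBlobsSidecarsRequest times SlotsPerEpoch, so changing the value would silently remap old keys to different rotating-buffer positions. A small sketch of that effect, with made-up retention values rather than the real config (the larger value mirrors the override used in the blob_test.go hunk above):

// Sketch only: shows how the rotating-buffer position shifts with retention.
package main

import "fmt"

func main() {
	const slotsPerEpoch = 32
	slot := uint64(200000)

	// The same slot lands in different rotating-buffer positions once the
	// retention window changes, so previously written keys no longer line up.
	for _, retentionEpochs := range []uint64{4096, 42069} {
		bufferSlots := retentionEpochs * slotsPerEpoch
		fmt.Printf("retention %d epochs -> slot %d stored at position %d\n",
			retentionEpochs, slot, slot%bufferSlots)
	}
}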
@@ -46,6 +46,7 @@ var (
	finalizedCheckpointKey = []byte("finalized-checkpoint")
	powchainDataKey = []byte("powchain-data")
	lastValidatedCheckpointKey = []byte("last-validated-checkpoint")
	blobsBucket = []byte("blobs")

	// Below keys are used to identify objects are to be fork compatible.
	// Objects that are only compatible with specific forks should be prefixed with such keys.
@@ -55,6 +56,9 @@ var (
	capellaKey = []byte("capella")
	capellaBlindKey = []byte("blind-capella")
	saveBlindedBeaconBlocksKey = []byte("save-blinded-beacon-blocks")
	denebKey = []byte("deneb")
	denebBlindKey = []byte("blind-deneb")

	// block root included in the beacon state used by weak subjectivity initial sync
	originCheckpointBlockRootKey = []byte("origin-checkpoint-block-root")
	// block root tracking the progress of backfill, or pointing at genesis if backfill has not been initiated
@@ -70,4 +74,7 @@ var (

	// Migrations
	migrationsBucket = []byte("migrations")

	// Stores how long to keep the blob sidecars for.
	epochsForBlobSidecarsRequestBucket = []byte("epochs-for-blob-sidecars-request")
)
@@ -229,34 +229,28 @@ func (s *Store) saveStatesEfficientInternal(ctx context.Context, tx *bolt.Tx, bl
|
||||
// look at issue https://github.com/prysmaticlabs/prysm/issues/9262.
|
||||
switch rawType := states[i].ToProtoUnsafe().(type) {
|
||||
case *ethpb.BeaconState:
|
||||
pbState, err := statenative.ProtobufBeaconStatePhase0(rawType)
|
||||
pbState, err := getPhase0PbState(rawType)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if pbState == nil {
|
||||
return errors.New("nil state")
|
||||
}
|
||||
valEntries := pbState.Validators
|
||||
pbState.Validators = make([]*ethpb.Validator, 0)
|
||||
encodedState, err := encode(ctx, pbState)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pbState.Validators = valEntries
|
||||
if err := bucket.Put(rt[:], encodedState); err != nil {
|
||||
return err
|
||||
}
|
||||
pbState.Validators = valEntries
|
||||
if err := valIdxBkt.Put(rt[:], validatorKeys[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
case *ethpb.BeaconStateAltair:
|
||||
pbState, err := statenative.ProtobufBeaconStateAltair(rawType)
|
||||
pbState, err := getAltairPbState(rawType)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if pbState == nil {
|
||||
return errors.New("nil state")
|
||||
}
|
||||
valEntries := pbState.Validators
|
||||
pbState.Validators = make([]*ethpb.Validator, 0)
|
||||
rawObj, err := pbState.MarshalSSZ()
|
||||
@@ -272,13 +266,10 @@ func (s *Store) saveStatesEfficientInternal(ctx context.Context, tx *bolt.Tx, bl
|
||||
return err
|
||||
}
|
||||
case *ethpb.BeaconStateBellatrix:
|
||||
pbState, err := statenative.ProtobufBeaconStateBellatrix(rawType)
|
||||
pbState, err := getBellatrixPbState(rawType)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if pbState == nil {
|
||||
return errors.New("nil state")
|
||||
}
|
||||
valEntries := pbState.Validators
|
||||
pbState.Validators = make([]*ethpb.Validator, 0)
|
||||
rawObj, err := pbState.MarshalSSZ()
|
||||
@@ -294,12 +285,28 @@ func (s *Store) saveStatesEfficientInternal(ctx context.Context, tx *bolt.Tx, bl
|
||||
return err
|
||||
}
|
||||
case *ethpb.BeaconStateCapella:
|
||||
pbState, err := statenative.ProtobufBeaconStateCapella(rawType)
|
||||
pbState, err := getCapellaPbState(rawType)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if pbState == nil {
|
||||
return errors.New("nil state")
|
||||
valEntries := pbState.Validators
|
||||
pbState.Validators = make([]*ethpb.Validator, 0)
|
||||
rawObj, err := pbState.MarshalSSZ()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
encodedState := snappy.Encode(nil, append(capellaKey, rawObj...))
|
||||
if err := bucket.Put(rt[:], encodedState); err != nil {
|
||||
return err
|
||||
}
|
||||
pbState.Validators = valEntries
|
||||
if err := valIdxBkt.Put(rt[:], validatorKeys[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
case *ethpb.BeaconStateDeneb:
|
||||
pbState, err := getDenebPbState(rawType)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
valEntries := pbState.Validators
|
||||
pbState.Validators = make([]*ethpb.Validator, 0)
|
||||
@@ -323,6 +330,61 @@ func (s *Store) saveStatesEfficientInternal(ctx context.Context, tx *bolt.Tx, bl
|
||||
return s.storeValidatorEntriesSeparately(ctx, tx, validatorsEntries)
|
||||
}
|
||||
|
||||
func getPhase0PbState(rawState interface{}) (*ethpb.BeaconState, error) {
|
||||
pbState, err := statenative.ProtobufBeaconStatePhase0(rawState)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if pbState == nil {
|
||||
return nil, errors.New("nil state")
|
||||
}
|
||||
return pbState, nil
|
||||
}
|
||||
|
||||
func getAltairPbState(rawState interface{}) (*ethpb.BeaconStateAltair, error) {
|
||||
pbState, err := statenative.ProtobufBeaconStateAltair(rawState)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if pbState == nil {
|
||||
return nil, errors.New("nil state")
|
||||
}
|
||||
return pbState, nil
|
||||
}
|
||||
|
||||
func getBellatrixPbState(rawState interface{}) (*ethpb.BeaconStateBellatrix, error) {
|
||||
pbState, err := statenative.ProtobufBeaconStateBellatrix(rawState)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if pbState == nil {
|
||||
return nil, errors.New("nil state")
|
||||
}
|
||||
return pbState, nil
|
||||
}
|
||||
|
||||
func getCapellaPbState(rawState interface{}) (*ethpb.BeaconStateCapella, error) {
|
||||
pbState, err := statenative.ProtobufBeaconStateCapella(rawState)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if pbState == nil {
|
||||
return nil, errors.New("nil state")
|
||||
}
|
||||
return pbState, nil
|
||||
}
|
||||
|
||||
func getDenebPbState(rawState interface{}) (*ethpb.BeaconStateDeneb, error) {
|
||||
pbState, err := statenative.ProtobufBeaconStateDeneb(rawState)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if pbState == nil {
|
||||
return nil, errors.New("nil state")
|
||||
}
|
||||
return pbState, nil
|
||||
}
|
||||
|
||||
func (s *Store) storeValidatorEntriesSeparately(ctx context.Context, tx *bolt.Tx, validatorsEntries map[string]*ethpb.Validator) error {
|
||||
valBkt := tx.Bucket(stateValidatorsBucket)
|
||||
for hashStr, validatorEntry := range validatorsEntries {
|
||||
@@ -472,6 +534,19 @@ func (s *Store) unmarshalState(_ context.Context, enc []byte, validatorEntries [
|
||||
}
|
||||
|
||||
switch {
|
||||
case hasDenebKey(enc):
|
||||
protoState := ðpb.BeaconStateDeneb{}
|
||||
if err := protoState.UnmarshalSSZ(enc[len(denebKey):]); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to unmarshal encoding for Deneb")
|
||||
}
|
||||
ok, err := s.isStateValidatorMigrationOver()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if ok {
|
||||
protoState.Validators = validatorEntries
|
||||
}
|
||||
return statenative.InitializeFromProtoUnsafeDeneb(protoState)
|
||||
case hasCapellaKey(enc):
|
||||
// Marshal state bytes to capella beacon state.
|
||||
protoState := ðpb.BeaconStateCapella{}
|
||||
@@ -579,6 +654,19 @@ func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, er
|
||||
return nil, err
|
||||
}
|
||||
return snappy.Encode(nil, append(capellaKey, rawObj...)), nil
|
||||
case *ethpb.BeaconStateDeneb:
|
||||
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateDeneb)
|
||||
if !ok {
|
||||
return nil, errors.New("non valid inner state")
|
||||
}
|
||||
if rState == nil {
|
||||
return nil, errors.New("nil state")
|
||||
}
|
||||
rawObj, err := rState.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return snappy.Encode(nil, append(denebKey, rawObj...)), nil
|
||||
default:
|
||||
return nil, errors.New("invalid inner state")
|
||||
}
|
||||
|
||||
@@ -900,6 +900,100 @@ func TestBellatrixState_CanDelete(t *testing.T) {
|
||||
require.Equal(t, state.ReadOnlyBeaconState(nil), savedS, "Unsaved state should've been nil")
|
||||
}
|
||||
|
||||
func TestDenebState_CanSaveRetrieve(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
|
||||
r := [32]byte{'A'}
|
||||
|
||||
require.Equal(t, false, db.HasState(context.Background(), r))
|
||||
|
||||
st, _ := util.DeterministicGenesisStateDeneb(t, 1)
|
||||
require.NoError(t, st.SetSlot(100))
|
||||
|
||||
require.NoError(t, db.SaveState(context.Background(), st, r))
|
||||
require.Equal(t, true, db.HasState(context.Background(), r))
|
||||
|
||||
savedS, err := db.State(context.Background(), r)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepSSZEqual(t, st.ToProtoUnsafe(), savedS.ToProtoUnsafe())
|
||||
|
||||
savedS, err = db.State(context.Background(), [32]byte{'B'})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, state.ReadOnlyBeaconState(nil), savedS, "Unsaved state should've been nil")
|
||||
}
|
||||
|
||||
func TestDenebState_CanDelete(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
|
||||
r := [32]byte{'A'}
|
||||
|
||||
require.Equal(t, false, db.HasState(context.Background(), r))
|
||||
|
||||
st, _ := util.DeterministicGenesisStateDeneb(t, 1)
|
||||
require.NoError(t, st.SetSlot(100))
|
||||
|
||||
require.NoError(t, db.SaveState(context.Background(), st, r))
|
||||
require.Equal(t, true, db.HasState(context.Background(), r))
|
||||
|
||||
require.NoError(t, db.DeleteState(context.Background(), r))
|
||||
savedS, err := db.State(context.Background(), r)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, state.ReadOnlyBeaconState(nil), savedS, "Unsaved state should've been nil")
|
||||
}
|
||||
|
||||
func TestStateDeneb_CanSaveRetrieveValidatorEntries(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
|
||||
// enable historical state representation flag to test this
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
EnableHistoricalSpaceRepresentation: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
r := [32]byte{'A'}
|
||||
|
||||
require.Equal(t, false, db.HasState(context.Background(), r))
|
||||
|
||||
stateValidators := validators(10)
|
||||
st, _ := util.DeterministicGenesisStateDeneb(t, 20)
|
||||
require.NoError(t, st.SetSlot(100))
|
||||
require.NoError(t, st.SetValidators(stateValidators))
|
||||
|
||||
ctx := context.Background()
|
||||
require.NoError(t, db.SaveState(ctx, st, r))
|
||||
assert.Equal(t, true, db.HasState(context.Background(), r))
|
||||
|
||||
savedS, err := db.State(context.Background(), r)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepSSZEqual(t, st.Validators(), savedS.Validators(), "saved state with validators and retrieved state are not matching")
|
||||
|
||||
// check if the index of the second state is still present.
|
||||
err = db.db.Update(func(tx *bolt.Tx) error {
|
||||
idxBkt := tx.Bucket(blockRootValidatorHashesBucket)
|
||||
data := idxBkt.Get(r[:])
|
||||
require.NotEqual(t, 0, len(data))
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// check if all the validator entries are still intact in the validator entry bucket.
|
||||
err = db.db.Update(func(tx *bolt.Tx) error {
|
||||
valBkt := tx.Bucket(stateValidatorsBucket)
|
||||
// if any of the original validator entry is not present, then fail the test.
|
||||
for _, val := range stateValidators {
|
||||
hash, hashErr := val.HashTreeRoot()
|
||||
assert.NoError(t, hashErr)
|
||||
data := valBkt.Get(hash[:])
|
||||
require.NotNil(t, data)
|
||||
require.NotEqual(t, 0, len(data))
|
||||
}
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func BenchmarkState_CheckStateSaveTime_1(b *testing.B) { checkStateSaveTime(b, 1) }
|
||||
func BenchmarkState_CheckStateSaveTime_10(b *testing.B) { checkStateSaveTime(b, 10) }
|
||||
|
||||
|
||||
@@ -275,6 +275,8 @@ func TestGetSpec(t *testing.T) {
			assert.Equal(t, "51", v)
		case "MAX_VOLUNTARY_EXITS":
			assert.Equal(t, "52", v)
		case "MAX_BLOBS_PER_BLOCK":
			assert.Equal(t, "4", v)
		case "TIMELY_HEAD_FLAG_INDEX":
			assert.Equal(t, "0x35", v)
		case "TIMELY_SOURCE_FLAG_INDEX":
@@ -34,7 +34,7 @@ func ComputeFieldRootsWithHasher(ctx context.Context, state *BeaconState) ([][]b
	case version.Capella:
		fieldRoots = make([][]byte, params.BeaconConfig().BeaconStateCapellaFieldCount)
	case version.Deneb:
		fieldRoots = make([][]byte, params.BeaconConfig().BeaconStateCapellaFieldCount)
		fieldRoots = make([][]byte, params.BeaconConfig().BeaconStateCapellaFieldCount) // Deneb has the same state field count as Capella.
	}

	// Genesis time root.
@@ -104,7 +104,8 @@ type BeaconChainConfig struct {
	MaxVoluntaryExits uint64 `yaml:"MAX_VOLUNTARY_EXITS" spec:"true"` // MaxVoluntaryExits defines the maximum number of validator exits in a block.
	MaxWithdrawalsPerPayload uint64 `yaml:"MAX_WITHDRAWALS_PER_PAYLOAD" spec:"true"` // MaxWithdrawalsPerPayload defines the maximum number of withdrawals in a block.
	MaxBlsToExecutionChanges uint64 `yaml:"MAX_BLS_TO_EXECUTION_CHANGES" spec:"true"` // MaxBlsToExecutionChanges defines the maximum number of BLS-to-execution-change objects in a block.
	MaxValidatorsPerWithdrawalsSweep uint64 `yaml:"MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP" spec:"true"` //MaxValidatorsPerWithdrawalsSweep bounds the size of the sweep searching for withdrawals per slot.
	MaxValidatorsPerWithdrawalsSweep uint64 `yaml:"MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP" spec:"true"` // MaxValidatorsPerWithdrawalsSweep bounds the size of the sweep searching for withdrawals per slot.
	MaxBlobsPerBlock uint64 `yaml:"MAX_BLOBS_PER_BLOCK" spec:"true"` // MaxBlobsPerBlock defines the maximum number of blobs in a block.

	// BLS domain values.
	DomainBeaconProposer [4]byte `yaml:"DOMAIN_BEACON_PROPOSER" spec:"true"` // DomainBeaconProposer defines the BLS signature domain for beacon proposal verification.
@@ -26,23 +26,24 @@
)

var mainnetNetworkConfig = &NetworkConfig{
	GossipMaxSize: 1 << 20, // 1 MiB
	GossipMaxSizeBellatrix: 10 * 1 << 20, // 10 MiB
	MaxChunkSize: 1 << 20, // 1 MiB
	MaxChunkSizeBellatrix: 10 * 1 << 20, // 10 MiB
	AttestationSubnetCount: 64,
	AttestationPropagationSlotRange: 32,
	MaxRequestBlocks: 1 << 10, // 1024
	TtfbTimeout: 5 * time.Second,
	RespTimeout: 10 * time.Second,
	MaximumGossipClockDisparity: 500 * time.Millisecond,
	MessageDomainInvalidSnappy: [4]byte{00, 00, 00, 00},
	MessageDomainValidSnappy: [4]byte{01, 00, 00, 00},
	ETH2Key: "eth2",
	AttSubnetKey: "attnets",
	SyncCommsSubnetKey: "syncnets",
	MinimumPeersInSubnetSearch: 20,
	ContractDeploymentBlock: 11184524, // Note: contract was deployed in block 11052984 but no transactions were sent until 11184524.
	MinEpochsForBlobsSidecarsRequest: 4096,
	BootstrapNodes: []string{
		// Teku team's bootnode
		"enr:-KG4QOtcP9X1FbIMOe17QNMKqDxCpm14jcX5tiOE4_TyMrFqbmhPZHK_ZPG2Gxb1GE2xdtodOfx9-cgvNtxnRyHEmC0ghGV0aDKQ9aX9QgAAAAD__________4JpZIJ2NIJpcIQDE8KdiXNlY3AyNTZrMaEDhpehBDbZjM_L9ek699Y7vhUJ-eAdMyQW_Fil522Y0fODdGNwgiMog3VkcIIjKA",
@@ -159,6 +160,7 @@ var mainnetBeaconConfig = &BeaconChainConfig{
	MaxWithdrawalsPerPayload: 16,
	MaxBlsToExecutionChanges: 16,
	MaxValidatorsPerWithdrawalsSweep: 16384,
	MaxBlobsPerBlock: 4,

	// BLS domain values.
	DomainBeaconProposer: bytesutil.Uint32ToBytes4(0x00000000),
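The two new mainnet values tie directly back to the storage bound quoted in blob.go: MinEpochsForBlobsSidecarsRequest (4096) multiplied by the 32 slots in an epoch gives the 131072-slot rotating buffer, and at mainnet's 12-second slot time that is roughly 18 days of blobs kept on disk. A tiny sketch of the arithmetic; the seconds-per-slot constant is a mainnet assumption and not part of this diff.

// Back-of-the-envelope retention window implied by the new mainnet values.
package main

import "fmt"

func main() {
	const (
		minEpochsForBlobsSidecarsRequest = 4096 // new mainnet value above
		slotsPerEpoch                    = 32
		secondsPerSlot                   = 12 // mainnet assumption, not from this diff
	)
	bufferSlots := minEpochsForBlobsSidecarsRequest * slotsPerEpoch
	fmt.Println(bufferSlots) // 131072, the bucket bound cited in blob.go
	fmt.Printf("%.1f days of blobs\n", float64(bufferSlots*secondsPerSlot)/86400)
}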
@@ -9,18 +9,19 @@ import (

// NetworkConfig defines the spec based network parameters.
type NetworkConfig struct {
	GossipMaxSize uint64 `yaml:"GOSSIP_MAX_SIZE"` // GossipMaxSize is the maximum allowed size of uncompressed gossip messages.
	GossipMaxSizeBellatrix uint64 `yaml:"GOSSIP_MAX_SIZE_BELLATRIX"` // GossipMaxSizeBellatrix is the maximum allowed size of uncompressed gossip messages after the bellatrix epoch.
	MaxChunkSize uint64 `yaml:"MAX_CHUNK_SIZE"` // MaxChunkSize is the maximum allowed size of uncompressed req/resp chunked responses.
	MaxChunkSizeBellatrix uint64 `yaml:"MAX_CHUNK_SIZE_BELLATRIX"` // MaxChunkSizeBellatrix is the maximum allowed size of uncompressed req/resp chunked responses after the bellatrix epoch.
	AttestationSubnetCount uint64 `yaml:"ATTESTATION_SUBNET_COUNT"` // AttestationSubnetCount is the number of attestation subnets used in the gossipsub protocol.
	AttestationPropagationSlotRange primitives.Slot `yaml:"ATTESTATION_PROPAGATION_SLOT_RANGE"` // AttestationPropagationSlotRange is the maximum number of slots during which an attestation can be propagated.
	MaxRequestBlocks uint64 `yaml:"MAX_REQUEST_BLOCKS"` // MaxRequestBlocks is the maximum number of blocks in a single request.
	TtfbTimeout time.Duration `yaml:"TTFB_TIMEOUT"` // TtfbTimeout is the maximum time to wait for first byte of request response (time-to-first-byte).
	RespTimeout time.Duration `yaml:"RESP_TIMEOUT"` // RespTimeout is the maximum time for complete response transfer.
	MaximumGossipClockDisparity time.Duration `yaml:"MAXIMUM_GOSSIP_CLOCK_DISPARITY"` // MaximumGossipClockDisparity is the maximum milliseconds of clock disparity assumed between honest nodes.
	MessageDomainInvalidSnappy [4]byte `yaml:"MESSAGE_DOMAIN_INVALID_SNAPPY"` // MessageDomainInvalidSnappy is the 4-byte domain for gossip message-id isolation of invalid snappy messages.
	MessageDomainValidSnappy [4]byte `yaml:"MESSAGE_DOMAIN_VALID_SNAPPY"` // MessageDomainValidSnappy is the 4-byte domain for gossip message-id isolation of valid snappy messages.
	MinEpochsForBlobsSidecarsRequest primitives.Epoch `yaml:"MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUEST"` // MinEpochsForBlobsSidecarsRequest is the minimum number of epochs the node will keep the blobs for.

	// DiscoveryV5 Config
	ETH2Key string // ETH2Key is the ENR key of the Ethereum consensus object in an enr.
@@ -6,6 +6,7 @@ import (
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/d4l3k/messagediff"
|
||||
@@ -22,6 +23,12 @@ type AssertionTestingTB interface {
|
||||
|
||||
type assertionLoggerFn func(string, ...interface{})
|
||||
|
||||
func SprintfAssertionLoggerFn(s *string) assertionLoggerFn {
|
||||
return func(ef string, eargs ...interface{}) {
|
||||
*s = fmt.Sprintf(ef, eargs...)
|
||||
}
|
||||
}
|
||||
|
||||
// Equal compares values using comparison operator.
func Equal(loggerFn assertionLoggerFn, expected, actual interface{}, msg ...interface{}) {
	if expected != actual {
@@ -45,11 +52,44 @@ func DeepEqual(loggerFn assertionLoggerFn, expected, actual interface{}, msg ...
	if !isDeepEqual(expected, actual) {
		errMsg := parseMsg("Values are not equal", msg...)
		_, file, line, _ := runtime.Caller(2)
		diff, _ := messagediff.PrettyDiff(expected, actual)
		diff := ""
		if _, isProto := expected.(proto.Message); isProto {
			diff = ProtobufPrettyDiff(expected, actual)
		} else {
			diff, _ = messagediff.PrettyDiff(expected, actual)
		}
		loggerFn("%s:%d %s, want: %#v, got: %#v, diff: %s", filepath.Base(file), line, errMsg, expected, actual, diff)
	}
}

// protobufPrivateFields are bookkeeping fields generated by the protobuf compiler;
// they are excluded from pretty-printed diffs.
var protobufPrivateFields = map[string]bool{
	"sizeCache": true,
	"state":     true,
}

// ProtobufPrettyDiff returns a human-readable diff of two values, skipping
// protobuf-internal fields such as sizeCache and state.
func ProtobufPrettyDiff(a, b interface{}) string {
	d, _ := messagediff.DeepDiff(a, b)
	var dstr []string
	// appendNotProto records a diff entry unless its path points at a protobuf-internal field.
	appendNotProto := func(path, str string) {
		parts := strings.Split(path, ".")
		if len(parts) > 1 && protobufPrivateFields[parts[1]] {
			return
		}
		dstr = append(dstr, str)
	}
	for path, added := range d.Added {
		appendNotProto(path.String(), fmt.Sprintf("added: %s = %#v\n", path.String(), added))
	}
	for path, removed := range d.Removed {
		appendNotProto(path.String(), fmt.Sprintf("removed: %s = %#v\n", path.String(), removed))
	}
	for path, modified := range d.Modified {
		appendNotProto(path.String(), fmt.Sprintf("modified: %s = %#v\n", path.String(), modified))
	}
	sort.Strings(dstr)
	return strings.Join(dstr, "")
}
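With this change, DeepEqual routes proto messages through ProtobufPrettyDiff so the reported diff skips generated bookkeeping fields. A minimal sketch of the effect, using ethpb.Checkpoint purely as an example proto type:

a := &ethpb.Checkpoint{Epoch: 1, Root: make([]byte, 32)}
b := &ethpb.Checkpoint{Epoch: 2, Root: make([]byte, 32)}
var msg string
assertions.DeepEqual(assertions.SprintfAssertionLoggerFn(&msg), a, b)
// msg ends with the ProtobufPrettyDiff output, e.g. a single "modified: ... Epoch ..." entry,
// with sizeCache/state noise filtered out.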
// DeepNotEqual compares values using DeepEqual.
func DeepNotEqual(loggerFn assertionLoggerFn, expected, actual interface{}, msg ...interface{}) {
	if isDeepEqual(expected, actual) {
@@ -1115,3 +1115,140 @@ func SaveBlock(tb assertions.AssertionTestingTB, ctx context.Context, db iface.N
	require.NoError(tb, db.SaveBlock(ctx, wsb))
	return wsb
}
// HydrateSignedBeaconBlockDeneb hydrates a signed beacon block with correct field length sizes
// to comply with fssz marshalling and unmarshalling rules.
func HydrateSignedBeaconBlockDeneb(b *ethpb.SignedBeaconBlockDeneb) *ethpb.SignedBeaconBlockDeneb {
	if b.Signature == nil {
		b.Signature = make([]byte, fieldparams.BLSSignatureLength)
	}
	b.Block = HydrateBeaconBlockDeneb(b.Block)
	return b
}
// HydrateBeaconBlockDeneb hydrates a beacon block with correct field length sizes
// to comply with fssz marshalling and unmarshalling rules.
func HydrateBeaconBlockDeneb(b *ethpb.BeaconBlockDeneb) *ethpb.BeaconBlockDeneb {
	if b == nil {
		b = &ethpb.BeaconBlockDeneb{}
	}
	if b.ParentRoot == nil {
		b.ParentRoot = make([]byte, fieldparams.RootLength)
	}
	if b.StateRoot == nil {
		b.StateRoot = make([]byte, fieldparams.RootLength)
	}
	b.Body = HydrateBeaconBlockBodyDeneb(b.Body)
	return b
}
// HydrateBeaconBlockBodyDeneb hydrates a beacon block body with correct field length sizes
// to comply with fssz marshalling and unmarshalling rules.
func HydrateBeaconBlockBodyDeneb(b *ethpb.BeaconBlockBodyDeneb) *ethpb.BeaconBlockBodyDeneb {
	if b == nil {
		b = &ethpb.BeaconBlockBodyDeneb{}
	}
	if b.RandaoReveal == nil {
		b.RandaoReveal = make([]byte, fieldparams.BLSSignatureLength)
	}
	if b.Graffiti == nil {
		b.Graffiti = make([]byte, fieldparams.RootLength)
	}
	if b.Eth1Data == nil {
		b.Eth1Data = &ethpb.Eth1Data{
			DepositRoot: make([]byte, fieldparams.RootLength),
			BlockHash:   make([]byte, fieldparams.RootLength),
		}
	}
	if b.SyncAggregate == nil {
		b.SyncAggregate = &ethpb.SyncAggregate{
			SyncCommitteeBits:      make([]byte, fieldparams.SyncAggregateSyncCommitteeBytesLength),
			SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
		}
	}
	if b.ExecutionPayload == nil {
		b.ExecutionPayload = &enginev1.ExecutionPayloadDeneb{
			ParentHash:    make([]byte, fieldparams.RootLength),
			FeeRecipient:  make([]byte, 20),
			StateRoot:     make([]byte, fieldparams.RootLength),
			ReceiptsRoot:  make([]byte, fieldparams.RootLength),
			LogsBloom:     make([]byte, 256),
			PrevRandao:    make([]byte, fieldparams.RootLength),
			BaseFeePerGas: make([]byte, fieldparams.RootLength),
			BlockHash:     make([]byte, fieldparams.RootLength),
			Transactions:  make([][]byte, 0),
			ExtraData:     make([]byte, 0),
			ExcessDataGas: make([]byte, 32),
		}
	}
	return b
}
// HydrateSignedBlindedBeaconBlockDeneb hydrates a signed blinded beacon block with correct field length sizes
// to comply with fssz marshalling and unmarshalling rules.
func HydrateSignedBlindedBeaconBlockDeneb(b *ethpb.SignedBlindedBeaconBlockDeneb) *ethpb.SignedBlindedBeaconBlockDeneb {
	if b.Signature == nil {
		b.Signature = make([]byte, fieldparams.BLSSignatureLength)
	}
	b.Block = HydrateBlindedBeaconBlockDeneb(b.Block)
	return b
}

// HydrateBlindedBeaconBlockDeneb hydrates a blinded beacon block with correct field length sizes
// to comply with fssz marshalling and unmarshalling rules.
func HydrateBlindedBeaconBlockDeneb(b *ethpb.BlindedBeaconBlockDeneb) *ethpb.BlindedBeaconBlockDeneb {
	if b == nil {
		b = &ethpb.BlindedBeaconBlockDeneb{}
	}
	if b.ParentRoot == nil {
		b.ParentRoot = make([]byte, fieldparams.RootLength)
	}
	if b.StateRoot == nil {
		b.StateRoot = make([]byte, fieldparams.RootLength)
	}
	b.Body = HydrateBlindedBeaconBlockBodyDeneb(b.Body)
	return b
}

// HydrateBlindedBeaconBlockBodyDeneb hydrates a blinded beacon block body with correct field length sizes
// to comply with fssz marshalling and unmarshalling rules.
func HydrateBlindedBeaconBlockBodyDeneb(b *ethpb.BlindedBeaconBlockBodyDeneb) *ethpb.BlindedBeaconBlockBodyDeneb {
	if b == nil {
		b = &ethpb.BlindedBeaconBlockBodyDeneb{}
	}
	if b.RandaoReveal == nil {
		b.RandaoReveal = make([]byte, fieldparams.BLSSignatureLength)
	}
	if b.Graffiti == nil {
		b.Graffiti = make([]byte, 32)
	}
	if b.Eth1Data == nil {
		b.Eth1Data = &ethpb.Eth1Data{
			DepositRoot: make([]byte, fieldparams.RootLength),
			BlockHash:   make([]byte, 32),
		}
	}
	if b.SyncAggregate == nil {
		b.SyncAggregate = &ethpb.SyncAggregate{
			SyncCommitteeBits:      make([]byte, fieldparams.SyncAggregateSyncCommitteeBytesLength),
			SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
		}
	}
	if b.ExecutionPayloadHeader == nil {
		b.ExecutionPayloadHeader = &enginev1.ExecutionPayloadHeaderDeneb{
			ParentHash:       make([]byte, 32),
			FeeRecipient:     make([]byte, 20),
			StateRoot:        make([]byte, fieldparams.RootLength),
			ReceiptsRoot:     make([]byte, fieldparams.RootLength),
			LogsBloom:        make([]byte, 256),
			PrevRandao:       make([]byte, 32),
			BaseFeePerGas:    make([]byte, 32),
			BlockHash:        make([]byte, 32),
			TransactionsRoot: make([]byte, fieldparams.RootLength),
			ExtraData:        make([]byte, 0),
			WithdrawalsRoot:  make([]byte, fieldparams.RootLength),
			ExcessDataGas:    make([]byte, 32),
		}
	}
	return b
}
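These hydrate helpers exist so that minimal Deneb objects satisfy fssz's fixed-length field requirements in tests. A short sketch of the intended round-trip, assuming the generated SSZ methods on the Deneb proto types and the usual testing/util and testing/require imports:

blk := util.HydrateSignedBeaconBlockDeneb(&ethpb.SignedBeaconBlockDeneb{})
enc, err := blk.MarshalSSZ() // succeeds because every fixed-size field is non-nil
require.NoError(t, err)
decoded := &ethpb.SignedBeaconBlockDeneb{}
require.NoError(t, decoded.UnmarshalSSZ(enc))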
@@ -30,6 +30,16 @@ func NewBlindedBeaconBlockCapella() *ethpb.SignedBlindedBeaconBlockCapella {
	return HydrateSignedBlindedBeaconBlockCapella(&ethpb.SignedBlindedBeaconBlockCapella{})
}

// NewBeaconBlockDeneb creates a beacon block with minimum marshalable fields.
func NewBeaconBlockDeneb() *ethpb.SignedBeaconBlockDeneb {
	return HydrateSignedBeaconBlockDeneb(&ethpb.SignedBeaconBlockDeneb{})
}

// NewBlindedBeaconBlockDeneb creates a blinded beacon block with minimum marshalable fields.
func NewBlindedBeaconBlockDeneb() *ethpb.SignedBlindedBeaconBlockDeneb {
	return HydrateSignedBlindedBeaconBlockDeneb(&ethpb.SignedBlindedBeaconBlockDeneb{})
}
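NewBeaconBlockDeneb and NewBlindedBeaconBlockDeneb give tests a one-liner for a marshalable Deneb block, for example when exercising the new Deneb DB methods. A hypothetical sketch, assuming SaveBlock from the earlier hunk accepts this block type:

blk := util.NewBeaconBlockDeneb()
blk.Block.Slot = 42
wsb := util.SaveBlock(t, ctx, db, blk) // wraps the block and asserts the save succeeded
_ = wsb // the wrapped, saved block, available for further assertions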
// NewBlindedBeaconBlockCapellaV2 creates a blinded beacon block with minimum marshalable fields.
func NewBlindedBeaconBlockCapellaV2() *v2.SignedBlindedBeaconBlockCapella {
	return HydrateV2SignedBlindedBeaconBlockCapella(&v2.SignedBlindedBeaconBlockCapella{})