Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-12 14:58:03 -05:00)

Compare commits: `dependent_...` → `develop` (3 commits)

| Author | SHA1 | Date |
|---|---|---|
| | 37c5178fa8 | |
| | 13f8e7b47f | |
| | 124eadd56e | |
@@ -74,7 +74,6 @@ go_library(
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
@@ -174,7 +173,6 @@ go_test(
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
|
||||
@@ -14,7 +14,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
|
||||
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||
coreTime "github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db"
|
||||
@@ -30,7 +29,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stategen"
|
||||
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
@@ -291,19 +289,6 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
|
||||
return errors.Wrap(err, "failed to initialize blockchain service")
|
||||
}
|
||||
|
||||
if !params.FuluEnabled() {
|
||||
return nil
|
||||
}
|
||||
|
||||
earliestAvailableSlot, custodySubnetCount, err := s.updateCustodyInfoInDB(saved.Slot())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get and save custody group count")
|
||||
}
|
||||
|
||||
if _, _, err := s.cfg.P2P.UpdateCustodyInfo(earliestAvailableSlot, custodySubnetCount); err != nil {
|
||||
return errors.Wrap(err, "update custody info")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -468,73 +453,6 @@ func (s *Service) removeStartupState() {
|
||||
s.cfg.FinalizedStateAtStartUp = nil
|
||||
}
|
||||
|
||||
// updateCustodyInfoInDB updates the custody information in the database.
// It returns the earliest available slot and the (potentially updated) custody group count.
|
||||
func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot, uint64, error) {
|
||||
isSupernode := flags.Get().Supernode
|
||||
isSemiSupernode := flags.Get().SemiSupernode
|
||||
|
||||
cfg := params.BeaconConfig()
|
||||
custodyRequirement := cfg.CustodyRequirement
|
||||
|
||||
// Check if the node was previously subscribed to all data subnets, and if so,
|
||||
// store the new status accordingly.
|
||||
wasSupernode, err := s.cfg.BeaconDB.UpdateSubscribedToAllDataSubnets(s.ctx, isSupernode)
|
||||
if err != nil {
|
||||
return 0, 0, errors.Wrap(err, "update subscribed to all data subnets")
|
||||
}
|
||||
|
||||
// Compute the target custody group count based on current flag configuration.
|
||||
targetCustodyGroupCount := custodyRequirement
|
||||
|
||||
// Supernode: custody all groups (either currently set or previously enabled)
|
||||
if isSupernode {
|
||||
targetCustodyGroupCount = cfg.NumberOfCustodyGroups
|
||||
}
|
||||
|
||||
// Semi-supernode: custody minimum needed for reconstruction, or custody requirement if higher
|
||||
if isSemiSupernode {
|
||||
semiSupernodeCustody, err := peerdas.MinimumCustodyGroupCountToReconstruct()
|
||||
if err != nil {
|
||||
return 0, 0, errors.Wrap(err, "minimum custody group count")
|
||||
}
|
||||
|
||||
targetCustodyGroupCount = max(custodyRequirement, semiSupernodeCustody)
|
||||
}
|
||||
|
||||
// Safely compute the fulu fork slot.
|
||||
fuluForkSlot, err := fuluForkSlot()
|
||||
if err != nil {
|
||||
return 0, 0, errors.Wrap(err, "fulu fork slot")
|
||||
}
|
||||
|
||||
// If slot is before the fulu fork slot, then use the earliest stored slot as the reference slot.
|
||||
if slot < fuluForkSlot {
|
||||
slot, err = s.cfg.BeaconDB.EarliestSlot(s.ctx)
|
||||
if err != nil {
|
||||
return 0, 0, errors.Wrap(err, "earliest slot")
|
||||
}
|
||||
}
|
||||
|
||||
earliestAvailableSlot, actualCustodyGroupCount, err := s.cfg.BeaconDB.UpdateCustodyInfo(s.ctx, slot, targetCustodyGroupCount)
|
||||
if err != nil {
|
||||
return 0, 0, errors.Wrap(err, "update custody info")
|
||||
}
|
||||
|
||||
if isSupernode {
|
||||
log.WithFields(logrus.Fields{
|
||||
"current": actualCustodyGroupCount,
|
||||
"target": cfg.NumberOfCustodyGroups,
|
||||
}).Info("Supernode mode enabled. Will custody all data columns going forward.")
|
||||
}
|
||||
|
||||
if wasSupernode && !isSupernode {
|
||||
log.Warningf("Because the `--%s` flag was previously used, the node will continue to act as a super node.", flags.Supernode.Name)
|
||||
}
|
||||
|
||||
return earliestAvailableSlot, actualCustodyGroupCount, nil
|
||||
}
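
As a standalone illustration of the target-selection step above, here is a minimal sketch with placeholder inputs; the real values come from `params.BeaconConfig()` and `peerdas.MinimumCustodyGroupCountToReconstruct()`, and this is not the production code path:

```go
// Sketch of the target custody group selection implemented above.
// All numeric inputs are placeholders supplied by the caller.
func targetCustodyGroups(isSupernode, isSemiSupernode bool, custodyRequirement, numberOfCustodyGroups, minToReconstruct uint64) uint64 {
	target := custodyRequirement // default: plain custody requirement
	if isSupernode {
		// Supernode: custody every group.
		target = numberOfCustodyGroups
	}
	if isSemiSupernode {
		// Semi-supernode: enough groups to reconstruct, unless the
		// requirement is already higher.
		target = max(custodyRequirement, minToReconstruct)
	}
	return target
}
```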
|
||||
|
||||
func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db db.HeadAccessDatabase) {
|
||||
currentTime := prysmTime.Now()
|
||||
if currentTime.After(genesisTime) {
|
||||
@@ -551,19 +469,3 @@ func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db d
|
||||
}
|
||||
go slots.CountdownToGenesis(ctx, genesisTime, uint64(gState.NumValidators()), gRoot)
|
||||
}
|
||||
|
||||
func fuluForkSlot() (primitives.Slot, error) {
|
||||
cfg := params.BeaconConfig()
|
||||
|
||||
fuluForkEpoch := cfg.FuluForkEpoch
|
||||
if fuluForkEpoch == cfg.FarFutureEpoch {
|
||||
return cfg.FarFutureSlot, nil
|
||||
}
|
||||
|
||||
forkFuluSlot, err := slots.EpochStart(fuluForkEpoch)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "epoch start")
|
||||
}
|
||||
|
||||
return forkFuluSlot, nil
|
||||
}
|
||||
|
||||
@@ -23,11 +23,9 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
|
||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stategen"
|
||||
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
|
||||
"github.com/OffchainLabs/prysm/v7/config/features"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
consensusblocks "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
@@ -596,218 +594,3 @@ func TestNotifyIndex(t *testing.T) {
|
||||
t.Errorf("Notifier channel did not receive the index")
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateCustodyInfoInDB(t *testing.T) {
|
||||
const (
|
||||
fuluForkEpoch = 10
|
||||
custodyRequirement = uint64(4)
|
||||
earliestStoredSlot = primitives.Slot(12)
|
||||
numberOfCustodyGroups = uint64(64)
|
||||
)
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.FuluForkEpoch = fuluForkEpoch
|
||||
cfg.CustodyRequirement = custodyRequirement
|
||||
cfg.NumberOfCustodyGroups = numberOfCustodyGroups
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
ctx := t.Context()
|
||||
pbBlock := util.NewBeaconBlock()
|
||||
pbBlock.Block.Slot = 12
|
||||
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(pbBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
roBlock, err := blocks.NewROBlock(signedBeaconBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("CGC increases before fulu", func(t *testing.T) {
|
||||
service, requirements := minimalTestService(t)
|
||||
err = requirements.db.SaveBlock(ctx, roBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Before Fulu
|
||||
// -----------
|
||||
actualEas, actualCgc, err := service.updateCustodyInfoInDB(15)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, earliestStoredSlot, actualEas)
|
||||
require.Equal(t, custodyRequirement, actualCgc)
|
||||
|
||||
actualEas, actualCgc, err = service.updateCustodyInfoInDB(17)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, earliestStoredSlot, actualEas)
|
||||
require.Equal(t, custodyRequirement, actualCgc)
|
||||
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.Supernode = true
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
|
||||
actualEas, actualCgc, err = service.updateCustodyInfoInDB(19)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, earliestStoredSlot, actualEas)
|
||||
require.Equal(t, numberOfCustodyGroups, actualCgc)
|
||||
|
||||
// After Fulu
|
||||
// ----------
|
||||
actualEas, actualCgc, err = service.updateCustodyInfoInDB(fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, earliestStoredSlot, actualEas)
|
||||
require.Equal(t, numberOfCustodyGroups, actualCgc)
|
||||
})
|
||||
|
||||
t.Run("CGC increases after fulu", func(t *testing.T) {
|
||||
service, requirements := minimalTestService(t)
|
||||
err = requirements.db.SaveBlock(ctx, roBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Before Fulu
|
||||
// -----------
|
||||
actualEas, actualCgc, err := service.updateCustodyInfoInDB(15)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, earliestStoredSlot, actualEas)
|
||||
require.Equal(t, custodyRequirement, actualCgc)
|
||||
|
||||
actualEas, actualCgc, err = service.updateCustodyInfoInDB(17)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, earliestStoredSlot, actualEas)
|
||||
require.Equal(t, custodyRequirement, actualCgc)
|
||||
|
||||
// After Fulu
|
||||
// ----------
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.Supernode = true
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
|
||||
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
|
||||
actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, slot, actualEas)
|
||||
require.Equal(t, numberOfCustodyGroups, actualCgc)
|
||||
|
||||
actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot + 2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, slot, actualEas)
|
||||
require.Equal(t, numberOfCustodyGroups, actualCgc)
|
||||
})
|
||||
|
||||
t.Run("Supernode downgrade prevented", func(t *testing.T) {
|
||||
service, requirements := minimalTestService(t)
|
||||
err = requirements.db.SaveBlock(ctx, roBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Enable supernode
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.Supernode = true
|
||||
flags.Init(gFlags)
|
||||
|
||||
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
|
||||
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, slot, actualEas)
|
||||
require.Equal(t, numberOfCustodyGroups, actualCgc)
|
||||
|
||||
// Try to downgrade by removing flag
|
||||
gFlags.Supernode = false
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
|
||||
// Should still be supernode
|
||||
actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot + 2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, slot, actualEas)
|
||||
require.Equal(t, numberOfCustodyGroups, actualCgc) // Still 64, not downgraded
|
||||
})
|
||||
|
||||
t.Run("Semi-supernode downgrade prevented", func(t *testing.T) {
|
||||
service, requirements := minimalTestService(t)
|
||||
err = requirements.db.SaveBlock(ctx, roBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Enable semi-supernode
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.SemiSupernode = true
|
||||
flags.Init(gFlags)
|
||||
|
||||
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
|
||||
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, slot, actualEas)
|
||||
semiSupernodeCustody := numberOfCustodyGroups / 2 // 64
|
||||
require.Equal(t, semiSupernodeCustody, actualCgc) // Semi-supernode custodies 64 groups
|
||||
|
||||
// Try to downgrade by removing flag
|
||||
gFlags.SemiSupernode = false
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
|
||||
// UpdateCustodyInfo should prevent downgrade - custody count should remain at 64
|
||||
actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot + 2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, slot, actualEas)
|
||||
require.Equal(t, semiSupernodeCustody, actualCgc) // Still 64 due to downgrade prevention by UpdateCustodyInfo
|
||||
})
|
||||
|
||||
t.Run("Semi-supernode to supernode upgrade allowed", func(t *testing.T) {
|
||||
service, requirements := minimalTestService(t)
|
||||
err = requirements.db.SaveBlock(ctx, roBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Start with semi-supernode
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.SemiSupernode = true
|
||||
flags.Init(gFlags)
|
||||
|
||||
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
|
||||
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, slot, actualEas)
|
||||
semiSupernodeCustody := numberOfCustodyGroups / 2 // 64
|
||||
require.Equal(t, semiSupernodeCustody, actualCgc) // Semi-supernode custodies 64 groups
|
||||
|
||||
// Upgrade to full supernode
|
||||
gFlags.SemiSupernode = false
|
||||
gFlags.Supernode = true
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
|
||||
// Should upgrade to full supernode
|
||||
upgradeSlot := slot + 2
|
||||
actualEas, actualCgc, err = service.updateCustodyInfoInDB(upgradeSlot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, upgradeSlot, actualEas) // Earliest slot updates when upgrading
|
||||
require.Equal(t, numberOfCustodyGroups, actualCgc) // Upgraded to 128
|
||||
})
|
||||
|
||||
t.Run("Semi-supernode with high validator requirements uses higher custody", func(t *testing.T) {
|
||||
service, requirements := minimalTestService(t)
|
||||
err = requirements.db.SaveBlock(ctx, roBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Enable semi-supernode
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.SemiSupernode = true
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
|
||||
// Mock a high custody requirement (simulating many validators)
|
||||
// We need to override the custody requirement calculation
|
||||
// For this test, we'll verify the logic by checking if custodyRequirement > 64
|
||||
// Since custodyRequirement in minimalTestService is 4, we can't test the high case here
|
||||
// This would require a different test setup with actual validators
|
||||
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
|
||||
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, slot, actualEas)
|
||||
semiSupernodeCustody := numberOfCustodyGroups / 2 // 64
|
||||
// With low validator requirements (4), should use semi-supernode minimum (64)
|
||||
require.Equal(t, semiSupernodeCustody, actualCgc)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -89,6 +89,7 @@ type NoHeadAccessDatabase interface {
|
||||
SaveBlocks(ctx context.Context, blocks []interfaces.ReadOnlySignedBeaconBlock) error
|
||||
SaveROBlocks(ctx context.Context, blks []blocks.ROBlock, cache bool) error
|
||||
SaveGenesisBlockRoot(ctx context.Context, blockRoot [32]byte) error
|
||||
SlotByBlockRoot(context.Context, [32]byte) (primitives.Slot, error)
|
||||
// State related methods.
|
||||
SaveState(ctx context.Context, state state.ReadOnlyBeaconState, blockRoot [32]byte) error
|
||||
SaveStates(ctx context.Context, states []state.ReadOnlyBeaconState, blockRoots [][32]byte) error
|
||||
@@ -96,6 +97,7 @@ type NoHeadAccessDatabase interface {
|
||||
DeleteStates(ctx context.Context, blockRoots [][32]byte) error
|
||||
SaveStateSummary(ctx context.Context, summary *ethpb.StateSummary) error
|
||||
SaveStateSummaries(ctx context.Context, summaries []*ethpb.StateSummary) error
|
||||
SlotInDiffTree(primitives.Slot) (uint64, int, error)
|
||||
// Checkpoint operations.
|
||||
SaveJustifiedCheckpoint(ctx context.Context, checkpoint *ethpb.Checkpoint) error
|
||||
SaveFinalizedCheckpoint(ctx context.Context, checkpoint *ethpb.Checkpoint) error
|
||||
|
||||
@@ -32,6 +32,7 @@ go_library(
|
||||
"state_diff_helpers.go",
|
||||
"state_summary.go",
|
||||
"state_summary_cache.go",
|
||||
"testing_helpers.go",
|
||||
"utils.go",
|
||||
"validated_checkpoint.go",
|
||||
"wss.go",
|
||||
|
||||
@@ -23,6 +23,16 @@ const (
|
||||
The data at level 0 is saved every 2**exponent[0] slots and always contains a full state snapshot that is used as a base for the delta saved at other levels.
|
||||
*/
|
||||
|
||||
// SlotInDiffTree returns whether the given slot is a saving point in the diff tree.
|
||||
// If it is, it also returns the offset and level in the tree.
|
||||
func (s *Store) SlotInDiffTree(slot primitives.Slot) (uint64, int, error) {
|
||||
offset := s.getOffset()
|
||||
if uint64(slot) < offset {
|
||||
return 0, -1, ErrSlotBeforeOffset
|
||||
}
|
||||
return offset, computeLevel(offset, slot), nil
|
||||
}
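
computeLevel itself is not shown in this compare view; as a hedged sketch of the layering described in the comment above (one exponent per level, level 0 saved every 2**exponent[0] slots as a full snapshot), it might look roughly like this — the real kv implementation may differ:

```go
// Illustrative only: returns the level at which a slot is a saving point,
// or -1 if it is not one. The smallest matching level index wins, so a slot
// that is a multiple of 2**exponents[0] lands at level 0 (full snapshot).
func computeLevelSketch(exponents []int, offset, slot uint64) int {
	if slot < offset {
		return -1 // the real code returns ErrSlotBeforeOffset in this case
	}
	rel := slot - offset
	for i, exp := range exponents {
		if rel%(1<<uint(exp)) == 0 {
			return i
		}
	}
	return -1
}
```

With the exponents [6, 5] used by the new stategen tests further down, slots 0 and 64 map to level 0, slot 32 to level 1, and slot 20 to -1 — which is why a migration over slots 1–19 is expected to save nothing.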
|
||||
|
||||
// saveStateByDiff takes a state and decides between saving a full state snapshot or a diff.
|
||||
func (s *Store) saveStateByDiff(ctx context.Context, st state.ReadOnlyBeaconState) error {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.saveStateByDiff")
|
||||
@@ -33,13 +43,10 @@ func (s *Store) saveStateByDiff(ctx context.Context, st state.ReadOnlyBeaconStat
|
||||
}
|
||||
|
||||
slot := st.Slot()
|
||||
offset := s.getOffset()
|
||||
if uint64(slot) < offset {
|
||||
return ErrSlotBeforeOffset
|
||||
offset, lvl, err := s.SlotInDiffTree(slot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not determine if slot is in diff tree")
|
||||
}
|
||||
|
||||
// Find the level to save the state.
|
||||
lvl := computeLevel(offset, slot)
|
||||
if lvl == -1 {
|
||||
return nil
|
||||
}
|
||||
|
||||
beacon-chain/db/kv/testing_helpers.go (new file, 37 lines)
@@ -0,0 +1,37 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"testing"
|
||||
|
||||
"go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
// InitStateDiffCacheForTesting initializes the state diff cache with the given offset.
|
||||
// This is intended for testing purposes when setting up state diff after database creation.
|
||||
// This file is only compiled when the "testing" build tag is set.
|
||||
func (s *Store) InitStateDiffCacheForTesting(t testing.TB, offset uint64) error {
|
||||
// First, set the offset in the database.
|
||||
err := s.db.Update(func(tx *bbolt.Tx) error {
|
||||
bucket := tx.Bucket(stateDiffBucket)
|
||||
if bucket == nil {
|
||||
return bbolt.ErrBucketNotFound
|
||||
}
|
||||
|
||||
offsetBytes := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(offsetBytes, offset)
|
||||
return bucket.Put([]byte("offset"), offsetBytes)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Then create the state diff cache.
|
||||
sdCache, err := newStateDiffCache(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.stateDiffCache = sdCache
|
||||
return nil
|
||||
}
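
A typical test-side usage, mirroring the new stategen tests later in this diff (the type assertion is needed because this helper lives on *kv.Store rather than on the database interface):

```go
// Create the database first, then wire up the state diff cache and enable the flag.
beaconDB := testDB.SetupDB(t)
require.NoError(t, beaconDB.(*kv.Store).InitStateDiffCacheForTesting(t, 0))
resetCfg := features.InitWithReset(&features.Flags{EnableStateDiff: true})
defer resetCfg()
```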
|
||||
@@ -29,6 +29,7 @@ go_library(
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/sync/backfill/coverage:go_default_library",
|
||||
"//cache/lru:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
@@ -68,11 +69,14 @@ go_test(
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/kv:go_default_library",
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//beacon-chain/state/testing:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/blocks/testing:go_default_library",
|
||||
|
||||
@@ -5,10 +5,13 @@ import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/config/features"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
@@ -25,6 +28,10 @@ func (s *State) MigrateToCold(ctx context.Context, fRoot [32]byte) error {
|
||||
s.migrationLock.Lock()
|
||||
defer s.migrationLock.Unlock()
|
||||
|
||||
if features.Get().EnableStateDiff {
|
||||
return s.migrateToColdHdiff(ctx, fRoot)
|
||||
}
|
||||
|
||||
s.finalizedInfo.lock.RLock()
|
||||
oldFSlot := s.finalizedInfo.slot
|
||||
s.finalizedInfo.lock.RUnlock()
|
||||
@@ -90,21 +97,8 @@ func (s *State) MigrateToCold(ctx context.Context, fRoot [32]byte) error {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if s.beaconDB.HasState(ctx, aRoot) {
|
||||
// If you are migrating a state and it's already part of the hot state cache saved to the db,
|
||||
// you can just remove it from the hot state cache as it becomes redundant.
|
||||
s.saveHotStateDB.lock.Lock()
|
||||
roots := s.saveHotStateDB.blockRootsOfSavedStates
|
||||
for i := range roots {
|
||||
if aRoot == roots[i] {
|
||||
s.saveHotStateDB.blockRootsOfSavedStates = append(roots[:i], roots[i+1:]...)
|
||||
// There shouldn't be duplicated roots in `blockRootsOfSavedStates`.
|
||||
// Break here is ok.
|
||||
break
|
||||
}
|
||||
}
|
||||
s.saveHotStateDB.lock.Unlock()
|
||||
s.migrateHotToCold(aRoot)
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -129,3 +123,103 @@ func (s *State) MigrateToCold(ctx context.Context, fRoot [32]byte) error {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// migrateToColdHdiff saves the state-diffs for slots that are in the state diff tree after finalization
|
||||
func (s *State) migrateToColdHdiff(ctx context.Context, fRoot [32]byte) error {
|
||||
s.finalizedInfo.lock.RLock()
|
||||
oldFSlot := s.finalizedInfo.slot
|
||||
s.finalizedInfo.lock.RUnlock()
|
||||
fSlot, err := s.beaconDB.SlotByBlockRoot(ctx, fRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get slot by block root")
|
||||
}
|
||||
for slot := oldFSlot; slot < fSlot; slot++ {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
_, lvl, err := s.beaconDB.SlotInDiffTree(slot)
|
||||
if err != nil {
|
||||
log.WithError(err).Errorf("could not determine if slot %d is in diff tree", slot)
|
||||
continue
|
||||
}
|
||||
if lvl == -1 {
|
||||
continue
|
||||
}
|
||||
// The state needs to be saved.
|
||||
// Try the epoch boundary cache first.
|
||||
cached, exists, err := s.epochBoundaryStateCache.getBySlot(slot)
|
||||
if err != nil {
|
||||
log.WithError(err).Errorf("could not get epoch boundary state for slot %d", slot)
|
||||
cached = nil
|
||||
exists = false
|
||||
}
|
||||
var aRoot [32]byte
|
||||
var aState state.BeaconState
|
||||
if exists {
|
||||
aRoot = cached.root
|
||||
aState = cached.state
|
||||
} else {
|
||||
_, roots, err := s.beaconDB.HighestRootsBelowSlot(ctx, slot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Given the block has been finalized, the db should not have more than one block in a given slot.
|
||||
// We should error out when this happens.
|
||||
if len(roots) != 1 {
|
||||
return errUnknownBlock
|
||||
}
|
||||
aRoot = roots[0]
|
||||
// Unlike the legacy MigrateToCold, we always need to fetch the state, even if
// it already exists in the DB as part of the hot state db, because we need to
// process slots up to the state diff tree slots.
|
||||
aState, err = s.StateByRoot(ctx, aRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if s.beaconDB.HasState(ctx, aRoot) {
|
||||
s.migrateHotToCold(aRoot)
|
||||
continue
|
||||
}
|
||||
// advance slots to the target slot
|
||||
if aState.Slot() < slot {
|
||||
aState, err = transition.ProcessSlots(ctx, aState, slot)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not process slots to slot %d", slot)
|
||||
}
|
||||
}
|
||||
if err := s.beaconDB.SaveState(ctx, aState, aRoot); err != nil {
|
||||
return err
|
||||
}
|
||||
log.WithFields(
|
||||
logrus.Fields{
|
||||
"slot": aState.Slot(),
|
||||
"root": fmt.Sprintf("%#x", aRoot),
|
||||
}).Info("Saved state in DB")
|
||||
}
|
||||
// Update finalized info in memory.
|
||||
fInfo, ok, err := s.epochBoundaryStateCache.getByBlockRoot(fRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if ok {
|
||||
s.SaveFinalizedState(fSlot, fRoot, fInfo.state)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *State) migrateHotToCold(aRoot [32]byte) {
|
||||
// If you are migrating a state and it's already part of the hot state cache saved to the db,
|
||||
// you can just remove it from the hot state cache as it becomes redundant.
|
||||
s.saveHotStateDB.lock.Lock()
|
||||
roots := s.saveHotStateDB.blockRootsOfSavedStates
|
||||
for i := range roots {
|
||||
if aRoot == roots[i] {
|
||||
s.saveHotStateDB.blockRootsOfSavedStates = append(roots[:i], roots[i+1:]...)
|
||||
// There shouldn't be duplicated roots in `blockRootsOfSavedStates`.
|
||||
// Break here is ok.
|
||||
break
|
||||
}
|
||||
}
|
||||
s.saveHotStateDB.lock.Unlock()
|
||||
}
|
||||
|
||||
@@ -4,8 +4,11 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/kv"
|
||||
testDB "github.com/OffchainLabs/prysm/v7/beacon-chain/db/testing"
|
||||
doublylinkedtree "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
|
||||
"github.com/OffchainLabs/prysm/v7/config/features"
|
||||
consensusblocks "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
@@ -224,3 +227,170 @@ func TestMigrateToCold_ParallelCalls(t *testing.T) {
|
||||
assert.DeepEqual(t, [][32]byte{r7}, service.saveHotStateDB.blockRootsOfSavedStates, "Did not remove all saved hot state roots")
|
||||
require.LogsContain(t, hook, "Saved state in DB")
|
||||
}
|
||||
|
||||
// =========================================================================
|
||||
// Tests for migrateToColdHdiff (state diff migration)
|
||||
// =========================================================================
|
||||
|
||||
// setStateDiffExponents sets state diff exponents for testing.
|
||||
// Uses exponents [6, 5] which means:
|
||||
// - Level 0: Every 2^6 = 64 slots (full snapshot)
|
||||
// - Level 1: Every 2^5 = 32 slots (diff)
|
||||
func setStateDiffExponents() {
|
||||
globalFlags := flags.GlobalFlags{
|
||||
StateDiffExponents: []int{6, 5},
|
||||
}
|
||||
flags.Init(&globalFlags)
|
||||
}
|
||||
|
||||
// TestMigrateToColdHdiff_CanUpdateFinalizedInfo verifies that the migration
|
||||
// correctly updates finalized info when migrating to slots not in the diff tree.
|
||||
func TestMigrateToColdHdiff_CanUpdateFinalizedInfo(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
// Set exponents and create DB first (without EnableStateDiff flag).
|
||||
setStateDiffExponents()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
// Initialize the state diff cache via the method on *kv.Store (not in interface).
|
||||
require.NoError(t, beaconDB.(*kv.Store).InitStateDiffCacheForTesting(t, 0))
|
||||
// Now enable the feature flag.
|
||||
resetCfg := features.InitWithReset(&features.Flags{EnableStateDiff: true})
|
||||
defer resetCfg()
|
||||
service := New(beaconDB, doublylinkedtree.New())
|
||||
|
||||
beaconState, _ := util.DeterministicGenesisState(t, 32)
|
||||
genesisStateRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
util.SaveBlock(t, ctx, beaconDB, genesis)
|
||||
gRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, gRoot))
|
||||
|
||||
// Put genesis state in epoch boundary cache so migrateToColdHdiff doesn't need to retrieve from DB.
|
||||
require.NoError(t, service.epochBoundaryStateCache.put(gRoot, beaconState))
|
||||
|
||||
// Set initial finalized info at genesis.
|
||||
service.finalizedInfo = &finalizedInfo{
|
||||
slot: 0,
|
||||
root: gRoot,
|
||||
state: beaconState,
|
||||
}
|
||||
|
||||
// Create finalized block at slot 10 (not in diff tree, so no intermediate states saved).
|
||||
finalizedState := beaconState.Copy()
|
||||
require.NoError(t, finalizedState.SetSlot(10))
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Slot = 10
|
||||
fRoot, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
util.SaveBlock(t, ctx, beaconDB, b)
|
||||
require.NoError(t, service.epochBoundaryStateCache.put(fRoot, finalizedState))
|
||||
|
||||
require.NoError(t, service.MigrateToCold(ctx, fRoot))
|
||||
|
||||
// Verify finalized info is updated.
|
||||
assert.Equal(t, primitives.Slot(10), service.finalizedInfo.slot)
|
||||
assert.DeepEqual(t, fRoot, service.finalizedInfo.root)
|
||||
expectedHTR, err := finalizedState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
actualHTR, err := service.finalizedInfo.state.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, expectedHTR, actualHTR)
|
||||
}
|
||||
|
||||
// TestMigrateToColdHdiff_SkipsSlotsNotInDiffTree verifies that the migration
|
||||
// skips slots that are not in the diff tree.
|
||||
func TestMigrateToColdHdiff_SkipsSlotsNotInDiffTree(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
ctx := t.Context()
|
||||
// Set exponents and create DB first (without EnableStateDiff flag).
|
||||
setStateDiffExponents()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
// Initialize the state diff cache via the method on *kv.Store (not in interface).
|
||||
require.NoError(t, beaconDB.(*kv.Store).InitStateDiffCacheForTesting(t, 0))
|
||||
// Now enable the feature flag.
|
||||
resetCfg := features.InitWithReset(&features.Flags{EnableStateDiff: true})
|
||||
defer resetCfg()
|
||||
service := New(beaconDB, doublylinkedtree.New())
|
||||
|
||||
beaconState, pks := util.DeterministicGenesisState(t, 32)
|
||||
genesisStateRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
util.SaveBlock(t, ctx, beaconDB, genesis)
|
||||
gRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, gRoot))
|
||||
|
||||
// Start from slot 1 to avoid slot 0 which is in the diff tree.
|
||||
service.finalizedInfo = &finalizedInfo{
|
||||
slot: 1,
|
||||
root: gRoot,
|
||||
state: beaconState,
|
||||
}
|
||||
|
||||
// Reset the log hook to ignore setup logs.
|
||||
hook.Reset()
|
||||
|
||||
// Create a block at slot 20 (NOT in diff tree with exponents [6,5]).
|
||||
b20, err := util.GenerateFullBlock(beaconState, pks, util.DefaultBlockGenConfig(), 20)
|
||||
require.NoError(t, err)
|
||||
r20, err := b20.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
util.SaveBlock(t, ctx, beaconDB, b20)
|
||||
require.NoError(t, beaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Slot: 20, Root: r20[:]}))
|
||||
|
||||
// Put finalized state in cache.
|
||||
finalizedState := beaconState.Copy()
|
||||
require.NoError(t, finalizedState.SetSlot(20))
|
||||
require.NoError(t, service.epochBoundaryStateCache.put(r20, finalizedState))
|
||||
|
||||
require.NoError(t, service.MigrateToCold(ctx, r20))
|
||||
|
||||
// Verify NO states were saved during migration (slots 1-19 are not in diff tree).
|
||||
assert.LogsDoNotContain(t, hook, "Saved state in DB")
|
||||
}
|
||||
|
||||
// TestMigrateToColdHdiff_NoOpWhenFinalizedSlotNotAdvanced verifies that
|
||||
// migration is a no-op when the finalized slot has not advanced.
|
||||
func TestMigrateToColdHdiff_NoOpWhenFinalizedSlotNotAdvanced(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
// Set exponents and create DB first (without EnableStateDiff flag).
|
||||
setStateDiffExponents()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
// Initialize the state diff cache via the method on *kv.Store (not in interface).
|
||||
require.NoError(t, beaconDB.(*kv.Store).InitStateDiffCacheForTesting(t, 0))
|
||||
// Now enable the feature flag.
|
||||
resetCfg := features.InitWithReset(&features.Flags{EnableStateDiff: true})
|
||||
defer resetCfg()
|
||||
service := New(beaconDB, doublylinkedtree.New())
|
||||
|
||||
beaconState, _ := util.DeterministicGenesisState(t, 32)
|
||||
genesisStateRoot, err := beaconState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
util.SaveBlock(t, ctx, beaconDB, genesis)
|
||||
gRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, gRoot))
|
||||
|
||||
// Set finalized info already at slot 50.
|
||||
finalizedState := beaconState.Copy()
|
||||
require.NoError(t, finalizedState.SetSlot(50))
|
||||
service.finalizedInfo = &finalizedInfo{
|
||||
slot: 50,
|
||||
root: gRoot,
|
||||
state: finalizedState,
|
||||
}
|
||||
|
||||
// Create block at same slot 50.
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Slot = 50
|
||||
fRoot, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
util.SaveBlock(t, ctx, beaconDB, b)
|
||||
require.NoError(t, service.epochBoundaryStateCache.put(fRoot, finalizedState))
|
||||
|
||||
// Migration should be a no-op (finalized slot not advancing).
|
||||
require.NoError(t, service.MigrateToCold(ctx, fRoot))
|
||||
}
|
||||
|
||||
@@ -10,20 +10,72 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var nilFinalizedStateError = errors.New("finalized state is nil")
|
||||
|
||||
func (s *Service) maintainCustodyInfo() {
|
||||
func (s *Service) maintainCustodyInfo() error {
|
||||
// Rationale of slot choice:
|
||||
// - If syncing with an empty DB from genesis, then justifiedSlot = finalizedSlot = 0,
|
||||
// and the node starts to sync from slot 0 ==> Using justifiedSlot is correct.
|
||||
// - If syncing with an empty DB from a checkpoint, then justifiedSlot = finalizedSlot = checkpointSlot,
|
||||
// and the node starts to sync from checkpointSlot ==> Using justifiedSlot is correct.
|
||||
// - If syncing with a non-empty DB, then justifiedSlot > finalizedSlot,
|
||||
// and the node starts to sync from justifiedSlot + 1 ==> Using justifiedSlot + 1 is correct.
|
||||
const interval = 1 * time.Minute
|
||||
|
||||
finalizedCheckpoint, err := s.cfg.beaconDB.FinalizedCheckpoint(s.ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "finalized checkpoint")
|
||||
}
|
||||
|
||||
if finalizedCheckpoint == nil {
|
||||
return errors.New("finalized checkpoint is nil")
|
||||
}
|
||||
|
||||
finalizedSlot, err := slots.EpochStart(finalizedCheckpoint.Epoch)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "epoch start for finalized slot")
|
||||
}
|
||||
|
||||
justifiedCheckpoint, err := s.cfg.beaconDB.JustifiedCheckpoint(s.ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "justified checkpoint")
|
||||
}
|
||||
|
||||
if justifiedCheckpoint == nil {
|
||||
return errors.New("justified checkpoint is nil")
|
||||
}
|
||||
|
||||
justifiedSlot, err := slots.EpochStart(justifiedCheckpoint.Epoch)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "epoch start for justified slot")
|
||||
}
|
||||
|
||||
slot := justifiedSlot
|
||||
if justifiedSlot > finalizedSlot {
|
||||
slot++
|
||||
}
|
||||
|
||||
earliestAvailableSlot, custodySubnetCount, err := s.updateCustodyInfoInDB(slot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get and save custody group count")
|
||||
}
|
||||
|
||||
if _, _, err := s.cfg.p2p.UpdateCustodyInfo(earliestAvailableSlot, custodySubnetCount); err != nil {
|
||||
return errors.Wrap(err, "update custody info")
|
||||
}
|
||||
|
||||
async.RunEvery(s.ctx, interval, func() {
|
||||
if err := s.updateCustodyInfoIfNeeded(); err != nil {
|
||||
log.WithError(err).Error("Failed to update custody info")
|
||||
}
|
||||
})
|
||||
|
||||
return nil
|
||||
}
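
Condensed, the rationale comment above boils down to one rule; the sketch below merely restates the code above for clarity, it is not an alternative implementation:

```go
// Reference slot for the initial custody update:
//   empty DB, genesis sync:    justified == finalized == 0      -> use justifiedSlot
//   empty DB, checkpoint sync: justified == finalized == cpSlot -> use justifiedSlot
//   non-empty DB:              justified > finalized            -> use justifiedSlot + 1
func custodyReferenceSlot(justifiedSlot, finalizedSlot primitives.Slot) primitives.Slot {
	if justifiedSlot > finalizedSlot {
		return justifiedSlot + 1
	}
	return justifiedSlot
}
```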
|
||||
|
||||
func (s *Service) updateCustodyInfoIfNeeded() error {
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
mock "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/kv"
|
||||
dbTest "github.com/OffchainLabs/prysm/v7/beacon-chain/db/testing"
|
||||
testingDB "github.com/OffchainLabs/prysm/v7/beacon-chain/db/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
|
||||
@@ -934,6 +935,7 @@ func TestStatusRPCRequest_BadPeerHandshake(t *testing.T) {
|
||||
r := &Service{
|
||||
cfg: &config{
|
||||
p2p: p1,
|
||||
beaconDB: dbTest.SetupDB(t),
|
||||
chain: chain,
|
||||
stateNotifier: chain.StateNotifier(),
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
blockfeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/block"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
|
||||
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/execution"
|
||||
@@ -33,9 +34,11 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/sync/backfill/coverage"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
|
||||
lruwrpr "github.com/OffchainLabs/prysm/v7/cache/lru"
|
||||
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
leakybucket "github.com/OffchainLabs/prysm/v7/container/leaky-bucket"
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/rand"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime"
|
||||
@@ -275,11 +278,6 @@ func (s *Service) Start() {
|
||||
|
||||
s.processPendingBlocksQueue()
|
||||
s.maintainPeerStatuses()
|
||||
|
||||
if params.FuluEnabled() {
|
||||
s.maintainCustodyInfo()
|
||||
}
|
||||
|
||||
s.resyncIfBehind()
|
||||
|
||||
// Update sync metrics.
|
||||
@@ -287,6 +285,15 @@ func (s *Service) Start() {
|
||||
|
||||
// Prune data column cache periodically on finalization.
|
||||
async.RunEvery(s.ctx, 30*time.Second, s.pruneDataColumnCache)
|
||||
|
||||
if !params.FuluEnabled() {
|
||||
return
|
||||
}
|
||||
|
||||
if err := s.maintainCustodyInfo(); err != nil {
|
||||
log.WithError(err).Error("Failed to maintain custody info")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Stop the regular sync service.
|
||||
@@ -452,6 +459,89 @@ func (s *Service) waitForInitialSync(ctx context.Context) error {
|
||||
}
|
||||
}
|
||||
|
||||
// updateCustodyInfoInDB updates the custody information in the database.
// It returns the earliest available slot and the (potentially updated) custody group count.
|
||||
func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot, uint64, error) {
|
||||
isSupernode := flags.Get().Supernode
|
||||
isSemiSupernode := flags.Get().SemiSupernode
|
||||
|
||||
cfg := params.BeaconConfig()
|
||||
custodyRequirement := cfg.CustodyRequirement
|
||||
|
||||
// Check if the node was previously subscribed to all data subnets, and if so,
|
||||
// store the new status accordingly.
|
||||
wasSupernode, err := s.cfg.beaconDB.UpdateSubscribedToAllDataSubnets(s.ctx, isSupernode)
|
||||
if err != nil {
|
||||
return 0, 0, errors.Wrap(err, "update subscribed to all data subnets")
|
||||
}
|
||||
|
||||
// Compute the target custody group count based on current flag configuration.
|
||||
targetCustodyGroupCount := custodyRequirement
|
||||
|
||||
// Supernode: custody all groups (either currently set or previously enabled)
|
||||
if isSupernode {
|
||||
targetCustodyGroupCount = cfg.NumberOfCustodyGroups
|
||||
}
|
||||
|
||||
// Semi-supernode: custody minimum needed for reconstruction, or custody requirement if higher
|
||||
if isSemiSupernode {
|
||||
semiSupernodeCustody, err := peerdas.MinimumCustodyGroupCountToReconstruct()
|
||||
if err != nil {
|
||||
return 0, 0, errors.Wrap(err, "minimum custody group count")
|
||||
}
|
||||
|
||||
targetCustodyGroupCount = max(custodyRequirement, semiSupernodeCustody)
|
||||
}
|
||||
|
||||
// Safely compute the fulu fork slot.
|
||||
fuluForkSlot, err := fuluForkSlot()
|
||||
if err != nil {
|
||||
return 0, 0, errors.Wrap(err, "fulu fork slot")
|
||||
}
|
||||
|
||||
// If slot is before the fulu fork slot, then use the earliest stored slot as the reference slot.
|
||||
if slot < fuluForkSlot {
|
||||
slot, err = s.cfg.beaconDB.EarliestSlot(s.ctx)
|
||||
if err != nil {
|
||||
return 0, 0, errors.Wrap(err, "earliest slot")
|
||||
}
|
||||
}
|
||||
|
||||
earliestAvailableSlot, actualCustodyGroupCount, err := s.cfg.beaconDB.UpdateCustodyInfo(s.ctx, slot, targetCustodyGroupCount)
|
||||
if err != nil {
|
||||
return 0, 0, errors.Wrap(err, "update custody info")
|
||||
}
|
||||
|
||||
if isSupernode {
|
||||
log.WithFields(logrus.Fields{
|
||||
"current": actualCustodyGroupCount,
|
||||
"target": cfg.NumberOfCustodyGroups,
|
||||
}).Info("Supernode mode enabled. Will custody all data columns going forward.")
|
||||
}
|
||||
|
||||
if wasSupernode && !isSupernode {
|
||||
log.Warningf("Because the `--%s` flag was previously used, the node will continue to act as a super node.", flags.Supernode.Name)
|
||||
}
|
||||
|
||||
return earliestAvailableSlot, actualCustodyGroupCount, nil
|
||||
}
|
||||
|
||||
func fuluForkSlot() (primitives.Slot, error) {
|
||||
cfg := params.BeaconConfig()
|
||||
|
||||
fuluForkEpoch := cfg.FuluForkEpoch
|
||||
if fuluForkEpoch == cfg.FarFutureEpoch {
|
||||
return cfg.FarFutureSlot, nil
|
||||
}
|
||||
|
||||
forkFuluSlot, err := slots.EpochStart(fuluForkEpoch)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "epoch start")
|
||||
}
|
||||
|
||||
return forkFuluSlot, nil
|
||||
}
|
||||
|
||||
// Checker defines a struct which can verify whether a node is currently
|
||||
// synchronizing a chain with the rest of peers in the network.
|
||||
type Checker interface {
|
||||
|
||||
@@ -16,6 +16,9 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
|
||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
mockSync "github.com/OffchainLabs/prysm/v7/beacon-chain/sync/initial-sync/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
leakybucket "github.com/OffchainLabs/prysm/v7/container/leaky-bucket"
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/bls"
|
||||
@@ -440,3 +443,224 @@ func TestService_Stop_ConcurrentGoodbyeMessages(t *testing.T) {
|
||||
|
||||
require.Equal(t, false, util.WaitTimeout(&wg, 2*time.Second))
|
||||
}
|
||||
|
||||
func TestUpdateCustodyInfoInDB(t *testing.T) {
|
||||
const (
|
||||
fuluForkEpoch = 10
|
||||
custodyRequirement = uint64(4)
|
||||
earliestStoredSlot = primitives.Slot(12)
|
||||
numberOfCustodyGroups = uint64(64)
|
||||
)
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.FuluForkEpoch = fuluForkEpoch
|
||||
cfg.CustodyRequirement = custodyRequirement
|
||||
cfg.NumberOfCustodyGroups = numberOfCustodyGroups
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
ctx := t.Context()
|
||||
pbBlock := util.NewBeaconBlock()
|
||||
pbBlock.Block.Slot = 12
|
||||
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(pbBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
roBlock, err := blocks.NewROBlock(signedBeaconBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("CGC increases before fulu", func(t *testing.T) {
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
service := Service{cfg: &config{beaconDB: beaconDB}}
|
||||
err = beaconDB.SaveBlock(ctx, roBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Before Fulu
|
||||
// -----------
|
||||
actualEas, actualCgc, err := service.updateCustodyInfoInDB(15)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, earliestStoredSlot, actualEas)
|
||||
require.Equal(t, custodyRequirement, actualCgc)
|
||||
|
||||
actualEas, actualCgc, err = service.updateCustodyInfoInDB(17)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, earliestStoredSlot, actualEas)
|
||||
require.Equal(t, custodyRequirement, actualCgc)
|
||||
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.Supernode = true
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
|
||||
actualEas, actualCgc, err = service.updateCustodyInfoInDB(19)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, earliestStoredSlot, actualEas)
|
||||
require.Equal(t, numberOfCustodyGroups, actualCgc)
|
||||
|
||||
// After Fulu
|
||||
// ----------
|
||||
actualEas, actualCgc, err = service.updateCustodyInfoInDB(fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, earliestStoredSlot, actualEas)
|
||||
require.Equal(t, numberOfCustodyGroups, actualCgc)
|
||||
})
|
||||
|
||||
t.Run("CGC increases after fulu", func(t *testing.T) {
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
service := Service{cfg: &config{beaconDB: beaconDB}}
|
||||
err = beaconDB.SaveBlock(ctx, roBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Before Fulu
|
||||
// -----------
|
||||
actualEas, actualCgc, err := service.updateCustodyInfoInDB(15)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, earliestStoredSlot, actualEas)
|
||||
require.Equal(t, custodyRequirement, actualCgc)
|
||||
|
||||
actualEas, actualCgc, err = service.updateCustodyInfoInDB(17)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, earliestStoredSlot, actualEas)
|
||||
require.Equal(t, custodyRequirement, actualCgc)
|
||||
|
||||
// After Fulu
|
||||
// ----------
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.Supernode = true
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
|
||||
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
|
||||
actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, slot, actualEas)
|
||||
require.Equal(t, numberOfCustodyGroups, actualCgc)
|
||||
|
||||
actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot + 2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, slot, actualEas)
|
||||
require.Equal(t, numberOfCustodyGroups, actualCgc)
|
||||
})
|
||||
|
||||
t.Run("Supernode downgrade prevented", func(t *testing.T) {
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
service := Service{cfg: &config{beaconDB: beaconDB}}
|
||||
err = beaconDB.SaveBlock(ctx, roBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Enable supernode
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.Supernode = true
|
||||
flags.Init(gFlags)
|
||||
|
||||
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
|
||||
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, slot, actualEas)
|
||||
require.Equal(t, numberOfCustodyGroups, actualCgc)
|
||||
|
||||
// Try to downgrade by removing flag
|
||||
gFlags.Supernode = false
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
|
||||
// Should still be supernode
|
||||
actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot + 2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, slot, actualEas)
|
||||
require.Equal(t, numberOfCustodyGroups, actualCgc) // Still 64, not downgraded
|
||||
})
|
||||
|
||||
t.Run("Semi-supernode downgrade prevented", func(t *testing.T) {
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
service := Service{cfg: &config{beaconDB: beaconDB}}
|
||||
err = beaconDB.SaveBlock(ctx, roBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Enable semi-supernode
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.SemiSupernode = true
|
||||
flags.Init(gFlags)
|
||||
|
||||
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
|
||||
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, slot, actualEas)
|
||||
semiSupernodeCustody := numberOfCustodyGroups / 2 // 64
|
||||
require.Equal(t, semiSupernodeCustody, actualCgc) // Semi-supernode custodies 64 groups
|
||||
|
||||
// Try to downgrade by removing flag
|
||||
gFlags.SemiSupernode = false
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
|
||||
// UpdateCustodyInfo should prevent downgrade - custody count should remain at 64
|
||||
actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot + 2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, slot, actualEas)
|
||||
require.Equal(t, semiSupernodeCustody, actualCgc) // Still 64 due to downgrade prevention by UpdateCustodyInfo
|
||||
})
|
||||
|
||||
t.Run("Semi-supernode to supernode upgrade allowed", func(t *testing.T) {
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
service := Service{cfg: &config{beaconDB: beaconDB}}
|
||||
err = beaconDB.SaveBlock(ctx, roBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Start with semi-supernode
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.SemiSupernode = true
|
||||
flags.Init(gFlags)
|
||||
|
||||
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
|
||||
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, slot, actualEas)
|
||||
semiSupernodeCustody := numberOfCustodyGroups / 2 // 64
|
||||
require.Equal(t, semiSupernodeCustody, actualCgc) // Semi-supernode custodies 64 groups
|
||||
|
||||
// Upgrade to full supernode
|
||||
gFlags.SemiSupernode = false
|
||||
gFlags.Supernode = true
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
|
||||
// Should upgrade to full supernode
|
||||
upgradeSlot := slot + 2
|
||||
actualEas, actualCgc, err = service.updateCustodyInfoInDB(upgradeSlot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, upgradeSlot, actualEas) // Earliest slot updates when upgrading
|
||||
require.Equal(t, numberOfCustodyGroups, actualCgc) // Upgraded to 128
|
||||
})
|
||||
|
||||
t.Run("Semi-supernode with high validator requirements uses higher custody", func(t *testing.T) {
|
||||
beaconDB := dbTest.SetupDB(t)
|
||||
service := Service{cfg: &config{beaconDB: beaconDB}}
|
||||
err = beaconDB.SaveBlock(ctx, roBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Enable semi-supernode
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.SemiSupernode = true
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
|
||||
// Mock a high custody requirement (simulating many validators)
|
||||
// We need to override the custody requirement calculation
|
||||
// For this test, we'll verify the logic by checking if custodyRequirement > 64
|
||||
// Since custodyRequirement in this test setup is 4, we can't test the high case here
|
||||
// This would require a different test setup with actual validators
|
||||
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
|
||||
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, slot, actualEas)
|
||||
semiSupernodeCustody := numberOfCustodyGroups / 2 // 64
|
||||
// With low validator requirements (4), should use semi-supernode minimum (64)
|
||||
require.Equal(t, semiSupernodeCustody, actualCgc)
|
||||
})
|
||||
}
|
||||
|
||||
changelog/manu-eas.md (new file, 3 lines)
@@ -0,0 +1,3 @@
|
||||
### Fixed
|
||||
|
||||
- When adding the `--[semi-]supernode` flag, update the earliest available slot accordingly.
|
||||
changelog/potuz_hdiff_migrate_to_cold.md (new file, 3 lines)
@@ -0,0 +1,3 @@
|
||||
### Added
|
||||
|
||||
- Migrate to cold with the hdiff feature.
|
||||
changelog/potuz_use_hashtree.md (new file, 2 lines)
@@ -0,0 +1,2 @@
|
||||
### Added
|
||||
- Add feature flag to use hashtree instead of gohashtree.
|
||||
@@ -70,6 +70,7 @@ type Flags struct {
|
||||
DisableStakinContractCheck bool // Disables check for deposit contract when proposing blocks
|
||||
IgnoreUnviableAttestations bool // Ignore attestations whose target state is not viable (avoids lagging-node DoS).
|
||||
|
||||
EnableHashtree bool // Enables usage of the hashtree library for hashing
|
||||
EnableVerboseSigVerification bool // EnableVerboseSigVerification specifies whether to verify individual signature if batch verification fails
|
||||
EnableProposerPreprocessing bool // EnableProposerPreprocessing enables proposer pre-processing of blocks before proposing.
|
||||
|
||||
@@ -237,6 +238,10 @@ func ConfigureBeaconChain(ctx *cli.Context) error {
|
||||
logEnabled(enableFullSSZDataLogging)
|
||||
cfg.EnableFullSSZDataLogging = true
|
||||
}
|
||||
if ctx.IsSet(enableHashtree.Name) {
|
||||
logEnabled(enableHashtree)
|
||||
cfg.EnableHashtree = true
|
||||
}
|
||||
cfg.EnableVerboseSigVerification = true
|
||||
if ctx.IsSet(disableVerboseSigVerification.Name) {
|
||||
logEnabled(disableVerboseSigVerification)
|
||||
|
||||
@@ -133,6 +133,10 @@ var (
|
||||
Name: "enable-beacon-rest-api",
|
||||
Usage: "(Experimental): Enables of the beacon REST API when querying a beacon node.",
|
||||
}
|
||||
enableHashtree = &cli.BoolFlag{
|
||||
Name: "enable-hashtree",
|
||||
Usage: "(Experimental): Enables the hashtree hashing library.",
|
||||
}
|
||||
disableVerboseSigVerification = &cli.BoolFlag{
|
||||
Name: "disable-verbose-sig-verification",
|
||||
Usage: "Disables identifying invalid signatures if batch verification fails when processing block.",
|
||||
@@ -278,6 +282,7 @@ var BeaconChainFlags = combinedFlags([]cli.Flag{
enableExperimentalAttestationPool,
forceHeadFlag,
blacklistRoots,
enableHashtree,
}, deprecatedBeaconFlags, deprecatedFlags, upcomingDeprecation)

func combinedFlags(flags ...[]cli.Flag) []cli.Flag {
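Once parsed, the new CLI flag simply flips EnableHashtree in the features config, and downstream code branches on it through features.Get(). A minimal sketch of such a check; the package and helper names here (featuresexample, hashingBackend) are hypothetical and not part of this change:

package featuresexample

import "github.com/OffchainLabs/prysm/v7/config/features"

// hashingBackend is a hypothetical helper illustrating how code elsewhere in
// the tree can branch on the new EnableHashtree feature flag.
func hashingBackend() string {
	if features.Get().EnableHashtree {
		return "hashtree"
	}
	return "gohashtree"
}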
@@ -36,7 +36,6 @@ go_library(
"//runtime/version:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_prysmaticlabs_gohashtree//:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)
@@ -63,6 +62,7 @@ go_test(
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//container/trie:go_default_library",
"//crypto/hash/htr:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
@@ -72,6 +72,5 @@ go_test(
"//testing/require:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_prysmaticlabs_gohashtree//:go_default_library",
],
)
@@ -5,10 +5,10 @@ import (
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/container/trie"
"github.com/OffchainLabs/prysm/v7/crypto/hash/htr"
"github.com/OffchainLabs/prysm/v7/encoding/ssz"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/pkg/errors"
"github.com/prysmaticlabs/gohashtree"
)

const (
@@ -45,7 +45,7 @@ func VerifyKZGInclusionProof(blob ROBlob) error {
return errInvalidBodyRoot
}
chunks := makeChunk(blob.KzgCommitment)
gohashtree.HashChunks(chunks, chunks)
htr.HashChunks(chunks, chunks)
verified := trie.VerifyMerkleProof(root, chunks[0][:], blob.Index+KZGOffset, blob.CommitmentInclusionProof)
if !verified {
return errInvalidInclusionProof
@@ -182,7 +182,7 @@ func LeavesFromCommitments(commitments [][]byte) [][]byte {
leaves := make([][]byte, len(commitments))
for i, kzg := range commitments {
chunk := makeChunk(kzg)
gohashtree.HashChunks(chunk, chunk)
htr.HashChunks(chunk, chunk)
leaves[i] = chunk[0][:]
}
return leaves
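A KZG commitment is 48 bytes, so it does not fit in a single 32-byte SSZ chunk; makeChunk is assumed to spread it across two zero-padded chunks, and the pairwise hash above collapses them into the single leaf used in the Merkle proof. A rough sketch of that packing, where makeChunkSketch is a hypothetical stand-in and not the repository's makeChunk:

// Hypothetical stand-in for makeChunk: pack a 48-byte KZG commitment into
// two 32-byte chunks (the second one zero-padded). Hashing the pair, as done
// above with htr.HashChunks, yields the commitment's hash tree root (the leaf).
func makeChunkSketch(commitment []byte) [][32]byte {
	chunks := make([][32]byte, 2)
	copy(chunks[0][:], commitment[:32])
	copy(chunks[1][:], commitment[32:]) // remaining 16 bytes; the rest stays zero
	return chunks
}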
@@ -8,10 +8,10 @@ import (
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/container/trie"
"github.com/OffchainLabs/prysm/v7/crypto/hash/htr"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/prysmaticlabs/gohashtree"
)

func Test_MerkleProofKZGCommitment_Altair(t *testing.T) {
@@ -108,7 +108,7 @@ func Test_MerkleProofKZGCommitment(t *testing.T) {
require.Equal(t, true, trie.VerifyMerkleProof(root[:], commitmentsRoot[:], kzgPosition, topProof[:len(topProof)-1]))

chunk := makeChunk(kzgs[index])
gohashtree.HashChunks(chunk, chunk)
htr.HashChunks(chunk, chunk)
require.Equal(t, true, trie.VerifyMerkleProof(root[:], chunk[0][:], uint64(index+KZGOffset), proof))
}
@@ -5,7 +5,12 @@ go_library(
srcs = ["hashtree.go"],
importpath = "github.com/OffchainLabs/prysm/v7/crypto/hash/htr",
visibility = ["//visibility:public"],
deps = ["@com_github_prysmaticlabs_gohashtree//:go_default_library"],
deps = [
"//config/features:go_default_library",
"@com_github_offchainlabs_hashtree//:go_default_library",
"@com_github_prysmaticlabs_gohashtree//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

go_test(
@@ -13,5 +18,8 @@ go_test(
size = "small",
srcs = ["hashtree_test.go"],
embed = [":go_default_library"],
deps = ["//testing/require:go_default_library"],
deps = [
"//config/features:go_default_library",
"//testing/require:go_default_library",
],
)
@@ -4,14 +4,38 @@ import (
"runtime"
"sync"

"github.com/OffchainLabs/hashtree"
"github.com/OffchainLabs/prysm/v7/config/features"
"github.com/prysmaticlabs/gohashtree"
log "github.com/sirupsen/logrus"
)

const minSliceSizeToParallelize = 5000

// Hash hashes chunks pairwise into digests using the configured hashing library.
// It performs input validation (odd chunks, digest length).
func Hash(digests, chunks [][32]byte) error {
if features.Get().EnableHashtree {
return hashtree.Hash(digests, chunks)
}
return gohashtree.Hash(digests, chunks)
}

// HashChunks hashes chunks pairwise into digests without error checking.
// The caller must ensure inputs are valid (even chunks, sufficient digest space).
func HashChunks(digests, chunks [][32]byte) {
if features.Get().EnableHashtree {
if err := hashtree.Hash(digests, chunks); err != nil {
log.WithError(err).Error("Could not hash chunks")
}
} else {
gohashtree.HashChunks(digests, chunks)
}
}

func hashParallel(inputList [][32]byte, outputList [][32]byte, wg *sync.WaitGroup) {
defer wg.Done()
err := gohashtree.Hash(outputList, inputList)
err := Hash(outputList, inputList)
if err != nil {
panic(err) // lint:nopanic -- This should never panic.
}
@@ -25,7 +49,7 @@ func hashParallel(inputList [][32]byte, outputList [][32]byte, wg *sync.WaitGrou
func VectorizedSha256(inputList [][32]byte) [][32]byte {
outputList := make([][32]byte, len(inputList)/2)
if len(inputList) < minSliceSizeToParallelize {
err := gohashtree.Hash(outputList, inputList)
err := Hash(outputList, inputList)
if err != nil {
panic(err) // lint:nopanic -- This should never panic.
}
@@ -38,7 +62,7 @@ func VectorizedSha256(inputList [][32]byte) [][32]byte {
for j := range n {
go hashParallel(inputList[j*2*groupSize:(j+1)*2*groupSize], outputList[j*groupSize:], &wg)
}
err := gohashtree.Hash(outputList[n*groupSize:], inputList[n*2*groupSize:])
err := Hash(outputList[n*groupSize:], inputList[n*2*groupSize:])
if err != nil {
panic(err) // lint:nopanic -- This should never panic.
}
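With the wrappers above, callers no longer reference gohashtree directly; whether hashtree or gohashtree does the work is decided per call from the feature flag. A minimal usage sketch, mirroring the test setup shown further down and assuming only the htr and features APIs visible in this diff (the package and example names are illustrative):

package htrexample

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v7/config/features"
	"github.com/OffchainLabs/prysm/v7/crypto/hash/htr"
)

func ExampleVectorizedSha256() {
	// Opt in to the hashtree backend; gohashtree remains the default path.
	reset := features.InitWithReset(&features.Flags{EnableHashtree: true})
	defer reset()

	chunks := make([][32]byte, 4)           // an even number of 32-byte chunks
	digests := htr.VectorizedSha256(chunks) // one digest per pair of chunks
	fmt.Println(len(digests))
	// Output: 2
}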
@@ -4,6 +4,7 @@ import (
"sync"
"testing"

"github.com/OffchainLabs/prysm/v7/config/features"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
@@ -23,3 +24,25 @@ func Test_VectorizedSha256(t *testing.T) {
require.Equal(t, r, hash2[i])
}
}

func Test_VectorizedSha256_hashtree_enabled(t *testing.T) {
resetCfg := features.InitWithReset(&features.Flags{
EnableHashtree: true,
})
defer resetCfg()

largeSlice := make([][32]byte, 32*minSliceSizeToParallelize)
secondLargeSlice := make([][32]byte, 32*minSliceSizeToParallelize)
hash1 := make([][32]byte, 16*minSliceSizeToParallelize)
wg := sync.WaitGroup{}
wg.Go(func() {
tempHash := VectorizedSha256(largeSlice)
copy(hash1, tempHash)
})
wg.Wait()
hash2 := VectorizedSha256(secondLargeSlice)
require.Equal(t, len(hash1), len(hash2))
for i, r := range hash1 {
require.Equal(t, r, hash2[i])
}
}
9 deps.bzl
@@ -2449,6 +2449,15 @@ def prysm_deps():
sum = "h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY=",
version = "v1.4.11",
)
go_repository(
name = "com_github_offchainlabs_hashtree",
build_file_generation = "off",
importpath = "github.com/OffchainLabs/hashtree",
patch_args = ["-p1"],
patches = ["//third_party:com_github_offchainlabs_hashtree.patch"],
sum = "h1:R6DAjgAUwwfgji3jEI4WUxtZ3eJ+FbRHjW21UPMBJyo=",
version = "v0.2.2",
)
go_repository(
name = "com_github_oklog_oklog",
importpath = "github.com/oklog/oklog",
@@ -21,7 +21,6 @@ go_library(
"@com_github_minio_sha256_simd//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_prysmaticlabs_gohashtree//:go_default_library",
],
)
@@ -6,7 +6,6 @@ import (
"github.com/OffchainLabs/prysm/v7/container/trie"
"github.com/OffchainLabs/prysm/v7/crypto/hash/htr"
"github.com/pkg/errors"
"github.com/prysmaticlabs/gohashtree"
)

var errInvalidNilSlice = errors.New("invalid empty slice")
@@ -182,10 +181,10 @@ func MerkleizeListSSZ[T Hashable](elements []T, limit uint64) ([32]byte, error)
chunks := make([][32]byte, 2)
chunks[0] = body
binary.LittleEndian.PutUint64(chunks[1][:], uint64(len(elements)))
if err := gohashtree.Hash(chunks, chunks); err != nil {
if err = htr.Hash(chunks, chunks); err != nil {
return [32]byte{}, err
}
return chunks[0], err
return chunks[0], nil
}

// MerkleizeByteSliceSSZ hashes a byteslice by chunkifying it and returning the
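The hunk above is the SSZ length mix-in: the Merkle root of the list body is hashed together with the element count encoded as a little-endian 32-byte chunk, now routed through htr.Hash instead of gohashtree directly. The same computation written standalone with the standard library, where the package and function names (sszexample, mixInLength) are hypothetical and used only for illustration:

package sszexample

import (
	"crypto/sha256"
	"encoding/binary"
)

// mixInLength reproduces the final step of MerkleizeListSSZ shown above:
// root = sha256(bodyRoot || uint64(length) encoded little-endian in a 32-byte chunk).
func mixInLength(bodyRoot [32]byte, length uint64) [32]byte {
	var chunk [32]byte
	binary.LittleEndian.PutUint64(chunk[:], length)
	return sha256.Sum256(append(bodyRoot[:], chunk[:]...))
}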
1 go.mod
@@ -6,6 +6,7 @@ require (
github.com/MariusVanDerWijden/FuzzyVM v0.0.0-20240516070431-7828990cad7d
github.com/MariusVanDerWijden/tx-fuzz v1.4.0
github.com/OffchainLabs/go-bitfield v0.0.0-20251031151322-f427d04d8506
github.com/OffchainLabs/hashtree v0.2.3
github.com/aristanetworks/goarista v0.0.0-20200805130819-fd197cf57d96
github.com/bazelbuild/rules_go v0.23.2
github.com/btcsuite/btcd/btcec/v2 v2.3.4
2 go.sum
@@ -59,6 +59,8 @@ github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERo
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/OffchainLabs/go-bitfield v0.0.0-20251031151322-f427d04d8506 h1:d/SJkN8/9Ca+1YmuDiUJxAiV4w/a9S8NcsG7GMQSrVI=
github.com/OffchainLabs/go-bitfield v0.0.0-20251031151322-f427d04d8506/go.mod h1:6TZI4FU6zT8x6ZfWa1J8YQ2NgW0wLV/W3fHRca8ISBo=
github.com/OffchainLabs/hashtree v0.2.3 h1:nM8dBAQZzHLzzM14FaAHXnHTAXZIst69v5xWuS48y/c=
github.com/OffchainLabs/hashtree v0.2.3/go.mod h1:b07+cRZs+eAR8TR57CB9TQlt5Gnl/06Xs76xt/1wq0M=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/sarama v1.26.1/go.mod h1:NbSGBSSndYaIhRcBtY9V0U7AyH+x71bG668AuWys/yU=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
90 third_party/com_github_offchainlabs_hashtree.patch vendored Normal file
@@ -0,0 +1,90 @@
diff -urN a/BUILD.bazel b/BUILD.bazel
--- a/BUILD.bazel 1969-12-31 18:00:00.000000000 -0600
+++ b/BUILD.bazel 2025-01-05 12:00:00.000000000 -0600
@@ -0,0 +1,86 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "bindings.go",
+ "sha256_1_generic.go",
+ ] + select({
+ "@io_bazel_rules_go//go/platform:linux_amd64": [
+ "bindings_amd64.go",
+ "wrapper_linux_amd64.s",
+ ":hashtree_amd64_syso",
+ ],
+ "@io_bazel_rules_go//go/platform:windows_amd64": [
+ "bindings_amd64.go",
+ "wrapper_windows_amd64.s",
+ ":hashtree_amd64_syso",
+ ],
+ "@io_bazel_rules_go//go/platform:linux_arm64": [
+ "bindings_arm64.go",
+ "wrapper_arm64.s",
+ ":hashtree_arm64_syso",
+ ],
+ "@io_bazel_rules_go//go/platform:darwin_arm64": [
+ "bindings_arm64.go",
+ "wrapper_arm64.s",
+ ":hashtree_arm64_syso",
+ ],
+ "@io_bazel_rules_go//go/platform:darwin_amd64": [
+ "bindings_darwin_amd64.go",
+ ],
+ "//conditions:default": [],
+ }),
+ importpath = "github.com/OffchainLabs/hashtree",
+ visibility = ["//visibility:public"],
+ deps = ["@com_github_klauspost_cpuid_v2//:go_default_library"],
+)
+
+genrule(
+ name = "hashtree_arm64_syso",
+ srcs = [":hashtree_arm64"],
+ outs = ["hashtree_arm64.syso"],
+ cmd = "cp $(location :hashtree_arm64) $@",
+)
+
+genrule(
+ name = "hashtree_amd64_syso",
+ srcs = [":hashtree_amd64"],
+ outs = ["hashtree_amd64.syso"],
+ cmd = "cp $(location :hashtree_amd64) $@",
+)
+
+cc_library(
+ name = "hashtree_arm64",
+ srcs = [
+ "src/hashtree.c",
+ "src/sha256_generic.c",
+ "src/sha256_armv8_crypto.S",
+ "src/sha256_armv8_neon_x1.S",
+ "src/sha256_armv8_neon_x4.S",
+ ],
+ hdrs = ["src/hashtree.h"],
+ copts = ["-O3", "-Wall"],
+ linkstatic = True,
+ strip_include_prefix = "src",
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "hashtree_amd64",
+ srcs = [
+ "src/hashtree.c",
+ "src/sha256_generic.c",
+ "src/sha256_avx_x1.S",
+ "src/sha256_avx_x4.S",
+ "src/sha256_avx_x8.S",
+ "src/sha256_avx_x16.S",
+ "src/sha256_shani.S",
+ "src/sha256_sse_x1.S",
+ ],
+ hdrs = ["src/hashtree.h"],
+ copts = ["-O3", "-Wall", "-fno-integrated-as"],
+ linkstatic = True,
+ strip_include_prefix = "src",
+ visibility = ["//visibility:public"],
+)