Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-11 06:18:05 -05:00)
Compare commits: check-cont...test100 (194 commits)
Commits (SHA1):

1bc9c58da1
7fa97b2952
4b9e23e55a
9ee7d6f7da
41eaa96396
d24ee89e57
788af59823
38cf0e9e94
d4563827d7
be14c4c3d1
0a0c1b24f1
44ef29d33a
e2461912f0
7a619cff36
144e28def4
9269895f6a
028b115b8c
81aff9fc58
abb8eb373d
f72d59b004
e25497be3e
d4233471d2
d63ae69920
b9fd32dfff
8897a26f84
559d02bf4d
b2a26f2b62
09659010f8
62fec4d1f3
6a13ba9125
589042df20
312b93e9b1
f86f76e447
c311e652eb
6a5d78a331
a2fd30497e
a94561f8dc
af875b78c9
61207bd3ac
0b6fcd7d17
fe2766e716
9135d765e1
eca87f29d1
00821c8f55
4b9e92bcd7
b01d9005b8
8d812d5f0e
24a3cb2a8b
66d1d3e248
99933678ea
34f8e1e92b
a6a41a8755
f110b94fac
33023aa282
eeb3cdc99e
1e7147f060
8936beaff3
c00283f247
a4269cf308
91f3c8a4d0
30c7ee9c7b
456d8b9eb9
4fe3e6d31a
01ee1c80b4
c14fe47a81
b9deabbf0a
5d66a98e78
2d46d6ffae
57107e50a7
47271254f6
f304028874
8abc5e159a
b1ac53c4dd
27ab68c856
ddf5a3953b
92d2fc101d
8996000d2b
a2fcba2349
abe8638991
0b5064b474
da9d4cf5b9
a62cca15dd
ac04246a2a
0923145bd7
a216cb4105
01705d1f3d
14f93b4e9d
ad11036c36
632a06076b
242c2b0268
19662da905
7faee5af35
805ee1bf31
bea46fdfa1
f6b1fb1c88
6fb349ea76
e5a425f5c7
f157d37e4c
5f08559bef
a082d2aecd
bcfaff8504
d8e09c346f
876519731b
de05b83aca
56c73e7193
859ac008a8
f882bd27c8
361e5759c1
34ef0da896
726e8b962f
453ea01deb
6537f8011e
5f17317c1c
3432ffa4a3
9dac67635b
9be69fbd07
e21261e893
da53a8fc48
a14634e656
43761a8066
01dbc337c0
92f9b55fcb
f65f12f58b
f2b61a3dcf
77a6d29a2e
31d16da3a0
19221b77bd
83df293647
c20c09ce36
2191faaa3f
2de1e6f3e4
db44df3964
f92eb44c89
a26980b64d
f58cf7e626
68da7dabe2
d1e43a2c02
3652bec2f8
81b7a1725f
0c917079c4
a732fe7021
d75a7aae6a
e788a46e82
199543125a
ca63efa770
345e6edd9c
6403064126
0517d76631
000d480f77
b40a8ed37e
d21c2bd63e
7a256e93f7
07fe76c2da
54affa897f
ac4c5fae3c
2845d87077
dc2c90b8ed
b469157e1f
2697794e58
48cf24edb4
78f90db90b
d0a3b9bc1d
bfdb6dab86
7dd2fd52af
b6bad9331b
6e2122085d
7a847292aa
81f4db0afa
a7dc2e6c8b
0a010b5088
1e335e2cf2
42f4c0f14e
d3c12abe25
b0ba05b4f4
e206506489
013cb28663
496914cb39
c032e78888
5e4deff6fd
6daa91c465
32ce6423eb
b0ea450df5
8bd10df423
dcbb543be2
be0580e1a9
1355178115
b78c3485b9
f503efc6ed
1bfbd3980e
3e722ea1bc
d844026433
9ffc19d5ef
3e23f6e879
c688c84393
@@ -16,7 +16,6 @@ go_library(
"//api/server/structs:go_default_library",
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//network/forks:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
@@ -9,7 +9,6 @@ import (
"net/url"
"path"
"regexp"
"sort"
"strconv"

"github.com/OffchainLabs/prysm/v6/api/client"

@@ -17,7 +16,6 @@ import (
"github.com/OffchainLabs/prysm/v6/api/server/structs"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/network/forks"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
@@ -137,24 +135,6 @@ func (c *Client) GetFork(ctx context.Context, stateId StateOrBlockId) (*ethpb.Fo
return fr.ToConsensus()
}

// GetForkSchedule retrieve all forks, past present and future, of which this node is aware.
func (c *Client) GetForkSchedule(ctx context.Context) (forks.OrderedSchedule, error) {
body, err := c.Get(ctx, getForkSchedulePath)
if err != nil {
return nil, errors.Wrap(err, "error requesting fork schedule")
}
fsr := &forkScheduleResponse{}
err = json.Unmarshal(body, fsr)
if err != nil {
return nil, err
}
ofs, err := fsr.OrderedForkSchedule()
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("problem unmarshaling %s response", getForkSchedulePath))
}
return ofs, nil
}

// GetConfigSpec retrieve the current configs of the network used by the beacon node.
func (c *Client) GetConfigSpec(ctx context.Context) (*structs.GetSpecResponse, error) {
body, err := c.Get(ctx, getConfigSpecPath)

@@ -334,31 +314,3 @@ func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*structs.BLSToEx
}
return poolResponse, nil
}

type forkScheduleResponse struct {
Data []structs.Fork
}

func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, error) {
ofs := make(forks.OrderedSchedule, 0)
for _, d := range fsr.Data {
epoch, err := strconv.ParseUint(d.Epoch, 10, 64)
if err != nil {
return nil, errors.Wrapf(err, "error parsing epoch %s", d.Epoch)
}
vSlice, err := hexutil.Decode(d.CurrentVersion)
if err != nil {
return nil, err
}
if len(vSlice) != 4 {
return nil, fmt.Errorf("got %d byte version, expected 4 bytes. version hex=%s", len(vSlice), d.CurrentVersion)
}
version := bytesutil.ToBytes4(vSlice)
ofs = append(ofs, forks.ForkScheduleEntry{
Version: version,
Epoch: primitives.Epoch(epoch),
})
}
sort.Sort(ofs)
return ofs, nil
}
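The hunks above delete the client-side fork-schedule parser. For orientation, here is a minimal standalone sketch of the same parsing steps (decimal epoch string, 4-byte hex version, sort by epoch). The JSON field names and the sample values are assumptions for illustration; they are not taken from this diff.

```go
package main

import (
	"encoding/hex"
	"encoding/json"
	"fmt"
	"sort"
	"strconv"
	"strings"
)

// fork mirrors the two fields the removed parser actually used.
type fork struct {
	Epoch          string `json:"epoch"`
	CurrentVersion string `json:"current_version"`
}

type forkScheduleResponse struct {
	Data []fork `json:"data"`
}

type entry struct {
	Version [4]byte
	Epoch   uint64
}

func main() {
	// Illustrative response body; real values come from the beacon node.
	body := `{"data":[
		{"epoch":"74240","current_version":"0x01000000"},
		{"epoch":"0","current_version":"0x00000000"}]}`

	fsr := &forkScheduleResponse{}
	if err := json.Unmarshal([]byte(body), fsr); err != nil {
		panic(err)
	}

	schedule := make([]entry, 0, len(fsr.Data))
	for _, d := range fsr.Data {
		epoch, err := strconv.ParseUint(d.Epoch, 10, 64)
		if err != nil {
			panic(err)
		}
		raw, err := hex.DecodeString(strings.TrimPrefix(d.CurrentVersion, "0x"))
		if err != nil || len(raw) != 4 {
			panic(fmt.Sprintf("expected 4-byte version, got %q", d.CurrentVersion))
		}
		var v [4]byte
		copy(v[:], raw)
		schedule = append(schedule, entry{Version: v, Epoch: epoch})
	}
	// Order by activation epoch, as the removed OrderedForkSchedule sort did.
	sort.Slice(schedule, func(i, j int) bool { return schedule[i].Epoch < schedule[j].Epoch })
	fmt.Println(schedule)
}
```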
@@ -1,7 +1,6 @@
package health

import (
"context"
"sync"
"testing"

@@ -2,7 +2,6 @@ package builder

import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"

@@ -1,7 +1,6 @@
package event

import (
"context"
"fmt"
"net/http"
"net/http/httptest"

@@ -1,7 +1,6 @@
package grpc

import (
"context"
"testing"

"github.com/OffchainLabs/prysm/v6/testing/assert"

@@ -1,7 +1,6 @@
package httprest

import (
"context"
"flag"
"fmt"
"net"

@@ -1,7 +1,6 @@
package blockchain

import (
"context"
"testing"

testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
@@ -12,16 +12,12 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/config/features"
"github.com/OffchainLabs/prysm/v6/config/params"
consensusblocks "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
payloadattribute "github.com/OffchainLabs/prysm/v6/consensus-types/payload-attribute"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
@@ -36,152 +32,7 @@ var defaultLatestValidHash = bytesutil.PadTo([]byte{0xff}, 32)
// 1. Re-organizes the execution payload chain and corresponding state to make head_block_hash the head.
// 2. Applies finality to the execution state: it irreversibly persists the chain of all execution payloads and corresponding state, up to and including finalized_block_hash.
func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*enginev1.PayloadIDBytes, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.notifyForkchoiceUpdate")
defer span.End()

if arg.headBlock == nil || arg.headBlock.IsNil() {
log.Error("Head block is nil")
return nil, nil
}
headBlk := arg.headBlock.Block()
if headBlk == nil || headBlk.IsNil() || headBlk.Body().IsNil() {
log.Error("Head block is nil")
return nil, nil
}
// Must not call fork choice updated until the transition conditions are met on the Pow network.
isExecutionBlk, err := blocks.IsExecutionBlock(headBlk.Body())
if err != nil {
log.WithError(err).Error("Could not determine if head block is execution block")
return nil, nil
}
if !isExecutionBlk {
return nil, nil
}
headPayload, err := headBlk.Body().Execution()
if err != nil {
log.WithError(err).Error("Could not get execution payload for head block")
return nil, nil
}
finalizedHash := s.cfg.ForkChoiceStore.FinalizedPayloadBlockHash()
justifiedHash := s.cfg.ForkChoiceStore.UnrealizedJustifiedPayloadBlockHash()
fcs := &enginev1.ForkchoiceState{
HeadBlockHash: headPayload.BlockHash(),
SafeBlockHash: justifiedHash[:],
FinalizedBlockHash: finalizedHash[:],
}
if len(fcs.HeadBlockHash) != 32 || [32]byte(fcs.HeadBlockHash) == [32]byte{} {
// check if we are sending FCU at genesis
hash, err := s.hashForGenesisBlock(ctx, arg.headRoot)
if errors.Is(err, errNotGenesisRoot) {
log.Error("Sending nil head block hash to execution engine")
return nil, nil
}
if err != nil {
return nil, errors.Wrap(err, "could not get head block hash")
}
fcs.HeadBlockHash = hash
}
if arg.attributes == nil {
arg.attributes = payloadattribute.EmptyWithVersion(headBlk.Version())
}
payloadID, lastValidHash, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, arg.attributes)
if err != nil {
switch {
case errors.Is(err, execution.ErrAcceptedSyncingPayloadStatus):
forkchoiceUpdatedOptimisticNodeCount.Inc()
log.WithFields(logrus.Fields{
"headSlot": headBlk.Slot(),
"headPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash())),
"finalizedPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash[:])),
}).Info("Called fork choice updated with optimistic block")
return payloadID, nil
case errors.Is(err, execution.ErrInvalidPayloadStatus):
forkchoiceUpdatedInvalidNodeCount.Inc()
headRoot := arg.headRoot
if len(lastValidHash) == 0 {
lastValidHash = defaultLatestValidHash
}
invalidRoots, err := s.cfg.ForkChoiceStore.SetOptimisticToInvalid(ctx, headRoot, headBlk.ParentRoot(), bytesutil.ToBytes32(lastValidHash))
if err != nil {
log.WithError(err).Error("Could not set head root to invalid")
return nil, nil
}
if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
log.WithError(err).Error("Could not remove invalid block and state")
return nil, nil
}

r, err := s.cfg.ForkChoiceStore.Head(ctx)
if err != nil {
log.WithFields(logrus.Fields{
"slot": headBlk.Slot(),
"blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(headRoot[:])),
"invalidChildrenCount": len(invalidRoots),
}).Warn("Pruned invalid blocks, could not update head root")
return nil, invalidBlock{error: ErrInvalidPayload, root: arg.headRoot, invalidAncestorRoots: invalidRoots}
}
b, err := s.getBlock(ctx, r)
if err != nil {
log.WithError(err).Error("Could not get head block")
return nil, nil
}
st, err := s.cfg.StateGen.StateByRoot(ctx, r)
if err != nil {
log.WithError(err).Error("Could not get head state")
return nil, nil
}
pid, err := s.notifyForkchoiceUpdate(ctx, &fcuConfig{
headState: st,
headRoot: r,
headBlock: b,
attributes: arg.attributes,
})
if err != nil {
return nil, err // Returning err because it's recursive here.
}

if err := s.saveHead(ctx, r, b, st); err != nil {
log.WithError(err).Error("could not save head after pruning invalid blocks")
}

log.WithFields(logrus.Fields{
"slot": headBlk.Slot(),
"blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(headRoot[:])),
"invalidChildrenCount": len(invalidRoots),
"newHeadRoot": fmt.Sprintf("%#x", bytesutil.Trunc(r[:])),
}).Warn("Pruned invalid blocks")
return pid, invalidBlock{error: ErrInvalidPayload, root: arg.headRoot, invalidAncestorRoots: invalidRoots}
default:
log.WithError(err).Error(ErrUndefinedExecutionEngineError)
return nil, nil
}
}
forkchoiceUpdatedValidNodeCount.Inc()
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, arg.headRoot); err != nil {
log.WithError(err).Error("Could not set head root to valid")
return nil, nil
}
// If the forkchoice update call has an attribute, update the payload ID cache.
hasAttr := arg.attributes != nil && !arg.attributes.IsEmpty()
nextSlot := s.CurrentSlot() + 1
if hasAttr && payloadID != nil {
var pId [8]byte
copy(pId[:], payloadID[:])
log.WithFields(logrus.Fields{
"blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(arg.headRoot[:])),
"headSlot": headBlk.Slot(),
"nextSlot": nextSlot,
"payloadID": fmt.Sprintf("%#x", bytesutil.Trunc(payloadID[:])),
}).Info("Forkchoice updated with payload attributes for proposal")
s.cfg.PayloadIDCache.Set(nextSlot, arg.headRoot, pId)
} else if hasAttr && payloadID == nil && !features.Get().PrepareAllPayloads {
log.WithFields(logrus.Fields{
"blockHash": fmt.Sprintf("%#x", headPayload.BlockHash()),
"slot": headBlk.Slot(),
"nextSlot": nextSlot,
}).Error("Received nil payload ID on VALID engine response")
}
return payloadID, nil
return nil, nil
}

func (s *Service) firePayloadAttributesEvent(f event.SubscriberSender, block interfaces.ReadOnlySignedBeaconBlock, root [32]byte, nextSlot primitives.Slot) {
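The removed notifyForkchoiceUpdate body forwarded a three-hash forkchoice state (head, safe/unrealized-justified, finalized payload hashes) to the execution engine and special-cased an unset head hash before the merge/genesis payload is known. A minimal sketch of that state shape, with placeholder types and values rather than Prysm's actual API:

```go
package main

import "fmt"

// forkchoiceState mirrors the three hashes the removed code forwarded to the
// execution engine via engine_forkchoiceUpdated.
type forkchoiceState struct {
	HeadBlockHash      [32]byte // hash of the current head execution payload
	SafeBlockHash      [32]byte // unrealized-justified checkpoint payload hash
	FinalizedBlockHash [32]byte // finalized checkpoint payload hash
}

func main() {
	var head, justified, finalized [32]byte
	head[0], justified[0], finalized[0] = 0xaa, 0xbb, 0xcc // placeholder hashes

	fcs := forkchoiceState{HeadBlockHash: head, SafeBlockHash: justified, FinalizedBlockHash: finalized}

	// The removed code special-cased an all-zero head hash (pre-merge / genesis)
	// and resolved a genesis payload hash before calling the engine.
	if fcs.HeadBlockHash == [32]byte{} {
		fmt.Println("head hash unset: resolve genesis payload hash before calling the engine")
		return
	}
	fmt.Printf("forkchoiceUpdated(head=%#x, safe=%#x, finalized=%#x)\n",
		fcs.HeadBlockHash[:4], fcs.SafeBlockHash[:4], fcs.FinalizedBlockHash[:4])
}
```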
@@ -219,76 +70,7 @@ func (s *Service) getPayloadHash(ctx context.Context, root []byte) ([32]byte, er
// It returns true if the EL has returned VALID for the block
func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
preStateHeader interfaces.ExecutionData, blk interfaces.ReadOnlySignedBeaconBlock) (bool, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewPayload")
defer span.End()

// Execution payload is only supported in Bellatrix and beyond. Pre
// merge blocks are never optimistic
if blk == nil {
return false, errors.New("signed beacon block can't be nil")
}
if preStateVersion < version.Bellatrix {
return true, nil
}
if err := consensusblocks.BeaconBlockIsNil(blk); err != nil {
return false, err
}
body := blk.Block().Body()
enabled, err := blocks.IsExecutionEnabledUsingHeader(preStateHeader, body)
if err != nil {
return false, errors.Wrap(invalidBlock{error: err}, "could not determine if execution is enabled")
}
if !enabled {
return true, nil
}
payload, err := body.Execution()
if err != nil {
return false, errors.Wrap(invalidBlock{error: err}, "could not get execution payload")
}

var lastValidHash []byte
var parentRoot *common.Hash
var versionedHashes []common.Hash
var requests *enginev1.ExecutionRequests
if blk.Version() >= version.Deneb {
versionedHashes, err = kzgCommitmentsToVersionedHashes(blk.Block().Body())
if err != nil {
return false, errors.Wrap(err, "could not get versioned hashes to feed the engine")
}
prh := common.Hash(blk.Block().ParentRoot())
parentRoot = &prh
}
if blk.Version() >= version.Electra {
requests, err = blk.Block().Body().ExecutionRequests()
if err != nil {
return false, errors.Wrap(err, "could not get execution requests")
}
if requests == nil {
return false, errors.New("nil execution requests")
}
}
lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, parentRoot, requests)

switch {
case err == nil:
newPayloadValidNodeCount.Inc()
return true, nil
case errors.Is(err, execution.ErrAcceptedSyncingPayloadStatus):
newPayloadOptimisticNodeCount.Inc()
log.WithFields(logrus.Fields{
"slot": blk.Block().Slot(),
"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
}).Info("Called new payload with optimistic block")
return false, nil
case errors.Is(err, execution.ErrInvalidPayloadStatus):
lvh := bytesutil.ToBytes32(lastValidHash)
return false, invalidBlock{
error: ErrInvalidPayload,
lastValidHash: lvh,
}
default:
return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
}
return true, nil
}

// reportInvalidBlock deals with the event that an invalid block was detected by the execution layer
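The removed notifyNewPayload mapped the engine's newPayload outcome onto a (validated, error) pair: VALID imports normally, ACCEPTED/SYNCING imports optimistically, INVALID rejects the block, and anything else is an undefined engine error. A hedged, self-contained sketch of that decision table; the sentinel errors here are stand-ins, not the real execution package errors:

```go
package main

import (
	"errors"
	"fmt"
)

// Sentinel errors standing in for the execution package's status errors.
var (
	errAcceptedSyncing = errors.New("ACCEPTED/SYNCING payload status")
	errInvalidPayload  = errors.New("INVALID payload status")
)

// classifyNewPayload mirrors the decision table of the removed notifyNewPayload:
// VALID -> (true, nil), ACCEPTED/SYNCING -> (false, nil) i.e. optimistic import,
// INVALID -> (false, error), anything else -> undefined engine error.
func classifyNewPayload(engineErr error) (validated bool, err error) {
	switch {
	case engineErr == nil:
		return true, nil
	case errors.Is(engineErr, errAcceptedSyncing):
		return false, nil // import optimistically; data not yet validated
	case errors.Is(engineErr, errInvalidPayload):
		return false, fmt.Errorf("invalid payload: %w", engineErr)
	default:
		return false, fmt.Errorf("undefined execution engine error: %w", engineErr)
	}
}

func main() {
	ok, err := classifyNewPayload(errAcceptedSyncing)
	fmt.Println(ok, err) // false <nil>: block is imported optimistically
}
```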
@@ -1,7 +1,6 @@
package blockchain

import (
"context"
"testing"
"time"

@@ -1,7 +1,6 @@
package blockchain

import (
"context"
"testing"

"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"

@@ -1,7 +1,6 @@
package blockchain

import (
"context"
"testing"

testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"

@@ -26,9 +26,6 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
if len(b.Body().Attestations()) > 0 {
log = log.WithField("attestations", len(b.Body().Attestations()))
}
if len(b.Body().Deposits()) > 0 {
log = log.WithField("deposits", len(b.Body().Deposits()))
}
if len(b.Body().AttesterSlashings()) > 0 {
log = log.WithField("attesterSlashings", len(b.Body().AttesterSlashings()))
}

@@ -111,7 +108,6 @@ func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
"dataAvailabilityWaitedTime": daWaitedTime,
"deposits": len(block.Body().Deposits()),
}
log.WithFields(lf).Debug("Synced new block")
} else {

@@ -159,7 +155,9 @@ func logPayload(block interfaces.ReadOnlyBeaconBlock) error {
if err != nil {
return errors.Wrap(err, "could not get BLSToExecutionChanges")
}
fields["blsToExecutionChanges"] = len(changes)
if len(changes) > 0 {
fields["blsToExecutionChanges"] = len(changes)
}
}
log.WithFields(fields).Debug("Synced new payload")
return nil

@@ -53,7 +53,7 @@ func Test_logStateTransitionData(t *testing.T) {
require.NoError(t, err)
return wb
},
want: "\"Finished applying state transition\" attestations=1 deposits=1 prefix=blockchain slot=0",
want: "\"Finished applying state transition\" attestations=1 prefix=blockchain slot=0",
},
{name: "has attester slashing",
b: func() interfaces.ReadOnlyBeaconBlock {

@@ -93,7 +93,7 @@ func Test_logStateTransitionData(t *testing.T) {
require.NoError(t, err)
return wb
},
want: "\"Finished applying state transition\" attestations=1 attesterSlashings=1 deposits=1 prefix=blockchain proposerSlashings=1 slot=0 voluntaryExits=1",
want: "\"Finished applying state transition\" attestations=1 attesterSlashings=1 prefix=blockchain proposerSlashings=1 slot=0 voluntaryExits=1",
},
{name: "has payload",
b: func() interfaces.ReadOnlyBeaconBlock { return wrappedPayloadBlk },

@@ -1,7 +1,6 @@
package blockchain

import (
"context"
"testing"

eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
@@ -258,3 +258,12 @@ func WithLightClientStore(lcs *lightclient.Store) Option {
return nil
}
}

// WithStartWaitingDataColumnSidecars sets a channel that the `areDataColumnsAvailable` function will fill
// in when starting to wait for additional data columns.
func WithStartWaitingDataColumnSidecars(c chan bool) Option {
return func(s *Service) error {
s.startWaitingDataColumnSidecars = c
return nil
}
}

@@ -637,7 +637,11 @@ func missingDataColumnIndices(bs *filesystem.DataColumnStorage, root [fieldparam
// The function will first check the database to see if all sidecars have been persisted. If any
// sidecars are missing, it will then read from the sidecar notifier channel for the given root until the channel is
// closed, the context hits cancellation/timeout, or notifications have been received for all the missing sidecars.
func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signedBlock interfaces.ReadOnlySignedBeaconBlock) error {
func (s *Service) isDataAvailable(
ctx context.Context,
root [fieldparams.RootLength]byte,
signedBlock interfaces.ReadOnlySignedBeaconBlock,
) error {
block := signedBlock.Block()
if block == nil {
return errors.New("invalid nil beacon block")

@@ -657,7 +661,11 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signedBloc
// areDataColumnsAvailable blocks until all data columns committed to in the block are available,
// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
func (s *Service) areDataColumnsAvailable(ctx context.Context, root [fieldparams.RootLength]byte, block interfaces.ReadOnlyBeaconBlock) error {
func (s *Service) areDataColumnsAvailable(
ctx context.Context,
root [fieldparams.RootLength]byte,
block interfaces.ReadOnlyBeaconBlock,
) error {
// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
blockSlot, currentSlot := block.Slot(), s.CurrentSlot()
blockEpoch, currentEpoch := slots.ToEpoch(blockSlot), slots.ToEpoch(currentSlot)

@@ -724,6 +732,10 @@ func (s *Service) areDataColumnsAvailable(ctx context.Context, root [fieldparams
return nil
}

if s.startWaitingDataColumnSidecars != nil {
s.startWaitingDataColumnSidecars <- true
}

// Log for DA checks that cross over into the next slot; helpful for debugging.
nextSlot := slots.BeginsAt(block.Slot()+1, s.genesisTime)
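The new WithStartWaitingDataColumnSidecars option exists so tests can learn exactly when the availability check has begun waiting, and only then save the late sidecars. A generic sketch of that synchronization pattern follows; the names and timings are illustrative, not Prysm's API:

```go
package main

import (
	"fmt"
	"time"
)

// waitForColumns stands in for areDataColumnsAvailable: it signals on started
// (when non-nil) and then blocks until the missing columns arrive or a timeout.
func waitForColumns(started chan<- bool, arrived <-chan struct{}) error {
	if started != nil {
		started <- true // test hook: "I am now waiting"
	}
	select {
	case <-arrived:
		return nil
	case <-time.After(time.Second):
		return fmt.Errorf("columns still missing after timeout")
	}
}

func main() {
	started := make(chan bool)
	arrived := make(chan struct{})

	// The test goroutine waits for the hook before saving the late sidecars,
	// so the save cannot race ahead of the availability check.
	go func() {
		<-started
		time.AfterFunc(10*time.Millisecond, func() { close(arrived) })
	}()

	fmt.Println(waitForColumns(started, arrived)) // <nil>
}
```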
@@ -3332,17 +3332,27 @@ func testIsAvailableSetup(t *testing.T, params testIsAvailableParams) (context.C
|
||||
signedBeaconBlock, err := util.GenerateFullBlockFulu(genesisState, secretKeys, conf, 10 /*block slot*/)
|
||||
require.NoError(t, err)
|
||||
|
||||
root, err := signedBeaconBlock.Block.HashTreeRoot()
|
||||
block := signedBeaconBlock.Block
|
||||
bodyRoot, err := block.Body.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
dataColumnsParams := make([]util.DataColumnParams, 0, len(params.columnsToSave))
|
||||
root, err := block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
dataColumnsParams := make([]util.DataColumnParam, 0, len(params.columnsToSave))
|
||||
for _, i := range params.columnsToSave {
|
||||
dataColumnParam := util.DataColumnParams{ColumnIndex: i}
|
||||
dataColumnParam := util.DataColumnParam{
|
||||
Index: i,
|
||||
Slot: block.Slot,
|
||||
ProposerIndex: block.ProposerIndex,
|
||||
ParentRoot: block.ParentRoot,
|
||||
StateRoot: block.StateRoot,
|
||||
BodyRoot: bodyRoot[:],
|
||||
}
|
||||
dataColumnsParams = append(dataColumnsParams, dataColumnParam)
|
||||
}
|
||||
|
||||
dataColumnParamsByBlockRoot := util.DataColumnsParamsByRoot{root: dataColumnsParams}
|
||||
_, verifiedRODataColumns := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)
|
||||
_, verifiedRODataColumns := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnsParams)
|
||||
|
||||
err = dataColumnStorage.Save(verifiedRODataColumns)
|
||||
require.NoError(t, err)
|
||||
@@ -3402,38 +3412,47 @@ func TestIsDataAvailable(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("Fulu - some initially missing data columns (no reconstruction)", func(t *testing.T) {
|
||||
startWaiting := make(chan bool)
|
||||
|
||||
testParams := testIsAvailableParams{
|
||||
options: []Option{WithCustodyInfo(&peerdas.CustodyInfo{})},
|
||||
options: []Option{WithCustodyInfo(&peerdas.CustodyInfo{}), WithStartWaitingDataColumnSidecars(startWaiting)},
|
||||
columnsToSave: []uint64{1, 17, 19, 75, 102, 117, 119}, // 119 is not needed, 42 and 87 are missing
|
||||
|
||||
blobKzgCommitmentsCount: 3,
|
||||
}
|
||||
|
||||
ctx, _, service, root, signed := testIsAvailableSetup(t, testParams)
|
||||
block := signed.Block()
|
||||
slot := block.Slot()
|
||||
proposerIndex := block.ProposerIndex()
|
||||
parentRoot := block.ParentRoot()
|
||||
stateRoot := block.StateRoot()
|
||||
bodyRoot, err := block.Body().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
var wrongRoot [fieldparams.RootLength]byte
|
||||
copy(wrongRoot[:], root[:])
|
||||
wrongRoot[0]++ // change the root to simulate a wrong root
|
||||
_, verifiedSidecarsWrongRoot := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
[]util.DataColumnParam{
|
||||
{Index: 42, Slot: slot + 1}, // Needed index, but not for this slot.
|
||||
})
|
||||
|
||||
_, verifiedSidecarsWrongRoot := util.CreateTestVerifiedRoDataColumnSidecars(t, util.DataColumnsParamsByRoot{wrongRoot: {
|
||||
{ColumnIndex: 42}, // needed
|
||||
}})
|
||||
_, verifiedSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{
|
||||
{Index: 87, Slot: slot, ProposerIndex: proposerIndex, ParentRoot: parentRoot[:], StateRoot: stateRoot[:], BodyRoot: bodyRoot[:]}, // Needed index
|
||||
{Index: 1, Slot: slot, ProposerIndex: proposerIndex, ParentRoot: parentRoot[:], StateRoot: stateRoot[:], BodyRoot: bodyRoot[:]}, // Not needed index
|
||||
{Index: 42, Slot: slot, ProposerIndex: proposerIndex, ParentRoot: parentRoot[:], StateRoot: stateRoot[:], BodyRoot: bodyRoot[:]}, // Needed index
|
||||
})
|
||||
|
||||
_, verifiedSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, util.DataColumnsParamsByRoot{root: {
|
||||
{ColumnIndex: 87}, // needed
|
||||
{ColumnIndex: 1}, // not needed
|
||||
{ColumnIndex: 42}, // needed
|
||||
}})
|
||||
go func() {
|
||||
<-startWaiting
|
||||
|
||||
time.AfterFunc(10*time.Millisecond, func() {
|
||||
err := service.dataColumnStorage.Save(verifiedSidecarsWrongRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = service.dataColumnStorage.Save(verifiedSidecars)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}()
|
||||
|
||||
err := service.isDataAvailable(ctx, root, signed)
|
||||
err = service.isDataAvailable(ctx, root, signed)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
@@ -3442,6 +3461,9 @@ func TestIsDataAvailable(t *testing.T) {
|
||||
missingColumns = uint64(2)
|
||||
cgc = 128
|
||||
)
|
||||
|
||||
startWaiting := make(chan bool)
|
||||
|
||||
var custodyInfo peerdas.CustodyInfo
|
||||
custodyInfo.TargetGroupCount.SetValidatorsCustodyRequirement(cgc)
|
||||
custodyInfo.ToAdvertiseGroupCount.Set(cgc)
|
||||
@@ -3454,41 +3476,61 @@ func TestIsDataAvailable(t *testing.T) {
|
||||
}
|
||||
|
||||
testParams := testIsAvailableParams{
|
||||
options: []Option{WithCustodyInfo(&custodyInfo)},
|
||||
options: []Option{WithCustodyInfo(&custodyInfo), WithStartWaitingDataColumnSidecars(startWaiting)},
|
||||
columnsToSave: indices,
|
||||
blobKzgCommitmentsCount: 3,
|
||||
}
|
||||
|
||||
ctx, _, service, root, signed := testIsAvailableSetup(t, testParams)
|
||||
block := signed.Block()
|
||||
slot := block.Slot()
|
||||
proposerIndex := block.ProposerIndex()
|
||||
parentRoot := block.ParentRoot()
|
||||
stateRoot := block.StateRoot()
|
||||
bodyRoot, err := block.Body().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
dataColumnParams := make([]util.DataColumnParams, 0, missingColumns)
|
||||
dataColumnParams := make([]util.DataColumnParam, 0, missingColumns)
|
||||
for i := minimumColumnsCountToReconstruct - missingColumns; i < minimumColumnsCountToReconstruct; i++ {
|
||||
dataColumnParam := util.DataColumnParams{ColumnIndex: i}
|
||||
dataColumnParam := util.DataColumnParam{
|
||||
Index: i,
|
||||
Slot: slot,
|
||||
ProposerIndex: proposerIndex,
|
||||
ParentRoot: parentRoot[:],
|
||||
StateRoot: stateRoot[:],
|
||||
BodyRoot: bodyRoot[:],
|
||||
}
|
||||
|
||||
dataColumnParams = append(dataColumnParams, dataColumnParam)
|
||||
}
|
||||
|
||||
_, verifiedSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, util.DataColumnsParamsByRoot{root: dataColumnParams})
|
||||
_, verifiedSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParams)
|
||||
|
||||
go func() {
|
||||
<-startWaiting
|
||||
|
||||
time.AfterFunc(10*time.Millisecond, func() {
|
||||
err := service.dataColumnStorage.Save(verifiedSidecars)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}()
|
||||
|
||||
err := service.isDataAvailable(ctx, root, signed)
|
||||
err = service.isDataAvailable(ctx, root, signed)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("Fulu - some columns are definitively missing", func(t *testing.T) {
|
||||
startWaiting := make(chan bool)
|
||||
|
||||
params := testIsAvailableParams{
|
||||
options: []Option{WithCustodyInfo(&peerdas.CustodyInfo{})},
|
||||
options: []Option{WithCustodyInfo(&peerdas.CustodyInfo{}), WithStartWaitingDataColumnSidecars(startWaiting)},
|
||||
blobKzgCommitmentsCount: 3,
|
||||
}
|
||||
|
||||
ctx, cancel, service, root, signed := testIsAvailableSetup(t, params)
|
||||
|
||||
time.AfterFunc(10*time.Millisecond, func() {
|
||||
go func() {
|
||||
<-startWaiting
|
||||
cancel()
|
||||
})
|
||||
}()
|
||||
|
||||
err := service.isDataAvailable(ctx, root, signed)
|
||||
require.NotNil(t, err)
|
||||
|
||||
@@ -1,7 +1,6 @@
package blockchain

import (
"context"
"testing"
"time"

@@ -1,7 +1,6 @@
package blockchain

import (
"context"
"sync"
"testing"
"time"

@@ -31,6 +31,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
@@ -47,27 +48,28 @@ import (
// Service represents a service that handles the internal
// logic of managing the full PoS beacon chain.
type Service struct {
cfg *config
ctx context.Context
cancel context.CancelFunc
genesisTime time.Time
head *head
headLock sync.RWMutex
originBlockRoot [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
boundaryRoots [][32]byte
checkpointStateCache *cache.CheckpointStateCache
initSyncBlocks map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
initSyncBlocksLock sync.RWMutex
wsVerifier *WeakSubjectivityVerifier
clockSetter startup.ClockSetter
clockWaiter startup.ClockWaiter
syncComplete chan struct{}
blobNotifiers *blobNotifierMap
blockBeingSynced *currentlySyncingBlock
blobStorage *filesystem.BlobStorage
dataColumnStorage *filesystem.DataColumnStorage
slasherEnabled bool
lcStore *lightClient.Store
cfg *config
ctx context.Context
cancel context.CancelFunc
genesisTime time.Time
head *head
headLock sync.RWMutex
originBlockRoot [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
boundaryRoots [][32]byte
checkpointStateCache *cache.CheckpointStateCache
initSyncBlocks map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
initSyncBlocksLock sync.RWMutex
wsVerifier *WeakSubjectivityVerifier
clockSetter startup.ClockSetter
clockWaiter startup.ClockWaiter
syncComplete chan struct{}
blobNotifiers *blobNotifierMap
blockBeingSynced *currentlySyncingBlock
blobStorage *filesystem.BlobStorage
dataColumnStorage *filesystem.DataColumnStorage
slasherEnabled bool
lcStore *lightClient.Store
startWaitingDataColumnSidecars chan bool // for testing purposes only
}
// config options for the service.

@@ -109,22 +111,26 @@ var ErrMissingClockSetter = errors.New("blockchain Service initialized without a
type blobNotifierMap struct {
sync.RWMutex
notifiers map[[32]byte]chan uint64
seenIndex map[[32]byte][]bool
// TODO: Separate blobs from data columns
// seenIndex map[[32]byte][]bool
seenIndex map[[32]byte][fieldparams.NumberOfColumns]bool
}

// notifyIndex notifies a blob by its index for a given root.
// It uses internal maps to keep track of seen indices and notifier channels.
func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitives.Slot) {
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
if idx >= uint64(maxBlobsPerBlock) {
return
}
// TODO: Separate blobs from data columns
// maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
// if idx >= uint64(maxBlobsPerBlock) {
// return
// }

bn.Lock()
seen := bn.seenIndex[root]
if seen == nil {
seen = make([]bool, maxBlobsPerBlock)
}
// TODO: Separate blobs from data columns
// if seen == nil {
// seen = make([]bool, maxBlobsPerBlock)
// }
if seen[idx] {
bn.Unlock()
return

@@ -135,7 +141,9 @@ func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitive
// Retrieve or create the notifier channel for the given root.
c, ok := bn.notifiers[root]
if !ok {
c = make(chan uint64, maxBlobsPerBlock)
// TODO: Separate blobs from data columns
// c = make(chan uint64, maxBlobsPerBlock)
c = make(chan uint64, fieldparams.NumberOfColumns)
bn.notifiers[root] = c
}

@@ -145,12 +153,15 @@ func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitive
}

func (bn *blobNotifierMap) forRoot(root [32]byte, slot primitives.Slot) chan uint64 {
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
// TODO: Separate blobs from data columns
// maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
bn.Lock()
defer bn.Unlock()
c, ok := bn.notifiers[root]
if !ok {
c = make(chan uint64, maxBlobsPerBlock)
// TODO: Separate blobs from data columns
// c = make(chan uint64, maxBlobsPerBlock)
c = make(chan uint64, fieldparams.NumberOfColumns)
bn.notifiers[root] = c
}
return c

@@ -176,7 +187,9 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
ctx, cancel := context.WithCancel(ctx)
bn := &blobNotifierMap{
notifiers: make(map[[32]byte]chan uint64),
seenIndex: make(map[[32]byte][]bool),
// TODO: Separate blobs from data columns
// seenIndex: make(map[[32]byte][]bool),
seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
}
srv := &Service{
ctx: ctx,
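This change sizes the blobNotifierMap's seen-bitmap and notifier channels to the data-column count rather than the per-slot blob maximum. A pared-down, single-goroutine sketch of the notify/wait pattern; the real map guards every access with its embedded lock, and the 128-column constant is an assumption standing in for fieldparams.NumberOfColumns:

```go
package main

import "fmt"

const numberOfColumns = 128 // assumption mirroring fieldparams.NumberOfColumns

// notifier is a pared-down stand-in for blobNotifierMap: one buffered channel
// per block root, plus a seen-bitmap so an index is only delivered once.
type notifier struct {
	seen map[[32]byte][numberOfColumns]bool
	ch   map[[32]byte]chan uint64
}

func newNotifier() *notifier {
	return &notifier{
		seen: make(map[[32]byte][numberOfColumns]bool),
		ch:   make(map[[32]byte]chan uint64),
	}
}

func (n *notifier) forRoot(root [32]byte) chan uint64 {
	c, ok := n.ch[root]
	if !ok {
		c = make(chan uint64, numberOfColumns) // buffer sized for every column
		n.ch[root] = c
	}
	return c
}

func (n *notifier) notifyIndex(root [32]byte, idx uint64) {
	s := n.seen[root]
	if s[idx] {
		return // already delivered once
	}
	s[idx] = true
	n.seen[root] = s
	n.forRoot(root) <- idx
}

func main() {
	n := newNotifier()
	var root [32]byte
	n.notifyIndex(root, 7)
	n.notifyIndex(root, 7) // duplicate, dropped
	n.notifyIndex(root, 42)
	c := n.forRoot(root)
	fmt.Println(<-c, <-c) // 7 42
}
```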
@@ -1,7 +1,6 @@
package blockchain

import (
"context"
"io"
"testing"

@@ -429,7 +429,7 @@ func TestProcessChainStartTime_ReceivedFeed(t *testing.T) {

func BenchmarkHasBlockDB(b *testing.B) {
beaconDB := testDB.SetupDB(b)
ctx := t.Context()
ctx := b.Context()
s := &Service{
cfg: &config{BeaconDB: beaconDB},
}

@@ -447,7 +447,7 @@ func BenchmarkHasBlockDB(b *testing.B) {
}

func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
ctx := t.Context()
ctx := b.Context()
beaconDB := testDB.SetupDB(b)
s := &Service{
cfg: &config{ForkChoiceStore: doublylinkedtree.New(), BeaconDB: beaconDB},

@@ -587,7 +587,9 @@ func (s *MockClockSetter) SetClock(g *startup.Clock) error {
func TestNotifyIndex(t *testing.T) {
// Initialize a blobNotifierMap
bn := &blobNotifierMap{
seenIndex: make(map[[32]byte][]bool),
// TODO: Separate blobs from data columns
// seenIndex: make(map[[32]byte][]bool),
seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
notifiers: make(map[[32]byte]chan uint64),
}

@@ -1,7 +1,6 @@
package blockchain

import (
"context"
"testing"

testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"

@@ -1,7 +1,6 @@
package builder

import (
"context"
"testing"
"time"
1
beacon-chain/cache/committee_fuzz_test.go
vendored
1
beacon-chain/cache/committee_fuzz_test.go
vendored
@@ -3,7 +3,6 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
|
||||
@@ -2,7 +2,6 @@ package depositsnapshot
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package depositsnapshot
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package depositsnapshot
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
|
||||
1
beacon-chain/cache/registration_test.go
vendored
1
beacon-chain/cache/registration_test.go
vendored
@@ -1,7 +1,6 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
|
||||
1
beacon-chain/cache/skip_slot_cache_test.go
vendored
1
beacon-chain/cache/skip_slot_cache_test.go
vendored
@@ -1,7 +1,6 @@
|
||||
package cache_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package altair_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package altair_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"testing"
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package altair_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/altair"
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package altair_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/altair"
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package altair
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"testing"
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package altair_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"testing"
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package altair_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package altair_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/altair"
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package altair_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/altair"
|
||||
|
||||
@@ -41,7 +41,6 @@ go_library(
|
||||
"//encoding/ssz:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//monitoring/tracing/trace:go_default_library",
|
||||
"//network/forks:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation:go_default_library",
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package blocks_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package blocks
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
v "github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package blocks_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package blocks_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package blocks_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package blocks_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package blocks_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package blocks_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package blocks_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"testing"
|
||||
|
||||
|
||||
@@ -11,7 +11,6 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/crypto/bls"
"github.com/OffchainLabs/prysm/v6/network/forks"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1/attestation"
"github.com/OffchainLabs/prysm/v6/time/slots"

@@ -96,12 +95,30 @@ func VerifyBlockHeaderSignature(beaconState state.BeaconState, header *ethpb.Sig
return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
}

func VerifyBlockHeaderSignatureUsingCurrentFork(beaconState state.BeaconState, header *ethpb.SignedBeaconBlockHeader) error {
currentEpoch := slots.ToEpoch(header.Header.Slot)
fork, err := params.Fork(currentEpoch)
if err != nil {
return err
}
domain, err := signing.Domain(fork, currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
if err != nil {
return err
}
proposer, err := beaconState.ValidatorAtIndex(header.Header.ProposerIndex)
if err != nil {
return err
}
proposerPubKey := proposer.PublicKey
return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
}

// VerifyBlockSignatureUsingCurrentFork verifies the proposer signature of a beacon block. This differs
// from the above method by not using fork data from the state and instead retrieving it
// via the respective epoch.
func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState, blk interfaces.ReadOnlySignedBeaconBlock, blkRoot [32]byte) error {
currentEpoch := slots.ToEpoch(blk.Block().Slot())
fork, err := forks.Fork(currentEpoch)
fork, err := params.Fork(currentEpoch)
if err != nil {
return err
}
@@ -1,7 +1,6 @@
|
||||
package electra_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/electra"
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package electra_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/electra"
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package electra_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/electra"
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package electra_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/electra"
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package epoch_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"testing"
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package precompute_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/epoch/precompute"
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package precompute_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/altair"
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package precompute_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/epoch/precompute"
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package precompute
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
|
||||
@@ -38,11 +38,14 @@ const (
// SingleAttReceived is sent after a single attestation object is received from gossip or rpc
SingleAttReceived = 9

// DataColumnSidecarReceived is sent after a data column sidecar is received from gossip or rpc.
DataColumnSidecarReceived = 10

// BlockGossipReceived is sent after a block has been received from gossip or API that passes validation rules.
BlockGossipReceived = 10
BlockGossipReceived = 11

// DataColumnReceived is sent after a data column has been seen after gossip validation rules.
DataColumnReceived = 11
DataColumnReceived = 12
)

// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.

@@ -94,6 +97,11 @@ type SingleAttReceivedData struct {
Attestation ethpb.Att
}

// DataColumnSidecarReceivedData is the data sent with DataColumnSidecarReceived events.
type DataColumnSidecarReceivedData struct {
DataColumn *blocks.VerifiedRODataColumn
}

// BlockGossipReceivedData is the data sent with BlockGossipReceived events.
type BlockGossipReceivedData struct {
// SignedBlock is the block that was received.

@@ -1,7 +1,6 @@
package fulu_test

import (
"context"
"testing"

"github.com/OffchainLabs/prysm/v6/beacon-chain/core/fulu"

@@ -1,7 +1,6 @@
package helpers_test

import (
"context"
"strconv"
"testing"
"time"
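Because DataColumnSidecarReceived is inserted at 10, the later constants shift to 11 and 12, so feed consumers that switch on the numeric event type must pick up the new values. A small, self-contained sketch of the usual consume pattern; the event and payload types here are illustrative stand-ins, not the real feed package:

```go
package main

import "fmt"

// Event type constants as renumbered in this diff (illustrative copy).
const (
	DataColumnSidecarReceived = 10
	BlockGossipReceived       = 11
	DataColumnReceived        = 12
)

type event struct {
	Type int
	Data any
}

type dataColumnSidecarReceivedData struct{ ColumnIndex uint64 }

// handle shows the usual feed-consumer pattern: switch on Type, then assert Data.
func handle(ev event) {
	switch ev.Type {
	case DataColumnSidecarReceived:
		d, ok := ev.Data.(*dataColumnSidecarReceivedData)
		if !ok {
			fmt.Println("unexpected payload for DataColumnSidecarReceived")
			return
		}
		fmt.Println("data column sidecar received, index", d.ColumnIndex)
	case BlockGossipReceived:
		fmt.Println("block gossip received")
	default:
		fmt.Println("ignoring event type", ev.Type)
	}
}

func main() {
	handle(event{Type: DataColumnSidecarReceived, Data: &dataColumnSidecarReceivedData{ColumnIndex: 3}})
}
```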
@@ -669,11 +669,11 @@ func ComputeCommittee(
// InitializeProposerLookahead computes the list of the proposer indices for the next MIN_SEED_LOOKAHEAD + 1 epochs.
func InitializeProposerLookahead(ctx context.Context, state state.ReadOnlyBeaconState, epoch primitives.Epoch) ([]uint64, error) {
lookAhead := make([]uint64, 0, uint64(params.BeaconConfig().MinSeedLookahead+1)*uint64(params.BeaconConfig().SlotsPerEpoch))
indices, err := ActiveValidatorIndices(ctx, state, epoch)
if err != nil {
return nil, errors.Wrap(err, "could not get active indices")
}
for i := range params.BeaconConfig().MinSeedLookahead + 1 {
indices, err := ActiveValidatorIndices(ctx, state, epoch+i)
if err != nil {
return nil, errors.Wrap(err, "could not get active indices")
}
proposerIndices, err := PrecomputeProposerIndices(state, indices, epoch+i)
if err != nil {
return nil, errors.Wrap(err, "could not compute proposer indices")
@@ -1,7 +1,6 @@
package helpers_test

import (
"context"
"fmt"
"strconv"
"testing"

@@ -502,7 +501,7 @@ func BenchmarkComputeCommittee300000_WithPreCache(b *testing.B) {
require.NoError(b, err)

epoch := time.CurrentEpoch(state)
indices, err := helpers.ActiveValidatorIndices(t.Context(), state, epoch)
indices, err := helpers.ActiveValidatorIndices(b.Context(), state, epoch)
require.NoError(b, err)
seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
require.NoError(b, err)

@@ -536,7 +535,7 @@ func BenchmarkComputeCommittee3000000_WithPreCache(b *testing.B) {
require.NoError(b, err)

epoch := time.CurrentEpoch(state)
indices, err := helpers.ActiveValidatorIndices(t.Context(), state, epoch)
indices, err := helpers.ActiveValidatorIndices(b.Context(), state, epoch)
require.NoError(b, err)
seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
require.NoError(b, err)

@@ -570,7 +569,7 @@ func BenchmarkComputeCommittee128000_WithOutPreCache(b *testing.B) {
require.NoError(b, err)

epoch := time.CurrentEpoch(state)
indices, err := helpers.ActiveValidatorIndices(t.Context(), state, epoch)
indices, err := helpers.ActiveValidatorIndices(b.Context(), state, epoch)
require.NoError(b, err)
seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
require.NoError(b, err)

@@ -605,7 +604,7 @@ func BenchmarkComputeCommittee1000000_WithOutCache(b *testing.B) {
require.NoError(b, err)

epoch := time.CurrentEpoch(state)
indices, err := helpers.ActiveValidatorIndices(t.Context(), state, epoch)
indices, err := helpers.ActiveValidatorIndices(b.Context(), state, epoch)
require.NoError(b, err)
seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
require.NoError(b, err)

@@ -640,7 +639,7 @@ func BenchmarkComputeCommittee4000000_WithOutCache(b *testing.B) {
require.NoError(b, err)

epoch := time.CurrentEpoch(state)
indices, err := helpers.ActiveValidatorIndices(t.Context(), state, epoch)
indices, err := helpers.ActiveValidatorIndices(b.Context(), state, epoch)
require.NoError(b, err)
seed, err := helpers.Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
require.NoError(b, err)
@@ -78,6 +78,7 @@ func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {

func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
helpers.ClearCache()
params.SetupTestConfigCleanup(t)

validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{

@@ -264,6 +265,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
}

func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
params.SetupTestConfigCleanup(t)
helpers.ClearCache()

validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)

@@ -1,7 +1,6 @@
package helpers_test

import (
"context"
"errors"
"testing"

@@ -1,7 +1,6 @@
package helpers_test

import (
"context"
"fmt"
"testing"
@@ -7,6 +7,7 @@ go_library(
|
||||
"info.go",
|
||||
"metrics.go",
|
||||
"p2p_interface.go",
|
||||
"peer_sampling.go",
|
||||
"reconstruction.go",
|
||||
"validator.go",
|
||||
"verification.go",
|
||||
@@ -44,6 +45,7 @@ go_test(
|
||||
"das_core_test.go",
|
||||
"info_test.go",
|
||||
"p2p_interface_test.go",
|
||||
"peer_sampling_test.go",
|
||||
"reconstruction_test.go",
|
||||
"utils_test.go",
|
||||
"validator_test.go",
|
||||
|
||||
@@ -10,10 +10,7 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
CustodyGroupCountEnrKey = "cgc"
|
||||
kzgPosition = 11 // The index of the KZG commitment list in the Body
|
||||
)
|
||||
const kzgPosition = 11 // The index of the KZG commitment list in the Body
|
||||
|
||||
var (
|
||||
ErrIndexTooLarge = errors.New("column index is larger than the specified columns count")
|
||||
@@ -30,7 +27,7 @@ var (
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/p2p-interface.md#custody-group-count
|
||||
type Cgc uint64
|
||||
|
||||
func (Cgc) ENRKey() string { return CustodyGroupCountEnrKey }
|
||||
func (Cgc) ENRKey() string { return params.BeaconNetworkConfig().CustodyGroupCountKey }
|
||||
|
||||
// VerifyDataColumnSidecar verifies if the data column sidecar is valid.
|
||||
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/p2p-interface.md#verify_data_column_sidecar
|
||||
|
||||
56
beacon-chain/core/peerdas/peer_sampling.go
Normal file
56
beacon-chain/core/peerdas/peer_sampling.go
Normal file
@@ -0,0 +1,56 @@
package peerdas

import (
    "math/big"

    "github.com/OffchainLabs/prysm/v6/config/params"
)

// ExtendedSampleCount computes, for a given number of samples per slot and allowed failures, the
// number of samples we should actually query from peers.
// https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.5/specs/fulu/peer-sampling.md#get_extended_sample_count
func ExtendedSampleCount(samplesPerSlot, allowedFailures uint64) uint64 {
    // Retrieve the columns count
    columnsCount := params.BeaconConfig().NumberOfColumns

    // If half of the columns are missing, we are able to reconstruct the data.
    // If half of the columns + 1 are missing, we are not able to reconstruct the data.
    // This is the smallest worst case.
    worstCaseMissing := columnsCount/2 + 1

    // Compute the false positive threshold.
    falsePositiveThreshold := HypergeomCDF(0, columnsCount, worstCaseMissing, samplesPerSlot)

    var sampleCount uint64

    // Finally, compute the extended sample count.
    for sampleCount = samplesPerSlot; sampleCount < columnsCount+1; sampleCount++ {
        if HypergeomCDF(allowedFailures, columnsCount, worstCaseMissing, sampleCount) <= falsePositiveThreshold {
            break
        }
    }

    return sampleCount
}

// HypergeomCDF computes the hypergeometric cumulative distribution function.
// https://en.wikipedia.org/wiki/Hypergeometric_distribution
func HypergeomCDF(k, M, n, N uint64) float64 {
    denominatorInt := new(big.Int).Binomial(int64(M), int64(N)) // lint:ignore uintcast
    denominator := new(big.Float).SetInt(denominatorInt)

    rBig := big.NewFloat(0)

    for i := uint64(0); i < k+1; i++ {
        a := new(big.Int).Binomial(int64(n), int64(i)) // lint:ignore uintcast
        b := new(big.Int).Binomial(int64(M-n), int64(N-i))
        numeratorInt := new(big.Int).Mul(a, b)
        numerator := new(big.Float).SetInt(numeratorInt)
        item := new(big.Float).Quo(numerator, denominator)
        rBig.Add(rBig, item)
    }

    r, _ := rBig.Float64()

    return r
}
|
||||
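For readers skimming the new file: HypergeomCDF evaluates the cumulative hypergeometric probability P(X <= k) = sum over i from 0 to k of C(n, i) * C(M-n, N-i) / C(M, N), and ExtendedSampleCount grows the sample size until missing more than allowedFailures columns is no more likely than the zero-failure baseline. A minimal usage sketch, assuming the default beacon config (128 columns) is loaded; the concrete arguments below are illustrative:

package main

import (
    "fmt"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
)

func main() {
    // Number of distinct columns to query per slot when tolerating up to 2 failed samples.
    extended := peerdas.ExtendedSampleCount(16, 2)
    fmt.Printf("extended sample count: %d\n", extended) // 24, per the test table below

    // Probability of catching at most 2 missing columns when sampling 16 of 128 columns, 65 of which are missing.
    p := peerdas.HypergeomCDF(2, 128, 65, 16)
    fmt.Printf("P(X <= 2) = %.6f\n", p)
}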
beacon-chain/core/peerdas/peer_sampling_test.go (new normal file, 60 lines)
@@ -0,0 +1,60 @@
package peerdas_test

import (
    "testing"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
    "github.com/OffchainLabs/prysm/v6/testing/require"
)

func TestExtendedSampleCount(t *testing.T) {
    const samplesPerSlot = 16

    testCases := []struct {
        name                string
        allowedMissings     uint64
        extendedSampleCount uint64
    }{
        {name: "allowedMissings=0", allowedMissings: 0, extendedSampleCount: 16},
        {name: "allowedMissings=1", allowedMissings: 1, extendedSampleCount: 20},
        {name: "allowedMissings=2", allowedMissings: 2, extendedSampleCount: 24},
        {name: "allowedMissings=3", allowedMissings: 3, extendedSampleCount: 27},
        {name: "allowedMissings=4", allowedMissings: 4, extendedSampleCount: 29},
        {name: "allowedMissings=5", allowedMissings: 5, extendedSampleCount: 32},
        {name: "allowedMissings=6", allowedMissings: 6, extendedSampleCount: 35},
        {name: "allowedMissings=7", allowedMissings: 7, extendedSampleCount: 37},
        {name: "allowedMissings=8", allowedMissings: 8, extendedSampleCount: 40},
        {name: "allowedMissings=9", allowedMissings: 9, extendedSampleCount: 42},
        {name: "allowedMissings=10", allowedMissings: 10, extendedSampleCount: 44},
        {name: "allowedMissings=11", allowedMissings: 11, extendedSampleCount: 47},
        {name: "allowedMissings=12", allowedMissings: 12, extendedSampleCount: 49},
        {name: "allowedMissings=13", allowedMissings: 13, extendedSampleCount: 51},
        {name: "allowedMissings=14", allowedMissings: 14, extendedSampleCount: 53},
        {name: "allowedMissings=15", allowedMissings: 15, extendedSampleCount: 55},
        {name: "allowedMissings=16", allowedMissings: 16, extendedSampleCount: 57},
        {name: "allowedMissings=17", allowedMissings: 17, extendedSampleCount: 59},
        {name: "allowedMissings=18", allowedMissings: 18, extendedSampleCount: 61},
        {name: "allowedMissings=19", allowedMissings: 19, extendedSampleCount: 63},
        {name: "allowedMissings=20", allowedMissings: 20, extendedSampleCount: 65},
    }

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            result := peerdas.ExtendedSampleCount(samplesPerSlot, tc.allowedMissings)
            require.Equal(t, tc.extendedSampleCount, result)
        })
    }
}

func TestHypergeomCDF(t *testing.T) {
    // Cumulative hypergeometric probability P(X <= 5) for a population of 128 columns,
    // 65 of which are missing, and a sample size of 16.
    // See https://en.wikipedia.org/wiki/Hypergeometric_distribution.
    const (
        expected = 0.0796665913283742
        margin   = 0.000001
    )

    actual := peerdas.HypergeomCDF(5, 128, 65, 16)
    require.Equal(t, true, expected-margin <= actual && actual <= expected+margin)
}
|
||||
@@ -4,7 +4,6 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"domain.go",
|
||||
"signature.go",
|
||||
"signing_root.go",
|
||||
],
|
||||
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing",
|
||||
@@ -25,7 +24,6 @@ go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"domain_test.go",
|
||||
"signature_test.go",
|
||||
"signing_root_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
|
||||
@@ -1,34 +0,0 @@
|
||||
package signing
|
||||
|
||||
import (
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var ErrNilRegistration = errors.New("nil signed registration")
|
||||
|
||||
// VerifyRegistrationSignature verifies the signature of a validator's registration.
|
||||
func VerifyRegistrationSignature(
|
||||
sr *ethpb.SignedValidatorRegistrationV1,
|
||||
) error {
|
||||
if sr == nil || sr.Message == nil {
|
||||
return ErrNilRegistration
|
||||
}
|
||||
|
||||
d := params.BeaconConfig().DomainApplicationBuilder
|
||||
// Per spec, we want the fork version and genesis validator to be nil.
|
||||
// Which is genesis value and zero by default.
|
||||
sd, err := ComputeDomain(
|
||||
d,
|
||||
nil, /* fork version */
|
||||
nil /* genesis val root */)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := VerifySigningRoot(sr.Message, sr.Message.Pubkey, sr.Signature, sd); err != nil {
|
||||
return ErrSigFailedToVerify
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1,42 +0,0 @@
|
||||
package signing_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/crypto/bls"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
)
|
||||
|
||||
func TestVerifyRegistrationSignature(t *testing.T) {
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
reg := ðpb.ValidatorRegistrationV1{
|
||||
FeeRecipient: bytesutil.PadTo([]byte("fee"), 20),
|
||||
GasLimit: 123456,
|
||||
Timestamp: uint64(time.Now().Unix()),
|
||||
Pubkey: sk.PublicKey().Marshal(),
|
||||
}
|
||||
d := params.BeaconConfig().DomainApplicationBuilder
|
||||
domain, err := signing.ComputeDomain(d, nil, nil)
|
||||
require.NoError(t, err)
|
||||
sr, err := signing.ComputeSigningRoot(reg, domain)
|
||||
require.NoError(t, err)
|
||||
sk.Sign(sr[:]).Marshal()
|
||||
|
||||
sReg := ðpb.SignedValidatorRegistrationV1{
|
||||
Message: reg,
|
||||
Signature: sk.Sign(sr[:]).Marshal(),
|
||||
}
|
||||
require.NoError(t, signing.VerifyRegistrationSignature(sReg))
|
||||
|
||||
sReg.Signature = []byte("bad")
|
||||
require.ErrorIs(t, signing.VerifyRegistrationSignature(sReg), signing.ErrSigFailedToVerify)
|
||||
|
||||
sReg.Message = nil
|
||||
require.ErrorIs(t, signing.VerifyRegistrationSignature(sReg), signing.ErrNilRegistration)
|
||||
}
|
||||
@@ -2,7 +2,6 @@ package signing_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
|
||||
@@ -53,6 +53,11 @@ func HigherEqualThanAltairVersionAndEpoch(s state.BeaconState, e primitives.Epoc
|
||||
return s.Version() >= version.Altair && e >= params.BeaconConfig().AltairForkEpoch
|
||||
}
|
||||
|
||||
// PeerDASIsActive checks whether peerDAS is active at the provided slot.
|
||||
func PeerDASIsActive(slot primitives.Slot) bool {
|
||||
return params.FuluEnabled() && slots.ToEpoch(slot) >= params.BeaconConfig().FuluForkEpoch
|
||||
}
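A short sketch of how the new helper might gate data-availability logic; processSidecars and the two helpers it calls are hypothetical, only PeerDASIsActive comes from this diff:

// processSidecars is a hypothetical caller illustrating the intended use of PeerDASIsActive.
func processSidecars(slot primitives.Slot) {
    if PeerDASIsActive(slot) {
        // Fulu onward: availability is sampled via data column sidecars (peerDAS).
        sampleDataColumns(slot) // hypothetical helper
        return
    }
    // Before Fulu: fall back to blob sidecars.
    downloadBlobSidecars(slot) // hypothetical helper
}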
|
||||
|
||||
// CanUpgradeToAltair returns true if the input `slot` can upgrade to Altair.
|
||||
// Spec code:
|
||||
// If state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == ALTAIR_FORK_EPOCH
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package transition_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"testing"
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package transition_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"testing"
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package transition_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
@@ -33,7 +32,7 @@ func BenchmarkExecuteStateTransition_FullBlock(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(b, err)
|
||||
_, err = coreState.ExecuteStateTransition(t.Context(), cleanStates[i], wsb)
|
||||
_, err = coreState.ExecuteStateTransition(b.Context(), cleanStates[i], wsb)
|
||||
require.NoError(b, err)
|
||||
}
|
||||
}
|
||||
@@ -53,19 +52,19 @@ func BenchmarkExecuteStateTransition_WithCache(b *testing.B) {
|
||||
// some attestations in block are from previous epoch
|
||||
currentSlot := beaconState.Slot()
|
||||
require.NoError(b, beaconState.SetSlot(beaconState.Slot()-params.BeaconConfig().SlotsPerEpoch))
|
||||
require.NoError(b, helpers.UpdateCommitteeCache(t.Context(), beaconState, time.CurrentEpoch(beaconState)))
|
||||
require.NoError(b, helpers.UpdateCommitteeCache(b.Context(), beaconState, time.CurrentEpoch(beaconState)))
|
||||
require.NoError(b, beaconState.SetSlot(currentSlot))
|
||||
// Run the state transition once to populate the cache.
|
||||
wsb, err := blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(b, err)
|
||||
_, err = coreState.ExecuteStateTransition(t.Context(), beaconState, wsb)
|
||||
_, err = coreState.ExecuteStateTransition(b.Context(), beaconState, wsb)
|
||||
require.NoError(b, err, "Failed to process block, benchmarks will fail")
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
wsb, err := blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(b, err)
|
||||
_, err = coreState.ExecuteStateTransition(t.Context(), cleanStates[i], wsb)
|
||||
_, err = coreState.ExecuteStateTransition(b.Context(), cleanStates[i], wsb)
|
||||
require.NoError(b, err, "Failed to process block, benchmarks will fail")
|
||||
}
|
||||
}
|
||||
@@ -81,14 +80,14 @@ func BenchmarkProcessEpoch_2FullEpochs(b *testing.B) {
|
||||
// some attestations in block are from previous epoch
|
||||
currentSlot := beaconState.Slot()
|
||||
require.NoError(b, beaconState.SetSlot(beaconState.Slot()-params.BeaconConfig().SlotsPerEpoch))
|
||||
require.NoError(b, helpers.UpdateCommitteeCache(t.Context(), beaconState, time.CurrentEpoch(beaconState)))
|
||||
require.NoError(b, helpers.UpdateCommitteeCache(b.Context(), beaconState, time.CurrentEpoch(beaconState)))
|
||||
require.NoError(b, beaconState.SetSlot(currentSlot))
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
// ProcessEpochPrecompute is the optimized version of process epoch. It's enabled by default
|
||||
// at run time.
|
||||
_, err := coreState.ProcessEpochPrecompute(t.Context(), beaconState.Copy())
|
||||
_, err := coreState.ProcessEpochPrecompute(b.Context(), beaconState.Copy())
|
||||
require.NoError(b, err)
|
||||
}
|
||||
}
|
||||
@@ -99,7 +98,7 @@ func BenchmarkHashTreeRoot_FullState(b *testing.B) {
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := beaconState.HashTreeRoot(t.Context())
|
||||
_, err := beaconState.HashTreeRoot(b.Context())
|
||||
require.NoError(b, err)
|
||||
}
|
||||
}
|
||||
@@ -108,7 +107,7 @@ func BenchmarkHashTreeRootState_FullState(b *testing.B) {
|
||||
beaconState, err := benchmark.PreGenstateFullEpochs()
|
||||
require.NoError(b, err)
|
||||
|
||||
ctx := t.Context()
|
||||
ctx := b.Context()
|
||||
|
||||
// Hydrate the HashTreeRootState cache.
|
||||
_, err = beaconState.HashTreeRoot(ctx)
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package transition_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package transition
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package transition_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package transition_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package transition
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package transition_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package transition_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
@@ -757,7 +756,7 @@ func BenchmarkProcessSlots_Capella(b *testing.B) {
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
st, err = transition.ProcessSlots(t.Context(), st, st.Slot()+1)
|
||||
st, err = transition.ProcessSlots(b.Context(), st, st.Slot()+1)
|
||||
if err != nil {
|
||||
b.Fatalf("Failed to process slot %v", err)
|
||||
}
|
||||
@@ -771,7 +770,7 @@ func BenchmarkProcessSlots_Deneb(b *testing.B) {
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
st, err = transition.ProcessSlots(t.Context(), st, st.Slot()+1)
|
||||
st, err = transition.ProcessSlots(b.Context(), st, st.Slot()+1)
|
||||
if err != nil {
|
||||
b.Fatalf("Failed to process slot %v", err)
|
||||
}
|
||||
@@ -785,7 +784,7 @@ func BenchmarkProcessSlots_Electra(b *testing.B) {
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
st, err = transition.ProcessSlots(t.Context(), st, st.Slot()+1)
|
||||
st, err = transition.ProcessSlots(b.Context(), st, st.Slot()+1)
|
||||
if err != nil {
|
||||
b.Fatalf("Failed to process slot %v", err)
|
||||
}
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package validators_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
|
||||
@@ -38,9 +38,9 @@ func TestPersist(t *testing.T) {
|
||||
t.Run("mixed roots", func(t *testing.T) {
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
|
||||
dataColumnParamsByBlockRoot := map[[fieldparams.RootLength]byte][]util.DataColumnParams{
|
||||
{1}: {{ColumnIndex: 1}},
|
||||
{2}: {{ColumnIndex: 2}},
|
||||
dataColumnParamsByBlockRoot := []util.DataColumnParam{
|
||||
{Slot: 1, Index: 1},
|
||||
{Slot: 2, Index: 2},
|
||||
}
|
||||
|
||||
roSidecars, _ := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
|
||||
@@ -54,8 +54,8 @@ func TestPersist(t *testing.T) {
|
||||
t.Run("outside DA period", func(t *testing.T) {
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
|
||||
dataColumnParamsByBlockRoot := map[[fieldparams.RootLength]byte][]util.DataColumnParams{
|
||||
{1}: {{ColumnIndex: 1}},
|
||||
dataColumnParamsByBlockRoot := []util.DataColumnParam{
|
||||
{Slot: 1, Index: 1},
|
||||
}
|
||||
|
||||
roSidecars, _ := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
|
||||
@@ -67,21 +67,24 @@ func TestPersist(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
const slot = 42
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
|
||||
dataColumnParamsByBlockRoot := map[[fieldparams.RootLength]byte][]util.DataColumnParams{
|
||||
{}: {{ColumnIndex: 1}, {ColumnIndex: 5}},
|
||||
dataColumnParamsByBlockRoot := []util.DataColumnParam{
|
||||
{Slot: slot, Index: 1},
|
||||
{Slot: slot, Index: 5},
|
||||
}
|
||||
|
||||
roSidecars, roDataColumns := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, &peerdas.CustodyInfo{})
|
||||
|
||||
err := lazilyPersistentStoreColumns.Persist(0, roSidecars...)
|
||||
err := lazilyPersistentStoreColumns.Persist(slot, roSidecars...)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(lazilyPersistentStoreColumns.cache.entries))
|
||||
|
||||
key := cacheKey{slot: 0, root: [fieldparams.RootLength]byte{}}
|
||||
entry := lazilyPersistentStoreColumns.cache.entries[key]
|
||||
key := cacheKey{slot: slot, root: roDataColumns[0].BlockRoot()}
|
||||
entry, ok := lazilyPersistentStoreColumns.cache.entries[key]
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
// A call to Persist does NOT save the sidecars to disk.
|
||||
require.Equal(t, uint64(0), entry.diskSummary.Count())
|
||||
@@ -121,24 +124,37 @@ func TestIsDataAvailable(t *testing.T) {
|
||||
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
|
||||
signedBeaconBlockFulu.Block.Body.BlobKzgCommitments = commitments
|
||||
signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)
|
||||
block := signedRoBlock.Block()
|
||||
slot := block.Slot()
|
||||
proposerIndex := block.ProposerIndex()
|
||||
parentRoot := block.ParentRoot()
|
||||
stateRoot := block.StateRoot()
|
||||
bodyRoot, err := block.Body().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
root := signedRoBlock.Root()
|
||||
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, newDataColumnsVerifier, &peerdas.CustodyInfo{})
|
||||
|
||||
indices := [...]uint64{1, 17, 87, 102}
|
||||
dataColumnsParams := make([]util.DataColumnParams, 0, len(indices))
|
||||
dataColumnsParams := make([]util.DataColumnParam, 0, len(indices))
|
||||
for _, index := range indices {
|
||||
dataColumnParams := util.DataColumnParams{
|
||||
ColumnIndex: index,
|
||||
dataColumnParams := util.DataColumnParam{
|
||||
Index: index,
|
||||
KzgCommitments: commitments,
|
||||
|
||||
Slot: slot,
|
||||
ProposerIndex: proposerIndex,
|
||||
ParentRoot: parentRoot[:],
|
||||
StateRoot: stateRoot[:],
|
||||
BodyRoot: bodyRoot[:],
|
||||
}
|
||||
|
||||
dataColumnsParams = append(dataColumnsParams, dataColumnParams)
|
||||
}
|
||||
|
||||
dataColumnsParamsByBlockRoot := util.DataColumnsParamsByRoot{root: dataColumnsParams}
|
||||
_, verifiedRoDataColumns := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnsParamsByBlockRoot)
|
||||
_, verifiedRoDataColumns := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnsParams)
|
||||
|
||||
key := cacheKey{root: root}
|
||||
entry := lazilyPersistentStoreColumns.cache.ensure(key)
|
||||
@@ -149,7 +165,7 @@ func TestIsDataAvailable(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
err := lazilyPersistentStoreColumns.IsDataAvailable(ctx, 0 /*current slot*/, signedRoBlock)
|
||||
err = lazilyPersistentStoreColumns.IsDataAvailable(ctx, slot, signedRoBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
actual, err := dataColumnStorage.Get(root, indices[:])
|
||||
@@ -224,8 +240,8 @@ func TestFullCommitmentsToCheck(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func roSidecarsFromDataColumnParamsByBlockRoot(t *testing.T, dataColumnParamsByBlockRoot util.DataColumnsParamsByRoot) ([]blocks.ROSidecar, []blocks.RODataColumn) {
|
||||
roDataColumns, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)
|
||||
func roSidecarsFromDataColumnParamsByBlockRoot(t *testing.T, parameters []util.DataColumnParam) ([]blocks.ROSidecar, []blocks.RODataColumn) {
|
||||
roDataColumns, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, parameters)
|
||||
|
||||
roSidecars := make([]blocks.ROSidecar, 0, len(roDataColumns))
|
||||
for _, roDataColumn := range roDataColumns {
|
||||
|
||||
@@ -28,8 +28,7 @@ func TestEnsureDeleteSetDiskSummary(t *testing.T) {
|
||||
|
||||
func TestStash(t *testing.T) {
|
||||
t.Run("Index too high", func(t *testing.T) {
|
||||
dataColumnParamsByBlockRoot := util.DataColumnsParamsByRoot{{1}: {{ColumnIndex: 10_000}}}
|
||||
roDataColumns, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)
|
||||
roDataColumns, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: 10_000}})
|
||||
|
||||
var entry dataColumnCacheEntry
|
||||
err := entry.stash(&roDataColumns[0])
|
||||
@@ -37,8 +36,7 @@ func TestStash(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("Nominal and already existing", func(t *testing.T) {
|
||||
dataColumnParamsByBlockRoot := util.DataColumnsParamsByRoot{{1}: {{ColumnIndex: 1}}}
|
||||
roDataColumns, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)
|
||||
roDataColumns, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: 1}})
|
||||
|
||||
var entry dataColumnCacheEntry
|
||||
err := entry.stash(&roDataColumns[0])
|
||||
@@ -76,36 +74,30 @@ func TestFilterDataColumns(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("Commitments not equal", func(t *testing.T) {
|
||||
root := [fieldparams.RootLength]byte{}
|
||||
commitmentsArray := safeCommitmentsArray{nil, [][]byte{[]byte{1}}}
|
||||
|
||||
dataColumnParamsByBlockRoot := util.DataColumnsParamsByRoot{root: {{ColumnIndex: 1}}}
|
||||
roDataColumns, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)
|
||||
roDataColumns, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: 1}})
|
||||
|
||||
var scs [fieldparams.NumberOfColumns]*blocks.RODataColumn
|
||||
scs[1] = &roDataColumns[0]
|
||||
|
||||
dataColumnCacheEntry := dataColumnCacheEntry{scs: scs}
|
||||
|
||||
_, err := dataColumnCacheEntry.filter(root, &commitmentsArray)
|
||||
_, err := dataColumnCacheEntry.filter(roDataColumns[0].BlockRoot(), &commitmentsArray)
|
||||
require.NotNil(t, err)
|
||||
})
|
||||
|
||||
t.Run("Nominal", func(t *testing.T) {
|
||||
root := [fieldparams.RootLength]byte{}
|
||||
commitmentsArray := safeCommitmentsArray{nil, [][]byte{[]byte{1}}, nil, [][]byte{[]byte{3}}}
|
||||
|
||||
diskSummary := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{false, true})
|
||||
|
||||
dataColumnParamsByBlockRoot := util.DataColumnsParamsByRoot{root: {{ColumnIndex: 3, KzgCommitments: [][]byte{[]byte{3}}}}}
|
||||
expected, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)
|
||||
expected, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: 3, KzgCommitments: [][]byte{[]byte{3}}}})
|
||||
|
||||
var scs [fieldparams.NumberOfColumns]*blocks.RODataColumn
|
||||
scs[3] = &expected[0]
|
||||
|
||||
dataColumnCacheEntry := dataColumnCacheEntry{scs: scs, diskSummary: diskSummary}
|
||||
|
||||
actual, err := dataColumnCacheEntry.filter(root, &commitmentsArray)
|
||||
actual, err := dataColumnCacheEntry.filter(expected[0].BlockRoot(), &commitmentsArray)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepEqual(t, expected, actual)
|
||||
|
||||
@@ -13,6 +13,7 @@ go_library(
|
||||
visibility = [
|
||||
"//beacon-chain:__subpackages__",
|
||||
"//cmd/beacon-chain:__subpackages__",
|
||||
"//genesis:__subpackages__",
|
||||
"//testing/slasher/simulator:__pkg__",
|
||||
"//tools:__subpackages__",
|
||||
],
|
||||
|
||||
@@ -59,6 +59,7 @@ go_test(
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
|
||||
@@ -25,7 +25,7 @@ func directoryPermissions() os.FileMode {
|
||||
var (
|
||||
errIndexOutOfBounds = errors.New("blob index in file name >= MAX_BLOBS_PER_BLOCK")
|
||||
errSidecarEmptySSZData = errors.New("sidecar marshalled to an empty ssz byte slice")
|
||||
errNoBasePath = errors.New("BlobStorage base path not specified in init")
|
||||
errNoBlobBasePath = errors.New("BlobStorage base path not specified in init")
|
||||
)
|
||||
|
||||
// BlobStorageOption is a functional option for configuring a BlobStorage.
|
||||
@@ -85,7 +85,7 @@ func NewBlobStorage(opts ...BlobStorageOption) (*BlobStorage, error) {
|
||||
// Allow tests to set up a different fs using WithFs.
|
||||
if b.fs == nil {
|
||||
if b.base == "" {
|
||||
return nil, errNoBasePath
|
||||
return nil, errNoBlobBasePath
|
||||
}
|
||||
b.base = path.Clean(b.base)
|
||||
if err := file.MkdirAll(b.base); err != nil {
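Since the renamed error surfaces through the functional-options constructor, here is a minimal usage sketch mirroring TestNewBlobStorage further down; the import path and the base path value are assumptions:

package main

import (
    "log"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem" // assumed import path
)

func main() {
    // Construct blob storage with a base path; calling NewBlobStorage without
    // WithBasePath now fails with the blob-specific base-path error.
    store, err := filesystem.NewBlobStorage(filesystem.WithBasePath("/tmp/prysm-blobs"))
    if err != nil {
        log.Fatalf("could not create blob storage: %v", err)
    }
    _ = store
}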
|
||||
|
||||
@@ -160,7 +160,7 @@ func writeFakeSSZ(t *testing.T, fs afero.Fs, root [32]byte, slot primitives.Slot
|
||||
|
||||
func TestNewBlobStorage(t *testing.T) {
|
||||
_, err := NewBlobStorage()
|
||||
require.ErrorIs(t, err, errNoBasePath)
|
||||
require.ErrorIs(t, err, errNoBlobBasePath)
|
||||
_, err = NewBlobStorage(WithBasePath(path.Join(t.TempDir(), "good")))
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
@@ -50,6 +50,7 @@ var (
|
||||
errTooManyDataColumns = errors.New("too many data columns")
|
||||
errWrongSszEncodedDataColumnSidecarSize = errors.New("wrong SSZ encoded data column sidecar size")
|
||||
errDataColumnSidecarsFromDifferentSlots = errors.New("data column sidecars from different slots")
|
||||
errNoDataColumnBasePath = errors.New("DataColumnStorage base path not specified in init")
|
||||
)
|
||||
|
||||
type (
|
||||
@@ -142,7 +143,7 @@ func NewDataColumnStorage(ctx context.Context, opts ...DataColumnStorageOption)
|
||||
// Allow tests to set up a different fs using WithFs.
|
||||
if storage.fs == nil {
|
||||
if storage.base == "" {
|
||||
return nil, errNoBasePath
|
||||
return nil, errNoDataColumnBasePath
|
||||
}
|
||||
|
||||
storage.base = path.Clean(storage.base)
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
package filesystem
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
@@ -19,7 +19,7 @@ func TestNewDataColumnStorage(t *testing.T) {
|
||||
|
||||
t.Run("No base path", func(t *testing.T) {
|
||||
_, err := NewDataColumnStorage(ctx)
|
||||
require.ErrorIs(t, err, errNoBasePath)
|
||||
require.ErrorIs(t, err, errNoDataColumnBasePath)
|
||||
})
|
||||
|
||||
t.Run("Nominal", func(t *testing.T) {
|
||||
@@ -41,32 +41,18 @@ func TestWarmCache(t *testing.T) {
|
||||
|
||||
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{
|
||||
{0}: {
|
||||
{Slot: 33, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 1
|
||||
{Slot: 33, ColumnIndex: 4, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 1
|
||||
},
|
||||
{1}: {
|
||||
{Slot: 128_002, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 4000
|
||||
{Slot: 128_002, ColumnIndex: 4, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 4000
|
||||
},
|
||||
{2}: {
|
||||
{Slot: 128_003, ColumnIndex: 1, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 4000
|
||||
{Slot: 128_003, ColumnIndex: 3, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 4000
|
||||
},
|
||||
{3}: {
|
||||
{Slot: 128_034, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 4001
|
||||
{Slot: 128_034, ColumnIndex: 4, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 4001
|
||||
},
|
||||
{4}: {
|
||||
{Slot: 131_138, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4098
|
||||
},
|
||||
{5}: {
|
||||
{Slot: 131_138, ColumnIndex: 1, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4098
|
||||
},
|
||||
{6}: {
|
||||
{Slot: 131_168, ColumnIndex: 0, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4099
|
||||
},
|
||||
[]util.DataColumnParam{
|
||||
{Slot: 33, Index: 2, Column: [][]byte{{1}, {2}, {3}}}, // Period 0 - Epoch 1
|
||||
{Slot: 33, Index: 4, Column: [][]byte{{2}, {3}, {4}}}, // Period 0 - Epoch 1
|
||||
{Slot: 128_002, Index: 2, Column: [][]byte{{1}, {2}, {3}}}, // Period 0 - Epoch 4000
|
||||
{Slot: 128_002, Index: 4, Column: [][]byte{{2}, {3}, {4}}}, // Period 0 - Epoch 4000
|
||||
{Slot: 128_003, Index: 1, Column: [][]byte{{1}, {2}, {3}}}, // Period 0 - Epoch 4000
|
||||
{Slot: 128_003, Index: 3, Column: [][]byte{{2}, {3}, {4}}}, // Period 0 - Epoch 4000
|
||||
{Slot: 128_034, Index: 2, Column: [][]byte{{1}, {2}, {3}}}, // Period 0 - Epoch 4001
|
||||
{Slot: 128_034, Index: 4, Column: [][]byte{{2}, {3}, {4}}}, // Period 0 - Epoch 4001
|
||||
{Slot: 131_138, Index: 2, Column: [][]byte{{1}, {2}, {3}}}, // Period 1 - Epoch 4098
|
||||
{Slot: 131_138, Index: 1, Column: [][]byte{{1}, {2}, {3}}}, // Period 1 - Epoch 4098
|
||||
{Slot: 131_168, Index: 0, Column: [][]byte{{1}, {2}, {3}}}, // Period 1 - Epoch 4099
|
||||
},
|
||||
)
|
||||
|
||||
@@ -77,29 +63,25 @@ func TestWarmCache(t *testing.T) {
|
||||
|
||||
storage.WarmCache()
|
||||
require.Equal(t, primitives.Epoch(4_000), storage.cache.lowestCachedEpoch)
|
||||
require.Equal(t, 6, len(storage.cache.cache))
|
||||
require.Equal(t, 5, len(storage.cache.cache))
|
||||
|
||||
summary, ok := storage.cache.get([fieldparams.RootLength]byte{1})
|
||||
summary, ok := storage.cache.get(verifiedRoDataColumnSidecars[2].BlockRoot())
|
||||
require.Equal(t, true, ok)
|
||||
require.DeepEqual(t, DataColumnStorageSummary{epoch: 4_000, mask: [fieldparams.NumberOfColumns]bool{false, false, true, false, true}}, summary)
|
||||
|
||||
summary, ok = storage.cache.get([fieldparams.RootLength]byte{2})
|
||||
summary, ok = storage.cache.get(verifiedRoDataColumnSidecars[4].BlockRoot())
|
||||
require.Equal(t, true, ok)
|
||||
require.DeepEqual(t, DataColumnStorageSummary{epoch: 4_000, mask: [fieldparams.NumberOfColumns]bool{false, true, false, true}}, summary)
|
||||
|
||||
summary, ok = storage.cache.get([fieldparams.RootLength]byte{3})
|
||||
summary, ok = storage.cache.get(verifiedRoDataColumnSidecars[6].BlockRoot())
|
||||
require.Equal(t, true, ok)
|
||||
require.DeepEqual(t, DataColumnStorageSummary{epoch: 4_001, mask: [fieldparams.NumberOfColumns]bool{false, false, true, false, true}}, summary)
|
||||
|
||||
summary, ok = storage.cache.get([fieldparams.RootLength]byte{4})
|
||||
summary, ok = storage.cache.get(verifiedRoDataColumnSidecars[8].BlockRoot())
|
||||
require.Equal(t, true, ok)
|
||||
require.DeepEqual(t, DataColumnStorageSummary{epoch: 4_098, mask: [fieldparams.NumberOfColumns]bool{false, false, true}}, summary)
|
||||
require.DeepEqual(t, DataColumnStorageSummary{epoch: 4_098, mask: [fieldparams.NumberOfColumns]bool{false, true, true}}, summary)
|
||||
|
||||
summary, ok = storage.cache.get([fieldparams.RootLength]byte{5})
|
||||
require.Equal(t, true, ok)
|
||||
require.DeepEqual(t, DataColumnStorageSummary{epoch: 4_098, mask: [fieldparams.NumberOfColumns]bool{false, true}}, summary)
|
||||
|
||||
summary, ok = storage.cache.get([fieldparams.RootLength]byte{6})
|
||||
summary, ok = storage.cache.get(verifiedRoDataColumnSidecars[10].BlockRoot())
|
||||
require.Equal(t, true, ok)
|
||||
require.DeepEqual(t, DataColumnStorageSummary{epoch: 4_099, mask: [fieldparams.NumberOfColumns]bool{true}}, summary)
|
||||
}
|
||||
@@ -113,9 +95,7 @@ func TestSaveDataColumnsSidecars(t *testing.T) {
|
||||
|
||||
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{
|
||||
{}: {{ColumnIndex: 12}, {ColumnIndex: 1_000_000}, {ColumnIndex: 48}},
|
||||
},
|
||||
[]util.DataColumnParam{{Index: 12}, {Index: 1_000_000}, {Index: 48}},
|
||||
)
|
||||
|
||||
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
|
||||
@@ -126,7 +106,7 @@ func TestSaveDataColumnsSidecars(t *testing.T) {
|
||||
t.Run("one of the column index is too large", func(t *testing.T) {
|
||||
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{{}: {{ColumnIndex: 12}, {ColumnIndex: 1_000_000}, {ColumnIndex: 48}}},
|
||||
[]util.DataColumnParam{{Index: 12}, {Index: 1_000_000}, {Index: 48}},
|
||||
)
|
||||
|
||||
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
|
||||
@@ -137,23 +117,34 @@ func TestSaveDataColumnsSidecars(t *testing.T) {
|
||||
t.Run("different slots", func(t *testing.T) {
|
||||
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{
|
||||
{}: {
|
||||
{Slot: 1, ColumnIndex: 12, DataColumn: []byte{1, 2, 3}},
|
||||
{Slot: 2, ColumnIndex: 12, DataColumn: []byte{1, 2, 3}},
|
||||
},
|
||||
[]util.DataColumnParam{
|
||||
{Slot: 1, Index: 12, Column: [][]byte{{1}, {2}, {3}}},
|
||||
{Slot: 2, Index: 12, Column: [][]byte{{1}, {2}, {3}}},
|
||||
},
|
||||
)
|
||||
|
||||
// Create a sidecar with a different slot but the same root.
|
||||
alteredVerifiedRoDataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, 2)
|
||||
alteredVerifiedRoDataColumnSidecars = append(alteredVerifiedRoDataColumnSidecars, verifiedRoDataColumnSidecars[0])
|
||||
|
||||
altered, err := blocks.NewRODataColumnWithRoot(
|
||||
verifiedRoDataColumnSidecars[1].RODataColumn.DataColumnSidecar,
|
||||
verifiedRoDataColumnSidecars[0].BlockRoot(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
verifiedAltered := blocks.NewVerifiedRODataColumn(altered)
|
||||
alteredVerifiedRoDataColumnSidecars = append(alteredVerifiedRoDataColumnSidecars, verifiedAltered)
|
||||
|
||||
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
|
||||
err := dataColumnStorage.Save(verifiedRoDataColumnSidecars)
|
||||
err = dataColumnStorage.Save(alteredVerifiedRoDataColumnSidecars)
|
||||
require.ErrorIs(t, err, errDataColumnSidecarsFromDifferentSlots)
|
||||
})
|
||||
|
||||
t.Run("new file - no data columns to save", func(t *testing.T) {
|
||||
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{{}: {}},
|
||||
[]util.DataColumnParam{},
|
||||
)
|
||||
|
||||
_, dataColumnStorage := NewEphemeralDataColumnStorageAndFs(t)
|
||||
@@ -164,11 +155,9 @@ func TestSaveDataColumnsSidecars(t *testing.T) {
|
||||
t.Run("new file - different data column size", func(t *testing.T) {
|
||||
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{
|
||||
{}: {
|
||||
{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}},
|
||||
{ColumnIndex: 11, DataColumn: []byte{1, 2, 3, 4}},
|
||||
},
|
||||
[]util.DataColumnParam{
|
||||
{Slot: 1, Index: 12, Column: [][]byte{{1}, {2}, {3}}},
|
||||
{Slot: 1, Index: 13, Column: [][]byte{{1}, {2}, {3}, {4}}},
|
||||
},
|
||||
)
|
||||
|
||||
@@ -180,7 +169,9 @@ func TestSaveDataColumnsSidecars(t *testing.T) {
|
||||
t.Run("existing file - wrong incoming SSZ encoded size", func(t *testing.T) {
|
||||
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{{1}: {{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}}}},
|
||||
[]util.DataColumnParam{
|
||||
{Slot: 1, Index: 12, Column: [][]byte{{1}, {2}, {3}}},
|
||||
},
|
||||
)
|
||||
|
||||
// Save data columns into a file.
|
||||
@@ -192,7 +183,9 @@ func TestSaveDataColumnsSidecars(t *testing.T) {
|
||||
// column index and a different SSZ encoded size.
|
||||
_, verifiedRoDataColumnSidecars = util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{{1}: {{ColumnIndex: 13, DataColumn: []byte{1, 2, 3, 4}}}},
|
||||
[]util.DataColumnParam{
|
||||
{Slot: 1, Index: 13, Column: [][]byte{{1}, {2}, {3}, {4}}},
|
||||
},
|
||||
)
|
||||
|
||||
// Try to rewrite the file.
|
||||
@@ -203,17 +196,13 @@ func TestSaveDataColumnsSidecars(t *testing.T) {
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
_, inputVerifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{
|
||||
{1}: {
|
||||
{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}},
|
||||
{ColumnIndex: 11, DataColumn: []byte{3, 4, 5}},
|
||||
{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}}, // OK if duplicate
|
||||
{ColumnIndex: 13, DataColumn: []byte{6, 7, 8}},
|
||||
},
|
||||
{2}: {
|
||||
{ColumnIndex: 12, DataColumn: []byte{3, 4, 5}},
|
||||
{ColumnIndex: 13, DataColumn: []byte{6, 7, 8}},
|
||||
},
|
||||
[]util.DataColumnParam{
|
||||
{Slot: 1, Index: 12, Column: [][]byte{{1}, {2}, {3}}},
|
||||
{Slot: 1, Index: 11, Column: [][]byte{{3}, {4}, {5}}},
|
||||
{Slot: 1, Index: 12, Column: [][]byte{{1}, {2}, {3}}}, // OK if duplicate
|
||||
{Slot: 1, Index: 13, Column: [][]byte{{6}, {7}, {8}}},
|
||||
{Slot: 2, Index: 12, Column: [][]byte{{3}, {4}, {5}}},
|
||||
{Slot: 2, Index: 13, Column: [][]byte{{6}, {7}, {8}}},
|
||||
},
|
||||
)
|
||||
|
||||
@@ -223,16 +212,12 @@ func TestSaveDataColumnsSidecars(t *testing.T) {
|
||||
|
||||
_, inputVerifiedRoDataColumnSidecars = util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{
|
||||
{1}: {
|
||||
{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}}, // OK if duplicate
|
||||
{ColumnIndex: 15, DataColumn: []byte{2, 3, 4}},
|
||||
{ColumnIndex: 1, DataColumn: []byte{2, 3, 4}},
|
||||
},
|
||||
{3}: {
|
||||
{ColumnIndex: 6, DataColumn: []byte{3, 4, 5}},
|
||||
{ColumnIndex: 2, DataColumn: []byte{6, 7, 8}},
|
||||
},
|
||||
[]util.DataColumnParam{
|
||||
{Slot: 1, Index: 12, Column: [][]byte{{1}, {2}, {3}}}, // OK if duplicate
|
||||
{Slot: 1, Index: 15, Column: [][]byte{{2}, {3}, {4}}},
|
||||
{Slot: 1, Index: 1, Column: [][]byte{{2}, {3}, {4}}},
|
||||
{Slot: 3, Index: 6, Column: [][]byte{{3}, {4}, {5}}},
|
||||
{Slot: 3, Index: 2, Column: [][]byte{{6}, {7}, {8}}},
|
||||
},
|
||||
)
|
||||
|
||||
@@ -241,51 +226,47 @@ func TestSaveDataColumnsSidecars(t *testing.T) {
|
||||
|
||||
type fixture struct {
|
||||
fileName string
|
||||
blockRoot [fieldparams.RootLength]byte
|
||||
expectedIndices [mandatoryNumberOfColumns]byte
|
||||
dataColumnParams []util.DataColumnParams
|
||||
dataColumnParams []util.DataColumnParam
|
||||
}
|
||||
|
||||
fixtures := []fixture{
|
||||
{
|
||||
fileName: "0/0/0x0100000000000000000000000000000000000000000000000000000000000000.sszs",
|
||||
blockRoot: [fieldparams.RootLength]byte{1},
|
||||
fileName: "0/0/0x8bb2f09de48c102635622dc27e6de03ae2b22639df7c33edbc8222b2ec423746.sszs",
|
||||
expectedIndices: [mandatoryNumberOfColumns]byte{
|
||||
0, nonZeroOffset + 4, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, nonZeroOffset + 1, nonZeroOffset, nonZeroOffset + 2, 0, nonZeroOffset + 3,
|
||||
// The rest is filled with zeroes.
|
||||
},
|
||||
dataColumnParams: []util.DataColumnParams{
|
||||
{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}},
|
||||
{ColumnIndex: 11, DataColumn: []byte{3, 4, 5}},
|
||||
{ColumnIndex: 13, DataColumn: []byte{6, 7, 8}},
|
||||
{ColumnIndex: 15, DataColumn: []byte{2, 3, 4}},
|
||||
{ColumnIndex: 1, DataColumn: []byte{2, 3, 4}},
|
||||
dataColumnParams: []util.DataColumnParam{
|
||||
{Slot: 1, Index: 12, Column: [][]byte{{1}, {2}, {3}}},
|
||||
{Slot: 1, Index: 11, Column: [][]byte{{3}, {4}, {5}}},
|
||||
{Slot: 1, Index: 13, Column: [][]byte{{6}, {7}, {8}}},
|
||||
{Slot: 1, Index: 15, Column: [][]byte{{2}, {3}, {4}}},
|
||||
{Slot: 1, Index: 1, Column: [][]byte{{2}, {3}, {4}}},
|
||||
},
|
||||
},
|
||||
{
|
||||
fileName: "0/0/0x0200000000000000000000000000000000000000000000000000000000000000.sszs",
|
||||
blockRoot: [fieldparams.RootLength]byte{2},
|
||||
fileName: "0/0/0x221f88cae2219050d4e9d8c2d0d83cb4c8ce4c84ab1bb3e0b89f3dec36077c4f.sszs",
|
||||
expectedIndices: [mandatoryNumberOfColumns]byte{
|
||||
0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, nonZeroOffset, nonZeroOffset + 1, 0, 0,
|
||||
// The rest is filled with zeroes.
|
||||
},
|
||||
dataColumnParams: []util.DataColumnParams{
|
||||
{ColumnIndex: 12, DataColumn: []byte{3, 4, 5}},
|
||||
{ColumnIndex: 13, DataColumn: []byte{6, 7, 8}},
|
||||
dataColumnParams: []util.DataColumnParam{
|
||||
{Slot: 2, Index: 12, Column: [][]byte{{3}, {4}, {5}}},
|
||||
{Slot: 2, Index: 13, Column: [][]byte{{6}, {7}, {8}}},
|
||||
},
|
||||
},
|
||||
{
|
||||
fileName: "0/0/0x0300000000000000000000000000000000000000000000000000000000000000.sszs",
|
||||
blockRoot: [fieldparams.RootLength]byte{3},
|
||||
fileName: "0/0/0x7b163bd57e1c4c8b5048c5389698098f4c957d62d7ce86f4ffa9bdc75c16a18b.sszs",
|
||||
expectedIndices: [mandatoryNumberOfColumns]byte{
|
||||
0, 0, nonZeroOffset + 1, 0, 0, 0, nonZeroOffset, 0,
|
||||
// The rest is filled with zeroes.
|
||||
},
|
||||
dataColumnParams: []util.DataColumnParams{
|
||||
{ColumnIndex: 6, DataColumn: []byte{3, 4, 5}},
|
||||
{ColumnIndex: 2, DataColumn: []byte{6, 7, 8}},
|
||||
dataColumnParams: []util.DataColumnParam{
|
||||
{Slot: 3, Index: 6, Column: [][]byte{{3}, {4}, {5}}},
|
||||
{Slot: 3, Index: 2, Column: [][]byte{{6}, {7}, {8}}},
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -294,7 +275,7 @@ func TestSaveDataColumnsSidecars(t *testing.T) {
|
||||
// Build expected data column sidecars.
|
||||
_, expectedDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{fixture.blockRoot: fixture.dataColumnParams},
|
||||
fixture.dataColumnParams,
|
||||
)
|
||||
|
||||
// Build expected bytes.
|
||||
@@ -321,6 +302,8 @@ func TestSaveDataColumnsSidecars(t *testing.T) {
|
||||
expectedBytes = append(expectedBytes, fixture.expectedIndices[:]...)
|
||||
expectedBytes = append(expectedBytes, sszEncodedDataColumnSidecars...)
|
||||
|
||||
blockRoot := expectedDataColumnSidecars[0].BlockRoot()
|
||||
|
||||
// Check the actual content of the file.
|
||||
actualBytes, err := afero.ReadFile(dataColumnStorage.fs, fixture.fileName)
|
||||
require.NoError(t, err)
|
||||
@@ -329,18 +312,18 @@ func TestSaveDataColumnsSidecars(t *testing.T) {
|
||||
// Check the summary.
|
||||
indices := map[uint64]bool{}
|
||||
for _, dataColumnParam := range fixture.dataColumnParams {
|
||||
indices[dataColumnParam.ColumnIndex] = true
|
||||
indices[dataColumnParam.Index] = true
|
||||
}
|
||||
|
||||
summary := dataColumnStorage.Summary(fixture.blockRoot)
|
||||
summary := dataColumnStorage.Summary(blockRoot)
|
||||
for index := range uint64(mandatoryNumberOfColumns) {
|
||||
require.Equal(t, indices[index], summary.HasIndex(index))
|
||||
}
|
||||
|
||||
err = dataColumnStorage.Remove(fixture.blockRoot)
|
||||
err = dataColumnStorage.Remove(blockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
summary = dataColumnStorage.Summary(fixture.blockRoot)
|
||||
summary = dataColumnStorage.Summary(blockRoot)
|
||||
for index := range uint64(mandatoryNumberOfColumns) {
|
||||
require.Equal(t, false, summary.HasIndex(index))
|
||||
}
|
||||
@@ -363,11 +346,9 @@ func TestGetDataColumnSidecars(t *testing.T) {
|
||||
t.Run("indices not found", func(t *testing.T) {
|
||||
_, savedVerifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{
|
||||
{1}: {
|
||||
{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}},
|
||||
{ColumnIndex: 14, DataColumn: []byte{2, 3, 4}},
|
||||
},
|
||||
[]util.DataColumnParam{
|
||||
{Index: 12, Column: [][]byte{{1}, {2}, {3}}},
|
||||
{Index: 14, Column: [][]byte{{2}, {3}, {4}}},
|
||||
},
|
||||
)
|
||||
|
||||
@@ -375,7 +356,7 @@ func TestGetDataColumnSidecars(t *testing.T) {
|
||||
err := dataColumnStorage.Save(savedVerifiedRoDataColumnSidecars)
|
||||
require.NoError(t, err)
|
||||
|
||||
verifiedRODataColumnSidecars, err := dataColumnStorage.Get([fieldparams.RootLength]byte{1}, []uint64{3, 1, 2})
|
||||
verifiedRODataColumnSidecars, err := dataColumnStorage.Get(savedVerifiedRoDataColumnSidecars[0].BlockRoot(), []uint64{3, 1, 2})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(verifiedRODataColumnSidecars))
|
||||
})
|
||||
@@ -383,11 +364,9 @@ func TestGetDataColumnSidecars(t *testing.T) {
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
_, expectedVerifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{
|
||||
{1}: {
|
||||
{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}},
|
||||
{ColumnIndex: 14, DataColumn: []byte{2, 3, 4}},
|
||||
},
|
||||
[]util.DataColumnParam{
|
||||
{Index: 12, Column: [][]byte{{1}, {2}, {3}}},
|
||||
{Index: 14, Column: [][]byte{{2}, {3}, {4}}},
|
||||
},
|
||||
)
|
||||
|
||||
@@ -395,11 +374,13 @@ func TestGetDataColumnSidecars(t *testing.T) {
|
||||
err := dataColumnStorage.Save(expectedVerifiedRoDataColumnSidecars)
|
||||
require.NoError(t, err)
|
||||
|
||||
verifiedRODataColumnSidecars, err := dataColumnStorage.Get([fieldparams.RootLength]byte{1}, nil)
|
||||
root := expectedVerifiedRoDataColumnSidecars[0].BlockRoot()
|
||||
|
||||
verifiedRODataColumnSidecars, err := dataColumnStorage.Get(root, nil)
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, expectedVerifiedRoDataColumnSidecars, verifiedRODataColumnSidecars)
|
||||
|
||||
verifiedRODataColumnSidecars, err = dataColumnStorage.Get([fieldparams.RootLength]byte{1}, []uint64{12, 13, 14})
|
||||
verifiedRODataColumnSidecars, err = dataColumnStorage.Get(root, []uint64{12, 13, 14})
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, expectedVerifiedRoDataColumnSidecars, verifiedRODataColumnSidecars)
|
||||
})
|
||||
@@ -415,15 +396,11 @@ func TestRemove(t *testing.T) {
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
_, inputVerifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{
|
||||
{1}: {
|
||||
{Slot: 32, ColumnIndex: 10, DataColumn: []byte{1, 2, 3}},
|
||||
{Slot: 32, ColumnIndex: 11, DataColumn: []byte{2, 3, 4}},
|
||||
},
|
||||
{2}: {
|
||||
{Slot: 33, ColumnIndex: 10, DataColumn: []byte{1, 2, 3}},
|
||||
{Slot: 33, ColumnIndex: 11, DataColumn: []byte{2, 3, 4}},
|
||||
},
|
||||
[]util.DataColumnParam{
|
||||
{Slot: 32, Index: 10, Column: [][]byte{{1}, {2}, {3}}},
|
||||
{Slot: 32, Index: 11, Column: [][]byte{{2}, {3}, {4}}},
|
||||
{Slot: 33, Index: 10, Column: [][]byte{{1}, {2}, {3}}},
|
||||
{Slot: 33, Index: 11, Column: [][]byte{{2}, {3}, {4}}},
|
||||
},
|
||||
)
|
||||
|
||||
@@ -431,22 +408,22 @@ func TestRemove(t *testing.T) {
|
||||
err := dataColumnStorage.Save(inputVerifiedRoDataColumnSidecars)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = dataColumnStorage.Remove([fieldparams.RootLength]byte{1})
|
||||
err = dataColumnStorage.Remove(inputVerifiedRoDataColumnSidecars[0].BlockRoot())
|
||||
require.NoError(t, err)
|
||||
|
||||
summary := dataColumnStorage.Summary([fieldparams.RootLength]byte{1})
|
||||
summary := dataColumnStorage.Summary(inputVerifiedRoDataColumnSidecars[0].BlockRoot())
|
||||
require.Equal(t, primitives.Epoch(0), summary.epoch)
|
||||
require.Equal(t, uint64(0), summary.Count())
|
||||
|
||||
summary = dataColumnStorage.Summary([fieldparams.RootLength]byte{2})
|
||||
summary = dataColumnStorage.Summary(inputVerifiedRoDataColumnSidecars[3].BlockRoot())
|
||||
require.Equal(t, primitives.Epoch(1), summary.epoch)
|
||||
require.Equal(t, uint64(2), summary.Count())
|
||||
|
||||
actual, err := dataColumnStorage.Get([fieldparams.RootLength]byte{1}, nil)
|
||||
actual, err := dataColumnStorage.Get(inputVerifiedRoDataColumnSidecars[0].BlockRoot(), nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(actual))
|
||||
|
||||
actual, err = dataColumnStorage.Get([fieldparams.RootLength]byte{2}, nil)
|
||||
actual, err = dataColumnStorage.Get(inputVerifiedRoDataColumnSidecars[3].BlockRoot(), nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(actual))
|
||||
})
|
||||
@@ -455,9 +432,9 @@ func TestRemove(t *testing.T) {
|
||||
func TestClear(t *testing.T) {
|
||||
_, inputVerifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{
|
||||
{1}: {{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}}},
|
||||
{2}: {{ColumnIndex: 13, DataColumn: []byte{6, 7, 8}}},
|
||||
[]util.DataColumnParam{
|
||||
{Slot: 1, Index: 12, Column: [][]byte{{1}, {2}, {3}}},
|
||||
{Slot: 2, Index: 13, Column: [][]byte{{6}, {7}, {8}}},
|
||||
},
|
||||
)
|
||||
|
||||
@@ -466,8 +443,8 @@ func TestClear(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
filePaths := []string{
|
||||
"0/0/0x0100000000000000000000000000000000000000000000000000000000000000.sszs",
|
||||
"0/0/0x0200000000000000000000000000000000000000000000000000000000000000.sszs",
|
||||
"0/0/0x8bb2f09de48c102635622dc27e6de03ae2b22639df7c33edbc8222b2ec423746.sszs",
|
||||
"0/0/0x221f88cae2219050d4e9d8c2d0d83cb4c8ce4c84ab1bb3e0b89f3dec36077c4f.sszs",
|
||||
}
|
||||
|
||||
for _, filePath := range filePaths {
|
||||
@@ -493,8 +470,8 @@ func TestMetadata(t *testing.T) {
|
||||
t.Run("wrong version", func(t *testing.T) {
|
||||
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{
|
||||
{1}: {{ColumnIndex: 12, DataColumn: []byte{1, 2, 3}}},
|
||||
[]util.DataColumnParam{
|
||||
{Slot: 1, Index: 12, Column: [][]byte{{1}, {2}, {3}}},
|
||||
},
|
||||
)
|
||||
|
||||
@@ -504,7 +481,7 @@ func TestMetadata(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// Alter the version.
|
||||
const filePath = "0/0/0x0100000000000000000000000000000000000000000000000000000000000000.sszs"
|
||||
const filePath = "0/0/0x8bb2f09de48c102635622dc27e6de03ae2b22639df7c33edbc8222b2ec423746.sszs"
|
||||
file, err := dataColumnStorage.fs.OpenFile(filePath, os.O_WRONLY, os.FileMode(0600))
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -644,31 +621,19 @@ func TestPrune(t *testing.T) {
|
||||
}
|
||||
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{
|
||||
{0}: {
|
||||
{Slot: 33, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 1
|
||||
{Slot: 33, ColumnIndex: 4, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 1
|
||||
},
|
||||
{1}: {
|
||||
{Slot: 128_002, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 4000
|
||||
{Slot: 128_002, ColumnIndex: 4, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 4000
|
||||
},
|
||||
{2}: {
|
||||
{Slot: 128_003, ColumnIndex: 1, DataColumn: []byte{1, 2, 3}}, // Period 0 - Epoch 4000
|
||||
{Slot: 128_003, ColumnIndex: 3, DataColumn: []byte{2, 3, 4}}, // Period 0 - Epoch 4000
|
||||
},
|
||||
{3}: {
|
||||
{Slot: 131_138, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4098
|
||||
{Slot: 131_138, ColumnIndex: 3, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4098
|
||||
},
|
||||
{4}: {
|
||||
{Slot: 131_169, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4099
|
||||
{Slot: 131_169, ColumnIndex: 3, DataColumn: []byte{1, 2, 3}}, // Period 1 - Epoch 4099
|
||||
},
|
||||
{5}: {
|
||||
{Slot: 262_144, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}, // Period 2 - Epoch 8192
|
||||
{Slot: 262_144, ColumnIndex: 3, DataColumn: []byte{1, 2, 3}}, // Period 2 - Epoch 8292
|
||||
},
|
||||
[]util.DataColumnParam{
|
||||
{Slot: 33, Index: 2, Column: [][]byte{{1}, {2}, {3}}}, // Period 0 - Epoch 1
|
||||
{Slot: 33, Index: 4, Column: [][]byte{{2}, {3}, {4}}}, // Period 0 - Epoch 1
|
||||
{Slot: 128_002, Index: 2, Column: [][]byte{{1}, {2}, {3}}}, // Period 0 - Epoch 4000
|
||||
{Slot: 128_002, Index: 4, Column: [][]byte{{2}, {3}, {4}}}, // Period 0 - Epoch 4000
|
||||
{Slot: 128_003, Index: 1, Column: [][]byte{{1}, {2}, {3}}}, // Period 0 - Epoch 4000
|
||||
{Slot: 128_003, Index: 3, Column: [][]byte{{2}, {3}, {4}}}, // Period 0 - Epoch 4000
|
||||
{Slot: 131_138, Index: 2, Column: [][]byte{{1}, {2}, {3}}}, // Period 1 - Epoch 4098
|
||||
{Slot: 131_138, Index: 3, Column: [][]byte{{1}, {2}, {3}}}, // Period 1 - Epoch 4098
|
||||
{Slot: 131_169, Index: 2, Column: [][]byte{{1}, {2}, {3}}}, // Period 1 - Epoch 4099
|
||||
{Slot: 131_169, Index: 3, Column: [][]byte{{1}, {2}, {3}}}, // Period 1 - Epoch 4099
|
||||
{Slot: 262_144, Index: 2, Column: [][]byte{{1}, {2}, {3}}}, // Period 2 - Epoch 8192
|
||||
{Slot: 262_144, Index: 3, Column: [][]byte{{1}, {2}, {3}}}, // Period 2 - Epoch 8192
|
||||
},
|
||||
)
|
||||
|
||||
@@ -697,31 +662,31 @@ func TestPrune(t *testing.T) {
|
||||
|
||||
dirs, err = listDir(dataColumnStorage.fs, "0/1")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, compareSlices([]string{"0x0000000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))
|
||||
require.Equal(t, true, compareSlices([]string{"0x775283f428813c949b7e8af07f01fef9790137f021b3597ad2d0d81e8be8f0f0.sszs"}, dirs))
|
||||
|
||||
dirs, err = listDir(dataColumnStorage.fs, "0/4000")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, compareSlices([]string{
|
||||
"0x0200000000000000000000000000000000000000000000000000000000000000.sszs",
|
||||
"0x0100000000000000000000000000000000000000000000000000000000000000.sszs",
|
||||
"0x9977031132157ebb9c81bce952003ce07a4f54e921ca63b7693d1562483fdf9f.sszs",
|
||||
"0xb2b14d9d858fa99b70f0405e4e39f38e51e36dd9a70343c109e24eeb5f77e369.sszs",
|
||||
}, dirs))
|
||||
|
||||
dirs, err = listDir(dataColumnStorage.fs, "1/4098")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, compareSlices([]string{"0x0300000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))
|
||||
require.Equal(t, true, compareSlices([]string{"0x5106745cdd6b1aa3602ef4d000ef373af672019264c167fa4bd641a1094aa5c5.sszs"}, dirs))
|
||||
|
||||
dirs, err = listDir(dataColumnStorage.fs, "1/4099")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, compareSlices([]string{"0x0400000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))
|
||||
require.Equal(t, true, compareSlices([]string{"0x4e5f2bd5bb84bf0422af8edd1cc5a52cc6cea85baf3d66d172fe41831ac1239c.sszs"}, dirs))
|
||||
|
||||
dirs, err = listDir(dataColumnStorage.fs, "2/8192")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, compareSlices([]string{"0x0500000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))
|
||||
require.Equal(t, true, compareSlices([]string{"0xa8adba7446eb56a01a9dd6d55e9c3990b10c91d43afb77847b4a21ac4ee62527.sszs"}, dirs))
|
||||
|
||||
_, verifiedRoDataColumnSidecars = util.CreateTestVerifiedRoDataColumnSidecars(
|
||||
t,
|
||||
util.DataColumnsParamsByRoot{
|
||||
{6}: {{Slot: 451_141, ColumnIndex: 2, DataColumn: []byte{1, 2, 3}}}, // Period 3 - Epoch 14_098
|
||||
[]util.DataColumnParam{
|
||||
{Slot: 451_141, Index: 2, Column: [][]byte{{1}, {2}, {3}}}, // Period 3 - Epoch 14_098
|
||||
},
|
||||
)
|
||||
|
||||
@@ -749,14 +714,14 @@ func TestPrune(t *testing.T) {
|
||||
|
||||
dirs, err = listDir(dataColumnStorage.fs, "1/4099")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, compareSlices([]string{"0x0400000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))
|
||||
require.Equal(t, true, compareSlices([]string{"0x4e5f2bd5bb84bf0422af8edd1cc5a52cc6cea85baf3d66d172fe41831ac1239c.sszs"}, dirs))
|
||||
|
||||
dirs, err = listDir(dataColumnStorage.fs, "2/8192")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, compareSlices([]string{"0x0500000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))
|
||||
require.Equal(t, true, compareSlices([]string{"0xa8adba7446eb56a01a9dd6d55e9c3990b10c91d43afb77847b4a21ac4ee62527.sszs"}, dirs))
|
||||
|
||||
dirs, err = listDir(dataColumnStorage.fs, "3/14098")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, compareSlices([]string{"0x0600000000000000000000000000000000000000000000000000000000000000.sszs"}, dirs))
|
||||
require.Equal(t, true, compareSlices([]string{"0x0de28a18cae63cbc6f0b20dc1afb0b1df38da40824a5f09f92d485ade04de97f.sszs"}, dirs))
|
||||
})
|
||||
}
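Reading the directory listings asserted above, the data column store lays files out as <custody period>/<epoch>/<block root>.sszs, and pruning removes whole period and epoch directories. A rough sketch of that mapping; the 32 slots-per-epoch and 4096 epochs-per-period divisors are assumptions inferred from the fixtures (e.g. slot 131_138 lands in 1/4098), not constants quoted from this diff:

// dataColumnDir reproduces the directory naming implied by the tests above.
// Assumes "fmt" is imported; divisors mirror the mainnet values used in the fixtures.
func dataColumnDir(slot uint64) string {
    epoch := slot / 32     // slots per epoch (assumed)
    period := epoch / 4096 // epochs per custody period (assumed)
    return fmt.Sprintf("%d/%d", period, epoch)
}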
|
||||
|
||||
@@ -38,7 +38,6 @@ go_library(
|
||||
"//beacon-chain/db/filters:go_default_library",
|
||||
"//beacon-chain/db/iface:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/genesis:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
@@ -50,6 +49,7 @@ go_library(
|
||||
"//container/slice:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//encoding/ssz/detect:go_default_library",
|
||||
"//genesis:go_default_library",
|
||||
"//io/file:go_default_library",
|
||||
"//monitoring/progress:go_default_library",
|
||||
"//monitoring/tracing:go_default_library",
|
||||
@@ -106,7 +106,6 @@ go_test(
|
||||
"//beacon-chain/db/filters:go_default_library",
|
||||
"//beacon-chain/db/iface:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/genesis:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
@@ -116,6 +115,7 @@ go_test(
|
||||
"//consensus-types/light-client:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//genesis/embedded:go_default_library",
|
||||
"//proto/dbval:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.