Use Data Column Validation Across Prysm (#14377)
* Use Data Column Validation Everywhere
* Fix Build
* Fix Lint
* Fix Clock Synchronizer
* Fix Panic
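The change threads a single column-verifier factory, verification.NewColumnVerifier, from the initial-sync verification Initializer into every component that checks data column sidecars (the 1D sampler, the blocks fetcher and queue, the sync RPC path, and the verify helpers). A minimal sketch of that wiring, using the constructor calls that appear in the diff below; the columnVerifierFactory wrapper and the *startup.ClockSynchronizer parameter type are illustrative assumptions, not code from this commit:

package example

import (
    "context"

    "github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
)

// columnVerifierFactory waits for the verification Initializer to become ready and
// returns the verification.NewColumnVerifier callback that the sampler, blocks
// fetcher, blocks queue, and init-sync service accept after this change.
func columnVerifierFactory(ctx context.Context, clockSync *startup.ClockSynchronizer) (verification.NewColumnVerifier, error) {
    iniWaiter := verification.NewInitializerWaiter(clockSync, nil, nil)
    ini, err := iniWaiter.WaitForInitializer(ctx)
    if err != nil {
        return nil, err
    }
    // Each call produces a DataColumnVerifier scoped to one sidecar and one
    // requirement list, mirroring newColumnVerifierFromInitializer in the diff below.
    return func(d blocks.RODataColumn, reqs []verification.Requirement) verification.DataColumnVerifier {
        return ini.NewColumnVerifier(d, reqs)
    }, nil
}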
@@ -9,6 +9,7 @@ import (

     "github.com/libp2p/go-libp2p/core/peer"
     "github.com/pkg/errors"
+    "github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
     "github.com/sirupsen/logrus"

     "github.com/prysmaticlabs/prysm/v5/async"

@@ -57,6 +58,8 @@ type dataColumnSampler1D struct {
     columnFromPeer map[peer.ID]map[uint64]bool
     // peerFromColumn maps a column to the peer responsible for custody.
     peerFromColumn map[uint64]map[peer.ID]bool
+    // columnVerifier verifies a column according to the specified requirements.
+    columnVerifier verification.NewColumnVerifier
 }

 // newDataColumnSampler1D creates a new 1D data column sampler.

@@ -65,6 +68,7 @@ func newDataColumnSampler1D(
     clock *startup.Clock,
     ctxMap ContextByteVersions,
     stateNotifier statefeed.Notifier,
+    colVerifier verification.NewColumnVerifier,
 ) *dataColumnSampler1D {
     numColumns := params.BeaconConfig().NumberOfColumns
     peerFromColumn := make(map[uint64]map[peer.ID]bool, numColumns)

@@ -79,6 +83,7 @@ func newDataColumnSampler1D(
         stateNotifier:  stateNotifier,
         columnFromPeer: make(map[peer.ID]map[uint64]bool),
         peerFromColumn: peerFromColumn,
+        columnVerifier: colVerifier,
     }
 }

@@ -426,7 +431,7 @@ func (d *dataColumnSampler1D) sampleDataColumnsFromPeer(
     }

     for _, roDataColumn := range roDataColumns {
-        if verifyColumn(roDataColumn, root, pid, requestedColumns) {
+        if verifyColumn(roDataColumn, root, pid, requestedColumns, d.columnVerifier) {
             retrievedColumns[roDataColumn.ColumnIndex] = true
         }
     }

@@ -500,6 +505,7 @@ func verifyColumn(
     root [32]byte,
     pid peer.ID,
     requestedColumns map[uint64]bool,
+    columnVerifier verification.NewColumnVerifier,
 ) bool {
     retrievedColumn := roDataColumn.ColumnIndex

@@ -528,38 +534,25 @@ func verifyColumn(
         return false
     }

+    vf := columnVerifier(roDataColumn, verification.SamplingColumnSidecarRequirements)
     // Filter out columns which did not pass the KZG inclusion proof verification.
-    if err := blocks.VerifyKZGInclusionProofColumn(roDataColumn); err != nil {
+    if err := vf.SidecarInclusionProven(); err != nil {
         log.WithFields(logrus.Fields{
             "peerID": pid,
             "root":   fmt.Sprintf("%#x", root),
             "index":  retrievedColumn,
-        }).Debug("Failed to verify KZG inclusion proof for retrieved column")
-
+        }).WithError(err).Debug("Failed to verify KZG inclusion proof for retrieved column")
         return false
     }

     // Filter out columns which did not pass the KZG proof verification.
-    verified, err := peerdas.VerifyDataColumnSidecarKZGProofs(roDataColumn)
-    if err != nil {
+    if err := vf.SidecarKzgProofVerified(); err != nil {
         log.WithFields(logrus.Fields{
             "peerID": pid,
             "root":   fmt.Sprintf("%#x", root),
             "index":  retrievedColumn,
-        }).Debug("Error when verifying KZG proof for retrieved column")
-
+        }).WithError(err).Debug("Failed to verify KZG proof for retrieved column")
         return false
     }

-    if !verified {
-        log.WithFields(logrus.Fields{
-            "peerID": pid,
-            "root":   fmt.Sprintf("%#x", root),
-            "index":  retrievedColumn,
-        }).Debug("Failed to verify KZG proof for retrieved column")
-
-        return false
-    }
-
     return true
 }
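For reference, the per-column checks that verifyColumn now delegates to the verifier can be read in isolation. A hypothetical helper (not part of the commit) showing the order of checks under SamplingColumnSidecarRequirements, using only names taken from the hunk above:

package example

import (
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
)

// checkSampledColumn runs the two checks a sampling caller relies on: the KZG
// commitment inclusion proof first, then the KZG proof itself.
func checkSampledColumn(newVerifier verification.NewColumnVerifier, col blocks.RODataColumn) bool {
    vf := newVerifier(col, verification.SamplingColumnSidecarRequirements)
    if err := vf.SidecarInclusionProven(); err != nil {
        return false // inclusion proof against the block's KZG commitments failed
    }
    if err := vf.SidecarKzgProofVerified(); err != nil {
        return false // KZG proof for the column did not verify
    }
    return true
}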
@@ -14,6 +14,8 @@ import (
     "github.com/libp2p/go-libp2p/core/crypto"
     "github.com/libp2p/go-libp2p/core/network"
     swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing"
+    "github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
+    "github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
     "github.com/sirupsen/logrus"

     kzg "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"

@@ -196,7 +198,12 @@ func setupDataColumnSamplerTest(t *testing.T, blobCount uint64) (*dataSamplerTes
         kzgProofs:          kzgProofs,
         dataColumnSidecars: dataColumnSidecars,
     }
-    sampler := newDataColumnSampler1D(p2pSvc, clock, test.ctxMap, nil)
+    clockSync := startup.NewClockSynchronizer()
+    require.NoError(t, clockSync.SetClock(clock))
+    iniWaiter := verification.NewInitializerWaiter(clockSync, nil, nil)
+    ini, err := iniWaiter.WaitForInitializer(context.Background())
+    require.NoError(t, err)
+    sampler := newDataColumnSampler1D(p2pSvc, clock, test.ctxMap, nil, newColumnVerifierFromInitializer(ini))

     return test, sampler
 }

@@ -11,6 +11,7 @@ import (

     "github.com/libp2p/go-libp2p/core/peer"
     "github.com/pkg/errors"
+    "github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
     "github.com/sirupsen/logrus"
     "go.opencensus.io/trace"

@@ -82,6 +83,8 @@ type blocksFetcherConfig struct {
     peerFilterCapacityWeight float64
     mode                     syncMode
     bs                       filesystem.BlobStorageSummarizer
     bv                       verification.NewBlobVerifier
+    cv                       verification.NewColumnVerifier
 }

 // blocksFetcher is a service to fetch chain data from peers.

@@ -98,6 +101,8 @@ type blocksFetcher struct {
     p2p             p2p.P2P
     db              db.ReadOnlyDatabase
     bs              filesystem.BlobStorageSummarizer
     bv              verification.NewBlobVerifier
+    cv              verification.NewColumnVerifier
     blocksPerPeriod uint64
     rateLimiter     *leakybucket.Collector
     peerLocks       map[peer.ID]*peerLock

@@ -157,6 +162,8 @@ func newBlocksFetcher(ctx context.Context, cfg *blocksFetcherConfig) *blocksFetc
         p2p:             cfg.p2p,
         db:              cfg.db,
         bs:              cfg.bs,
         bv:              cfg.bv,
+        cv:              cfg.cv,
         blocksPerPeriod: uint64(blocksPerPeriod),
         rateLimiter:     rateLimiter,
         peerLocks:       make(map[peer.ID]*peerLock),

@@ -957,6 +964,7 @@ func processRetrievedDataColumns(
     indicesFromRoot map[[fieldparams.RootLength]byte][]int,
     missingColumnsFromRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
     bwb []blocks.BlockWithROBlobs,
+    colVerifier verification.NewColumnVerifier,
 ) {
     retrievedColumnsFromRoot := make(map[[fieldparams.RootLength]byte]map[uint64]bool)

@@ -977,7 +985,7 @@ func processRetrievedDataColumns(
         }

         // Verify the data column.
-        if err := verify.ColumnAlignsWithBlock(dataColumn, blockFromRoot[root]); err != nil {
+        if err := verify.ColumnAlignsWithBlock(dataColumn, blockFromRoot[root], colVerifier); err != nil {
             // TODO: Should we downscore the peer for that?
             continue
         }

@@ -1072,7 +1080,7 @@ func (f *blocksFetcher) retrieveMissingDataColumnsFromPeers(ctx context.Context,
         }

         // Process the retrieved data columns.
-        processRetrievedDataColumns(roDataColumns, blockFromRoot, indicesFromRoot, missingColumnsFromRoot, bwb)
+        processRetrievedDataColumns(roDataColumns, blockFromRoot, indicesFromRoot, missingColumnsFromRoot, bwb, f.cv)

         if len(missingColumnsFromRoot) > 0 {
             for root, columns := range missingColumnsFromRoot {

@@ -32,6 +32,7 @@ import (
     p2ptest "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
     "github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
     beaconsync "github.com/prysmaticlabs/prysm/v5/beacon-chain/sync"
+    "github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
     "github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
     fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
     "github.com/prysmaticlabs/prysm/v5/config/params"

@@ -2094,6 +2095,11 @@ func TestFetchDataColumnsFromPeers(t *testing.T) {
             for _, roBlock := range roBlocks {
                 bwb = append(bwb, blocks.BlockWithROBlobs{Block: roBlock})
             }
+            clockSync := startup.NewClockSynchronizer()
+            require.NoError(t, clockSync.SetClock(clock))
+            iniWaiter := verification.NewInitializerWaiter(clockSync, nil, nil)
+            ini, err := iniWaiter.WaitForInitializer(ctx)
+            require.NoError(t, err)

             // Create the block fetcher.
             blocksFetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{

@@ -2101,6 +2107,7 @@ func TestFetchDataColumnsFromPeers(t *testing.T) {
                 ctxMap: map[[4]byte]int{{245, 165, 253, 66}: version.Deneb},
                 p2p:    p2pSvc,
                 bs:     blobStorageSummarizer,
+                cv:     newColumnVerifierFromInitializer(ini),
             })

             // Fetch the data columns from the peers.
@@ -11,6 +11,7 @@ import (
     "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
     "github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
     beaconsync "github.com/prysmaticlabs/prysm/v5/beacon-chain/sync"
+    "github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
     "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
     "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
     "github.com/prysmaticlabs/prysm/v5/time/slots"

@@ -71,6 +72,8 @@ type blocksQueueConfig struct {
     db   db.ReadOnlyDatabase
     mode syncMode
     bs   filesystem.BlobStorageSummarizer
     bv   verification.NewBlobVerifier
+    cv   verification.NewColumnVerifier
 }

 // blocksQueue is a priority queue that serves as a intermediary between block fetchers (producers)

@@ -113,6 +116,8 @@ func newBlocksQueue(ctx context.Context, cfg *blocksQueueConfig) *blocksQueue {
             db:    cfg.db,
             clock: cfg.clock,
             bs:    cfg.bs,
             bv:    cfg.bv,
+            cv:    cfg.cv,
         })
     }
     highestExpectedSlot := cfg.highestExpectedSlot

@@ -88,6 +88,8 @@ func (s *Service) startBlocksQueue(ctx context.Context, highestSlot primitives.S
         highestExpectedSlot: highestSlot,
         mode:                mode,
         bs:                  summarizer,
         bv:                  s.newBlobVerifier,
+        cv:                  s.newColumnVerifier,
     }
     queue := newBlocksQueue(ctx, cfg)
     if err := queue.start(); err != nil {

@@ -174,7 +176,8 @@ func (s *Service) processFetchedDataRegSync(
         return
     }
     if coreTime.PeerDASIsActive(startSlot) {
-        avs := das.NewLazilyPersistentStoreColumn(s.cfg.BlobStorage, emptyVerifier{}, s.cfg.P2P.NodeID())
+        bv := verification.NewDataColumnBatchVerifier(s.newColumnVerifier, verification.InitsyncColumnSidecarRequirements)
+        avs := das.NewLazilyPersistentStoreColumn(s.cfg.BlobStorage, bv, s.cfg.P2P.NodeID())
         batchFields := logrus.Fields{
             "firstSlot":        data.bwb[0].Block.Block().Slot(),
             "firstUnprocessed": bwb[0].Block.Block().Slot(),

@@ -358,7 +361,8 @@ func (s *Service) processBatchedBlocks(ctx context.Context, genesis time.Time,
     }
     var aStore das.AvailabilityStore
     if coreTime.PeerDASIsActive(first.Block().Slot()) {
-        avs := das.NewLazilyPersistentStoreColumn(s.cfg.BlobStorage, emptyVerifier{}, s.cfg.P2P.NodeID())
+        bv := verification.NewDataColumnBatchVerifier(s.newColumnVerifier, verification.InitsyncColumnSidecarRequirements)
+        avs := das.NewLazilyPersistentStoreColumn(s.cfg.BlobStorage, bv, s.cfg.P2P.NodeID())
         s.logBatchSyncStatus(genesis, first, len(bwb))
         for _, bb := range bwb {
             if len(bb.Columns) == 0 {

@@ -420,15 +424,3 @@ func (s *Service) isProcessedBlock(ctx context.Context, blk blocks.ROBlock) bool
     }
     return false
 }
-
-type emptyVerifier struct {
-}
-
-func (_ emptyVerifier) VerifiedRODataColumns(_ context.Context, _ blocks.ROBlock, cols []blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error) {
-    var verCols []blocks.VerifiedRODataColumn
-    for _, col := range cols {
-        vCol := blocks.NewVerifiedRODataColumn(col)
-        verCols = append(verCols, vCol)
-    }
-    return verCols, nil
-}
@@ -60,17 +60,18 @@ type Config struct {

 // Service service.
 type Service struct {
-    cfg             *Config
-    ctx             context.Context
-    cancel          context.CancelFunc
-    synced          *abool.AtomicBool
-    chainStarted    *abool.AtomicBool
-    counter         *ratecounter.RateCounter
-    genesisChan     chan time.Time
-    clock           *startup.Clock
-    verifierWaiter  *verification.InitializerWaiter
-    newBlobVerifier verification.NewBlobVerifier
-    ctxMap          sync.ContextByteVersions
+    cfg               *Config
+    ctx               context.Context
+    cancel            context.CancelFunc
+    synced            *abool.AtomicBool
+    chainStarted      *abool.AtomicBool
+    counter           *ratecounter.RateCounter
+    genesisChan       chan time.Time
+    clock             *startup.Clock
+    verifierWaiter    *verification.InitializerWaiter
+    newBlobVerifier   verification.NewBlobVerifier
+    newColumnVerifier verification.NewColumnVerifier
+    ctxMap            sync.ContextByteVersions
 }

 // Option is a functional option for the initial-sync Service.

@@ -151,6 +152,7 @@ func (s *Service) Start() {
         return
     }
     s.newBlobVerifier = newBlobVerifierFromInitializer(v)
+    s.newColumnVerifier = newColumnVerifierFromInitializer(v)

     gt := clock.GenesisTime()
     if gt.IsZero() {

@@ -454,7 +456,8 @@ func (s *Service) fetchOriginColumns(pids []peer.ID) error {
         if len(sidecars) != len(req) {
             continue
         }
-        avs := das.NewLazilyPersistentStoreColumn(s.cfg.BlobStorage, emptyVerifier{}, s.cfg.P2P.NodeID())
+        bv := verification.NewDataColumnBatchVerifier(s.newColumnVerifier, verification.InitsyncColumnSidecarRequirements)
+        avs := das.NewLazilyPersistentStoreColumn(s.cfg.BlobStorage, bv, s.cfg.P2P.NodeID())
         current := s.clock.CurrentSlot()
         if err := avs.PersistColumns(current, sidecars...); err != nil {
             return err

@@ -481,3 +484,9 @@ func newBlobVerifierFromInitializer(ini *verification.Initializer) verification.
         return ini.NewBlobVerifier(b, reqs)
     }
 }
+
+func newColumnVerifierFromInitializer(ini *verification.Initializer) verification.NewColumnVerifier {
+    return func(d blocks.RODataColumn, reqs []verification.Requirement) verification.DataColumnVerifier {
+        return ini.NewColumnVerifier(d, reqs)
+    }
+}

@@ -201,7 +201,7 @@ func (s *Service) sendAndSaveDataColumnSidecars(ctx context.Context, request typ
         return err
     }
     for _, sidecar := range sidecars {
-        if err := verify.ColumnAlignsWithBlock(sidecar, RoBlock); err != nil {
+        if err := verify.ColumnAlignsWithBlock(sidecar, RoBlock, s.newColumnVerifier); err != nil {
             return err
         }
         log.WithFields(columnFields(sidecar)).Debug("Received data column sidecar RPC")

@@ -362,7 +362,7 @@ func (s *Service) startTasksPostInitialSync() {

     // Start data columns sampling if peerDAS is enabled.
     if params.PeerDASEnabled() {
-        s.sampler = newDataColumnSampler1D(s.cfg.p2p, s.cfg.clock, s.ctxMap, s.cfg.stateNotifier)
+        s.sampler = newDataColumnSampler1D(s.cfg.p2p, s.cfg.clock, s.ctxMap, s.cfg.stateNotifier, s.newColumnVerifier)
         go s.sampler.Run(s.ctx)
     }

@@ -6,7 +6,7 @@ go_library(
     importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/sync/verify",
     visibility = ["//visibility:public"],
     deps = [
-        "//beacon-chain/core/peerdas:go_default_library",
+        "//beacon-chain/verification:go_default_library",
         "//config/fieldparams:go_default_library",
         "//consensus-types/blocks:go_default_library",
         "//encoding/bytesutil:go_default_library",

@@ -4,7 +4,7 @@ import (
     "reflect"

     "github.com/pkg/errors"
-    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
+    "github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
     fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
     "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
     "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"

@@ -52,15 +52,11 @@ func BlobAlignsWithBlock(blob blocks.ROBlob, block blocks.ROBlock) error {
     return nil
 }

-func ColumnAlignsWithBlock(col blocks.RODataColumn, block blocks.ROBlock) error {
+func ColumnAlignsWithBlock(col blocks.RODataColumn, block blocks.ROBlock, colVerifier verification.NewColumnVerifier) error {
     if block.Version() < version.Deneb {
         return nil
     }

-    if col.ColumnIndex >= fieldparams.NumberOfColumns {
-        return errors.Wrapf(ErrIncorrectColumnIndex, "index %d exceeds NUMBERS_OF_COLUMN %d", col.ColumnIndex, fieldparams.NumberOfColumns)
-    }
-
     if col.BlockRoot() != block.Root() {
         return ErrColumnBlockMisaligned
     }

@@ -74,21 +70,19 @@ func ColumnAlignsWithBlock(col blocks.RODataColumn, block blocks.ROBlock) error
     if !reflect.DeepEqual(commitments, col.KzgCommitments) {
         return errors.Wrapf(ErrMismatchedColumnCommitments, "commitment %#v != block commitment %#v for block root %#x at slot %d ", col.KzgCommitments, commitments, block.Root(), col.Slot())
     }
+    vf := colVerifier(col, verification.InitsyncColumnSidecarRequirements)
+    if err := vf.DataColumnIndexInBounds(); err != nil {
+        return err
+    }

     // Filter out columns which did not pass the KZG inclusion proof verification.
-    if err := blocks.VerifyKZGInclusionProofColumn(col); err != nil {
+    if err := vf.SidecarInclusionProven(); err != nil {
         return err
     }

     // Filter out columns which did not pass the KZG proof verification.
-    verified, err := peerdas.VerifyDataColumnSidecarKZGProofs(col)
-    if err != nil {
+    if err := vf.SidecarKzgProofVerified(); err != nil {
         return err
     }
-
-    if !verified {
-        return errors.New("data column sidecar KZG proofs failed verification")
-    }
-
     return nil
 }
@@ -5,6 +5,7 @@ import (

     "github.com/pkg/errors"
     "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
+    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
     "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
     "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
 )

@@ -92,3 +93,85 @@ func (batch *BlobBatchVerifier) verifyOneBlob(sc blocks.ROBlob) (blocks.Verified

     return bv.VerifiedROBlob()
 }
+
+// NewDataColumnBatchVerifier initializes a data column batch verifier. It requires the caller to correctly specify
+// verification Requirements and to also pass in a NewColumnVerifier, which is a callback function that
+// returns a new ColumnVerifier for handling a single column in the batch.
+func NewDataColumnBatchVerifier(newVerifier NewColumnVerifier, reqs []Requirement) *DataColumnBatchVerifier {
+    return &DataColumnBatchVerifier{
+        verifyKzg:   peerdas.VerifyDataColumnSidecarKZGProofs,
+        newVerifier: newVerifier,
+        reqs:        reqs,
+    }
+}
+
+// DataColumnBatchVerifier solves problems that come from verifying batches of data columns from RPC.
+// First: we only update forkchoice after the entire batch has completed, so the n+1 elements in the batch
+// won't be in forkchoice yet.
+// Second: it is more efficient to batch some verifications, like kzg commitment verification. Batch adds a
+// method to ColumnVerifier to verify the kzg commitments of all data column sidecars for a block together, then using the cached
+// result of the batch verification when verifying the individual columns.
+type DataColumnBatchVerifier struct {
+    verifyKzg   rodataColumnCommitmentVerifier
+    newVerifier NewColumnVerifier
+    reqs        []Requirement
+}
+
+// VerifiedRODataColumns satisfies the das.ColumnBatchVerifier interface, used by das.AvailabilityStore.
+func (batch *DataColumnBatchVerifier) VerifiedRODataColumns(ctx context.Context, blk blocks.ROBlock, scs []blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error) {
+    if len(scs) == 0 {
+        return nil, nil
+    }
+    blkSig := blk.Signature()
+    // We assume the proposer is validated wrt the block in batch block processing before performing the DA check.
+    // So at this stage we just need to make sure the value being signed and signature bytes match the block.
+    for i := range scs {
+        blobSig := bytesutil.ToBytes96(scs[i].SignedBlockHeader.Signature)
+        if blkSig != blobSig {
+            return nil, ErrBatchSignatureMismatch
+        }
+        // Extra defensive check to make sure the roots match. This should be unnecessary in practice since the root from
+        // the block should be used as the lookup key into the cache of sidecars.
+        if blk.Root() != scs[i].BlockRoot() {
+            return nil, ErrBatchBlockRootMismatch
+        }
+    }
+    // Verify commitments for all columns at once. verifyOneColumn assumes it is only called once this check succeeds.
+    for i := range scs {
+        verified, err := batch.verifyKzg(scs[i])
+        if err != nil {
+            return nil, err
+        }
+        if !verified {
+            return nil, ErrSidecarKzgProofInvalid
+        }
+    }
+
+    vs := make([]blocks.VerifiedRODataColumn, len(scs))
+    for i := range scs {
+        vb, err := batch.verifyOneColumn(scs[i])
+        if err != nil {
+            return nil, err
+        }
+        vs[i] = vb
+    }
+    return vs, nil
+}
+
+func (batch *DataColumnBatchVerifier) verifyOneColumn(sc blocks.RODataColumn) (blocks.VerifiedRODataColumn, error) {
+    vb := blocks.VerifiedRODataColumn{}
+    bv := batch.newVerifier(sc, batch.reqs)
+    // We can satisfy the following 2 requirements immediately because VerifiedROColumns always verifies commitments
+    // and block signature for all columns in the batch before calling verifyOneColumn.
+    bv.SatisfyRequirement(RequireSidecarKzgProofVerified)
+    bv.SatisfyRequirement(RequireValidProposerSignature)
+
+    if err := bv.DataColumnIndexInBounds(); err != nil {
+        return vb, err
+    }
+    if err := bv.SidecarInclusionProven(); err != nil {
+        return vb, err
+    }
+
+    return bv.VerifiedRODataColumn()
+}
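A short sketch of how the batch verifier defined above is combined with the column availability store, mirroring the init-sync hunks earlier in this diff. The constructor calls and the InitsyncColumnSidecarRequirements list come from the commit; the concrete parameter types (filesystem.BlobStorage for the blob storage, enode.ID for the node identity) and the das import path are assumptions for illustration:

package example

import (
    "github.com/ethereum/go-ethereum/p2p/enode"

    "github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
)

// newColumnAvailabilityStore builds the store used by the init-sync paths: each batch of
// data column sidecars is checked by a DataColumnBatchVerifier (block signature and root
// match plus batched KZG proof verification) before the columns are persisted.
// Parameter types are assumed; see the note above.
func newColumnAvailabilityStore(cv verification.NewColumnVerifier, storage *filesystem.BlobStorage, nodeID enode.ID) das.AvailabilityStore {
    bv := verification.NewDataColumnBatchVerifier(cv, verification.InitsyncColumnSidecarRequirements)
    return das.NewLazilyPersistentStoreColumn(storage, bv, nodeID)
}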
@@ -40,11 +40,9 @@ var GossipColumnSidecarRequirements = requirementList(allColumnSidecarRequiremen
 var SpectestColumnSidecarRequirements = requirementList(GossipColumnSidecarRequirements).excluding(
     RequireSidecarParentSeen, RequireSidecarParentValid)

-// InitsyncColumnSidecarRequirements is the list of verification requirements to be used by the init-sync service
-// for batch-mode syncing. Because we only perform batch verification as part of the IsDataAvailable method
-// for data columns after the block has been verified, and the blobs to be verified are keyed in the cache by the
-// block root, the list of required verifications is much shorter than gossip.
-var InitsyncColumnSidecarRequirements = requirementList(GossipColumnSidecarRequirements).excluding(
+// SamplingColumnSidecarRequirements are the column verification requirements that are necessary for columns
+// received via sampling.
+var SamplingColumnSidecarRequirements = requirementList(allColumnSidecarRequirements).excluding(
     RequireNotFromFutureSlot,
     RequireSlotAboveFinalized,
     RequireSidecarParentSeen,

@@ -54,6 +52,12 @@ var InitsyncColumnSidecarRequirements = requirementList(GossipColumnSidecarRequi
     RequireSidecarProposerExpected,
 )

+// InitsyncColumnSidecarRequirements is the list of verification requirements to be used by the init-sync service
+// for batch-mode syncing. Because we only perform batch verification as part of the IsDataAvailable method
+// for data columns after the block has been verified, and the blobs to be verified are keyed in the cache by the
+// block root, the list of required verifications is much shorter than gossip.
+var InitsyncColumnSidecarRequirements = requirementList(SamplingColumnSidecarRequirements).excluding()
+
 // BackfillColumnSidecarRequirements is the same as InitsyncColumnSidecarRequirements.
 var BackfillColumnSidecarRequirements = requirementList(InitsyncColumnSidecarRequirements).excluding()

@@ -13,8 +13,6 @@ import (
     "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
     "github.com/prysmaticlabs/prysm/v5/network/forks"
     ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
-    "github.com/prysmaticlabs/prysm/v5/time/slots"
-    log "github.com/sirupsen/logrus"
 )

 // Forkchoicer represents the forkchoice methods that the verifiers need.

@@ -70,37 +68,6 @@ func (ini *Initializer) NewColumnVerifier(d blocks.RODataColumn, reqs []Requirem
     }
 }

-func (ini *Initializer) VerifyProposer(ctx context.Context, dc blocks.RODataColumn) error {
-    e := slots.ToEpoch(dc.Slot())
-    if e > 0 {
-        e = e - 1
-    }
-    r, err := ini.shared.fc.TargetRootForEpoch(dc.ParentRoot(), e)
-    if err != nil {
-        return ErrSidecarUnexpectedProposer
-    }
-    c := &forkchoicetypes.Checkpoint{Root: r, Epoch: e}
-    idx, cached := ini.shared.pc.Proposer(c, dc.Slot())
-    if !cached {
-        pst, err := ini.shared.sr.StateByRoot(ctx, dc.ParentRoot())
-        if err != nil {
-            log.WithError(err).Debug("state replay to parent_root failed")
-            return ErrSidecarUnexpectedProposer
-        }
-        idx, err = ini.shared.pc.ComputeProposer(ctx, dc.ParentRoot(), dc.Slot(), pst)
-        if err != nil {
-            log.WithError(err).Debug("error computing proposer index from parent state")
-            return ErrSidecarUnexpectedProposer
-        }
-    }
-    if idx != dc.ProposerIndex() {
-        log.WithError(ErrSidecarUnexpectedProposer).WithField("expectedProposer", idx).
-            Debug("unexpected blob proposer")
-        return ErrSidecarUnexpectedProposer
-    }
-    return nil
-}
-
 // InitializerWaiter provides an Initializer once all dependent resources are ready
 // via the WaitForInitializer method.
 type InitializerWaiter struct {