Implement data column sidecars verifications. (#15232)
* Logging: Add `DataColumnFields`.
* `RODataColumn`: Implement `Slot`, `ParentRoot` and `ProposerIndex`.
* Implement verification for data column sidecars.
* Add changelog.
* Fix Terence's comment.
* Fix Terence's comment.
* `SidecarProposerExpected`: Stop returning "sidecar was not proposed by the expected proposer_index" when there is any error in the function.
* `SidecarProposerExpected` & `ValidProposerSignature`: Cache the parent state.
* `VerifyDataColumnsSidecarKZGProofs`: Add benchmarks.
* Fix Kasey's comment.
* Add additional benchmark.
* Fix Kasey's comment.
* Fix Kasey's comment.
* Fix Kasey's comment.
* Fix Preston's comment.
* Fix Preston's comment.
* Fix Preston's comment.
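Editorial note: the diff below adds a batch verifier for data column sidecars that mirrors the existing blob sidecar verifier. For orientation, here is a minimal, hypothetical in-package sketch of how the new API chains together. The function name, its arguments (ini, columns, subTopicFormat, expectedTopics) and the error handling are illustrative assumptions; the verifier methods and GossipDataColumnSidecarRequirements come from the code in this commit.

package verification

import (
	"context"

	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
)

// gossipVerifySketch is a hypothetical helper that runs the checks in the order
// GossipDataColumnSidecarRequirements lists them. The caller is assumed to provide
// the Initializer, the columns, and the expected gossip topics.
func gossipVerifySketch(ctx context.Context, ini Initializer, columns []blocks.RODataColumn, subTopicFormat string, expectedTopics []string) ([]blocks.VerifiedRODataColumn, error) {
	v := ini.NewDataColumnsVerifier(columns, GossipDataColumnSidecarRequirements)

	// Structural fields and gossip topic.
	if err := v.ValidFields(); err != nil {
		return nil, err
	}
	if err := v.CorrectSubnet(subTopicFormat, expectedTopics); err != nil {
		return nil, err
	}

	// Timing and finality.
	if err := v.NotFromFutureSlot(); err != nil {
		return nil, err
	}
	if err := v.SlotAboveFinalized(); err != nil {
		return nil, err
	}

	// Proposer signature and parent block checks. Passing nil callbacks means
	// parent-seen falls back to forkchoice and parent-valid always passes.
	if err := v.ValidProposerSignature(ctx); err != nil {
		return nil, err
	}
	if err := v.SidecarParentSeen(nil); err != nil {
		return nil, err
	}
	if err := v.SidecarParentValid(nil); err != nil {
		return nil, err
	}
	if err := v.SidecarParentSlotLower(); err != nil {
		return nil, err
	}
	if err := v.SidecarDescendsFromFinalized(); err != nil {
		return nil, err
	}

	// Inclusion proof, KZG proofs and expected proposer.
	if err := v.SidecarInclusionProven(); err != nil {
		return nil, err
	}
	if err := v.SidecarKzgProofVerified(); err != nil {
		return nil, err
	}
	if err := v.SidecarProposerExpected(ctx); err != nil {
		return nil, err
	}

	// Succeeds only once every requirement above has been recorded as satisfied.
	return v.VerifiedRODataColumns()
}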
@@ -2,6 +2,7 @@ package peerdas_test
import (
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
@@ -52,51 +53,15 @@ func TestVerifyDataColumnSidecar(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
|
||||
const (
|
||||
blobCount = 6
|
||||
seed = 0
|
||||
)
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
generateSidecars := func(t *testing.T) []*ethpb.DataColumnSidecar {
|
||||
const blobCount = int64(6)
|
||||
|
||||
dbBlock := util.NewBeaconBlockDeneb()
|
||||
|
||||
commitments := make([][]byte, 0, blobCount)
|
||||
blobs := make([]kzg.Blob, 0, blobCount)
|
||||
|
||||
for i := range blobCount {
|
||||
blob := getRandBlob(i)
|
||||
commitment, _, err := generateCommitmentAndProof(&blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
commitments = append(commitments, commitment[:])
|
||||
blobs = append(blobs, blob)
|
||||
}
|
||||
|
||||
dbBlock.Block.Body.BlobKzgCommitments = commitments
|
||||
sBlock, err := blocks.NewSignedBeaconBlock(dbBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
cellsAndProofs := util.GenerateCellsAndProofs(t, blobs)
|
||||
sidecars, err := peerdas.DataColumnSidecars(sBlock, cellsAndProofs)
|
||||
require.NoError(t, err)
|
||||
|
||||
return sidecars
|
||||
}
|
||||
|
||||
generateRODataColumnSidecars := func(t *testing.T, sidecars []*ethpb.DataColumnSidecar) []blocks.RODataColumn {
|
||||
roDataColumnSidecars := make([]blocks.RODataColumn, 0, len(sidecars))
|
||||
for _, sidecar := range sidecars {
|
||||
roCol, err := blocks.NewRODataColumn(sidecar)
|
||||
require.NoError(t, err)
|
||||
|
||||
roDataColumnSidecars = append(roDataColumnSidecars, roCol)
|
||||
}
|
||||
|
||||
return roDataColumnSidecars
|
||||
}
|
||||
|
||||
t.Run("invalid proof", func(t *testing.T) {
|
||||
-sidecars := generateSidecars(t)
+sidecars := generateRandomSidecars(t, seed, blobCount)
|
||||
sidecars[0].Column[0][0]++ // It is OK to overflow
|
||||
roDataColumnSidecars := generateRODataColumnSidecars(t, sidecars)
|
||||
|
||||
@@ -105,7 +70,7 @@ func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
-sidecars := generateSidecars(t)
+sidecars := generateRandomSidecars(t, seed, blobCount)
|
||||
roDataColumnSidecars := generateRODataColumnSidecars(t, sidecars)
|
||||
|
||||
err := peerdas.VerifyDataColumnsSidecarKZGProofs(roDataColumnSidecars)
|
||||
@@ -281,6 +246,96 @@ func TestCustodyGroupCountFromRecord(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkVerifyDataColumnSidecarKZGProofs_SameCommitments_NoBatch(b *testing.B) {
|
||||
const blobCount = 12
|
||||
err := kzg.Start()
|
||||
require.NoError(b, err)
|
||||
|
||||
b.StopTimer()
|
||||
b.ResetTimer()
|
||||
for i := range int64(b.N) {
|
||||
// Generate new random sidecars to ensure the KZG backend does not cache anything.
|
||||
sidecars := generateRandomSidecars(b, i, blobCount)
|
||||
roDataColumnSidecars := generateRODataColumnSidecars(b, sidecars)
|
||||
|
||||
for _, sidecar := range roDataColumnSidecars {
|
||||
sidecars := []blocks.RODataColumn{sidecar}
|
||||
b.StartTimer()
|
||||
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
|
||||
b.StopTimer()
|
||||
require.NoError(b, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkVerifyDataColumnSidecarKZGProofs_DiffCommitments_Batch(b *testing.B) {
|
||||
const blobCount = 12
|
||||
numberOfColumns := int64(params.BeaconConfig().NumberOfColumns)
|
||||
err := kzg.Start()
|
||||
require.NoError(b, err)
|
||||
|
||||
columnsCounts := []int64{1, 2, 4, 8, 16, 32, 64, 128}
|
||||
|
||||
for i, columnsCount := range columnsCounts {
|
||||
b.Run(fmt.Sprintf("columnsCount_%d", columnsCount), func(b *testing.B) {
|
||||
b.StopTimer()
|
||||
b.ResetTimer()
|
||||
|
||||
for j := range int64(b.N) {
|
||||
allSidecars := make([]*ethpb.DataColumnSidecar, 0, numberOfColumns)
|
||||
for k := int64(0); k < numberOfColumns; k += columnsCount {
|
||||
// Use different seeds to generate different blobs/commitments
|
||||
seed := int64(b.N*i) + numberOfColumns*j + blobCount*k
|
||||
sidecars := generateRandomSidecars(b, seed, blobCount)
|
||||
|
||||
// Pick sidecars.
|
||||
allSidecars = append(allSidecars, sidecars[k:k+columnsCount]...)
|
||||
}
|
||||
|
||||
roDataColumnSidecars := generateRODataColumnSidecars(b, allSidecars)
|
||||
|
||||
b.StartTimer()
|
||||
err := peerdas.VerifyDataColumnsSidecarKZGProofs(roDataColumnSidecars)
|
||||
b.StopTimer()
|
||||
require.NoError(b, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkVerifyDataColumnSidecarKZGProofs_DiffCommitments_Batch4(b *testing.B) {
|
||||
const (
|
||||
blobCount = 12
|
||||
|
||||
// columnsCount*batchCount = 128
|
||||
columnsCount = 4
|
||||
batchCount = 32
|
||||
)
|
||||
|
||||
err := kzg.Start()
|
||||
require.NoError(b, err)
|
||||
|
||||
b.StopTimer()
|
||||
b.ResetTimer()
|
||||
|
||||
for i := range int64(b.N) {
|
||||
allSidecars := make([][]blocks.RODataColumn, 0, batchCount)
|
||||
for j := range int64(batchCount) {
|
||||
// Use different seeds to generate different blobs/commitments
|
||||
sidecars := generateRandomSidecars(b, int64(batchCount)*i+j*blobCount, blobCount)
|
||||
roDataColumnSidecars := generateRODataColumnSidecars(b, sidecars[:columnsCount])
|
||||
allSidecars = append(allSidecars, roDataColumnSidecars)
|
||||
}
|
||||
|
||||
for _, sidecars := range allSidecars {
|
||||
b.StartTimer()
|
||||
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
|
||||
b.StopTimer()
|
||||
require.NoError(b, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func createTestSidecar(t *testing.T, index uint64, column, kzgCommitments, kzgProofs [][]byte) blocks.RODataColumn {
|
||||
pbSignedBeaconBlock := util.NewBeaconBlockDeneb()
|
||||
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(pbSignedBeaconBlock)
|
||||
@@ -302,3 +357,42 @@ func createTestSidecar(t *testing.T, index uint64, column, kzgCommitments, kzgPr
|
||||
|
||||
return roSidecar
|
||||
}
|
||||
|
||||
func generateRandomSidecars(t testing.TB, seed, blobCount int64) []*ethpb.DataColumnSidecar {
|
||||
dbBlock := util.NewBeaconBlockDeneb()
|
||||
|
||||
commitments := make([][]byte, 0, blobCount)
|
||||
blobs := make([]kzg.Blob, 0, blobCount)
|
||||
|
||||
for i := range blobCount {
|
||||
subSeed := seed + i
|
||||
blob := getRandBlob(subSeed)
|
||||
commitment, err := generateCommitment(&blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
commitments = append(commitments, commitment[:])
|
||||
blobs = append(blobs, blob)
|
||||
}
|
||||
|
||||
dbBlock.Block.Body.BlobKzgCommitments = commitments
|
||||
sBlock, err := blocks.NewSignedBeaconBlock(dbBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
cellsAndProofs := util.GenerateCellsAndProofs(t, blobs)
|
||||
sidecars, err := peerdas.DataColumnSidecars(sBlock, cellsAndProofs)
|
||||
require.NoError(t, err)
|
||||
|
||||
return sidecars
|
||||
}
|
||||
|
||||
func generateRODataColumnSidecars(t testing.TB, sidecars []*ethpb.DataColumnSidecar) []blocks.RODataColumn {
|
||||
roDataColumnSidecars := make([]blocks.RODataColumn, 0, len(sidecars))
|
||||
for _, sidecar := range sidecars {
|
||||
roCol, err := blocks.NewRODataColumn(sidecar)
|
||||
require.NoError(t, err)
|
||||
|
||||
roDataColumnSidecars = append(roDataColumnSidecars, roCol)
|
||||
}
|
||||
|
||||
return roDataColumnSidecars
|
||||
}
|
||||
|
||||
@@ -8,18 +8,30 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
"github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
|
||||
GoKZG "github.com/crate-crypto/go-kzg-4844"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func generateCommitment(blob *kzg.Blob) (*kzg.Commitment, error) {
|
||||
commitment, err := kzg.BlobToKZGCommitment(blob)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "blob to kzg commitment")
|
||||
}
|
||||
|
||||
return &commitment, nil
|
||||
}
|
||||
|
||||
func generateCommitmentAndProof(blob *kzg.Blob) (*kzg.Commitment, *kzg.Proof, error) {
|
||||
commitment, err := kzg.BlobToKZGCommitment(blob)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
proof, err := kzg.ComputeBlobKZGProof(blob, commitment)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return &commitment, &proof, err
|
||||
}
|
||||
|
||||
|
||||
@@ -6,6 +6,7 @@ go_library(
|
||||
"batch.go",
|
||||
"blob.go",
|
||||
"cache.go",
|
||||
"data_column.go",
|
||||
"error.go",
|
||||
"fake.go",
|
||||
"filesystem.go",
|
||||
@@ -21,12 +22,14 @@ go_library(
|
||||
deps = [
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/forkchoice/types:go_default_library",
|
||||
"//beacon-chain/startup:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//cache/lru:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
@@ -51,16 +54,21 @@ go_test(
|
||||
"batch_test.go",
|
||||
"blob_test.go",
|
||||
"cache_test.go",
|
||||
"data_column_test.go",
|
||||
"initializer_test.go",
|
||||
"result_test.go",
|
||||
"verification_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/forkchoice/types:go_default_library",
|
||||
"//beacon-chain/startup:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
|
||||
@@ -32,7 +32,7 @@ type BlobBatchVerifier struct {
|
||||
}
|
||||
|
||||
// VerifiedROBlobs satisfies the das.BlobBatchVerifier interface, used by das.AvailabilityStore.
|
||||
-func (batch *BlobBatchVerifier) VerifiedROBlobs(ctx context.Context, blk blocks.ROBlock, scs []blocks.ROBlob) ([]blocks.VerifiedROBlob, error) {
+func (batch *BlobBatchVerifier) VerifiedROBlobs(_ context.Context, blk blocks.ROBlock, scs []blocks.ROBlob) ([]blocks.VerifiedROBlob, error) {
|
||||
if len(scs) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
@@ -25,6 +25,10 @@ const (
|
||||
RequireSidecarInclusionProven
|
||||
RequireSidecarKzgProofVerified
|
||||
RequireSidecarProposerExpected
|
||||
|
||||
// Data columns specific.
|
||||
RequireValidFields
|
||||
RequireCorrectSubnet
|
||||
)
|
||||
|
||||
var allBlobSidecarRequirements = []Requirement{
|
||||
|
||||
@@ -21,7 +21,8 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultSignatureCacheSize = 256
DefaultInclusionProofCacheSize = 2
|
||||
)
|
||||
|
||||
// ValidatorAtIndexer defines the method needed to retrieve a validator by its index.
|
||||
@@ -73,6 +74,14 @@ type sigCache struct {
|
||||
getFork forkLookup
|
||||
}
|
||||
|
||||
type inclusionProofCache struct {
|
||||
*lru.Cache
|
||||
}
|
||||
|
||||
func newInclusionProofCache(size int) *inclusionProofCache {
|
||||
return &inclusionProofCache{Cache: lruwrpr.New(size)}
|
||||
}
|
||||
|
||||
// VerifySignature verifies the given signature data against the key obtained via ValidatorAtIndexer.
|
||||
func (c *sigCache) VerifySignature(sig SignatureData, v ValidatorAtIndexer) (err error) {
|
||||
defer func() {
beacon-chain/verification/data_column.go (normal file, 535 lines)
@@ -0,0 +1,535 @@
package verification
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/logging"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
// GossipDataColumnSidecarRequirements defines the set of requirements that DataColumnSidecars received on gossip
|
||||
// must satisfy in order to upgrade an RODataColumn to a VerifiedRODataColumn.
|
||||
// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/p2p-interface.md#data_column_sidecar_subnet_id
|
||||
GossipDataColumnSidecarRequirements = []Requirement{
|
||||
RequireValidFields,
|
||||
RequireCorrectSubnet,
|
||||
RequireNotFromFutureSlot,
|
||||
RequireSlotAboveFinalized,
|
||||
RequireValidProposerSignature,
|
||||
RequireSidecarParentSeen,
|
||||
RequireSidecarParentValid,
|
||||
RequireSidecarParentSlotLower,
|
||||
RequireSidecarDescendsFromFinalized,
|
||||
RequireSidecarInclusionProven,
|
||||
RequireSidecarKzgProofVerified,
|
||||
RequireSidecarProposerExpected,
|
||||
}
|
||||
|
||||
errColumnsInvalid = errors.New("data columns failed verification")
|
||||
errBadTopicLength = errors.New("topic length is invalid")
|
||||
errBadTopic = errors.New("topic is not of the one expected")
|
||||
)
|
||||
|
||||
type (
|
||||
RODataColumnsVerifier struct {
|
||||
*sharedResources
|
||||
results *results
|
||||
dataColumns []blocks.RODataColumn
|
||||
verifyDataColumnsCommitment rodataColumnsCommitmentVerifier
|
||||
stateByRoot map[[fieldparams.RootLength]byte]state.BeaconState
|
||||
}
|
||||
|
||||
rodataColumnsCommitmentVerifier func([]blocks.RODataColumn) error
|
||||
)
|
||||
|
||||
var _ DataColumnsVerifier = &RODataColumnsVerifier{}
|
||||
|
||||
// VerifiedRODataColumns "upgrades" wrapped RODataColumns to VerifiedRODataColumns.
|
||||
// If any of the verifications run against the data columns failed, or some required verifications
|
||||
// were not run, an error will be returned.
|
||||
func (dv *RODataColumnsVerifier) VerifiedRODataColumns() ([]blocks.VerifiedRODataColumn, error) {
|
||||
if !dv.results.allSatisfied() {
|
||||
return nil, dv.results.errors(errColumnsInvalid)
|
||||
}
|
||||
|
||||
verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(dv.dataColumns))
|
||||
for _, dataColumn := range dv.dataColumns {
|
||||
verifiedRODataColumn := blocks.NewVerifiedRODataColumn(dataColumn)
|
||||
verifiedRODataColumns = append(verifiedRODataColumns, verifiedRODataColumn)
|
||||
}
|
||||
|
||||
return verifiedRODataColumns, nil
|
||||
}
|
||||
|
||||
// SatisfyRequirement allows the caller to assert that a requirement has been satisfied.
|
||||
// This gives us a way to tick the box for a requirement where the usual method would be impractical.
|
||||
// For example, when batch syncing, forkchoice is only updated at the end of the batch. So the checks that use
|
||||
// forkchoice, like descends from finalized or parent seen, would necessarily fail. Allowing the caller to
|
||||
// assert the requirement has been satisfied ensures we have an easy way to audit which piece of code is satisfying
|
||||
// a requirement outside of this package.
|
||||
func (dv *RODataColumnsVerifier) SatisfyRequirement(req Requirement) {
|
||||
dv.recordResult(req, nil)
|
||||
}
|
||||
|
||||
func (dv *RODataColumnsVerifier) recordResult(req Requirement, err *error) {
|
||||
if err == nil || *err == nil {
|
||||
dv.results.record(req, nil)
|
||||
return
|
||||
}
|
||||
dv.results.record(req, *err)
|
||||
}
|
||||
|
||||
func (dv *RODataColumnsVerifier) ValidFields() (err error) {
|
||||
if ok, err := dv.results.cached(RequireValidFields); ok {
|
||||
return err
|
||||
}
|
||||
|
||||
defer dv.recordResult(RequireValidFields, &err)
|
||||
|
||||
for _, dataColumn := range dv.dataColumns {
|
||||
if err := peerdas.VerifyDataColumnSidecar(dataColumn); err != nil {
|
||||
return columnErrBuilder(errors.Wrap(err, "verify data column sidecar"))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dv *RODataColumnsVerifier) CorrectSubnet(dataColumnSidecarSubTopic string, expectedTopics []string) (err error) {
|
||||
if ok, err := dv.results.cached(RequireCorrectSubnet); ok {
|
||||
return err
|
||||
}
|
||||
|
||||
defer dv.recordResult(RequireCorrectSubnet, &err)
|
||||
|
||||
if len(expectedTopics) != len(dv.dataColumns) {
|
||||
return columnErrBuilder(errBadTopicLength)
|
||||
}
|
||||
|
||||
for i := range dv.dataColumns {
|
||||
// We add a trailing slash to prevent, for example,
|
||||
// an actual topic /eth2/9dc47cc6/data_column_sidecar_1
|
||||
// from matching /eth2/9dc47cc6/data_column_sidecar_120
|
||||
expectedTopic := expectedTopics[i] + "/"
|
||||
|
||||
actualSubnet := peerdas.ComputeSubnetForDataColumnSidecar(dv.dataColumns[i].Index)
|
||||
actualSubTopic := fmt.Sprintf(dataColumnSidecarSubTopic, actualSubnet)
|
||||
|
||||
if !strings.Contains(expectedTopic, actualSubTopic) {
|
||||
return columnErrBuilder(errBadTopic)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dv *RODataColumnsVerifier) NotFromFutureSlot() (err error) {
|
||||
if ok, err := dv.results.cached(RequireNotFromFutureSlot); ok {
|
||||
return err
|
||||
}
|
||||
|
||||
defer dv.recordResult(RequireNotFromFutureSlot, &err)
|
||||
|
||||
// Retrieve the current slot.
|
||||
currentSlot := dv.clock.CurrentSlot()
|
||||
|
||||
// Get the current time.
|
||||
now := dv.clock.Now()
|
||||
|
||||
// Retrieve the maximum gossip clock disparity.
|
||||
maximumGossipClockDisparity := params.BeaconConfig().MaximumGossipClockDisparityDuration()
|
||||
|
||||
for _, dataColumn := range dv.dataColumns {
|
||||
// Extract the data column slot.
|
||||
dataColumnSlot := dataColumn.Slot()
|
||||
|
||||
// Skip if the data column slot is the same as the current slot.
|
||||
if currentSlot == dataColumnSlot {
|
||||
continue
|
||||
}
|
||||
|
||||
// earliestStart represents the time the slot starts, lowered by MAXIMUM_GOSSIP_CLOCK_DISPARITY.
|
||||
// We lower the time by MAXIMUM_GOSSIP_CLOCK_DISPARITY in case system time is running slightly behind real time.
|
||||
earliestStart := dv.clock.SlotStart(dataColumnSlot).Add(-maximumGossipClockDisparity)
|
||||
|
||||
// If the system time is still before earliestStart, we consider the column from a future slot and return an error.
|
||||
if now.Before(earliestStart) {
|
||||
return columnErrBuilder(ErrFromFutureSlot)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dv *RODataColumnsVerifier) SlotAboveFinalized() (err error) {
|
||||
if ok, err := dv.results.cached(RequireSlotAboveFinalized); ok {
|
||||
return err
|
||||
}
|
||||
|
||||
defer dv.recordResult(RequireSlotAboveFinalized, &err)
|
||||
|
||||
// Retrieve the finalized checkpoint.
|
||||
finalizedCheckpoint := dv.fc.FinalizedCheckpoint()
|
||||
|
||||
// Compute the first slot of the finalized checkpoint epoch.
|
||||
startSlot, err := slots.EpochStart(finalizedCheckpoint.Epoch)
|
||||
if err != nil {
|
||||
return columnErrBuilder(errors.Wrap(err, "epoch start"))
|
||||
}
|
||||
|
||||
for _, dataColumn := range dv.dataColumns {
|
||||
// Extract the data column slot.
|
||||
dataColumnSlot := dataColumn.Slot()
|
||||
|
||||
// Check if the data column slot is after the first slot of the epoch corresponding to the finalized checkpoint.
|
||||
if dataColumnSlot <= startSlot {
|
||||
return columnErrBuilder(ErrSlotNotAfterFinalized)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dv *RODataColumnsVerifier) ValidProposerSignature(ctx context.Context) (err error) {
|
||||
if ok, err := dv.results.cached(RequireValidProposerSignature); ok {
|
||||
return err
|
||||
}
|
||||
|
||||
defer dv.recordResult(RequireValidProposerSignature, &err)
|
||||
|
||||
for _, dataColumn := range dv.dataColumns {
|
||||
// Extract the signature data from the data column.
|
||||
signatureData := columnToSignatureData(dataColumn)
|
||||
|
||||
// Get logging fields.
|
||||
fields := logging.DataColumnFields(dataColumn)
|
||||
log := log.WithFields(fields)
|
||||
|
||||
// First check if there is a cached verification that can be reused.
|
||||
seen, err := dv.sc.SignatureVerified(signatureData)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Reusing failed proposer signature validation from cache")
|
||||
|
||||
columnVerificationProposerSignatureCache.WithLabelValues("hit-invalid").Inc()
|
||||
return columnErrBuilder(ErrInvalidProposerSignature)
|
||||
}
|
||||
|
||||
// If yes, we can skip the full verification.
|
||||
if seen {
|
||||
columnVerificationProposerSignatureCache.WithLabelValues("hit-valid").Inc()
|
||||
continue
|
||||
}
|
||||
|
||||
columnVerificationProposerSignatureCache.WithLabelValues("miss").Inc()
|
||||
|
||||
// Retrieve the parent state.
|
||||
parentState, err := dv.parentState(ctx, dataColumn)
|
||||
if err != nil {
|
||||
return columnErrBuilder(errors.Wrap(err, "parent state"))
|
||||
}
|
||||
|
||||
// Full verification, which will subsequently be cached for anything sharing the signature cache.
|
||||
if err = dv.sc.VerifySignature(signatureData, parentState); err != nil {
|
||||
return columnErrBuilder(errors.Wrap(err, "verify signature"))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dv *RODataColumnsVerifier) SidecarParentSeen(parentSeen func([fieldparams.RootLength]byte) bool) (err error) {
|
||||
if ok, err := dv.results.cached(RequireSidecarParentSeen); ok {
|
||||
return err
|
||||
}
|
||||
|
||||
defer dv.recordResult(RequireSidecarParentSeen, &err)
|
||||
|
||||
for _, dataColumn := range dv.dataColumns {
|
||||
// Extract the root of the parent block corresponding to the data column.
|
||||
parentRoot := dataColumn.ParentRoot()
|
||||
|
||||
// Skip if the parent root has been seen.
|
||||
if parentSeen != nil && parentSeen(parentRoot) {
|
||||
continue
|
||||
}
|
||||
|
||||
if !dv.fc.HasNode(parentRoot) {
|
||||
return columnErrBuilder(ErrSidecarParentNotSeen)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dv *RODataColumnsVerifier) SidecarParentValid(badParent func([fieldparams.RootLength]byte) bool) (err error) {
|
||||
if ok, err := dv.results.cached(RequireSidecarParentValid); ok {
|
||||
return err
|
||||
}
|
||||
|
||||
defer dv.recordResult(RequireSidecarParentValid, &err)
|
||||
|
||||
for _, dataColumn := range dv.dataColumns {
|
||||
// Extract the root of the parent block corresponding to the data column.
|
||||
parentRoot := dataColumn.ParentRoot()
|
||||
|
||||
if badParent != nil && badParent(parentRoot) {
|
||||
return columnErrBuilder(ErrSidecarParentInvalid)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dv *RODataColumnsVerifier) SidecarParentSlotLower() (err error) {
|
||||
if ok, err := dv.results.cached(RequireSidecarParentSlotLower); ok {
|
||||
return err
|
||||
}
|
||||
|
||||
defer dv.recordResult(RequireSidecarParentSlotLower, &err)
|
||||
|
||||
for _, dataColumn := range dv.dataColumns {
|
||||
// Extract the root of the parent block corresponding to the data column.
|
||||
parentRoot := dataColumn.ParentRoot()
|
||||
|
||||
// Compute the slot of the parent block.
|
||||
parentSlot, err := dv.fc.Slot(parentRoot)
|
||||
if err != nil {
|
||||
return columnErrBuilder(errors.Wrap(err, "slot"))
|
||||
}
|
||||
|
||||
// Extract the slot of the data column.
|
||||
dataColumnSlot := dataColumn.Slot()
|
||||
|
||||
// Check if the data column slot is after the parent slot.
|
||||
if parentSlot >= dataColumnSlot {
|
||||
return ErrSlotNotAfterParent
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dv *RODataColumnsVerifier) SidecarDescendsFromFinalized() (err error) {
|
||||
if ok, err := dv.results.cached(RequireSidecarDescendsFromFinalized); ok {
|
||||
return err
|
||||
}
|
||||
|
||||
defer dv.recordResult(RequireSidecarDescendsFromFinalized, &err)
|
||||
|
||||
for _, dataColumn := range dv.dataColumns {
|
||||
// Extract the root of the parent block corresponding to the data column.
|
||||
parentRoot := dataColumn.ParentRoot()
|
||||
|
||||
if !dv.fc.HasNode(parentRoot) {
|
||||
return columnErrBuilder(ErrSidecarNotFinalizedDescendent)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dv *RODataColumnsVerifier) SidecarInclusionProven() (err error) {
|
||||
if ok, err := dv.results.cached(RequireSidecarInclusionProven); ok {
|
||||
return err
|
||||
}
|
||||
|
||||
defer dv.recordResult(RequireSidecarInclusionProven, &err)
|
||||
|
||||
startTime := time.Now()
|
||||
|
||||
for _, dataColumn := range dv.dataColumns {
|
||||
k, keyErr := inclusionProofKey(dataColumn)
|
||||
if keyErr == nil {
|
||||
if _, ok := dv.ic.Get(k); ok {
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
log.WithError(keyErr).Error("Failed to get inclusion proof key")
|
||||
}
|
||||
|
||||
if err = peerdas.VerifyDataColumnSidecarInclusionProof(dataColumn); err != nil {
|
||||
return columnErrBuilder(ErrSidecarInclusionProofInvalid)
|
||||
}
|
||||
|
||||
if keyErr == nil {
|
||||
dv.ic.Add(k, struct{}{})
|
||||
}
|
||||
}
|
||||
|
||||
dataColumnSidecarInclusionProofVerificationHistogram.Observe(float64(time.Since(startTime).Milliseconds()))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dv *RODataColumnsVerifier) SidecarKzgProofVerified() (err error) {
|
||||
if ok, err := dv.results.cached(RequireSidecarKzgProofVerified); ok {
|
||||
return err
|
||||
}
|
||||
|
||||
defer dv.recordResult(RequireSidecarKzgProofVerified, &err)
|
||||
|
||||
startTime := time.Now()
|
||||
|
||||
err = dv.verifyDataColumnsCommitment(dv.dataColumns)
|
||||
if err != nil {
|
||||
return columnErrBuilder(errors.Wrap(err, "verify data column commitment"))
|
||||
}
|
||||
|
||||
dataColumnBatchKZGVerificationHistogram.Observe(float64(time.Since(startTime).Milliseconds()))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dv *RODataColumnsVerifier) SidecarProposerExpected(ctx context.Context) (err error) {
|
||||
if ok, err := dv.results.cached(RequireSidecarProposerExpected); ok {
|
||||
return err
|
||||
}
|
||||
|
||||
defer dv.recordResult(RequireSidecarProposerExpected, &err)
|
||||
|
||||
type slotParentRoot struct {
|
||||
slot primitives.Slot
|
||||
parentRoot [fieldparams.RootLength]byte
|
||||
}
|
||||
|
||||
targetRootBySlotParentRoot := make(map[slotParentRoot][fieldparams.RootLength]byte)
|
||||
|
||||
var targetRootFromCache = func(slot primitives.Slot, parentRoot [fieldparams.RootLength]byte) ([fieldparams.RootLength]byte, error) {
|
||||
// Use cached values if available.
|
||||
slotParentRoot := slotParentRoot{slot: slot, parentRoot: parentRoot}
|
||||
if root, ok := targetRootBySlotParentRoot[slotParentRoot]; ok {
|
||||
return root, nil
|
||||
}
|
||||
|
||||
// Compute the epoch of the data column slot.
|
||||
dataColumnEpoch := slots.ToEpoch(slot)
|
||||
if dataColumnEpoch > 0 {
|
||||
dataColumnEpoch = dataColumnEpoch - 1
|
||||
}
|
||||
|
||||
// Compute the target root for the epoch.
|
||||
targetRoot, err := dv.fc.TargetRootForEpoch(parentRoot, dataColumnEpoch)
|
||||
if err != nil {
|
||||
return [fieldparams.RootLength]byte{}, errors.Wrap(err, "target root from epoch")
|
||||
}
|
||||
|
||||
// Store the target root in the cache.
|
||||
targetRootBySlotParentRoot[slotParentRoot] = targetRoot
|
||||
|
||||
return targetRoot, nil
|
||||
}
|
||||
|
||||
for _, dataColumn := range dv.dataColumns {
|
||||
// Extract the slot of the data column.
|
||||
dataColumnSlot := dataColumn.Slot()
|
||||
|
||||
// Extract the root of the parent block corresponding to the data column.
|
||||
parentRoot := dataColumn.ParentRoot()
|
||||
|
||||
// Compute the target root for the data column.
|
||||
targetRoot, err := targetRootFromCache(dataColumnSlot, parentRoot)
|
||||
if err != nil {
|
||||
return columnErrBuilder(errors.Wrap(err, "target root"))
|
||||
}
|
||||
|
||||
// Compute the epoch of the data column slot.
|
||||
dataColumnEpoch := slots.ToEpoch(dataColumnSlot)
|
||||
if dataColumnEpoch > 0 {
|
||||
dataColumnEpoch = dataColumnEpoch - 1
|
||||
}
|
||||
|
||||
// Create a checkpoint for the target root.
|
||||
checkpoint := &forkchoicetypes.Checkpoint{Root: targetRoot, Epoch: dataColumnEpoch}
|
||||
|
||||
// Try to extract the proposer index from the data column in the cache.
|
||||
idx, cached := dv.pc.Proposer(checkpoint, dataColumnSlot)
|
||||
|
||||
if !cached {
|
||||
// Retrieve the parent state.
|
||||
parentState, err := dv.parentState(ctx, dataColumn)
|
||||
if err != nil {
|
||||
return columnErrBuilder(errors.Wrap(err, "parent state"))
|
||||
}
|
||||
|
||||
idx, err = dv.pc.ComputeProposer(ctx, parentRoot, dataColumnSlot, parentState)
|
||||
if err != nil {
|
||||
return columnErrBuilder(errors.Wrap(err, "compute proposer"))
|
||||
}
|
||||
}
|
||||
|
||||
if idx != dataColumn.ProposerIndex() {
|
||||
return columnErrBuilder(ErrSidecarUnexpectedProposer)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// parentState retrieves the parent state of the data column from the cache if possible, else retrieves it from the state by rooter.
|
||||
func (dv *RODataColumnsVerifier) parentState(ctx context.Context, dataColumn blocks.RODataColumn) (state.BeaconState, error) {
|
||||
parentRoot := dataColumn.ParentRoot()
|
||||
|
||||
// If the parent root is already in the cache, return it.
|
||||
if st, ok := dv.stateByRoot[parentRoot]; ok {
|
||||
return st, nil
|
||||
}
|
||||
|
||||
// Retrieve the parent state from the state by rooter.
|
||||
st, err := dv.sr.StateByRoot(ctx, parentRoot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "state by root")
|
||||
}
|
||||
|
||||
// Store the parent state in the cache.
|
||||
dv.stateByRoot[parentRoot] = st
|
||||
|
||||
return st, nil
|
||||
}
|
||||
|
||||
func columnToSignatureData(d blocks.RODataColumn) SignatureData {
|
||||
return SignatureData{
|
||||
Root: d.BlockRoot(),
|
||||
Parent: d.ParentRoot(),
|
||||
Signature: bytesutil.ToBytes96(d.SignedBlockHeader.Signature),
|
||||
Proposer: d.ProposerIndex(),
|
||||
Slot: d.Slot(),
|
||||
}
|
||||
}
|
||||
|
||||
func columnErrBuilder(baseErr error) error {
|
||||
return errors.Wrap(baseErr, errColumnsInvalid.Error())
|
||||
}
|
||||
|
||||
func inclusionProofKey(c blocks.RODataColumn) ([160]byte, error) {
|
||||
var key [160]byte
|
||||
if len(c.KzgCommitmentsInclusionProof) != 4 {
|
||||
// This should already be enforced by SSZ unmarshaling; we still check so we don't panic on array bounds.
|
||||
return key, columnErrBuilder(ErrSidecarInclusionProofInvalid)
|
||||
}
|
||||
|
||||
root, err := c.SignedBlockHeader.HashTreeRoot()
|
||||
if err != nil {
|
||||
return [160]byte{}, errors.Wrap(err, "hash tree root")
|
||||
}
|
||||
|
||||
for i := range c.KzgCommitmentsInclusionProof {
|
||||
if copy(key[32*i:32*i+32], c.KzgCommitmentsInclusionProof[i]) != 32 {
|
||||
return key, columnErrBuilder(ErrSidecarInclusionProofInvalid)
|
||||
}
|
||||
}
|
||||
|
||||
copy(key[128:], root[:])
|
||||
return key, nil
|
||||
}
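Editorial note: columnErrBuilder wraps the given sentinel with pkg/errors, which keeps the sentinel in the error chain, so callers can still match the exact failure with errors.Is; the tests below rely on this via require.ErrorIs. A minimal, hypothetical caller-side sketch, assuming the imports of this file and a verifier obtained from Initializer.NewDataColumnsVerifier:

// shouldRejectFutureSlot is a hypothetical helper: it reports true only for failures
// other than ErrFromFutureSlot, which a caller might prefer to ignore rather than
// penalize the peer for.
func shouldRejectFutureSlot(verifier *RODataColumnsVerifier) bool {
	err := verifier.NotFromFutureSlot()
	if err == nil {
		return false
	}
	// ErrFromFutureSlot survives the errColumnsInvalid wrapping done by columnErrBuilder.
	return !errors.Is(err, ErrFromFutureSlot)
}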
beacon-chain/verification/data_column_test.go (normal file, 978 lines)
@@ -0,0 +1,978 @@
package verification
|
||||
|
||||
import (
|
||||
"context"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func GenerateTestDataColumns(t *testing.T, parent [fieldparams.RootLength]byte, slot primitives.Slot, blobCount int) []blocks.RODataColumn {
|
||||
roBlock, roBlobs := util.GenerateTestDenebBlockWithSidecar(t, parent, slot, blobCount)
|
||||
blobs := make([]kzg.Blob, 0, len(roBlobs))
|
||||
for i := range roBlobs {
|
||||
blobs = append(blobs, kzg.Blob(roBlobs[i].Blob))
|
||||
}
|
||||
|
||||
cellsAndProofs := util.GenerateCellsAndProofs(t, blobs)
|
||||
dataColumnSidecars, err := peerdas.DataColumnSidecars(roBlock, cellsAndProofs)
|
||||
require.NoError(t, err)
|
||||
|
||||
roDataColumns := make([]blocks.RODataColumn, 0, len(dataColumnSidecars))
|
||||
for i := range dataColumnSidecars {
|
||||
roDataColumn, err := blocks.NewRODataColumn(dataColumnSidecars[i])
|
||||
require.NoError(t, err)
|
||||
roDataColumns = append(roDataColumns, roDataColumn)
|
||||
}
|
||||
|
||||
return roDataColumns
|
||||
}
|
||||
|
||||
func TestColumnSatisfyRequirement(t *testing.T) {
|
||||
const (
|
||||
columnSlot = 1
|
||||
blobCount = 1
|
||||
)
|
||||
|
||||
parentRoot := [fieldparams.RootLength]byte{}
|
||||
|
||||
columns := GenerateTestDataColumns(t, parentRoot, columnSlot, blobCount)
|
||||
initializer := Initializer{}
|
||||
|
||||
v := initializer.NewDataColumnsVerifier(columns, GossipDataColumnSidecarRequirements)
|
||||
require.Equal(t, false, v.results.executed(RequireValidProposerSignature))
|
||||
v.SatisfyRequirement(RequireValidProposerSignature)
|
||||
require.Equal(t, true, v.results.executed(RequireValidProposerSignature))
|
||||
}
|
||||
|
||||
func TestValid(t *testing.T) {
|
||||
var initializer Initializer
|
||||
|
||||
t.Run("one invalid column", func(t *testing.T) {
|
||||
columns := GenerateTestDataColumns(t, [fieldparams.RootLength]byte{}, 1, 1)
|
||||
columns[0].KzgCommitments = [][]byte{}
|
||||
verifier := initializer.NewDataColumnsVerifier(columns, GossipDataColumnSidecarRequirements)
|
||||
|
||||
err := verifier.ValidFields()
|
||||
require.NotNil(t, err)
|
||||
require.NotNil(t, verifier.results.result(RequireValidFields))
|
||||
})
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
columns := GenerateTestDataColumns(t, [fieldparams.RootLength]byte{}, 1, 1)
|
||||
verifier := initializer.NewDataColumnsVerifier(columns, GossipDataColumnSidecarRequirements)
|
||||
|
||||
err := verifier.ValidFields()
|
||||
require.NoError(t, err)
|
||||
require.IsNil(t, verifier.results.result(RequireValidFields))
|
||||
|
||||
err = verifier.ValidFields()
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestCorrectSubnet(t *testing.T) {
|
||||
const dataColumnSidecarSubTopic = "/data_column_sidecar_%d/"
|
||||
|
||||
var initializer Initializer
|
||||
|
||||
t.Run("lengths mismatch", func(t *testing.T) {
|
||||
columns := GenerateTestDataColumns(t, [fieldparams.RootLength]byte{}, 1, 1)
|
||||
verifier := initializer.NewDataColumnsVerifier(columns, GossipDataColumnSidecarRequirements)
|
||||
|
||||
err := verifier.CorrectSubnet(dataColumnSidecarSubTopic, []string{})
|
||||
require.ErrorIs(t, err, errBadTopicLength)
|
||||
require.NotNil(t, verifier.results.result(RequireCorrectSubnet))
|
||||
})
|
||||
|
||||
t.Run("wrong topic", func(t *testing.T) {
|
||||
columns := GenerateTestDataColumns(t, [fieldparams.RootLength]byte{}, 1, 1)
|
||||
verifier := initializer.NewDataColumnsVerifier(columns[:2], GossipDataColumnSidecarRequirements)
|
||||
|
||||
err := verifier.CorrectSubnet(
|
||||
dataColumnSidecarSubTopic,
|
||||
[]string{
|
||||
"/eth2/9dc47cc6/data_column_sidecar_1/ssz_snappy",
|
||||
"/eth2/9dc47cc6/data_column_sidecar_0/ssz_snappy",
|
||||
})
|
||||
|
||||
require.ErrorIs(t, err, errBadTopic)
|
||||
require.NotNil(t, verifier.results.result(RequireCorrectSubnet))
|
||||
})
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
subnets := []string{
|
||||
"/eth2/9dc47cc6/data_column_sidecar_0/ssz_snappy",
|
||||
"/eth2/9dc47cc6/data_column_sidecar_1",
|
||||
}
|
||||
|
||||
columns := GenerateTestDataColumns(t, [fieldparams.RootLength]byte{}, 1, 1)
|
||||
verifier := initializer.NewDataColumnsVerifier(columns[:2], GossipDataColumnSidecarRequirements)
|
||||
|
||||
err := verifier.CorrectSubnet(dataColumnSidecarSubTopic, subnets)
|
||||
require.NoError(t, err)
|
||||
require.IsNil(t, verifier.results.result(RequireCorrectSubnet))
|
||||
|
||||
err = verifier.CorrectSubnet(dataColumnSidecarSubTopic, subnets)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestNotFromFutureSlot(t *testing.T) {
|
||||
maximumGossipClockDisparity := params.BeaconConfig().MaximumGossipClockDisparityDuration()
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
currentSlot, columnSlot primitives.Slot
|
||||
timeBeforeCurrentSlot time.Duration
|
||||
isError bool
|
||||
}{
|
||||
{
|
||||
name: "column slot == current slot",
|
||||
currentSlot: 42,
|
||||
columnSlot: 42,
|
||||
timeBeforeCurrentSlot: 0,
|
||||
isError: false,
|
||||
},
|
||||
{
|
||||
name: "within maximum gossip clock disparity",
|
||||
currentSlot: 42,
|
||||
columnSlot: 42,
|
||||
timeBeforeCurrentSlot: maximumGossipClockDisparity / 2,
|
||||
isError: false,
|
||||
},
|
||||
{
|
||||
name: "outside maximum gossip clock disparity",
|
||||
currentSlot: 42,
|
||||
columnSlot: 42,
|
||||
timeBeforeCurrentSlot: maximumGossipClockDisparity * 2,
|
||||
isError: true,
|
||||
},
|
||||
{
|
||||
name: "too far in the future",
|
||||
currentSlot: 10,
|
||||
columnSlot: 42,
|
||||
timeBeforeCurrentSlot: 0,
|
||||
isError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
const blobCount = 1
|
||||
|
||||
now := time.Now()
|
||||
secondsPerSlot := time.Duration(params.BeaconConfig().SecondsPerSlot)
|
||||
genesis := now.Add(-time.Duration(tc.currentSlot) * secondsPerSlot * time.Second)
|
||||
|
||||
clock := startup.NewClock(
|
||||
genesis,
|
||||
[fieldparams.RootLength]byte{},
|
||||
startup.WithNower(func() time.Time {
|
||||
return now.Add(-tc.timeBeforeCurrentSlot)
|
||||
}),
|
||||
)
|
||||
|
||||
parentRoot := [fieldparams.RootLength]byte{}
|
||||
initializer := Initializer{shared: &sharedResources{clock: clock}}
|
||||
|
||||
columns := GenerateTestDataColumns(t, parentRoot, tc.columnSlot, blobCount)
|
||||
verifier := initializer.NewDataColumnsVerifier(columns, GossipDataColumnSidecarRequirements)
|
||||
|
||||
err := verifier.NotFromFutureSlot()
|
||||
require.Equal(t, true, verifier.results.executed(RequireNotFromFutureSlot))
|
||||
|
||||
if tc.isError {
|
||||
require.ErrorIs(t, err, ErrFromFutureSlot)
|
||||
require.NotNil(t, verifier.results.result(RequireNotFromFutureSlot))
|
||||
return
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, verifier.results.result(RequireNotFromFutureSlot))
|
||||
|
||||
err = verifier.NotFromFutureSlot()
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestColumnSlotAboveFinalized(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
finalizedSlot, columnSlot primitives.Slot
|
||||
isErr bool
|
||||
}{
|
||||
{
|
||||
name: "finalized epoch < column epoch",
|
||||
finalizedSlot: 10,
|
||||
columnSlot: 96,
|
||||
isErr: false,
|
||||
},
|
||||
{
|
||||
name: "finalized slot < column slot (same epoch)",
|
||||
finalizedSlot: 32,
|
||||
columnSlot: 33,
|
||||
isErr: false,
|
||||
},
|
||||
{
|
||||
name: "finalized slot == column slot",
|
||||
finalizedSlot: 64,
|
||||
columnSlot: 64,
|
||||
isErr: true,
|
||||
},
|
||||
{
|
||||
name: "finalized epoch > column epoch",
|
||||
finalizedSlot: 32,
|
||||
columnSlot: 31,
|
||||
isErr: true,
|
||||
},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
const blobCount = 1
|
||||
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
finalizedCheckpoint := func() *forkchoicetypes.Checkpoint {
|
||||
return &forkchoicetypes.Checkpoint{
|
||||
Epoch: slots.ToEpoch(tc.finalizedSlot),
|
||||
Root: [fieldparams.RootLength]byte{},
|
||||
}
|
||||
}
|
||||
|
||||
parentRoot := [fieldparams.RootLength]byte{}
|
||||
initializer := &Initializer{shared: &sharedResources{
|
||||
fc: &mockForkchoicer{FinalizedCheckpointCB: finalizedCheckpoint},
|
||||
}}
|
||||
|
||||
columns := GenerateTestDataColumns(t, parentRoot, tc.columnSlot, blobCount)
|
||||
|
||||
v := initializer.NewDataColumnsVerifier(columns, GossipDataColumnSidecarRequirements)
|
||||
|
||||
err := v.SlotAboveFinalized()
|
||||
require.Equal(t, true, v.results.executed(RequireSlotAboveFinalized))
|
||||
|
||||
if tc.isErr {
|
||||
require.ErrorIs(t, err, ErrSlotNotAfterFinalized)
|
||||
require.NotNil(t, v.results.result(RequireSlotAboveFinalized))
|
||||
return
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, v.results.result(RequireSlotAboveFinalized))
|
||||
|
||||
err = v.SlotAboveFinalized()
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidProposerSignature(t *testing.T) {
|
||||
const (
|
||||
columnSlot = 0
|
||||
blobCount = 1
|
||||
)
|
||||
|
||||
parentRoot := [fieldparams.RootLength]byte{}
|
||||
validator := ðpb.Validator{}
|
||||
|
||||
columns := GenerateTestDataColumns(t, parentRoot, columnSlot, blobCount)
|
||||
firstColumn := columns[0]
|
||||
|
||||
// The signature data does not depend on the data column itself, so we can use the first one.
|
||||
expectedSignatureData := columnToSignatureData(firstColumn)
|
||||
|
||||
testCases := []struct {
|
||||
isError bool
|
||||
vscbShouldError bool
|
||||
svcbReturn bool
|
||||
stateByRooter StateByRooter
|
||||
vscbError error
|
||||
svcbError error
|
||||
name string
|
||||
}{
|
||||
{
|
||||
name: "cache hit - success",
|
||||
svcbReturn: true,
|
||||
svcbError: nil,
|
||||
vscbShouldError: true,
|
||||
vscbError: nil,
|
||||
stateByRooter: &mockStateByRooter{sbr: sbrErrorIfCalled(t)},
|
||||
isError: false,
|
||||
},
|
||||
{
|
||||
name: "cache hit - error",
|
||||
svcbReturn: true,
|
||||
svcbError: errors.New("derp"),
|
||||
vscbShouldError: true,
|
||||
vscbError: nil,
|
||||
stateByRooter: &mockStateByRooter{sbr: sbrErrorIfCalled(t)},
|
||||
isError: true,
|
||||
},
|
||||
{
|
||||
name: "cache miss - success",
|
||||
svcbReturn: false,
|
||||
svcbError: nil,
|
||||
vscbShouldError: false,
|
||||
vscbError: nil,
|
||||
stateByRooter: sbrForValOverride(firstColumn.ProposerIndex(), validator),
|
||||
isError: false,
|
||||
},
|
||||
{
|
||||
name: "cache miss - state not found",
|
||||
svcbReturn: false,
|
||||
svcbError: nil,
|
||||
vscbShouldError: false,
|
||||
vscbError: nil,
|
||||
stateByRooter: sbrNotFound(t, expectedSignatureData.Parent),
|
||||
isError: true,
|
||||
},
|
||||
{
|
||||
name: "cache miss - signature failure",
|
||||
svcbReturn: false,
|
||||
svcbError: nil,
|
||||
vscbShouldError: false,
|
||||
vscbError: errors.New("signature, not so good!"),
|
||||
stateByRooter: sbrForValOverride(firstColumn.ProposerIndex(), validator),
|
||||
isError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
signatureCache := &mockSignatureCache{
|
||||
svcb: func(signatureData SignatureData) (bool, error) {
|
||||
if signatureData != expectedSignatureData {
|
||||
t.Error("Did not see expected SignatureData")
|
||||
}
|
||||
return tc.svcbReturn, tc.svcbError
|
||||
},
|
||||
vscb: func(signatureData SignatureData, _ ValidatorAtIndexer) (err error) {
|
||||
if tc.vscbShouldError {
|
||||
t.Error("VerifySignature should not be called if the result is cached")
|
||||
return nil
|
||||
}
|
||||
|
||||
if expectedSignatureData != signatureData {
|
||||
t.Error("unexpected signature data")
|
||||
}
|
||||
|
||||
return tc.vscbError
|
||||
},
|
||||
}
|
||||
|
||||
initializer := Initializer{
|
||||
shared: &sharedResources{
|
||||
sc: signatureCache,
|
||||
sr: tc.stateByRooter,
|
||||
},
|
||||
}
|
||||
|
||||
verifier := initializer.NewDataColumnsVerifier(columns, GossipDataColumnSidecarRequirements)
|
||||
err := verifier.ValidProposerSignature(context.Background())
|
||||
require.Equal(t, true, verifier.results.executed(RequireValidProposerSignature))
|
||||
|
||||
if tc.isError {
|
||||
require.NotNil(t, err)
|
||||
require.NotNil(t, verifier.results.result(RequireValidProposerSignature))
|
||||
return
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, verifier.results.result(RequireValidProposerSignature))
|
||||
|
||||
err = verifier.ValidProposerSignature(context.Background())
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDataColumnsSidecarParentSeen(t *testing.T) {
|
||||
const (
|
||||
columnSlot = 0
|
||||
blobCount = 1
|
||||
)
|
||||
|
||||
parentRoot := [fieldparams.RootLength]byte{}
|
||||
|
||||
columns := GenerateTestDataColumns(t, parentRoot, columnSlot, blobCount)
|
||||
firstColumn := columns[0]
|
||||
|
||||
fcHas := &mockForkchoicer{
|
||||
HasNodeCB: func(parent [fieldparams.RootLength]byte) bool {
|
||||
if parent != firstColumn.ParentRoot() {
|
||||
t.Error("forkchoice.HasNode called with unexpected parent root")
|
||||
}
|
||||
|
||||
return true
|
||||
},
|
||||
}
|
||||
|
||||
fcLacks := &mockForkchoicer{
|
||||
HasNodeCB: func(parent [fieldparams.RootLength]byte) bool {
|
||||
if parent != firstColumn.ParentRoot() {
|
||||
t.Error("forkchoice.HasNode called with unexpected parent root")
|
||||
}
|
||||
|
||||
return false
|
||||
},
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
forkChoicer Forkchoicer
|
||||
parentSeen func([fieldparams.RootLength]byte) bool
|
||||
isError bool
|
||||
}{
|
||||
{
|
||||
name: "happy path",
|
||||
forkChoicer: fcHas,
|
||||
parentSeen: nil,
|
||||
isError: false,
|
||||
},
|
||||
{
|
||||
name: "HasNode false, no badParent cb, expected error",
|
||||
forkChoicer: fcLacks,
|
||||
parentSeen: nil,
|
||||
isError: true,
|
||||
},
|
||||
{
|
||||
name: "HasNode false, badParent true",
|
||||
forkChoicer: fcLacks,
|
||||
parentSeen: badParentCb(t, firstColumn.ParentRoot(), true),
|
||||
isError: false,
|
||||
},
|
||||
{
|
||||
name: "HasNode false, badParent false",
|
||||
forkChoicer: fcLacks,
|
||||
parentSeen: badParentCb(t, firstColumn.ParentRoot(), false),
|
||||
isError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
initializer := Initializer{shared: &sharedResources{fc: tc.forkChoicer}}
|
||||
verifier := initializer.NewDataColumnsVerifier(columns, GossipDataColumnSidecarRequirements)
|
||||
err := verifier.SidecarParentSeen(tc.parentSeen)
|
||||
require.Equal(t, true, verifier.results.executed(RequireSidecarParentSeen))
|
||||
|
||||
if tc.isError {
|
||||
require.ErrorIs(t, err, ErrSidecarParentNotSeen)
|
||||
require.NotNil(t, verifier.results.result(RequireSidecarParentSeen))
|
||||
return
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, verifier.results.result(RequireSidecarParentSeen))
|
||||
|
||||
err = verifier.SidecarParentSeen(tc.parentSeen)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDataColumnsSidecarParentValid(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
badParentCbReturn bool
|
||||
isError bool
|
||||
}{
|
||||
{
|
||||
name: "parent valid",
|
||||
badParentCbReturn: false,
|
||||
isError: false,
|
||||
},
|
||||
{
|
||||
name: "parent not valid",
|
||||
badParentCbReturn: true,
|
||||
isError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
const (
|
||||
columnSlot = 0
|
||||
blobCount = 1
|
||||
)
|
||||
|
||||
parentRoot := [fieldparams.RootLength]byte{}
|
||||
|
||||
columns := GenerateTestDataColumns(t, parentRoot, columnSlot, blobCount)
|
||||
firstColumn := columns[0]
|
||||
|
||||
initializer := Initializer{shared: &sharedResources{}}
|
||||
verifier := initializer.NewDataColumnsVerifier(columns, GossipDataColumnSidecarRequirements)
|
||||
err := verifier.SidecarParentValid(badParentCb(t, firstColumn.ParentRoot(), tc.badParentCbReturn))
|
||||
require.Equal(t, true, verifier.results.executed(RequireSidecarParentValid))
|
||||
|
||||
if tc.isError {
|
||||
require.ErrorIs(t, err, ErrSidecarParentInvalid)
|
||||
require.NotNil(t, verifier.results.result(RequireSidecarParentValid))
|
||||
return
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, verifier.results.result(RequireSidecarParentValid))
|
||||
|
||||
err = verifier.SidecarParentValid(badParentCb(t, firstColumn.ParentRoot(), tc.badParentCbReturn))
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestColumnSidecarParentSlotLower(t *testing.T) {
|
||||
columns := GenerateTestDataColumns(t, [32]byte{}, 1, 1)
|
||||
firstColumn := columns[0]
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
forkChoiceSlot primitives.Slot
|
||||
forkChoiceError, err error
|
||||
errCheckValue bool
|
||||
}{
|
||||
{
|
||||
name: "Not in forkchoice",
|
||||
forkChoiceError: errors.New("not in forkchoice"),
|
||||
err: ErrSlotNotAfterParent,
|
||||
},
|
||||
{
|
||||
name: "In forkchoice, slot lower",
|
||||
forkChoiceSlot: firstColumn.Slot() - 1,
|
||||
},
|
||||
{
|
||||
name: "In forkchoice, slot equal",
|
||||
forkChoiceSlot: firstColumn.Slot(),
|
||||
err: ErrSlotNotAfterParent,
|
||||
errCheckValue: true,
|
||||
},
|
||||
{
|
||||
name: "In forkchoice, slot higher",
|
||||
forkChoiceSlot: firstColumn.Slot() + 1,
|
||||
err: ErrSlotNotAfterParent,
|
||||
errCheckValue: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
initializer := Initializer{
|
||||
shared: &sharedResources{fc: &mockForkchoicer{
|
||||
SlotCB: func(r [32]byte) (primitives.Slot, error) {
|
||||
if firstColumn.ParentRoot() != r {
|
||||
t.Error("forkchoice.Slot called with unexpected parent root")
|
||||
}
|
||||
|
||||
return c.forkChoiceSlot, c.forkChoiceError
|
||||
},
|
||||
}},
|
||||
}
|
||||
|
||||
verifier := initializer.NewDataColumnsVerifier(columns, GossipDataColumnSidecarRequirements)
|
||||
err := verifier.SidecarParentSlotLower()
|
||||
require.Equal(t, true, verifier.results.executed(RequireSidecarParentSlotLower))
|
||||
|
||||
if c.err == nil {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, verifier.results.result(RequireSidecarParentSlotLower))
|
||||
|
||||
err = verifier.SidecarParentSlotLower()
|
||||
require.NoError(t, err)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
require.NotNil(t, err)
|
||||
require.NotNil(t, verifier.results.result(RequireSidecarParentSlotLower))
|
||||
|
||||
if c.errCheckValue {
|
||||
require.ErrorIs(t, err, c.err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDataColumnsSidecarDescendsFromFinalized(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
hasNodeCBReturn bool
|
||||
isError bool
|
||||
}{
|
||||
{
|
||||
name: "Not canonical",
|
||||
hasNodeCBReturn: false,
|
||||
isError: true,
|
||||
},
|
||||
{
|
||||
name: "Canonical",
|
||||
hasNodeCBReturn: true,
|
||||
isError: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
const (
|
||||
columnSlot = 0
|
||||
blobCount = 1
|
||||
)
|
||||
|
||||
parentRoot := [fieldparams.RootLength]byte{}
|
||||
|
||||
columns := GenerateTestDataColumns(t, parentRoot, columnSlot, blobCount)
|
||||
firstColumn := columns[0]
|
||||
|
||||
initializer := Initializer{
|
||||
shared: &sharedResources{
|
||||
fc: &mockForkchoicer{
|
||||
HasNodeCB: func(r [fieldparams.RootLength]byte) bool {
|
||||
if firstColumn.ParentRoot() != r {
|
||||
t.Error("forkchoice.Slot called with unexpected parent root")
|
||||
}
|
||||
|
||||
return tc.hasNodeCBReturn
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
verifier := initializer.NewDataColumnsVerifier(columns, GossipDataColumnSidecarRequirements)
|
||||
err := verifier.SidecarDescendsFromFinalized()
|
||||
require.Equal(t, true, verifier.results.executed(RequireSidecarDescendsFromFinalized))
|
||||
|
||||
if tc.isError {
|
||||
require.ErrorIs(t, err, ErrSidecarNotFinalizedDescendent)
|
||||
require.NotNil(t, verifier.results.result(RequireSidecarDescendsFromFinalized))
|
||||
return
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, verifier.results.result(RequireSidecarDescendsFromFinalized))
|
||||
|
||||
err = verifier.SidecarDescendsFromFinalized()
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDataColumnsSidecarInclusionProven(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
alterate bool
|
||||
isError bool
|
||||
}{
|
||||
{
|
||||
name: "Inclusion proven",
|
||||
alterate: false,
|
||||
isError: false,
|
||||
},
|
||||
{
|
||||
name: "Inclusion not proven",
|
||||
alterate: true,
|
||||
isError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
const (
|
||||
columnSlot = 0
|
||||
blobCount = 1
|
||||
)
|
||||
|
||||
parentRoot := [fieldparams.RootLength]byte{}
|
||||
columns := GenerateTestDataColumns(t, parentRoot, columnSlot, blobCount)
|
||||
if tc.alterate {
|
||||
firstColumn := columns[0]
|
||||
byte0 := firstColumn.SignedBlockHeader.Header.BodyRoot[0]
|
||||
firstColumn.SignedBlockHeader.Header.BodyRoot[0] = byte0 ^ 255
|
||||
}
|
||||
|
||||
initializer := Initializer{
|
||||
shared: &sharedResources{ic: newInclusionProofCache(1)},
|
||||
}
|
||||
verifier := initializer.NewDataColumnsVerifier(columns, GossipDataColumnSidecarRequirements)
|
||||
err := verifier.SidecarInclusionProven()
|
||||
require.Equal(t, true, verifier.results.executed(RequireSidecarInclusionProven))
|
||||
|
||||
if tc.isError {
|
||||
require.ErrorIs(t, err, ErrSidecarInclusionProofInvalid)
|
||||
require.NotNil(t, verifier.results.result(RequireSidecarInclusionProven))
|
||||
return
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, verifier.results.result(RequireSidecarInclusionProven))
|
||||
|
||||
err = verifier.SidecarInclusionProven()
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDataColumnsSidecarKzgProofVerified(t *testing.T) {
    testCases := []struct {
        isError bool
        verifyDataColumnsCommitmentError error
        name string
    }{
        {
            name: "KZG proof verified",
            verifyDataColumnsCommitmentError: nil,
            isError: false,
        },
        {
            name: "KZG proof not verified",
            verifyDataColumnsCommitmentError: errors.New("KZG proof error"),
            isError: true,
        },
    }

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            const (
                columnSlot = 0
                blobCount = 1
            )

            parentRoot := [fieldparams.RootLength]byte{}
            columns := GenerateTestDataColumns(t, parentRoot, columnSlot, blobCount)
            firstColumn := columns[0]

            verifyDataColumnsCommitment := func(roDataColumns []blocks.RODataColumn) error {
                for _, roDataColumn := range roDataColumns {
                    require.Equal(t, true, reflect.DeepEqual(firstColumn.KzgCommitments, roDataColumn.KzgCommitments))
                }

                return tc.verifyDataColumnsCommitmentError
            }

            verifier := &RODataColumnsVerifier{
                results: newResults(),
                dataColumns: columns,
                verifyDataColumnsCommitment: verifyDataColumnsCommitment,
            }

            err := verifier.SidecarKzgProofVerified()
            require.Equal(t, true, verifier.results.executed(RequireSidecarKzgProofVerified))

            if tc.isError {
                require.NotNil(t, err)
                require.NotNil(t, verifier.results.result(RequireSidecarKzgProofVerified))
                return
            }

            require.NoError(t, err)
            require.NoError(t, verifier.results.result(RequireSidecarKzgProofVerified))

            err = verifier.SidecarKzgProofVerified()
            require.NoError(t, err)
        })
    }
}

func TestDataColumnsSidecarProposerExpected(t *testing.T) {
    const (
        columnSlot = 1
        blobCount = 1
    )

    parentRoot := [fieldparams.RootLength]byte{}
    columns := GenerateTestDataColumns(t, parentRoot, columnSlot, blobCount)
    firstColumn := columns[0]

    newColumns := GenerateTestDataColumns(t, parentRoot, 2*params.BeaconConfig().SlotsPerEpoch, blobCount)
    firstNewColumn := newColumns[0]

    validator := &ethpb.Validator{}

    commonComputeProposerCB := func(_ context.Context, root [fieldparams.RootLength]byte, slot primitives.Slot, _ state.BeaconState) (primitives.ValidatorIndex, error) {
        require.Equal(t, firstColumn.ParentRoot(), root)
        require.Equal(t, firstColumn.Slot(), slot)
        return firstColumn.ProposerIndex(), nil
    }

    testCases := []struct {
        name string
        stateByRooter StateByRooter
        proposerCache ProposerCache
        columns []blocks.RODataColumn
        error string
    }{
        {
            name: "Cached, matches",
            stateByRooter: nil,
            proposerCache: &mockProposerCache{
                ProposerCB: pcReturnsIdx(firstColumn.ProposerIndex()),
            },
            columns: columns,
        },
        {
            name: "Cached, does not match",
            stateByRooter: nil,
            proposerCache: &mockProposerCache{
                ProposerCB: pcReturnsIdx(firstColumn.ProposerIndex() + 1),
            },
            columns: columns,
            error: ErrSidecarUnexpectedProposer.Error(),
        },
        {
            name: "Not cached, state lookup failure",
            stateByRooter: sbrNotFound(t, firstColumn.ParentRoot()),
            proposerCache: &mockProposerCache{
                ProposerCB: pcReturnsNotFound(),
            },
            columns: columns,
            error: "state by root",
        },
        {
            name: "Not cached, proposer matches",
            stateByRooter: sbrForValOverride(firstColumn.ProposerIndex(), validator),
            proposerCache: &mockProposerCache{
                ProposerCB: pcReturnsNotFound(),
                ComputeProposerCB: commonComputeProposerCB,
            },
            columns: columns,
        },
        {
            name: "Not cached, proposer matches",
            stateByRooter: sbrForValOverride(firstColumn.ProposerIndex(), validator),
            proposerCache: &mockProposerCache{
                ProposerCB: pcReturnsNotFound(),
                ComputeProposerCB: commonComputeProposerCB,
            },
            columns: columns,
        },
        {
            name: "Not cached, proposer matches for next epoch",
            stateByRooter: sbrForValOverride(firstNewColumn.ProposerIndex(), validator),
            proposerCache: &mockProposerCache{
                ProposerCB: pcReturnsNotFound(),
                ComputeProposerCB: func(_ context.Context, root [32]byte, slot primitives.Slot, _ state.BeaconState) (primitives.ValidatorIndex, error) {
                    require.Equal(t, firstNewColumn.ParentRoot(), root)
                    require.Equal(t, firstNewColumn.Slot(), slot)
                    return firstColumn.ProposerIndex(), nil
                },
            },
            columns: newColumns,
        },
        {
            name: "Not cached, proposer does not match",
            stateByRooter: sbrForValOverride(firstColumn.ProposerIndex(), validator),
            proposerCache: &mockProposerCache{
                ProposerCB: pcReturnsNotFound(),
                ComputeProposerCB: func(_ context.Context, root [32]byte, slot primitives.Slot, _ state.BeaconState) (primitives.ValidatorIndex, error) {
                    require.Equal(t, firstColumn.ParentRoot(), root)
                    require.Equal(t, firstColumn.Slot(), slot)
                    return firstColumn.ProposerIndex() + 1, nil
                },
            },
            columns: columns,
            error: ErrSidecarUnexpectedProposer.Error(),
        },
        {
            name: "Not cached, ComputeProposer fails",
            stateByRooter: sbrForValOverride(firstColumn.ProposerIndex(), validator),
            proposerCache: &mockProposerCache{
                ProposerCB: pcReturnsNotFound(),
                ComputeProposerCB: func(_ context.Context, root [32]byte, slot primitives.Slot, _ state.BeaconState) (primitives.ValidatorIndex, error) {
                    require.Equal(t, firstColumn.ParentRoot(), root)
                    require.Equal(t, firstColumn.Slot(), slot)
                    return 0, errors.New("ComputeProposer failed")
                },
            },
            columns: columns,
            error: "compute proposer",
        },
    }

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            initializer := Initializer{
                shared: &sharedResources{
                    sr: tc.stateByRooter,
                    pc: tc.proposerCache,
                    fc: &mockForkchoicer{
                        TargetRootForEpochCB: fcReturnsTargetRoot([fieldparams.RootLength]byte{}),
                    },
                },
            }

            verifier := initializer.NewDataColumnsVerifier(tc.columns, GossipDataColumnSidecarRequirements)
            err := verifier.SidecarProposerExpected(context.Background())

            require.Equal(t, true, verifier.results.executed(RequireSidecarProposerExpected))

            if len(tc.error) > 0 {
                require.ErrorContains(t, tc.error, err)
                require.NotNil(t, verifier.results.result(RequireSidecarProposerExpected))
                return
            }

            require.NoError(t, err)
            require.NoError(t, verifier.results.result(RequireSidecarProposerExpected))

            err = verifier.SidecarProposerExpected(context.Background())
            require.NoError(t, err)
        })
    }
}

func TestColumnRequirementSatisfaction(t *testing.T) {
    const (
        columnSlot = 1
        blobCount = 1
    )

    parentRoot := [fieldparams.RootLength]byte{}

    columns := GenerateTestDataColumns(t, parentRoot, columnSlot, blobCount)
    initializer := Initializer{}
    verifier := initializer.NewDataColumnsVerifier(columns, GossipDataColumnSidecarRequirements)

    // We haven't performed any verification, VerifiedRODataColumns should error.
    _, err := verifier.VerifiedRODataColumns()
    require.ErrorIs(t, err, errColumnsInvalid)

    var me VerificationMultiError
    ok := errors.As(err, &me)
    require.Equal(t, true, ok)
    fails := me.Failures()

    // We haven't performed any verification, so all the results should be this type.
    for _, v := range fails {
        require.ErrorIs(t, v, ErrMissingVerification)
    }

    // Satisfy everything but the first requirement through the backdoor.
    for _, r := range GossipDataColumnSidecarRequirements[1:] {
        verifier.results.record(r, nil)
    }

    // One requirement is missing, VerifiedRODataColumns should still error.
    _, err = verifier.VerifiedRODataColumns()
    require.ErrorIs(t, err, errColumnsInvalid)

    // Now, satisfy the first requirement.
    verifier.results.record(GossipDataColumnSidecarRequirements[0], nil)

    // VerifiedRODataColumns should now succeed.
    require.Equal(t, true, verifier.results.allSatisfied())
    _, err = verifier.VerifiedRODataColumns()
    require.NoError(t, err)
}

@@ -114,3 +114,12 @@ func VerifiedROBlobError(err error) (blocks.VerifiedROBlob, error) {
    }
    return blocks.VerifiedROBlob{}, err
}

// VerifiedRODataColumnError can be used by methods that have a VerifiedRODataColumn return type but do not have permission to
// create a value of that type in order to generate an error return value.
func VerifiedRODataColumnError(err error) (blocks.VerifiedRODataColumn, error) {
    if err == nil {
        return blocks.VerifiedRODataColumn{}, errVerificationImplementationFault
    }
    return blocks.VerifiedRODataColumn{}, err
}

@@ -3,7 +3,21 @@ package verification
import (
    "testing"

    fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"

    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
)

type (
    DataColumnParams struct {
        Slot primitives.Slot
        ColumnIndex uint64
        KzgCommitments [][]byte
        DataColumn []byte // A whole data cell will be filled with the content of one item of this slice.
    }

    DataColumnsParamsByRoot map[[fieldparams.RootLength]byte][]DataColumnParams
)

// FakeVerifyForTest can be used by tests that need a VerifiedROBlob but don't want to do all the

@@ -1,11 +1,14 @@
package verification

import (
    fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"

    "github.com/spf13/afero"
)

// VerifiedROBlobFromDisk creates a verified read-only blob sidecar from a file on disk.
func VerifiedROBlobFromDisk(fs afero.Fs, root [32]byte, path string) (blocks.VerifiedROBlob, error) {
    encoded, err := afero.ReadFile(fs, path)
    if err != nil {
@@ -21,3 +24,33 @@ func VerifiedROBlobFromDisk(fs afero.Fs, root [32]byte, path string) (blocks.Ver
    }
    return blocks.NewVerifiedROBlob(ro), nil
}

// VerifiedRODataColumnFromDisk creates a verified read-only data column sidecar from disk.
func VerifiedRODataColumnFromDisk(file afero.File, root [fieldparams.RootLength]byte, sszEncodedDataColumnSidecarSize uint32) (blocks.VerifiedRODataColumn, error) {
    // Read the SSZ encoded data column sidecar from the file.
    sszEncodedDataColumnSidecar := make([]byte, sszEncodedDataColumnSidecarSize)
    count, err := file.Read(sszEncodedDataColumnSidecar)
    if err != nil {
        return VerifiedRODataColumnError(err)
    }
    if uint32(count) != sszEncodedDataColumnSidecarSize {
        return VerifiedRODataColumnError(err)
    }

    // Unmarshal the SSZ encoded data column sidecar.
    dataColumnSidecar := &ethpb.DataColumnSidecar{}
    if err := dataColumnSidecar.UnmarshalSSZ(sszEncodedDataColumnSidecar); err != nil {
        return VerifiedRODataColumnError(err)
    }

    // Create an RO data column.
    roDataColumnSidecar, err := blocks.NewRODataColumnWithRoot(dataColumnSidecar, root)
    if err != nil {
        return VerifiedRODataColumnError(err)
    }

    // Create a verified RO data column.
    verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roDataColumnSidecar)

    return verifiedRODataColumn, nil
}

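A minimal usage sketch of VerifiedRODataColumnFromDisk, assuming an in-memory afero file, an "io" import for io.SeekStart, and SSZ bytes produced elsewhere; the helper name and inputs below are illustrative, not part of the commit.

// loadColumnFromBytes is an illustrative helper: it writes SSZ bytes to an
// in-memory afero file and loads them back as a verified data column sidecar.
func loadColumnFromBytes(sszBytes []byte, blockRoot [fieldparams.RootLength]byte) (blocks.VerifiedRODataColumn, error) {
    fs := afero.NewMemMapFs()
    file, err := fs.Create("column.ssz")
    if err != nil {
        return VerifiedRODataColumnError(err)
    }
    if _, err := file.Write(sszBytes); err != nil {
        return VerifiedRODataColumnError(err)
    }
    // Rewind so the read in VerifiedRODataColumnFromDisk starts at the beginning of the file.
    if _, err := file.Seek(0, io.SeekStart); err != nil {
        return VerifiedRODataColumnError(err)
    }
    return VerifiedRODataColumnFromDisk(file, blockRoot, uint32(len(sszBytes)))
}
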
@@ -5,9 +5,11 @@ import (
    "sync"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
    forkchoicetypes "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/types"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/state"
    fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/network/forks"
@@ -38,6 +40,7 @@ type sharedResources struct {
    sc SignatureCache
    pc ProposerCache
    sr StateByRooter
    ic *inclusionProofCache
}

// Initializer is used to create different Verifiers.
@@ -57,6 +60,18 @@ func (ini *Initializer) NewBlobVerifier(b blocks.ROBlob, reqs []Requirement) *RO
    }
}

// NewDataColumnsVerifier creates a DataColumnsVerifier for a slice of data columns, with the given set of requirements.
// WARNING: The returned verifier is not thread-safe, and should not be used concurrently.
func (ini *Initializer) NewDataColumnsVerifier(roDataColumns []blocks.RODataColumn, reqs []Requirement) *RODataColumnsVerifier {
    return &RODataColumnsVerifier{
        sharedResources: ini.shared,
        dataColumns: roDataColumns,
        results: newResults(reqs...),
        verifyDataColumnsCommitment: peerdas.VerifyDataColumnsSidecarKZGProofs,
        stateByRoot: make(map[[fieldparams.RootLength]byte]state.BeaconState),
    }
}

// InitializerWaiter provides an Initializer once all dependent resources are ready
// via the WaitForInitializer method.
type InitializerWaiter struct {
@@ -86,6 +101,7 @@ func NewInitializerWaiter(cw startup.ClockWaiter, fc Forkchoicer, sr StateByRoot
        fc: fc,
        pc: pc,
        sr: sr,
        ic: newInclusionProofCache(DefaultInclusionProofCacheSize),
    }
    iw := &InitializerWaiter{cw: cw, ini: &Initializer{shared: shared}}
    for _, o := range opts {
@@ -107,6 +123,7 @@ func (w *InitializerWaiter) WaitForInitializer(ctx context.Context) (*Initialize
    vr := w.ini.shared.clock.GenesisValidatorsRoot()
    sc := newSigCache(vr[:], DefaultSignatureCacheSize, w.getFork)
    w.ini.shared.sc = sc
    w.ini.shared.ic = newInclusionProofCache(DefaultInclusionProofCacheSize)
    return w.ini, nil
}

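A rough caller-side sketch of the new constructor, assuming an Initializer obtained from WaitForInitializer; the helper, its requirement list, and the order of checks are illustrative, not code from this commit.

// verifyGossipColumns is illustrative only: it runs a few of the checks exposed
// by the verifier and then asks for the verified columns.
func verifyGossipColumns(ctx context.Context, ini *Initializer, columns []blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error) {
    v := ini.NewDataColumnsVerifier(columns, GossipDataColumnSidecarRequirements)
    if err := v.SidecarInclusionProven(); err != nil {
        return nil, err
    }
    if err := v.SidecarKzgProofVerified(); err != nil {
        return nil, err
    }
    if err := v.SidecarProposerExpected(ctx); err != nil {
        return nil, err
    }
    // Still errors if the requirement slice contains checks that were not run above.
    return v.VerifiedRODataColumns()
}
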
@@ -3,6 +3,7 @@ package verification
import (
    "context"

    fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
)

@@ -29,3 +30,27 @@ type BlobVerifier interface {
// NewBlobVerifier is a function signature that can be used by code that needs to be
// able to mock Initializer.NewBlobVerifier without complex setup.
type NewBlobVerifier func(b blocks.ROBlob, reqs []Requirement) BlobVerifier

// DataColumnsVerifier defines the methods implemented by the RODataColumnsVerifier.
// It serves the same purpose for data column sidecars as BlobVerifier does for blob sidecars.
type DataColumnsVerifier interface {
    VerifiedRODataColumns() ([]blocks.VerifiedRODataColumn, error)
    SatisfyRequirement(Requirement)

    ValidFields() error
    CorrectSubnet(dataColumnSidecarSubTopic string, expectedTopics []string) error
    NotFromFutureSlot() error
    SlotAboveFinalized() error
    ValidProposerSignature(ctx context.Context) error
    SidecarParentSeen(parentSeen func([fieldparams.RootLength]byte) bool) error
    SidecarParentValid(badParent func([fieldparams.RootLength]byte) bool) error
    SidecarParentSlotLower() error
    SidecarDescendsFromFinalized() error
    SidecarInclusionProven() error
    SidecarKzgProofVerified() error
    SidecarProposerExpected(ctx context.Context) error
}

// NewDataColumnsVerifier is a function signature that can be used by code that needs to
// mock Initializer.NewDataColumnsVerifier without complex setup.
type NewDataColumnsVerifier func(dataColumns []blocks.RODataColumn, reqs []Requirement) DataColumnsVerifier

@@ -13,4 +13,25 @@ var (
        },
        []string{"result"},
    )
    columnVerificationProposerSignatureCache = promauto.NewCounterVec(
        prometheus.CounterOpts{
            Name: "data_column_verification_proposer_signature_cache",
            Help: "DataColumnSidecar proposer signature cache result.",
        },
        []string{"result"},
    )
    dataColumnSidecarInclusionProofVerificationHistogram = promauto.NewHistogram(
        prometheus.HistogramOpts{
            Name: "beacon_data_column_sidecar_inclusion_proof_verification_milliseconds",
            Help: "Captures the time taken to verify data column sidecar inclusion proof.",
            Buckets: []float64{5, 10, 50, 100, 150, 250, 500, 1000, 2000},
        },
    )
    dataColumnBatchKZGVerificationHistogram = promauto.NewHistogram(
        prometheus.HistogramOpts{
            Name: "beacon_kzg_verification_data_column_batch_milliseconds",
            Help: "Captures the time taken for batched data column kzg verification.",
            Buckets: []float64{5, 10, 50, 100, 150, 250, 500, 1000, 2000},
        },
    )
)

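A hedged sketch of how a histogram like dataColumnBatchKZGVerificationHistogram is typically fed, assuming a "time" import; the wrapper function is illustrative, not part of the commit.

// observeBatchKZGVerification times a batched KZG proof check and records it in milliseconds.
func observeBatchKZGVerification(columns []blocks.RODataColumn) error {
    start := time.Now()
    err := peerdas.VerifyDataColumnsSidecarKZGProofs(columns)
    // Report elapsed wall time in milliseconds to match the metric's bucket units.
    dataColumnBatchKZGVerificationHistogram.Observe(float64(time.Since(start).Milliseconds()))
    return err
}
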
@@ -90,6 +90,11 @@ func (r *results) result(req Requirement) error {
    return r.done[req]
}

func (r *results) cached(req Requirement) (bool, error) {
    result, ok := r.done[req]
    return ok, result
}

func (r *results) errors(err error) error {
    return newVerificationMultiError(r, err)
}

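A small sketch of the memoization pattern cached and record enable; checkOnce is a hypothetical helper for illustration, not part of the commit.

// checkOnce runs check only if req has not already been recorded on r.
func checkOnce(r *results, req Requirement, check func() error) error {
    // Reuse the outcome recorded on a previous call instead of re-running the check.
    if ok, res := r.cached(req); ok {
        return res
    }
    err := check()
    r.record(req, err)
    return err
}
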
beacon-chain/verification/verification_test.go
@@ -0,0 +1,15 @@
package verification

import (
    "os"
    "testing"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
)

func TestMain(t *testing.M) {
    if err := kzg.Start(); err != nil {
        os.Exit(1)
    }
    t.Run()
}

changelog/manu-peerdas-verification.md
@@ -0,0 +1,2 @@
### Added
- Data column sidecars verification methods.

@@ -31,6 +31,7 @@ const (
    BlobLength = 131072 // BlobLength defines the byte length of a blob.
    BlobSize = 131072 // defined to match blob.size in bazel ssz codegen
    BlobSidecarSize = 131928 // defined to match blob sidecar size in bazel ssz codegen
    KzgCommitmentSize = 48 // KzgCommitmentSize defines the byte length of a KZG commitment.
    KzgCommitmentInclusionProofDepth = 17 // Merkle proof depth for blob_kzg_commitments list item
    ExecutionBranchDepth = 4 // ExecutionBranchDepth defines the number of leaves in a merkle proof of the execution payload header.
    SyncCommitteeBranchDepth = 5 // SyncCommitteeBranchDepth defines the number of leaves in a merkle proof of a sync committee.

@@ -31,6 +31,7 @@ const (
    BlobLength = 131072 // BlobLength defines the byte length of a blob.
    BlobSize = 131072 // defined to match blob.size in bazel ssz codegen
    BlobSidecarSize = 131928 // defined to match blob sidecar size in bazel ssz codegen
    KzgCommitmentSize = 48 // KzgCommitmentSize defines the byte length of a KZG commitment.
    KzgCommitmentInclusionProofDepth = 10 // Merkle proof depth for blob_kzg_commitments list item
    ExecutionBranchDepth = 4 // ExecutionBranchDepth defines the number of leaves in a merkle proof of the execution payload header.
    SyncCommitteeBranchDepth = 5 // SyncCommitteeBranchDepth defines the number of leaves in a merkle proof of a sync committee.

@@ -2,6 +2,8 @@ package blocks

import (
    fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
)

@@ -57,6 +59,21 @@ func (dc *RODataColumn) BlockRoot() [fieldparams.RootLength]byte {
    return dc.root
}

// Slot returns the slot of the data column sidecar.
func (dc *RODataColumn) Slot() primitives.Slot {
    return dc.SignedBlockHeader.Header.Slot
}

// ParentRoot returns the parent root of the data column sidecar.
func (dc *RODataColumn) ParentRoot() [fieldparams.RootLength]byte {
    return bytesutil.ToBytes32(dc.SignedBlockHeader.Header.ParentRoot)
}

// ProposerIndex returns the proposer index of the data column sidecar.
func (dc *RODataColumn) ProposerIndex() primitives.ValidatorIndex {
    return dc.SignedBlockHeader.Header.ProposerIndex
}

// VerifiedRODataColumn represents an RODataColumn that has undergone full verification (eg block sig, inclusion proof, commitment check).
type VerifiedRODataColumn struct {
    RODataColumn

@@ -4,6 +4,7 @@ import (
    "testing"

    fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/testing/assert"
@@ -118,8 +119,52 @@ func TestNewRODataColumnWithAndWithoutRoot(t *testing.T) {

func TestDataColumn_BlockRoot(t *testing.T) {
    root := [fieldparams.RootLength]byte{1}
    dataColumn := &RODataColumn{
        root: root,
    }
    dataColumn := &RODataColumn{root: root}
    assert.Equal(t, root, dataColumn.BlockRoot())
}

func TestDataColumn_Slot(t *testing.T) {
    slot := primitives.Slot(1)

    dataColumn := &RODataColumn{
        DataColumnSidecar: &ethpb.DataColumnSidecar{
            SignedBlockHeader: &ethpb.SignedBeaconBlockHeader{
                Header: &ethpb.BeaconBlockHeader{
                    Slot: slot,
                },
            },
        },
    }

    assert.Equal(t, slot, dataColumn.Slot())
}

func TestDataColumn_ParentRoot(t *testing.T) {
    root := [fieldparams.RootLength]byte{1}
    dataColumn := &RODataColumn{
        DataColumnSidecar: &ethpb.DataColumnSidecar{
            SignedBlockHeader: &ethpb.SignedBeaconBlockHeader{
                Header: &ethpb.BeaconBlockHeader{
                    ParentRoot: root[:],
                },
            },
        },
    }

    assert.Equal(t, root, dataColumn.ParentRoot())
}

func TestDataColumn_ProposerIndex(t *testing.T) {
    proposerIndex := primitives.ValidatorIndex(1)
    dataColumn := &RODataColumn{
        DataColumnSidecar: &ethpb.DataColumnSidecar{
            SignedBlockHeader: &ethpb.SignedBeaconBlockHeader{
                Header: &ethpb.BeaconBlockHeader{
                    ProposerIndex: proposerIndex,
                },
            },
        },
    }

    assert.Equal(t, proposerIndex, dataColumn.ProposerIndex())
}

@@ -2,7 +2,10 @@ load("@prysm//tools/go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["blob.go"],
    srcs = [
        "blob.go",
        "data_column.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v6/runtime/logging",
    visibility = ["//visibility:public"],
    deps = [

runtime/logging/data_column.go
@@ -0,0 +1,23 @@
package logging

import (
    "fmt"

    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
    "github.com/sirupsen/logrus"
)

// DataColumnFields extracts a standard set of fields from a DataColumnSidecar into a logrus.Fields struct
// which can be passed to log.WithFields.
func DataColumnFields(column blocks.RODataColumn) logrus.Fields {
    kzgCommitmentCount := len(column.KzgCommitments)

    return logrus.Fields{
        "slot": column.Slot(),
        "propIdx": column.ProposerIndex(),
        "blockRoot": fmt.Sprintf("%#x", column.BlockRoot())[:8],
        "parentRoot": fmt.Sprintf("%#x", column.ParentRoot())[:8],
        "kzgCommitmentCount": kzgCommitmentCount,
        "colIdx": column.Index,
    }
}

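A minimal usage sketch of DataColumnFields; the wrapper function is illustrative and assumes the caller imports this logging package and logrus.

// logReceivedColumn attaches the standard data column fields to a log line when a sidecar arrives.
func logReceivedColumn(column blocks.RODataColumn) {
    logrus.WithFields(logging.DataColumnFields(column)).Debug("Received data column sidecar")
}
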
@@ -36,6 +36,7 @@ go_library(
        "//beacon-chain/core/altair:go_default_library",
        "//beacon-chain/core/blocks:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/peerdas:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/core/time:go_default_library",
        "//beacon-chain/core/transition:go_default_library",

@@ -1,13 +1,190 @@
package util

import (
    "encoding/binary"
    "math/big"
    "testing"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
    fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/crypto/bls"
    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
    "github.com/OffchainLabs/prysm/v6/network/forks"
    enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
    "github.com/OffchainLabs/prysm/v6/testing/require"
    "github.com/OffchainLabs/prysm/v6/time/slots"
    "github.com/ethereum/go-ethereum/common"
    gethTypes "github.com/ethereum/go-ethereum/core/types"
)

func GenerateCellsAndProofs(t *testing.T, blobs []kzg.Blob) []kzg.CellsAndProofs {
type FuluBlockGeneratorOption func(*fuluBlockGenerator)

type fuluBlockGenerator struct {
    parent [32]byte
    slot primitives.Slot
    blobCount int
    sign bool
    sk bls.SecretKey
    proposer primitives.ValidatorIndex
    valRoot []byte
    payload *enginev1.ExecutionPayloadDeneb
}

func WithFuluProposerSigning(idx primitives.ValidatorIndex, sk bls.SecretKey, valRoot []byte) FuluBlockGeneratorOption {
    return func(g *fuluBlockGenerator) {
        g.sign = true
        g.proposer = idx
        g.sk = sk
        g.valRoot = valRoot
    }
}

func WithFuluPayload(p *enginev1.ExecutionPayloadDeneb) FuluBlockGeneratorOption {
    return func(g *fuluBlockGenerator) {
        g.payload = p
    }
}

func WithParentRoot(root [fieldparams.RootLength]byte) FuluBlockGeneratorOption {
    return func(g *fuluBlockGenerator) {
        g.parent = root
    }
}

func GenerateTestFuluBlockWithSidecars(t *testing.T, blobCount int, options ...FuluBlockGeneratorOption) (blocks.ROBlock, []blocks.RODataColumn, []blocks.VerifiedRODataColumn) {
    generator := &fuluBlockGenerator{blobCount: blobCount}

    for _, option := range options {
        option(generator)
    }

    if generator.payload == nil {
        ads := common.HexToAddress("095e7baea6a6c7c4c2dfeb977efac326af552d87")
        tx := gethTypes.NewTx(&gethTypes.LegacyTx{
            Nonce: 0,
            To: &ads,
            Value: big.NewInt(0),
            Gas: 0,
            GasPrice: big.NewInt(0),
            Data: nil,
        })

        txs := []*gethTypes.Transaction{tx}
        encodedBinaryTxs := make([][]byte, 1)

        var err error
        encodedBinaryTxs[0], err = txs[0].MarshalBinary()
        require.NoError(t, err)

        blockHash := bytesutil.ToBytes32([]byte("foo"))

        generator.payload = &enginev1.ExecutionPayloadDeneb{
            ParentHash: bytesutil.PadTo([]byte("parentHash"), fieldparams.RootLength),
            FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
            StateRoot: bytesutil.PadTo([]byte("stateRoot"), fieldparams.RootLength),
            ReceiptsRoot: bytesutil.PadTo([]byte("receiptsRoot"), fieldparams.RootLength),
            LogsBloom: bytesutil.PadTo([]byte("logs"), fieldparams.LogsBloomLength),
            PrevRandao: blockHash[:],
            BlockNumber: 0,
            GasLimit: 0,
            GasUsed: 0,
            Timestamp: 0,
            ExtraData: make([]byte, 0),
            BaseFeePerGas: bytesutil.PadTo([]byte("baseFeePerGas"), fieldparams.RootLength),
            BlockHash: blockHash[:],
            Transactions: encodedBinaryTxs,
            Withdrawals: make([]*enginev1.Withdrawal, 0),
            BlobGasUsed: 0,
            ExcessBlobGas: 0,
        }
    }

    block := NewBeaconBlockFulu()
    block.Block.Body.ExecutionPayload = generator.payload
    block.Block.Slot = generator.slot
    block.Block.ParentRoot = generator.parent[:]
    block.Block.ProposerIndex = generator.proposer

    block.Block.Body.BlobKzgCommitments = make([][]byte, blobCount)
    for i := range blobCount {
        var commitment [fieldparams.KzgCommitmentSize]byte
        binary.LittleEndian.PutUint16(commitment[:16], uint16(i))
        binary.LittleEndian.PutUint16(commitment[16:32], uint16(generator.slot))
        block.Block.Body.BlobKzgCommitments[i] = commitment[:]
    }

    body, err := blocks.NewBeaconBlockBody(block.Block.Body)
    require.NoError(t, err)

    inclusion := make([][][]byte, blobCount)
    for i := range blobCount {
        proof, err := blocks.MerkleProofKZGCommitment(body, i)
        require.NoError(t, err)

        inclusion[i] = proof
    }

    if generator.sign {
        epoch := slots.ToEpoch(block.Block.Slot)
        schedule := forks.NewOrderedSchedule(params.BeaconConfig())

        version, err := schedule.VersionForEpoch(epoch)
        require.NoError(t, err)

        fork, err := schedule.ForkFromVersion(version)
        require.NoError(t, err)

        domain := params.BeaconConfig().DomainBeaconProposer
        sig, err := signing.ComputeDomainAndSignWithoutState(fork, epoch, domain, generator.valRoot, block.Block, generator.sk)
        require.NoError(t, err)

        block.Signature = sig
    }

    root, err := block.Block.HashTreeRoot()
    require.NoError(t, err)

    sbb, err := blocks.NewSignedBeaconBlock(block)
    require.NoError(t, err)

    sh, err := sbb.Header()
    require.NoError(t, err)

    blobs := make([]kzg.Blob, blobCount)
    for i, commitment := range block.Block.Body.BlobKzgCommitments {
        roSidecars := GenerateTestDenebBlobSidecar(t, root, sh, i, commitment, inclusion[i])
        blobs[i] = kzg.Blob(roSidecars.Blob)
    }

    cellsAndProofs := GenerateCellsAndProofs(t, blobs)

    dataColumns, err := peerdas.DataColumnSidecars(sbb, cellsAndProofs)
    require.NoError(t, err)

    roSidecars := make([]blocks.RODataColumn, 0, len(dataColumns))
    roVerifiedSidecars := make([]blocks.VerifiedRODataColumn, 0, len(dataColumns))
    for _, dataColumn := range dataColumns {
        roSidecar, err := blocks.NewRODataColumnWithRoot(dataColumn, root)
        require.NoError(t, err)

        roVerifiedSidecar := blocks.NewVerifiedRODataColumn(roSidecar)

        roSidecars = append(roSidecars, roSidecar)
        roVerifiedSidecars = append(roVerifiedSidecars, roVerifiedSidecar)
    }

    rob, err := blocks.NewROBlockWithRoot(sbb, root)
    require.NoError(t, err)

    return rob, roSidecars, roVerifiedSidecars
}

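A short usage sketch of the generator above; the test body and the two-blob count are illustrative assumptions, and it assumes the KZG backend has already been started via kzg.Start.

func TestFuluSidecarFixtureSketch(t *testing.T) {
    parentRoot := [fieldparams.RootLength]byte{1}
    // Build a Fulu block plus its data column sidecars with a custom parent root.
    block, columns, verifiedColumns := GenerateTestFuluBlockWithSidecars(t, 2, WithParentRoot(parentRoot))
    require.Equal(t, parentRoot, columns[0].ParentRoot())
    require.Equal(t, block.Root(), columns[0].BlockRoot())
    require.Equal(t, len(columns), len(verifiedColumns))
}
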
func GenerateCellsAndProofs(t testing.TB, blobs []kzg.Blob) []kzg.CellsAndProofs {
    cellsAndProofs := make([]kzg.CellsAndProofs, len(blobs))
    for i := range blobs {
        cp, err := kzg.ComputeCellsAndKZGProofs(&blobs[i])