Compare commits

...

71 Commits

Author SHA1 Message Date
Kasey Kirkham
bf0ccf05b2 remove duplicate HTR in call path 2025-09-14 17:13:59 -05:00
Kasey Kirkham
9aeb89cedd refactor to deduplicate sidecar construction code 2025-09-12 23:14:27 -05:00
Manu NALEPA
81d413ca73 Merge branch 'develop' into peerDAS-getBlobsV2 2025-09-11 16:04:34 +02:00
james-prysm
1933adedbf update to v1.6.0-alpha.6 (#15658)
* wip workspace update

* update ethspecify yml

* fixing ethspecify

* fixing ethspecify

* fixing loader test

* changelog and reverting something in configs

* adding bad hash
2025-09-05 17:20:13 +00:00
Potuz
278b796e43 Fix next epoch proposer duties (#15642)
* Fix next epoch proposer duties

* Do not update state's slot when computing the proposer

Also do not call Fulu's proposer lookahead if the requested epoch is not
current or next.

* retract Terence's test

* Fix tests

* removing epoch check to pass spec test

* reverting rollback and fixing test setup

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
Co-authored-by: james-prysm <james@prysmaticlabs.com>
2025-08-29 21:45:23 +00:00
satushh
a55d61de9c remove redundant flag option 2025-08-29 19:02:51 +01:00
satushh
4faeac57e1 fix bad merge 2025-08-29 18:58:00 +01:00
satushh
16bed0a93f correct bad merge 2025-08-29 18:45:27 +01:00
satushh
0e9d5f4513 tidy up IsDataAvailable 2025-08-29 18:30:15 +01:00
satushh
65d6be0406 put samplesPerSlot at appropriate place 2025-08-29 18:23:34 +01:00
satushh
115e20de62 fix more tests 2025-08-29 18:23:34 +01:00
satushh
fb4028f736 fix test 2025-08-29 18:23:34 +01:00
satushh
af7470e835 remove redundant function and fix name 2025-08-29 18:23:34 +01:00
satushh
618fe1d1b2 blockchain: get variable samplesPerSlot only when required 2025-08-29 18:23:34 +01:00
satushh
71fe640eef execution: use service context instead of function's for retry 2025-08-29 18:23:34 +01:00
satushh
350fead19b execution: edge case - delete activeRetries on success 2025-08-29 18:23:34 +01:00
satushh
23645e549a sync: new appropriate mock service 2025-08-29 18:23:34 +01:00
satushh
2abe1adb29 sync: fix lint, test and add extra test for when data is actually not available 2025-08-29 18:23:34 +01:00
satushh
c9f3e111e6 lint: formatting and remove confusing comment 2025-08-29 18:23:34 +01:00
satushh
f8410908e9 blockchain: cleaner DA check 2025-08-29 18:23:34 +01:00
satushh
4c328977d9 execution: make reconstructSingleflight part of the service struct 2025-08-29 18:23:34 +01:00
satushh
b98a875194 blockchain: move IsDataAvailable interface to blockchain package 2025-08-29 18:23:34 +01:00
satushh
ee3a7b980a sync: don't call ReconstructDataColumnSidecars if not required 2025-08-29 18:23:34 +01:00
satushh
62dfe6bde5 execution: ensure single responsibility, execution should not do DA check 2025-08-29 18:23:34 +01:00
satushh
3a671abaa0 execution: ensure the retry actually happens when it needs to 2025-08-29 18:23:34 +01:00
satushh
ae6ca74c24 lint: format 2025-08-29 18:23:34 +01:00
satushh
bdd423e100 execution: retry logic inside ReconstructDataColumnSidecars itself 2025-08-29 18:23:34 +01:00
satushh
b9e31f258b lint: lint and use unused metrics 2025-08-29 18:23:34 +01:00
satushh
7c1eef5780 lint: formatting 2025-08-29 18:23:34 +01:00
satushh
88620a1a63 blockchain: fix CustodyGroupCount return 2025-08-29 18:23:34 +01:00
satushh
3c9a0b9511 bazel: bazel run //:gazelle -- fix 2025-08-29 18:23:34 +01:00
satushh
a8c5cfd871 sync: remove unwanted tests 2025-08-29 18:23:34 +01:00
satushh
d3b2122542 da: updated IsDataAvailable 2025-08-29 18:23:34 +01:00
satushh
a12b00c71c execution: retry atomicity test 2025-08-29 18:23:34 +01:00
satushh
358b9acc53 execution: fix test 2025-08-29 18:23:34 +01:00
satushh
aa31a16a9c sync: remove unwanted checks 2025-08-29 18:23:34 +01:00
satushh
86bbb6ee3b da: non blocking checks 2025-08-29 18:23:34 +01:00
satushh
139c2b887c exec: hardcode retry interval 2025-08-29 18:23:34 +01:00
satushh
88b03089d2 sync: no goroutine, getblobsv2 in absence of block as well, wrap error 2025-08-29 18:23:34 +01:00
satushh
bce0960422 engine: remove isDataAlreadyAvailable function 2025-08-29 18:23:34 +01:00
satushh
58f196c38c reconstruct: simplify multi goroutine case and avoid race condition 2025-08-29 18:23:34 +01:00
satushh
de63e24815 reconstruct: load once, correctly deliver the result to all waiting goroutines 2025-08-29 18:23:34 +01:00
satushh
c851987e49 beacon: default retry interval 2025-08-29 18:23:34 +01:00
satushh
141f9adff3 lint: remove unused field 2025-08-29 18:23:34 +01:00
satushh
47bf2bd985 sidecar: recover function and different context for retrying 2025-08-29 18:23:34 +01:00
satushh
fb28a17fd5 config: make retry interval configurable 2025-08-29 18:23:34 +01:00
satushh
338d4c29d2 lint: return error when it is not nil 2025-08-29 18:23:34 +01:00
satushh
35f6d1277d lint: fmt and log capitalisation 2025-08-29 18:23:34 +01:00
satushh
77b3d23c86 test: engine client and sync package, metrics 2025-08-29 18:23:34 +01:00
satushh
32e81a33dd getBlobsV2: retry if reconstruction isnt successful 2025-08-29 18:23:34 +01:00
Manu NALEPA
54ce47a839 Add DataColumnStorage and SubscribeAllDataSubnets flag. 2025-08-29 18:23:34 +01:00
Manu NALEPA
208febf884 Fix Potuz's comment. 2025-08-29 18:23:34 +01:00
Manu NALEPA
b3c9cc2459 selectPeers: Avoid map with key but empty value. 2025-08-29 18:23:34 +01:00
Manu NALEPA
d8895ec49c Revert "Fix James' comment."
This reverts commit a3f919205a.
2025-08-29 18:23:34 +01:00
Manu NALEPA
5e593d50eb Revert "Fix Potuz's comment."
This reverts commit c45230b455.
2025-08-29 18:23:34 +01:00
Manu NALEPA
4878468047 Fix flakiness in TestSelectPeers. 2025-08-29 18:23:34 +01:00
Manu NALEPA
e10045cf8c Fix James' comment. 2025-08-29 18:23:34 +01:00
Manu NALEPA
b18c4b99c7 Implement TestSelectPeers. 2025-08-29 18:23:34 +01:00
Manu NALEPA
272a95934f Implement TestFetchDataColumnSidecarsFromPeers. 2025-08-29 18:23:34 +01:00
Manu NALEPA
e4582ed1e2 Fix Potuz's comment. 2025-08-29 18:23:34 +01:00
Manu NALEPA
f2cc2e7128 Fix Potuz's comment. 2025-08-29 18:23:34 +01:00
Manu NALEPA
84b1bc7635 Fix Potuz's comment. 2025-08-29 18:23:34 +01:00
Manu NALEPA
c51a8f5965 Fix Potuz's comment. 2025-08-29 18:23:34 +01:00
Manu NALEPA
fdb858473e Fix Potuz's comment. 2025-08-29 18:23:34 +01:00
Manu NALEPA
e2356bb7ad PeerDAS: Implement sync 2025-08-29 18:23:34 +01:00
Manu NALEPA
8e52d0c3c6 Retry fetch origin data column sidecars. (#15634)
* `convertToAddrInfo`: Add peer details on error.

* `fetchOriginColumns`: Remove unused argument.

* `fetchOriginColumns`: Fix typo

* `isSidecarIndexRequested`: Log requested indices.

* `fetchOriginColumns`: Retry.

* Add changelog.

* Fix Preston comment.

* `custodyGroupCountFromPeerENR`: Add agent on error messages.

* Update beacon-chain/sync/initial-sync/service.go

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>

* Fix `TestCustodyGroupCountFromPeer`.

* `s.fetchOriginColumns`: Use `maxAttempts` and `delay` as parameters to ease unit testing.

* Implement `TestFetchOriginColumns`.

* `SendDataColumnSidecarsByRangeRequest` and `SendDataColumnSidecarsByRootRequest`: Add option to downscore the peer on RPC error.

* `fetchOriginColumns`: Remove max attempts, and downscore peers on RPC fault.

---------

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2025-08-28 21:16:34 +00:00
james-prysm
d339e09509 fulu block proposals with datacolumn broadcast (#15628)
* propose block changes from peerdas branch

* breaking out broadcast code into its own helper, changing fulu broadcast for rest api to properly send datasidecars

* renamed validate blobsidecars to validate blobs, and added check for max blobs

* gofmt

* adding in batch verification for blobs"

* changelog

* adding kzg tests, moving new kzg functions to validation.go

* linting and other small fixes

* fixing linting issues and adding some proposer tests

* missing dependencies

* fixing test

* fixing more tests

* gaz

* removed return on broadcast data columns

* more cleanup and unit test adjustments

* missed removal of unneeded field

* adding data column receiver initialization

* Update beacon-chain/rpc/eth/beacon/handlers.go

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>

* partial review feedback from manu

* gaz

* reverting some code to peerdas as I don't believe the broadcast code needs to be reused

* missed removal of build dependency

* fixing tests and adding another test based on manu's suggestion

* fixing linting

* Update beacon-chain/rpc/eth/beacon/handlers.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* Update beacon-chain/blockchain/kzg/validation.go

Co-authored-by: Radosław Kapka <rkapka@wp.pl>

* radek's review changes

* adding missed test

---------

Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2025-08-28 16:27:30 +00:00
Pop Chunhapanya
8ec460223c Swap the wrong arguments in a call (#15639)
* Swap the wrong arguments in a call

I noticed that the names of the passed arguments don't match those of the
function parameters, so I suspect it's a bug.

* Add changelog

* Add validation for the fillInForkChoiceMissingBlocks checkpoints.

* Add test for checkpoint epoch validation in fillInForkChoiceMissingBlocks.

* Use a sentinel error rather than error string

---------

Co-authored-by: kasey <489222+kasey@users.noreply.github.com>
Co-authored-by: Preston Van Loon <preston@pvl.dev>
2025-08-27 21:29:44 +00:00
Potuz
349d9d2fd0 Start from Justified checkpoint by default (#15636)
By default, when starting a node, we load the finalized checkpoint from
db and set it as head. When the chain has not been finalizing for a
while and the user does not start from the latest head, it may still be
beneficial to start from the latest justified checkpoint, which has to be
a descendant of the finalized one.
2025-08-27 15:50:10 +00:00
Sahil Sojitra
e0aecb9c32 Refactor to use atomic types (#15625)
* refactor to use atomic types

* added changlog fragment file

* update change log
2025-08-26 19:57:14 +00:00
Potuz
4a1ab70929 Change error message (#15635) 2025-08-26 17:24:32 +00:00
87 changed files with 2889 additions and 870 deletions

View File

@@ -253,16 +253,16 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.6.0-alpha.5"
consensus_spec_version = "v1.6.0-alpha.6"
load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests")
consensus_spec_tests(
name = "consensus_spec_tests",
flavors = {
"general": "sha256-BXuEb1XbeSft0qzVFnoB8KC0YR1qM3ybT5lKUDbUWn8=",
"minimal": "sha256-EjwSHgBbWSoy5hm9V+A/bVMabyojaKsBNPrRtuPVq4k=",
"mainnet": "sha256-OGWMzarzaV1B9mVpy48/DCUbhjfX+b64pAxWwPLWhAs=",
"general": "sha256-7wkWuahuCO37uVYnxq8Badvi+jY907pBj68ixL8XDOI=",
"minimal": "sha256-Qy/f27N0LffS/ej7VhIubwDejD6LMK0VdenKkqtZVt4=",
"mainnet": "sha256-3H7mu5yE+FGz2Wr/nc8Nd9aEu93YoEpsYtn0zBSoeDE=",
},
version = consensus_spec_version,
)
@@ -278,7 +278,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-FQWR5EZuVcQGR0ol9vpd7eunnfGexJ/7J3xycrFEJbU=",
integrity = "sha256-uvz3XfMTGfy3/BtQQoEp5XQOgrWgcH/5Zo/gR0iiP+k=",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)

View File

@@ -14,7 +14,10 @@ const BytesPerBlob = ckzg4844.BytesPerBlob
type Blob [BytesPerBlob]byte
// BytesPerCell is the number of bytes in a single cell.
const BytesPerCell = ckzg4844.BytesPerCell
const (
BytesPerCell = ckzg4844.BytesPerCell
BytesPerProof = ckzg4844.BytesPerProof
)
// Cell represents a chunk of an encoded Blob.
type Cell [BytesPerCell]byte
@@ -23,7 +26,7 @@ type Cell [BytesPerCell]byte
type Commitment [48]byte
// Proof represents a KZG proof that attests to the validity of a Blob or parts of it.
type Proof [48]byte
type Proof [BytesPerProof]byte
// Bytes48 is a 48-byte array.
type Bytes48 = ckzg4844.Bytes48
@@ -102,7 +105,6 @@ func VerifyCellKZGProofBatch(commitmentsBytes []Bytes48, cellIndices []uint64, c
for i := range cells {
ckzgCells[i] = ckzg4844.Cell(cells[i])
}
return ckzg4844.VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, ckzgCells, proofsBytes)
}

View File

@@ -1,10 +1,30 @@
package kzg
import (
"fmt"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
GoKZG "github.com/crate-crypto/go-kzg-4844"
ckzg4844 "github.com/ethereum/c-kzg-4844/v2/bindings/go"
"github.com/pkg/errors"
)
func bytesToBlob(blob []byte) *GoKZG.Blob {
var ret GoKZG.Blob
copy(ret[:], blob)
return &ret
}
func bytesToCommitment(commitment []byte) (ret GoKZG.KZGCommitment) {
copy(ret[:], commitment)
return
}
func bytesToKZGProof(proof []byte) (ret GoKZG.KZGProof) {
copy(ret[:], proof)
return
}
// Verify performs single or batch verification of commitments depending on the number of given BlobSidecars.
func Verify(blobSidecars ...blocks.ROBlob) error {
if len(blobSidecars) == 0 {
@@ -27,18 +47,121 @@ func Verify(blobSidecars ...blocks.ROBlob) error {
return kzgContext.VerifyBlobKZGProofBatch(blobs, cmts, proofs)
}
// VerifyBlobKZGProofBatch verifies KZG proofs for multiple blobs using batch verification.
// This is more efficient than verifying each blob individually when len(blobs) > 1.
// For single blob verification, it uses the optimized single verification path.
func VerifyBlobKZGProofBatch(blobs [][]byte, commitments [][]byte, proofs [][]byte) error {
if len(blobs) != len(commitments) || len(blobs) != len(proofs) {
return errors.Errorf("number of blobs (%d), commitments (%d), and proofs (%d) must match", len(blobs), len(commitments), len(proofs))
}
if len(blobs) == 0 {
return nil
}
// Optimize for single blob case - use single verification to avoid batch overhead
if len(blobs) == 1 {
return kzgContext.VerifyBlobKZGProof(
bytesToBlob(blobs[0]),
bytesToCommitment(commitments[0]),
bytesToKZGProof(proofs[0]))
}
// Use batch verification for multiple blobs
ckzgBlobs := make([]ckzg4844.Blob, len(blobs))
ckzgCommitments := make([]ckzg4844.Bytes48, len(commitments))
ckzgProofs := make([]ckzg4844.Bytes48, len(proofs))
for i := range blobs {
if len(blobs[i]) != len(ckzg4844.Blob{}) {
return fmt.Errorf("blobs len (%d) differs from expected (%d)", len(blobs[i]), len(ckzg4844.Blob{}))
}
if len(commitments[i]) != len(ckzg4844.Bytes48{}) {
return fmt.Errorf("commitments len (%d) differs from expected (%d)", len(commitments[i]), len(ckzg4844.Blob{}))
}
if len(proofs[i]) != len(ckzg4844.Bytes48{}) {
return fmt.Errorf("proofs len (%d) differs from expected (%d)", len(proofs[i]), len(ckzg4844.Blob{}))
}
ckzgBlobs[i] = ckzg4844.Blob(blobs[i])
ckzgCommitments[i] = ckzg4844.Bytes48(commitments[i])
ckzgProofs[i] = ckzg4844.Bytes48(proofs[i])
}
valid, err := ckzg4844.VerifyBlobKZGProofBatch(ckzgBlobs, ckzgCommitments, ckzgProofs)
if err != nil {
return errors.Wrap(err, "batch verification")
}
if !valid {
return errors.New("batch KZG proof verification failed")
}
return nil
}
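As a quick illustration of the fast path above, a minimal test-style sketch. It assumes the kzg trusted setup has been initialized via Start() and reuses the same helpers as the test file later in this diff (GetRandBlob, GenerateCommitmentAndProof); it is not part of the change itself.
// Sketch only — assumes this sits in the kzg package's test file, alongside
// the existing imports (testing, testing/require, testing/util/random).
func TestVerifyBlobKZGProofBatch_SingleBlobSketch(t *testing.T) {
	require.NoError(t, Start()) // load the trusted setup once

	blob := random.GetRandBlob(1)
	commitment, proof, err := GenerateCommitmentAndProof(blob)
	require.NoError(t, err)

	// A batch of one takes the optimized single-proof path above.
	err = VerifyBlobKZGProofBatch(
		[][]byte{blob[:]},
		[][]byte{commitment[:]},
		[][]byte{proof[:]},
	)
	require.NoError(t, err)
}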
// VerifyCellKZGProofBatchFromBlobData verifies cell KZG proofs in batch format directly from blob data.
// This is more efficient than reconstructing data column sidecars when you have the raw blob data and cell proofs.
// For PeerDAS/Fulu, the execution client provides cell proofs in flattened format via BlobsBundleV2.
// For single blob verification, it optimizes by computing cells once and verifying efficiently.
func VerifyCellKZGProofBatchFromBlobData(blobs [][]byte, commitments [][]byte, cellProofs [][]byte, numberOfColumns uint64) error {
blobCount := uint64(len(blobs))
expectedCellProofs := blobCount * numberOfColumns
if uint64(len(cellProofs)) != expectedCellProofs {
return errors.Errorf("expected %d cell proofs, got %d", expectedCellProofs, len(cellProofs))
}
if len(commitments) != len(blobs) {
return errors.Errorf("number of commitments (%d) must match number of blobs (%d)", len(commitments), len(blobs))
}
if blobCount == 0 {
return nil
}
// Handle multiple blobs - compute cells for all blobs
allCells := make([]Cell, 0, expectedCellProofs)
allCommitments := make([]Bytes48, 0, expectedCellProofs)
allIndices := make([]uint64, 0, expectedCellProofs)
allProofs := make([]Bytes48, 0, expectedCellProofs)
for blobIndex := range blobs {
if len(blobs[blobIndex]) != len(Blob{}) {
return fmt.Errorf("blobs len (%d) differs from expected (%d)", len(blobs[blobIndex]), len(Blob{}))
}
// Convert blob to kzg.Blob type
blob := Blob(blobs[blobIndex])
// Compute cells for this blob
cells, err := ComputeCells(&blob)
if err != nil {
return errors.Wrapf(err, "failed to compute cells for blob %d", blobIndex)
}
// Add cells and corresponding data for each column
for columnIndex := range numberOfColumns {
cellProofIndex := uint64(blobIndex)*numberOfColumns + columnIndex
if len(commitments[blobIndex]) != len(Bytes48{}) {
return fmt.Errorf("commitments len (%d) differs from expected (%d)", len(commitments[blobIndex]), len(Bytes48{}))
}
if len(cellProofs[cellProofIndex]) != len(Bytes48{}) {
return fmt.Errorf("proofs len (%d) differs from expected (%d)", len(cellProofs[cellProofIndex]), len(Bytes48{}))
}
allCells = append(allCells, cells[columnIndex])
allCommitments = append(allCommitments, Bytes48(commitments[blobIndex]))
allIndices = append(allIndices, columnIndex)
allProofs = append(allProofs, Bytes48(cellProofs[cellProofIndex]))
}
}
// Batch verify all cells
valid, err := VerifyCellKZGProofBatch(allCommitments, allIndices, allCells, allProofs)
if err != nil {
return errors.Wrap(err, "cell batch verification")
}
if !valid {
return errors.New("cell KZG proof batch verification failed")
}
return nil
}
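For reference, the flattened proof layout the function expects. The helper below is hypothetical, added only to spell out the indexing the loop above applies to BlobsBundleV2-style cell proofs.
// flattenedProofIndex returns where the proof for (blobIndex, columnIndex)
// lives in a BlobsBundleV2-style flattened cell-proof slice. With
// numberOfColumns = 128, blob 1 / column 3 maps to index 131.
func flattenedProofIndex(blobIndex, columnIndex, numberOfColumns uint64) uint64 {
	return blobIndex*numberOfColumns + columnIndex
}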

View File

@@ -37,6 +37,7 @@ func TestBytesToAny(t *testing.T) {
}
func TestGenerateCommitmentAndProof(t *testing.T) {
require.NoError(t, Start())
blob := random.GetRandBlob(123)
commitment, proof, err := GenerateCommitmentAndProof(blob)
require.NoError(t, err)
@@ -45,3 +46,432 @@ func TestGenerateCommitmentAndProof(t *testing.T) {
require.Equal(t, expectedCommitment, commitment)
require.Equal(t, expectedProof, proof)
}
func TestVerifyBlobKZGProofBatch(t *testing.T) {
// Initialize KZG for testing
require.NoError(t, Start())
t.Run("valid single blob batch", func(t *testing.T) {
blob := random.GetRandBlob(123)
commitment, proof, err := GenerateCommitmentAndProof(blob)
require.NoError(t, err)
blobs := [][]byte{blob[:]}
commitments := [][]byte{commitment[:]}
proofs := [][]byte{proof[:]}
err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
require.NoError(t, err)
})
t.Run("valid multiple blob batch", func(t *testing.T) {
blobCount := 3
blobs := make([][]byte, blobCount)
commitments := make([][]byte, blobCount)
proofs := make([][]byte, blobCount)
for i := 0; i < blobCount; i++ {
blob := random.GetRandBlob(int64(i))
commitment, proof, err := GenerateCommitmentAndProof(blob)
require.NoError(t, err)
blobs[i] = blob[:]
commitments[i] = commitment[:]
proofs[i] = proof[:]
}
err := VerifyBlobKZGProofBatch(blobs, commitments, proofs)
require.NoError(t, err)
})
t.Run("empty inputs should pass", func(t *testing.T) {
err := VerifyBlobKZGProofBatch([][]byte{}, [][]byte{}, [][]byte{})
require.NoError(t, err)
})
t.Run("mismatched input lengths", func(t *testing.T) {
blob := random.GetRandBlob(123)
commitment, proof, err := GenerateCommitmentAndProof(blob)
require.NoError(t, err)
// Test different mismatch scenarios
err = VerifyBlobKZGProofBatch(
[][]byte{blob[:]},
[][]byte{},
[][]byte{proof[:]},
)
require.ErrorContains(t, "number of blobs (1), commitments (0), and proofs (1) must match", err)
err = VerifyBlobKZGProofBatch(
[][]byte{blob[:], blob[:]},
[][]byte{commitment[:]},
[][]byte{proof[:], proof[:]},
)
require.ErrorContains(t, "number of blobs (2), commitments (1), and proofs (2) must match", err)
})
t.Run("invalid commitment should fail", func(t *testing.T) {
blob := random.GetRandBlob(123)
_, proof, err := GenerateCommitmentAndProof(blob)
require.NoError(t, err)
// Use a different blob's commitment (mismatch)
differentBlob := random.GetRandBlob(456)
wrongCommitment, _, err := GenerateCommitmentAndProof(differentBlob)
require.NoError(t, err)
blobs := [][]byte{blob[:]}
commitments := [][]byte{wrongCommitment[:]}
proofs := [][]byte{proof[:]}
err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
// Single blob optimization uses different error message
require.ErrorContains(t, "can't verify opening proof", err)
})
t.Run("invalid proof should fail", func(t *testing.T) {
blob := random.GetRandBlob(123)
commitment, _, err := GenerateCommitmentAndProof(blob)
require.NoError(t, err)
// Use wrong proof
invalidProof := make([]byte, 48) // All zeros
blobs := [][]byte{blob[:]}
commitments := [][]byte{commitment[:]}
proofs := [][]byte{invalidProof}
err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
require.ErrorContains(t, "short buffer", err)
})
t.Run("mixed valid and invalid proofs should fail", func(t *testing.T) {
// First blob - valid
blob1 := random.GetRandBlob(123)
commitment1, proof1, err := GenerateCommitmentAndProof(blob1)
require.NoError(t, err)
// Second blob - invalid proof
blob2 := random.GetRandBlob(456)
commitment2, _, err := GenerateCommitmentAndProof(blob2)
require.NoError(t, err)
invalidProof := make([]byte, 48) // All zeros
blobs := [][]byte{blob1[:], blob2[:]}
commitments := [][]byte{commitment1[:], commitment2[:]}
proofs := [][]byte{proof1[:], invalidProof}
err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
require.ErrorContains(t, "batch verification", err)
})
t.Run("batch KZG proof verification failed", func(t *testing.T) {
// Create multiple blobs with mismatched commitments and proofs to trigger batch verification failure
blob1 := random.GetRandBlob(123)
blob2 := random.GetRandBlob(456)
// Generate valid proof for blob1
commitment1, proof1, err := GenerateCommitmentAndProof(blob1)
require.NoError(t, err)
// Generate valid proof for blob2 but use wrong commitment (from blob1)
_, proof2, err := GenerateCommitmentAndProof(blob2)
require.NoError(t, err)
// Use blob2 data with blob1's commitment and blob2's proof - this should cause batch verification to fail
blobs := [][]byte{blob1[:], blob2[:]}
commitments := [][]byte{commitment1[:], commitment1[:]} // Wrong commitment for blob2
proofs := [][]byte{proof1[:], proof2[:]}
err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
require.ErrorContains(t, "batch KZG proof verification failed", err)
})
}
func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) {
// Initialize KZG for testing
require.NoError(t, Start())
t.Run("valid single blob cell verification", func(t *testing.T) {
numberOfColumns := uint64(128)
// Generate blob and commitment
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])
commitment, err := BlobToKZGCommitment(&blob)
require.NoError(t, err)
// Compute cells and proofs
cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob)
require.NoError(t, err)
// Create flattened cell proofs (like execution client format)
cellProofs := make([][]byte, numberOfColumns)
for i := range numberOfColumns {
cellProofs[i] = cellsAndProofs.Proofs[i][:]
}
blobs := [][]byte{blob[:]}
commitments := [][]byte{commitment[:]}
err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
require.NoError(t, err)
})
t.Run("valid multiple blob cell verification", func(t *testing.T) {
numberOfColumns := uint64(128)
blobCount := 2
blobs := make([][]byte, blobCount)
commitments := make([][]byte, blobCount)
var allCellProofs [][]byte
for i := range blobCount {
// Generate blob and commitment
randBlob := random.GetRandBlob(int64(i))
var blob Blob
copy(blob[:], randBlob[:])
commitment, err := BlobToKZGCommitment(&blob)
require.NoError(t, err)
// Compute cells and proofs
cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob)
require.NoError(t, err)
blobs[i] = blob[:]
commitments[i] = commitment[:]
// Add cell proofs for this blob
for j := range numberOfColumns {
allCellProofs = append(allCellProofs, cellsAndProofs.Proofs[j][:])
}
}
err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, allCellProofs, numberOfColumns)
require.NoError(t, err)
})
t.Run("empty inputs should pass", func(t *testing.T) {
err := VerifyCellKZGProofBatchFromBlobData([][]byte{}, [][]byte{}, [][]byte{}, 128)
require.NoError(t, err)
})
t.Run("mismatched blob and commitment count", func(t *testing.T) {
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])
err := VerifyCellKZGProofBatchFromBlobData(
[][]byte{blob[:]},
[][]byte{}, // Empty commitments
[][]byte{},
128,
)
require.ErrorContains(t, "expected 128 cell proofs", err)
})
t.Run("wrong cell proof count", func(t *testing.T) {
numberOfColumns := uint64(128)
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])
commitment, err := BlobToKZGCommitment(&blob)
require.NoError(t, err)
blobs := [][]byte{blob[:]}
commitments := [][]byte{commitment[:]}
// Wrong number of cell proofs - should be 128 for 1 blob, but provide 10
wrongCellProofs := make([][]byte, 10)
err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, wrongCellProofs, numberOfColumns)
require.ErrorContains(t, "expected 128 cell proofs, got 10", err)
})
t.Run("invalid cell proofs should fail", func(t *testing.T) {
numberOfColumns := uint64(128)
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])
commitment, err := BlobToKZGCommitment(&blob)
require.NoError(t, err)
blobs := [][]byte{blob[:]}
commitments := [][]byte{commitment[:]}
// Create invalid cell proofs (all zeros)
invalidCellProofs := make([][]byte, numberOfColumns)
for i := range numberOfColumns {
invalidCellProofs[i] = make([]byte, 48) // All zeros
}
err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, invalidCellProofs, numberOfColumns)
require.ErrorContains(t, "cell batch verification", err)
})
t.Run("mismatched commitment should fail", func(t *testing.T) {
numberOfColumns := uint64(128)
// Generate blob and correct cell proofs
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])
cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob)
require.NoError(t, err)
// Generate wrong commitment from different blob
randBlob2 := random.GetRandBlob(456)
var differentBlob Blob
copy(differentBlob[:], randBlob2[:])
wrongCommitment, err := BlobToKZGCommitment(&differentBlob)
require.NoError(t, err)
cellProofs := make([][]byte, numberOfColumns)
for i := range numberOfColumns {
cellProofs[i] = cellsAndProofs.Proofs[i][:]
}
blobs := [][]byte{blob[:]}
commitments := [][]byte{wrongCommitment[:]}
err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
require.ErrorContains(t, "cell KZG proof batch verification failed", err)
})
t.Run("invalid blob data that should cause ComputeCells to fail", func(t *testing.T) {
numberOfColumns := uint64(128)
// Create invalid blob (not properly formatted)
invalidBlobData := make([]byte, 10) // Too short
commitment := make([]byte, 48) // Dummy commitment
cellProofs := make([][]byte, numberOfColumns)
for i := range numberOfColumns {
cellProofs[i] = make([]byte, 48)
}
blobs := [][]byte{invalidBlobData}
commitments := [][]byte{commitment}
err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
require.NotNil(t, err)
require.ErrorContains(t, "blobs len (10) differs from expected (131072)", err)
})
t.Run("invalid commitment size should fail", func(t *testing.T) {
numberOfColumns := uint64(128)
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])
// Create invalid commitment (wrong size)
invalidCommitment := make([]byte, 32) // Should be 48 bytes
cellProofs := make([][]byte, numberOfColumns)
for i := range numberOfColumns {
cellProofs[i] = make([]byte, 48)
}
blobs := [][]byte{blob[:]}
commitments := [][]byte{invalidCommitment}
err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
require.ErrorContains(t, "commitments len (32) differs from expected (48)", err)
})
t.Run("invalid cell proof size should fail", func(t *testing.T) {
numberOfColumns := uint64(128)
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])
commitment, err := BlobToKZGCommitment(&blob)
require.NoError(t, err)
// Create invalid cell proofs (wrong size)
invalidCellProofs := make([][]byte, numberOfColumns)
for i := range numberOfColumns {
if i == 0 {
invalidCellProofs[i] = make([]byte, 32) // Wrong size - should be 48
} else {
invalidCellProofs[i] = make([]byte, 48)
}
}
blobs := [][]byte{blob[:]}
commitments := [][]byte{commitment[:]}
err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, invalidCellProofs, numberOfColumns)
require.ErrorContains(t, "proofs len (32) differs from expected (48)", err)
})
t.Run("multiple blobs with mixed invalid commitments", func(t *testing.T) {
numberOfColumns := uint64(128)
blobCount := 2
blobs := make([][]byte, blobCount)
commitments := make([][]byte, blobCount)
var allCellProofs [][]byte
// First blob - valid
randBlob1 := random.GetRandBlob(123)
var blob1 Blob
copy(blob1[:], randBlob1[:])
commitment1, err := BlobToKZGCommitment(&blob1)
require.NoError(t, err)
blobs[0] = blob1[:]
commitments[0] = commitment1[:]
// Second blob - use invalid commitment size
randBlob2 := random.GetRandBlob(456)
var blob2 Blob
copy(blob2[:], randBlob2[:])
blobs[1] = blob2[:]
commitments[1] = make([]byte, 32) // Wrong size
// Add cell proofs for both blobs
for i := 0; i < blobCount; i++ {
for j := uint64(0); j < numberOfColumns; j++ {
allCellProofs = append(allCellProofs, make([]byte, 48))
}
}
err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, allCellProofs, numberOfColumns)
require.ErrorContains(t, "commitments len (32) differs from expected (48)", err)
})
t.Run("multiple blobs with mixed invalid cell proof sizes", func(t *testing.T) {
numberOfColumns := uint64(128)
blobCount := 2
blobs := make([][]byte, blobCount)
commitments := make([][]byte, blobCount)
var allCellProofs [][]byte
for i := 0; i < blobCount; i++ {
randBlob := random.GetRandBlob(int64(i))
var blob Blob
copy(blob[:], randBlob[:])
commitment, err := BlobToKZGCommitment(&blob)
require.NoError(t, err)
blobs[i] = blob[:]
commitments[i] = commitment[:]
// Add cell proofs - make some invalid in the second blob
for j := uint64(0); j < numberOfColumns; j++ {
if i == 1 && j == 64 {
// Invalid proof size in middle of second blob's proofs
allCellProofs = append(allCellProofs, make([]byte, 20))
} else {
allCellProofs = append(allCellProofs, make([]byte, 48))
}
}
}
err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, allCellProofs, numberOfColumns)
require.ErrorContains(t, "proofs len (20) differs from expected (48)", err)
})
}

View File

@@ -159,7 +159,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
}
// Fill in missing blocks
if err := s.fillInForkChoiceMissingBlocks(ctx, blks[0], preState.CurrentJustifiedCheckpoint(), preState.FinalizedCheckpoint()); err != nil {
if err := s.fillInForkChoiceMissingBlocks(ctx, blks[0], preState.FinalizedCheckpoint(), preState.CurrentJustifiedCheckpoint()); err != nil {
return errors.Wrap(err, "could not fill in missing blocks to forkchoice")
}
@@ -664,14 +664,14 @@ func missingDataColumnIndices(store *filesystem.DataColumnStorage, root [fieldpa
// closed, the context hits cancellation/timeout, or notifications have been received for all the missing sidecars.
func (s *Service) isDataAvailable(
ctx context.Context,
root [fieldparams.RootLength]byte,
signedBlock interfaces.ReadOnlySignedBeaconBlock,
roBlock consensusblocks.ROBlock,
) error {
block := signedBlock.Block()
block := roBlock.Block()
if block == nil {
return errors.New("invalid nil beacon block")
}
root := roBlock.Root()
blockVersion := block.Version()
if blockVersion >= version.Fulu {
return s.areDataColumnsAvailable(ctx, root, block)
@@ -691,8 +691,6 @@ func (s *Service) areDataColumnsAvailable(
root [fieldparams.RootLength]byte,
block interfaces.ReadOnlyBeaconBlock,
) error {
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
// We are only required to check within MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS
blockSlot, currentSlot := block.Slot(), s.CurrentSlot()
blockEpoch, currentEpoch := slots.ToEpoch(blockSlot), slots.ToEpoch(currentSlot)
@@ -726,6 +724,7 @@ func (s *Service) areDataColumnsAvailable(
// Compute the sampling size.
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/das-core.md#custody-sampling
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
samplingSize := max(samplesPerSlot, custodyGroupCount)
// Get the peer info for the node.
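A small sketch of the sampling-size rule above. The helper and the example values are hypothetical; SAMPLES_PER_SLOT is 8 in the current Fulu spec draft, but the exact value comes from params.BeaconConfig().
// samplingSizeFor mirrors the das-core custody-sampling rule: a node samples
// at least SAMPLES_PER_SLOT columns per slot, even when it custodies fewer groups.
func samplingSizeFor(samplesPerSlot, custodyGroupCount uint64) uint64 {
	return max(samplesPerSlot, custodyGroupCount)
}

// e.g. samplingSizeFor(8, 4) == 8, samplingSizeFor(8, 64) == 64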

View File

@@ -30,6 +30,10 @@ import (
"github.com/sirupsen/logrus"
)
// ErrInvalidCheckpointArgs may be returned when the finalized checkpoint has an epoch greater than the justified checkpoint epoch.
// If you are seeing this error, make sure you haven't mixed up the order of the arguments in the method you are calling.
var ErrInvalidCheckpointArgs = errors.New("finalized checkpoint cannot be greater than justified checkpoint")
// CurrentSlot returns the current slot based on time.
func (s *Service) CurrentSlot() primitives.Slot {
return slots.CurrentSlot(s.genesisTime)
@@ -454,6 +458,9 @@ func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot primitives.
// This is useful for block tree visualizer and additional vote accounting.
func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock,
fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
if fCheckpoint.Epoch > jCheckpoint.Epoch {
return ErrInvalidCheckpointArgs
}
pendingNodes := make([]*forkchoicetypes.BlockAndCheckpoints, 0)
// Fork choice only matters from last finalized slot.
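Call sites can now detect an argument mix-up explicitly. A hedged sketch of the caller-side check, mirroring the test added later in this diff:
// Swapped checkpoints fail fast with the sentinel error instead of
// silently corrupting forkchoice.
if err := s.fillInForkChoiceMissingBlocks(ctx, block, fCheckpoint, jCheckpoint); err != nil {
	if errors.Is(err, ErrInvalidCheckpointArgs) {
		// finalized epoch > justified epoch: the caller passed them in the wrong order.
	}
	return err
}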

View File

@@ -375,6 +375,81 @@ func TestFillForkChoiceMissingBlocks_FinalizedSibling(t *testing.T) {
require.Equal(t, ErrNotDescendantOfFinalized.Error(), err.Error())
}
func TestFillForkChoiceMissingBlocks_ErrorCases(t *testing.T) {
tests := []struct {
name string
finalizedEpoch primitives.Epoch
justifiedEpoch primitives.Epoch
expectedError error
}{
{
name: "finalized epoch greater than justified epoch",
finalizedEpoch: 5,
justifiedEpoch: 3,
expectedError: ErrInvalidCheckpointArgs,
},
{
name: "valid case - finalized equal to justified",
finalizedEpoch: 3,
justifiedEpoch: 3,
expectedError: nil,
},
{
name: "valid case - finalized less than justified",
finalizedEpoch: 2,
justifiedEpoch: 3,
expectedError: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB := tr.ctx, tr.db
st, _ := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, st))
// Create a simple block for testing
blk := util.NewBeaconBlock()
blk.Block.Slot = 10
blk.Block.ParentRoot = service.originBlockRoot[:]
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
util.SaveBlock(t, ctx, beaconDB, blk)
// Create checkpoints with test case epochs
finalizedCheckpoint := &ethpb.Checkpoint{
Epoch: tt.finalizedEpoch,
Root: service.originBlockRoot[:],
}
justifiedCheckpoint := &ethpb.Checkpoint{
Epoch: tt.justifiedEpoch,
Root: service.originBlockRoot[:],
}
// Set up forkchoice store to avoid other errors
fcp := &ethpb.Checkpoint{Epoch: 0, Root: service.originBlockRoot[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, 0, service.originBlockRoot, service.originBlockRoot, [32]byte{}, fcp, fcp)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
err = service.fillInForkChoiceMissingBlocks(
t.Context(), wsb, finalizedCheckpoint, justifiedCheckpoint)
if tt.expectedError != nil {
require.ErrorIs(t, err, tt.expectedError)
} else {
// For valid cases, we might get other errors (like block not being descendant of finalized)
// but we shouldn't get the checkpoint validation error
if err != nil && errors.Is(err, tt.expectedError) {
t.Errorf("Unexpected checkpoint validation error: %v", err)
}
}
})
}
}
// blockTree1 constructs the following tree:
//
// /- B1
@@ -2132,13 +2207,13 @@ func TestNoViableHead_Reboot(t *testing.T) {
// Forkchoice has the genesisRoot loaded at startup
require.Equal(t, genesisRoot, service.ensureRootNotZeros(service.cfg.ForkChoiceStore.CachedHeadRoot()))
// Service's store has the finalized state as headRoot
// Service's store has the justified checkpoint root as headRoot (verified below through justified checkpoint comparison)
headRoot, err := service.HeadRoot(ctx)
require.NoError(t, err)
require.Equal(t, genesisRoot, bytesutil.ToBytes32(headRoot))
require.NotEqual(t, bytesutil.ToBytes32(params.BeaconConfig().ZeroHash[:]), bytesutil.ToBytes32(headRoot)) // Ensure head is not zero
optimistic, err := service.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, false, optimistic)
require.Equal(t, true, optimistic) // Head is now optimistic when starting from justified checkpoint
// Check that the node's justified checkpoint does not agree with the
// last valid state's justified checkpoint
@@ -2877,14 +2952,18 @@ func TestIsDataAvailable(t *testing.T) {
params := testIsAvailableParams{options: []Option{WithGenesisTime(time.Unix(0, 0))}}
ctx, _, service, root, signed := testIsAvailableSetup(t, params)
err := service.isDataAvailable(ctx, root, signed)
roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
require.NoError(t, err)
err = service.isDataAvailable(ctx, roBlock)
require.NoError(t, err)
})
t.Run("Fulu - no commitment in blocks", func(t *testing.T) {
ctx, _, service, root, signed := testIsAvailableSetup(t, testIsAvailableParams{})
err := service.isDataAvailable(ctx, root, signed)
roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
require.NoError(t, err)
err = service.isDataAvailable(ctx, roBlock)
require.NoError(t, err)
})
@@ -2902,7 +2981,9 @@ func TestIsDataAvailable(t *testing.T) {
ctx, _, service, root, signed := testIsAvailableSetup(t, params)
err := service.isDataAvailable(ctx, root, signed)
roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
require.NoError(t, err)
err = service.isDataAvailable(ctx, roBlock)
require.NoError(t, err)
})
@@ -2914,7 +2995,9 @@ func TestIsDataAvailable(t *testing.T) {
ctx, _, service, root, signed := testIsAvailableSetup(t, params)
err := service.isDataAvailable(ctx, root, signed)
roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
require.NoError(t, err)
err = service.isDataAvailable(ctx, roBlock)
require.NoError(t, err)
})
@@ -2962,7 +3045,9 @@ func TestIsDataAvailable(t *testing.T) {
ctx, cancel := context.WithTimeout(ctx, time.Second*2)
defer cancel()
err = service.isDataAvailable(ctx, root, signed)
roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
require.NoError(t, err)
err = service.isDataAvailable(ctx, roBlock)
require.NoError(t, err)
})
@@ -3024,7 +3109,9 @@ func TestIsDataAvailable(t *testing.T) {
ctx, cancel := context.WithTimeout(ctx, time.Second*2)
defer cancel()
err = service.isDataAvailable(ctx, root, signed)
roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
require.NoError(t, err)
err = service.isDataAvailable(ctx, roBlock)
require.NoError(t, err)
})
@@ -3043,7 +3130,9 @@ func TestIsDataAvailable(t *testing.T) {
cancel()
}()
err := service.isDataAvailable(ctx, root, signed)
roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)
require.NoError(t, err)
err = service.isDataAvailable(ctx, roBlock)
require.NotNil(t, err)
})
}

View File

@@ -16,7 +16,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/slasher/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/config/features"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
@@ -112,7 +111,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
return err
}
daWaitedTime, err := s.handleDA(ctx, blockCopy, blockRoot, avs)
daWaitedTime, err := s.handleDA(ctx, avs, roblock)
if err != nil {
return err
}
@@ -240,37 +239,19 @@ func (s *Service) validateExecutionAndConsensus(
return postState, isValidPayload, nil
}
func (s *Service) handleDA(
ctx context.Context,
block interfaces.SignedBeaconBlock,
blockRoot [fieldparams.RootLength]byte,
avs das.AvailabilityStore,
) (elapsed time.Duration, err error) {
defer func(start time.Time) {
elapsed = time.Since(start)
if err == nil {
dataAvailWaitedTime.Observe(float64(elapsed.Milliseconds()))
}
}(time.Now())
if avs == nil {
if err = s.isDataAvailable(ctx, blockRoot, block); err != nil {
return
}
return
}
var rob blocks.ROBlock
rob, err = blocks.NewROBlockWithRoot(block, blockRoot)
if err != nil {
return
}
err = avs.IsDataAvailable(ctx, s.CurrentSlot(), rob)
return
}

func (s *Service) handleDA(ctx context.Context, avs das.AvailabilityStore, block blocks.ROBlock) (time.Duration, error) {
var err error
start := time.Now()
if avs != nil {
err = avs.IsDataAvailable(ctx, s.CurrentSlot(), block)
} else {
err = s.isDataAvailable(ctx, block)
}
elapsed := time.Since(start)
if err != nil {
return elapsed, err
}
dataAvailWaitedTime.Observe(float64(elapsed.Milliseconds()))
return elapsed, err
}
func (s *Service) reportPostBlockProcessing(

View File

@@ -192,7 +192,9 @@ func TestHandleDA(t *testing.T) {
require.NoError(t, err)
s, _ := minimalTestService(t)
elapsed, err := s.handleDA(t.Context(), signedBeaconBlock, [fieldparams.RootLength]byte{}, nil)
block, err := blocks.NewROBlockWithRoot(signedBeaconBlock, [32]byte{})
require.NoError(t, err)
elapsed, err := s.handleDA(t.Context(), nil, block)
require.NoError(t, err)
require.Equal(t, true, elapsed > 0, "Elapsed time should be greater than 0")
}

View File

@@ -20,7 +20,7 @@ func (s *Service) setupForkchoice(st state.BeaconState) error {
return errors.Wrap(err, "could not set up forkchoice checkpoints")
}
if err := s.setupForkchoiceTree(st); err != nil {
return errors.Wrap(err, "could not set up forkchoice root")
return errors.Wrap(err, "could not set up forkchoice tree")
}
if err := s.initializeHead(s.ctx, st); err != nil {
return errors.Wrap(err, "could not initialize head from db")
@@ -30,24 +30,24 @@ func (s *Service) setupForkchoice(st state.BeaconState) error {
func (s *Service) startupHeadRoot() [32]byte {
headStr := features.Get().ForceHead
cp := s.FinalizedCheckpt()
fRoot := s.ensureRootNotZeros([32]byte(cp.Root))
jp := s.CurrentJustifiedCheckpt()
jRoot := s.ensureRootNotZeros([32]byte(jp.Root))
if headStr == "" {
return fRoot
return jRoot
}
if headStr == "head" {
root, err := s.cfg.BeaconDB.HeadBlockRoot()
if err != nil {
log.WithError(err).Error("Could not get head block root, starting with finalized block as head")
return fRoot
log.WithError(err).Error("Could not get head block root, starting with justified block as head")
return jRoot
}
log.Infof("Using Head root of %#x", root)
return root
}
root, err := bytesutil.DecodeHexWithLength(headStr, 32)
if err != nil {
log.WithError(err).Error("Could not parse head root, starting with finalized block as head")
return fRoot
log.WithError(err).Error("Could not parse head root, starting with justified block as head")
return jRoot
}
return [32]byte(root)
}
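Condensed, the three startup modes the function distinguishes (an illustrative restatement of the branching above, not new behavior):
// Sketch of startupHeadRoot's decision, with jRoot the justified checkpoint root.
switch headStr := features.Get().ForceHead; {
case headStr == "": // default: start from the justified checkpoint root
	return jRoot
case headStr == "head": // start from the DB's last stored head block root
	root, err := s.cfg.BeaconDB.HeadBlockRoot()
	if err != nil {
		return jRoot // fall back to justified on error
	}
	return root
default: // an explicit 32-byte root supplied by the user
	root, err := bytesutil.DecodeHexWithLength(headStr, 32)
	if err != nil {
		return jRoot
	}
	return [32]byte(root)
}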

View File

@@ -32,7 +32,7 @@ func Test_startupHeadRoot(t *testing.T) {
})
defer resetCfg()
require.Equal(t, service.startupHeadRoot(), gr)
require.LogsContain(t, hook, "Could not get head block root, starting with finalized block as head")
require.LogsContain(t, hook, "Could not get head block root, starting with justified block as head")
})
st, _ := util.DeterministicGenesisState(t, 64)

View File

@@ -24,8 +24,8 @@ import (
p2pTesting "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
@@ -89,7 +89,7 @@ func (mb *mockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
return nil
}
func (mb *mockBroadcaster) BroadcastDataColumnSidecar(_ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar) error {
func (mb *mockBroadcaster) BroadcastDataColumnSidecar(_ uint64, _ blocks.VerifiedRODataColumn) error {
mb.broadcastCalled = true
return nil
}

View File

@@ -317,23 +317,15 @@ func ProposerAssignments(ctx context.Context, state state.BeaconState, epoch pri
}
proposerAssignments := make(map[primitives.ValidatorIndex][]primitives.Slot)
originalStateSlot := state.Slot()
for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ {
// Skip proposer assignment for genesis slot.
if slot == 0 {
continue
}
// Set the state's current slot.
if err := state.SetSlot(slot); err != nil {
return nil, err
}
// Determine the proposer index for the current slot.
i, err := BeaconProposerIndex(ctx, state)
i, err := BeaconProposerIndexAtSlot(ctx, state, slot)
if err != nil {
return nil, errors.Wrapf(err, "could not check proposer at slot %d", state.Slot())
return nil, errors.Wrapf(err, "could not check proposer at slot %d", slot)
}
// Append the slot to the proposer's assignments.
@@ -342,12 +334,6 @@ func ProposerAssignments(ctx context.Context, state state.BeaconState, epoch pri
}
proposerAssignments[i] = append(proposerAssignments[i], slot)
}
// Reset state back to its original slot.
if err := state.SetSlot(originalStateSlot); err != nil {
return nil, err
}
return proposerAssignments, nil
}
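A usage sketch for the refactored helper (hypothetical call site; since the state is no longer mutated, no slot reset is needed afterwards):
// Proposer duties for an epoch, keyed by validator index.
assignments, err := helpers.ProposerAssignments(ctx, headState, epoch)
if err != nil {
	return errors.Wrap(err, "could not compute proposer assignments")
}
for validatorIndex, proposalSlots := range assignments {
	log.Infof("validator %d proposes at slots %v", validatorIndex, proposalSlots)
}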

View File

@@ -309,23 +309,29 @@ func beaconProposerIndexAtSlotFulu(state state.ReadOnlyBeaconState, slot primiti
if err != nil {
return 0, errors.Wrap(err, "could not get proposer lookahead")
}
spe := params.BeaconConfig().SlotsPerEpoch
if e == stateEpoch {
return lookAhead[slot%params.BeaconConfig().SlotsPerEpoch], nil
return lookAhead[slot%spe], nil
}
// The caller is requesting the proposer for the next epoch
return lookAhead[slot%params.BeaconConfig().SlotsPerEpoch+params.BeaconConfig().SlotsPerEpoch], nil
return lookAhead[spe+slot%spe], nil
}
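The lookahead vector covers two epochs, so with SLOTS_PER_EPOCH = 32 the indexing above resolves as follows (hypothetical helper, illustrative values only):
// lookaheadIndex mirrors the arithmetic above: entries 0..spe-1 cover the
// state's current epoch, entries spe..2*spe-1 cover the next one.
func lookaheadIndex(slot, spe uint64, sameEpoch bool) uint64 {
	if sameEpoch {
		return slot % spe
	}
	return spe + slot%spe
}

// e.g. spe = 32, state in epoch 4: slot 130 -> index 2; slot 165 (next epoch) -> index 37.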
// BeaconProposerIndexAtSlot returns proposer index at the given slot from the
// point of view of the given state as head state
func BeaconProposerIndexAtSlot(ctx context.Context, state state.ReadOnlyBeaconState, slot primitives.Slot) (primitives.ValidatorIndex, error) {
if state.Version() >= version.Fulu {
return beaconProposerIndexAtSlotFulu(state, slot)
}
e := slots.ToEpoch(slot)
stateEpoch := slots.ToEpoch(state.Slot())
// Even if the state is post Fulu, we may request a past proposer index.
if state.Version() >= version.Fulu && e >= params.BeaconConfig().FuluForkEpoch {
// We can use the cached lookahead only for the current and the next epoch.
if e == stateEpoch || e == stateEpoch+1 {
return beaconProposerIndexAtSlotFulu(state, slot)
}
}
// The cache uses the state root of the previous epoch - minimum_seed_lookahead last slot as key. (e.g. Starting epoch 1, slot 32, the key would be block root at slot 31)
// For simplicity, the node will skip caching of genesis epoch.
if e > params.BeaconConfig().GenesisEpoch+params.BeaconConfig().MinSeedLookahead {
// For simplicity, the node will skip caching of genesis epoch. If the passed state has not yet reached this slot then we do not check the cache.
if e <= stateEpoch && e > params.BeaconConfig().GenesisEpoch+params.BeaconConfig().MinSeedLookahead {
s, err := slots.EpochEnd(e - 1)
if err != nil {
return 0, err

View File

@@ -1161,6 +1161,10 @@ func TestValidatorMaxEffectiveBalance(t *testing.T) {
}
func TestBeaconProposerIndexAtSlotFulu(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.FuluForkEpoch = 1
params.OverrideBeaconConfig(cfg)
lookahead := make([]uint64, 64)
lookahead[0] = 15
lookahead[1] = 16
@@ -1180,8 +1184,4 @@ func TestBeaconProposerIndexAtSlotFulu(t *testing.T) {
idx, err = helpers.BeaconProposerIndexAtSlot(t.Context(), st, 130)
require.NoError(t, err)
require.Equal(t, primitives.ValidatorIndex(42), idx)
_, err = helpers.BeaconProposerIndexAtSlot(t.Context(), st, 95)
require.ErrorContains(t, "slot 95 is not in the current epoch 3 or the next epoch", err)
_, err = helpers.BeaconProposerIndexAtSlot(t.Context(), st, 160)
require.ErrorContains(t, "slot 160 is not in the current epoch 3 or the next epoch", err)
}

View File

@@ -5,6 +5,7 @@ go_library(
srcs = [
"das_core.go",
"info.go",
"log.go",
"metrics.go",
"p2p_interface.go",
"reconstruction.go",
@@ -19,7 +20,6 @@ go_library(
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//container/trie:go_default_library",
"//crypto/hash:go_default_library",
@@ -33,6 +33,7 @@ go_library(
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@org_golang_x_sync//errgroup:go_default_library",
],
)

View File

@@ -4,15 +4,10 @@ import (
"encoding/binary"
"math"
"slices"
"time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/crypto/hash"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/holiman/uint256"
"github.com/pkg/errors"
@@ -20,12 +15,9 @@ import (
var (
// Custom errors
ErrCustodyGroupTooLarge = errors.New("custody group too large")
ErrCustodyGroupCountTooLarge = errors.New("custody group count too large")
ErrSizeMismatch = errors.New("mismatch in the number of blob KZG commitments and cellsAndProofs")
ErrNotEnoughDataColumnSidecars = errors.New("not enough columns")
ErrDataColumnSidecarsNotSortedByIndex = errors.New("data column sidecars are not sorted by index")
errWrongComputedCustodyGroupCount = errors.New("wrong computed custody group count, should never happen")
ErrCustodyGroupTooLarge = errors.New("custody group too large")
ErrCustodyGroupCountTooLarge = errors.New("custody group count too large")
errWrongComputedCustodyGroupCount = errors.New("wrong computed custody group count, should never happen")
// maxUint256 is the maximum value of an uint256.
maxUint256 = &uint256.Int{math.MaxUint64, math.MaxUint64, math.MaxUint64, math.MaxUint64}
@@ -117,44 +109,6 @@ func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) {
return columns, nil
}
// DataColumnSidecars computes the data column sidecars from the signed block, cells and cell proofs.
// The returned value contains pointers to function parameters.
// (If the caller alters `cellsAndProofs` afterwards, the returned value will be modified as well.)
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#get_data_column_sidecars_from_block
func DataColumnSidecars(signedBlock interfaces.ReadOnlySignedBeaconBlock, cellsAndProofs []kzg.CellsAndProofs) ([]*ethpb.DataColumnSidecar, error) {
if signedBlock == nil || signedBlock.IsNil() || len(cellsAndProofs) == 0 {
return nil, nil
}
block := signedBlock.Block()
blockBody := block.Body()
blobKzgCommitments, err := blockBody.BlobKzgCommitments()
if err != nil {
return nil, errors.Wrap(err, "blob KZG commitments")
}
if len(blobKzgCommitments) != len(cellsAndProofs) {
return nil, ErrSizeMismatch
}
signedBlockHeader, err := signedBlock.Header()
if err != nil {
return nil, errors.Wrap(err, "signed block header")
}
kzgCommitmentsInclusionProof, err := blocks.MerkleProofKZGCommitments(blockBody)
if err != nil {
return nil, errors.Wrap(err, "merkle proof KZG commitments")
}
dataColumnSidecars, err := dataColumnsSidecars(signedBlockHeader, blobKzgCommitments, kzgCommitmentsInclusionProof, cellsAndProofs)
if err != nil {
return nil, errors.Wrap(err, "data column sidecars")
}
return dataColumnSidecars, nil
}
// ComputeCustodyGroupForColumn computes the custody group for a given column.
// It is the reciprocal function of ComputeColumnsForCustodyGroup.
func ComputeCustodyGroupForColumn(columnIndex uint64) (uint64, error) {
@@ -194,72 +148,3 @@ func CustodyColumns(custodyGroups []uint64) (map[uint64]bool, error) {
return columns, nil
}
// dataColumnsSidecars computes the data column sidecars from the signed block header, the blob KZG commitments,
// the KZG commitment inclusion proofs, and the cells and cell proofs.
// The returned value contains pointers to function parameters.
// (If the caller alters the input parameters afterwards, the returned value will be modified as well.)
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#get_data_column_sidecars
func dataColumnsSidecars(
signedBlockHeader *ethpb.SignedBeaconBlockHeader,
blobKzgCommitments [][]byte,
kzgCommitmentsInclusionProof [][]byte,
cellsAndProofs []kzg.CellsAndProofs,
) ([]*ethpb.DataColumnSidecar, error) {
start := time.Now()
if len(blobKzgCommitments) != len(cellsAndProofs) {
return nil, ErrSizeMismatch
}
numberOfColumns := params.BeaconConfig().NumberOfColumns
blobsCount := len(cellsAndProofs)
sidecars := make([]*ethpb.DataColumnSidecar, 0, numberOfColumns)
for columnIndex := range numberOfColumns {
column := make([]kzg.Cell, 0, blobsCount)
kzgProofOfColumn := make([]kzg.Proof, 0, blobsCount)
for rowIndex := range blobsCount {
cellsForRow := cellsAndProofs[rowIndex].Cells
proofsForRow := cellsAndProofs[rowIndex].Proofs
// Validate that we have enough cells and proofs for this column index
if columnIndex >= uint64(len(cellsForRow)) {
return nil, errors.Errorf("column index %d exceeds cells length %d for blob %d", columnIndex, len(cellsForRow), rowIndex)
}
if columnIndex >= uint64(len(proofsForRow)) {
return nil, errors.Errorf("column index %d exceeds proofs length %d for blob %d", columnIndex, len(proofsForRow), rowIndex)
}
cell := cellsForRow[columnIndex]
column = append(column, cell)
kzgProof := proofsForRow[columnIndex]
kzgProofOfColumn = append(kzgProofOfColumn, kzgProof)
}
columnBytes := make([][]byte, 0, blobsCount)
for i := range column {
columnBytes = append(columnBytes, column[i][:])
}
kzgProofOfColumnBytes := make([][]byte, 0, blobsCount)
for _, kzgProof := range kzgProofOfColumn {
kzgProofOfColumnBytes = append(kzgProofOfColumnBytes, kzgProof[:])
}
sidecar := &ethpb.DataColumnSidecar{
Index: columnIndex,
Column: columnBytes,
KzgCommitments: blobKzgCommitments,
KzgProofs: kzgProofOfColumnBytes,
SignedBlockHeader: signedBlockHeader,
KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
}
sidecars = append(sidecars, sidecar)
}
dataColumnComputationTime.Observe(float64(time.Since(start).Milliseconds()))
return sidecars, nil
}

View File

@@ -3,13 +3,9 @@ package peerdas_test
import (
"testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
"github.com/ethereum/go-ethereum/p2p/enode"
)
@@ -31,93 +27,6 @@ func TestComputeColumnsForCustodyGroup(t *testing.T) {
require.ErrorIs(t, err, peerdas.ErrCustodyGroupTooLarge)
}
func TestDataColumnSidecars(t *testing.T) {
t.Run("nil signed block", func(t *testing.T) {
var expected []*ethpb.DataColumnSidecar = nil
actual, err := peerdas.DataColumnSidecars(nil, []kzg.CellsAndProofs{})
require.NoError(t, err)
require.DeepSSZEqual(t, expected, actual)
})
t.Run("empty cells and proofs", func(t *testing.T) {
// Create a protobuf signed beacon block.
signedBeaconBlockPb := util.NewBeaconBlockDeneb()
// Create a signed beacon block from the protobuf.
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
require.NoError(t, err)
actual, err := peerdas.DataColumnSidecars(signedBeaconBlock, []kzg.CellsAndProofs{})
require.NoError(t, err)
require.IsNil(t, actual)
})
t.Run("sizes mismatch", func(t *testing.T) {
// Create a protobuf signed beacon block.
signedBeaconBlockPb := util.NewBeaconBlockDeneb()
// Create a signed beacon block from the protobuf.
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
require.NoError(t, err)
// Create cells and proofs.
cellsAndProofs := make([]kzg.CellsAndProofs, 1)
_, err = peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs)
require.ErrorIs(t, err, peerdas.ErrSizeMismatch)
})
t.Run("cells array too short for column index", func(t *testing.T) {
// Create a Fulu block with a blob commitment.
signedBeaconBlockPb := util.NewBeaconBlockFulu()
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, 48)}
// Create a signed beacon block from the protobuf.
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
require.NoError(t, err)
// Create cells and proofs with insufficient cells for the number of columns.
// This simulates a scenario where cellsAndProofs has fewer cells than expected columns.
cellsAndProofs := []kzg.CellsAndProofs{
{
Cells: make([]kzg.Cell, 10), // Only 10 cells
Proofs: make([]kzg.Proof, 10), // Only 10 proofs
},
}
// This should fail because the function will try to access columns up to NumberOfColumns
// but we only have 10 cells/proofs.
_, err = peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs)
require.ErrorContains(t, "column index", err)
require.ErrorContains(t, "exceeds cells length", err)
})
t.Run("proofs array too short for column index", func(t *testing.T) {
// Create a Fulu block with a blob commitment.
signedBeaconBlockPb := util.NewBeaconBlockFulu()
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, 48)}
// Create a signed beacon block from the protobuf.
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
require.NoError(t, err)
// Create cells and proofs with sufficient cells but insufficient proofs.
numberOfColumns := params.BeaconConfig().NumberOfColumns
cellsAndProofs := []kzg.CellsAndProofs{
{
Cells: make([]kzg.Cell, numberOfColumns),
Proofs: make([]kzg.Proof, 5), // Only 5 proofs, less than columns
},
}
// This should fail when trying to access proof beyond index 4.
_, err = peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs)
require.ErrorContains(t, "column index", err)
require.ErrorContains(t, "exceeds proofs length", err)
})
}
func TestComputeCustodyGroupForColumn(t *testing.T) {
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()

View File

@@ -63,17 +63,14 @@ func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
t.Run("invalid proof", func(t *testing.T) {
sidecars := generateRandomSidecars(t, seed, blobCount)
sidecars[0].Column[0][0]++ // It is OK to overflow
roDataColumnSidecars := generateRODataColumnSidecars(t, sidecars)
err := peerdas.VerifyDataColumnsSidecarKZGProofs(roDataColumnSidecars)
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
require.ErrorIs(t, err, peerdas.ErrInvalidKZGProof)
})
t.Run("nominal", func(t *testing.T) {
sidecars := generateRandomSidecars(t, seed, blobCount)
roDataColumnSidecars := generateRODataColumnSidecars(t, sidecars)
err := peerdas.VerifyDataColumnsSidecarKZGProofs(roDataColumnSidecars)
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
require.NoError(t, err)
})
}
@@ -256,9 +253,8 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_SameCommitments_NoBatch(b *testin
for i := range int64(b.N) {
// Generate new random sidecars to ensure the KZG backend does not cache anything.
sidecars := generateRandomSidecars(b, i, blobCount)
roDataColumnSidecars := generateRODataColumnSidecars(b, sidecars)
for _, sidecar := range roDataColumnSidecars {
for _, sidecar := range sidecars {
sidecars := []blocks.RODataColumn{sidecar}
b.StartTimer()
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
@@ -282,7 +278,7 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_DiffCommitments_Batch(b *testing.
b.ResetTimer()
for j := range int64(b.N) {
allSidecars := make([]*ethpb.DataColumnSidecar, 0, numberOfColumns)
allSidecars := make([]blocks.RODataColumn, 0, numberOfColumns)
for k := int64(0); k < numberOfColumns; k += columnsCount {
// Use different seeds to generate different blobs/commitments
seed := int64(b.N*i) + numberOfColumns*j + blobCount*k
@@ -292,10 +288,8 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_DiffCommitments_Batch(b *testing.
allSidecars = append(allSidecars, sidecars[k:k+columnsCount]...)
}
roDataColumnSidecars := generateRODataColumnSidecars(b, allSidecars)
b.StartTimer()
err := peerdas.VerifyDataColumnsSidecarKZGProofs(roDataColumnSidecars)
err := peerdas.VerifyDataColumnsSidecarKZGProofs(allSidecars)
b.StopTimer()
require.NoError(b, err)
}
@@ -323,8 +317,7 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_DiffCommitments_Batch4(b *testing
for j := range int64(batchCount) {
// Use different seeds to generate different blobs/commitments
sidecars := generateRandomSidecars(b, int64(batchCount)*i+j*blobCount, blobCount)
roDataColumnSidecars := generateRODataColumnSidecars(b, sidecars[:columnsCount])
allSidecars = append(allSidecars, roDataColumnSidecars)
allSidecars = append(allSidecars, sidecars)
}
for _, sidecars := range allSidecars {
@@ -358,7 +351,7 @@ func createTestSidecar(t *testing.T, index uint64, column, kzgCommitments, kzgPr
return roSidecar
}
func generateRandomSidecars(t testing.TB, seed, blobCount int64) []*ethpb.DataColumnSidecar {
func generateRandomSidecars(t testing.TB, seed, blobCount int64) []blocks.RODataColumn {
dbBlock := util.NewBeaconBlockDeneb()
commitments := make([][]byte, 0, blobCount)
@@ -379,20 +372,10 @@ func generateRandomSidecars(t testing.TB, seed, blobCount int64) []*ethpb.DataCo
require.NoError(t, err)
cellsAndProofs := util.GenerateCellsAndProofs(t, blobs)
sidecars, err := peerdas.DataColumnSidecars(sBlock, cellsAndProofs)
rob, err := blocks.NewROBlock(sBlock)
require.NoError(t, err)
sidecars, err := peerdas.ConstructDataColumnSidecar(cellsAndProofs, peerdas.PopulateFromBlock(rob))
require.NoError(t, err)
return sidecars
}
func generateRODataColumnSidecars(t testing.TB, sidecars []*ethpb.DataColumnSidecar) []blocks.RODataColumn {
roDataColumnSidecars := make([]blocks.RODataColumn, 0, len(sidecars))
for _, sidecar := range sidecars {
roCol, err := blocks.NewRODataColumn(sidecar)
require.NoError(t, err)
roDataColumnSidecars = append(roDataColumnSidecars, roCol)
}
return roDataColumnSidecars
}

View File

@@ -5,7 +5,6 @@ import (
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
@@ -62,12 +61,6 @@ func ReconstructDataColumnSidecars(inVerifiedRoSidecars []blocks.VerifiedRODataC
return nil, ErrNotEnoughDataColumnSidecars
}
// Sidecars are verified and are committed to the same block.
// All signed block headers, KZG commitments, and inclusion proofs are the same.
signedBlockHeader := referenceSidecar.SignedBlockHeader
kzgCommitments := referenceSidecar.KzgCommitments
kzgCommitmentsInclusionProof := referenceSidecar.KzgCommitmentsInclusionProof
// Recover cells and compute proofs in parallel.
var wg errgroup.Group
cellsAndProofs := make([]kzg.CellsAndProofs, blobCount)
@@ -100,7 +93,7 @@ func ReconstructDataColumnSidecars(inVerifiedRoSidecars []blocks.VerifiedRODataC
return nil, errors.Wrap(err, "wait for RecoverCellsAndKZGProofs")
}
outSidecars, err := dataColumnsSidecars(signedBlockHeader, kzgCommitments, kzgCommitmentsInclusionProof, cellsAndProofs)
outSidecars, err := ConstructDataColumnSidecar(cellsAndProofs, PopulateFromSidecar(referenceSidecar.RODataColumn))
if err != nil {
return nil, errors.Wrap(err, "data column sidecars from items")
}
@@ -109,71 +102,13 @@ func ReconstructDataColumnSidecars(inVerifiedRoSidecars []blocks.VerifiedRODataC
// As a consequence, reconstructed sidecars are also verified.
outVerifiedRoSidecars := make([]blocks.VerifiedRODataColumn, 0, len(outSidecars))
for _, sidecar := range outSidecars {
roSidecar, err := blocks.NewRODataColumnWithRoot(sidecar, blockRoot)
if err != nil {
return nil, errors.Wrap(err, "new RO data column with root")
}
verifiedRoSidecar := blocks.NewVerifiedRODataColumn(roSidecar)
verifiedRoSidecar := blocks.NewVerifiedRODataColumn(sidecar)
outVerifiedRoSidecars = append(outVerifiedRoSidecars, verifiedRoSidecar)
}
return outVerifiedRoSidecars, nil
}
// ConstructDataColumnSidecars constructs data column sidecars from a block, (un-extended) blobs and
// cell proofs corresponding to the extended blobs. The main purpose of this function is to
// construct data column sidecars from data obtained from the execution client via:
// - `engine_getBlobsV2` - https://github.com/ethereum/execution-apis/blob/main/src/engine/osaka.md#engine_getblobsv2, or
// - `engine_getPayloadV5` - https://github.com/ethereum/execution-apis/blob/main/src/engine/osaka.md#engine_getpayloadv5
// Note: In this function, to stick with the `BlobsBundleV2` format returned by the execution client in `engine_getPayloadV5`,
// cell proofs are "flattened".
func ConstructDataColumnSidecars(block interfaces.ReadOnlySignedBeaconBlock, blobs [][]byte, cellProofs [][]byte) ([]*ethpb.DataColumnSidecar, error) {
// Check if the cells count is equal to the cell proofs count.
numberOfColumns := params.BeaconConfig().NumberOfColumns
blobCount := uint64(len(blobs))
cellProofsCount := uint64(len(cellProofs))
cellsCount := blobCount * numberOfColumns
if cellsCount != cellProofsCount {
return nil, ErrBlobsCellsProofsMismatch
}
cellsAndProofs := make([]kzg.CellsAndProofs, 0, blobCount)
for i, blob := range blobs {
var kzgBlob kzg.Blob
if copy(kzgBlob[:], blob) != len(kzgBlob) {
return nil, errors.New("wrong blob size - should never happen")
}
// Compute the extended cells from the (non-extended) blob.
cells, err := kzg.ComputeCells(&kzgBlob)
if err != nil {
return nil, errors.Wrap(err, "compute cells")
}
var proofs []kzg.Proof
for idx := uint64(i) * numberOfColumns; idx < (uint64(i)+1)*numberOfColumns; idx++ {
var kzgProof kzg.Proof
if copy(kzgProof[:], cellProofs[idx]) != len(kzgProof) {
return nil, errors.New("wrong KZG proof size - should never happen")
}
proofs = append(proofs, kzgProof)
}
cellsProofs := kzg.CellsAndProofs{Cells: cells, Proofs: proofs}
cellsAndProofs = append(cellsAndProofs, cellsProofs)
}
dataColumnSidecars, err := DataColumnSidecars(block, cellsAndProofs)
if err != nil {
return nil, errors.Wrap(err, "data column sidcars")
}
return dataColumnSidecars, nil
}
// ReconstructBlobs constructs verified read only blobs sidecars from verified read only blob sidecars.
// The following constraints must be satisfied:
// - All `dataColumnSidecars` has to be committed to the same block, and
@@ -256,6 +191,47 @@ func ReconstructBlobs(block blocks.ROBlock, verifiedDataColumnSidecars []blocks.
return blobSidecars, nil
}
// ComputeCellsAndProofs computes the extended cells for each (un-extended) blob and pairs them with the flattened cell proofs.
func ComputeCellsAndProofs(blobs [][]byte, cellProofs [][]byte) ([]kzg.CellsAndProofs, error) {
numberOfColumns := params.BeaconConfig().NumberOfColumns
blobCount := uint64(len(blobs))
cellProofsCount := uint64(len(cellProofs))
cellsCount := blobCount * numberOfColumns
if cellsCount != cellProofsCount {
return nil, ErrBlobsCellsProofsMismatch
}
cellsAndProofs := make([]kzg.CellsAndProofs, 0, blobCount)
for i, blob := range blobs {
var kzgBlob kzg.Blob
if copy(kzgBlob[:], blob) != len(kzgBlob) {
return nil, errors.New("wrong blob size - should never happen")
}
// Compute the extended cells from the (non-extended) blob.
cells, err := kzg.ComputeCells(&kzgBlob)
if err != nil {
return nil, errors.Wrap(err, "compute cells")
}
var proofs []kzg.Proof
for idx := uint64(i) * numberOfColumns; idx < (uint64(i)+1)*numberOfColumns; idx++ {
var kzgProof kzg.Proof
if copy(kzgProof[:], cellProofs[idx]) != len(kzgProof) {
return nil, errors.New("wrong KZG proof size - should never happen")
}
proofs = append(proofs, kzgProof)
}
cellsProofs := kzg.CellsAndProofs{Cells: cells, Proofs: proofs}
cellsAndProofs = append(cellsAndProofs, cellsProofs)
}
return cellsAndProofs, nil
}
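// exampleFlattenedProofLayout is an illustrative sketch (the function name is
// hypothetical and not part of this change). It shows the flattened cellProofs
// layout ComputeCellsAndProofs expects: the proofs for blob i occupy indices
// [i*NumberOfColumns, (i+1)*NumberOfColumns), matching the BlobsBundleV2 format
// returned by engine_getPayloadV5.
func exampleFlattenedProofLayout(blobs [][]byte, perBlob []kzg.CellsAndProofs) ([]kzg.CellsAndProofs, error) {
numberOfColumns := params.BeaconConfig().NumberOfColumns
cellProofs := make([][]byte, 0, uint64(len(blobs))*numberOfColumns)
for _, cp := range perBlob {
// Appending row by row yields the blob-major ordering described above.
for _, proof := range cp.Proofs {
cellProofs = append(cellProofs, proof[:])
}
}
return ComputeCellsAndProofs(blobs, cellProofs)
}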
// blobSidecarsFromDataColumnSidecars converts verified data column sidecars to verified blob sidecars.
func blobSidecarsFromDataColumnSidecars(roBlock blocks.ROBlock, dataColumnSidecars []blocks.VerifiedRODataColumn, indices []int) ([]*blocks.VerifiedROBlob, error) {
referenceSidecar := dataColumnSidecars[0]

View File

@@ -9,7 +9,6 @@ import (
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
"github.com/pkg/errors"
@@ -124,50 +123,6 @@ func TestReconstructDataColumnSidecars(t *testing.T) {
})
}
func TestConstructDataColumnSidecars(t *testing.T) {
const (
blobCount = 3
cellsPerBlob = fieldparams.CellsPerBlob
)
numberOfColumns := params.BeaconConfig().NumberOfColumns
// Start the trusted setup.
err := kzg.Start()
require.NoError(t, err)
roBlock, _, baseVerifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, blobCount)
// Extract blobs and proofs from the sidecars.
blobs := make([][]byte, 0, blobCount)
cellProofs := make([][]byte, 0, cellsPerBlob)
for blobIndex := range blobCount {
blob := make([]byte, 0, cellsPerBlob)
for columnIndex := range cellsPerBlob {
cell := baseVerifiedRoSidecars[columnIndex].Column[blobIndex]
blob = append(blob, cell...)
}
blobs = append(blobs, blob)
for columnIndex := range numberOfColumns {
cellProof := baseVerifiedRoSidecars[columnIndex].KzgProofs[blobIndex]
cellProofs = append(cellProofs, cellProof)
}
}
actual, err := peerdas.ConstructDataColumnSidecars(roBlock, blobs, cellProofs)
require.NoError(t, err)
// Extract the base verified ro sidecars into sidecars.
expected := make([]*ethpb.DataColumnSidecar, 0, len(baseVerifiedRoSidecars))
for _, verifiedRoSidecar := range baseVerifiedRoSidecars {
expected = append(expected, verifiedRoSidecar.DataColumnSidecar)
}
require.DeepSSZEqual(t, expected, actual)
}
func TestReconstructBlobs(t *testing.T) {
// Start the trusted setup.
err := kzg.Start()
@@ -250,7 +205,7 @@ func TestReconstructBlobs(t *testing.T) {
// Compute cells and proofs from blob sidecars.
var wg errgroup.Group
blobs := make([][]byte, blobCount)
cellsAndProofs := make([]kzg.CellsAndProofs, blobCount)
inputCellsAndProofs := make([]kzg.CellsAndProofs, blobCount)
for i := range blobCount {
blob := roBlobSidecars[i].Blob
blobs[i] = blob
@@ -267,7 +222,7 @@ func TestReconstructBlobs(t *testing.T) {
// It is safe for multiple goroutines to concurrently write to the same slice,
// as long as they are writing to different indices, which is the case here.
cellsAndProofs[i] = cp
inputCellsAndProofs[i] = cp
return nil
})
@@ -278,25 +233,24 @@ func TestReconstructBlobs(t *testing.T) {
// Flatten proofs.
cellProofs := make([][]byte, 0, blobCount*numberOfColumns)
for _, cp := range cellsAndProofs {
for _, cp := range inputCellsAndProofs {
for _, proof := range cp.Proofs {
cellProofs = append(cellProofs, proof[:])
}
}
// Construct data column sidecars.
// It is OK to use the public function `ConstructDataColumnSidecars`, as long as
// `TestConstructDataColumnSidecars` tests pass.
dataColumnSidecars, err := peerdas.ConstructDataColumnSidecars(roBlock, blobs, cellProofs)
// Compute cells and proofs from the blobs and cell proofs.
cellsAndProofs, err := peerdas.ComputeCellsAndProofs(blobs, cellProofs)
require.NoError(t, err)
// Construct data column sidecars from the signed block and the cells and proofs.
roDataColumnSidecars, err := peerdas.ConstructDataColumnSidecar(cellsAndProofs, peerdas.PopulateFromBlock(roBlock))
require.NoError(t, err)
// Convert to verified data column sidecars.
verifiedRoSidecars := make([]blocks.VerifiedRODataColumn, 0, len(dataColumnSidecars))
for _, dataColumnSidecar := range dataColumnSidecars {
roSidecar, err := blocks.NewRODataColumn(dataColumnSidecar)
require.NoError(t, err)
verifiedRoSidecar := blocks.NewVerifiedRODataColumn(roSidecar)
verifiedRoSidecars := make([]blocks.VerifiedRODataColumn, 0, len(roDataColumnSidecars))
for _, roDataColumnSidecar := range roDataColumnSidecars {
verifiedRoSidecar := blocks.NewVerifiedRODataColumn(roDataColumnSidecar)
verifiedRoSidecars = append(verifiedRoSidecars, verifiedRoSidecar)
}
@@ -339,3 +293,86 @@ func TestReconstructBlobs(t *testing.T) {
})
}
func TestComputeCellsAndProofs(t *testing.T) {
// Start the trusted setup.
err := kzg.Start()
require.NoError(t, err)
t.Run("mismatched blob and proof counts", func(t *testing.T) {
numberOfColumns := params.BeaconConfig().NumberOfColumns
// Create one blob but proofs for two blobs
blobs := [][]byte{{}}
// Create proofs for 2 blobs worth of columns
cellProofs := make([][]byte, 2*numberOfColumns)
_, err := peerdas.ComputeCellsAndProofs(blobs, cellProofs)
require.ErrorIs(t, err, peerdas.ErrBlobsCellsProofsMismatch)
})
t.Run("nominal", func(t *testing.T) {
const blobCount = 2
numberOfColumns := params.BeaconConfig().NumberOfColumns
// Generate test blobs
_, roBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, 42, blobCount)
// Extract blobs and compute expected cells and proofs
blobs := make([][]byte, blobCount)
expectedCellsAndProofs := make([]kzg.CellsAndProofs, blobCount)
var wg errgroup.Group
for i := range blobCount {
blob := roBlobSidecars[i].Blob
blobs[i] = blob
wg.Go(func() error {
var kzgBlob kzg.Blob
count := copy(kzgBlob[:], blob)
require.Equal(t, len(kzgBlob), count)
cp, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob)
if err != nil {
return errors.Wrapf(err, "compute cells and kzg proofs for blob %d", i)
}
expectedCellsAndProofs[i] = cp
return nil
})
}
err := wg.Wait()
require.NoError(t, err)
// Flatten proofs
cellProofs := make([][]byte, 0, blobCount*numberOfColumns)
for _, cp := range expectedCellsAndProofs {
for _, proof := range cp.Proofs {
cellProofs = append(cellProofs, proof[:])
}
}
// Test ComputeCellsAndProofs
actualCellsAndProofs, err := peerdas.ComputeCellsAndProofs(blobs, cellProofs)
require.NoError(t, err)
require.Equal(t, blobCount, len(actualCellsAndProofs))
// Verify the results match expected
for i := range blobCount {
require.Equal(t, len(expectedCellsAndProofs[i].Cells), len(actualCellsAndProofs[i].Cells))
require.Equal(t, len(expectedCellsAndProofs[i].Proofs), len(actualCellsAndProofs[i].Proofs))
// Compare cells
for j, expectedCell := range expectedCellsAndProofs[i].Cells {
require.Equal(t, expectedCell, actualCellsAndProofs[i].Cells[j])
}
// Compare proofs
for j, expectedProof := range expectedCellsAndProofs[i].Proofs {
require.Equal(t, expectedProof, actualCellsAndProofs[i].Proofs[j])
}
}
})
}

View File

@@ -1,12 +1,24 @@
package peerdas
import (
"time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
beaconState "github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/pkg/errors"
)
var (
ErrNilSignedBlockOrEmptyCellsAndProofs = errors.New("nil signed block or empty cells and proofs")
ErrSizeMismatch = errors.New("mismatch in the number of blob KZG commitments and cellsAndProofs")
ErrNotEnoughDataColumnSidecars = errors.New("not enough columns")
ErrDataColumnSidecarsNotSortedByIndex = errors.New("data column sidecars are not sorted by index")
)
// ValidatorsCustodyRequirement returns the number of custody groups regarding the validator indices attached to the beacon node.
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#validator-custody
func ValidatorsCustodyRequirement(state beaconState.ReadOnlyBeaconState, validatorsIndex map[primitives.ValidatorIndex]bool) (uint64, error) {
@@ -28,3 +40,154 @@ func ValidatorsCustodyRequirement(state beaconState.ReadOnlyBeaconState, validat
count := totalNodeBalance / balancePerAdditionalCustodyGroup
return min(max(count, validatorCustodyRequirement), numberOfCustodyGroups), nil
}
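// Worked example (a sketch assuming mainnet-style spec values, where
// BALANCE_PER_ADDITIONAL_CUSTODY_GROUP = 32 ETH, VALIDATOR_CUSTODY_REQUIREMENT = 8
// and NUMBER_OF_CUSTODY_GROUPS = 128): validators totalling 2048 ETH of balance
// give count = 2048 / 32 = 64, and min(max(64, 8), 128) = 64 custody groups.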
// ConstructDataColumnSidecar, given a ConstructionPopulator and the cells/proofs associated with each blob in the
// block, assembles the sidecars that can be distributed to peers.
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#get_data_column_sidecars_from_block
func ConstructDataColumnSidecar(rows []kzg.CellsAndProofs, src ConstructionPopulator) ([]blocks.RODataColumn, error) {
if len(rows) == 0 {
return nil, nil
}
start := time.Now()
cells, proofs, err := rotateRowsToCols(rows, params.BeaconConfig().NumberOfColumns)
if err != nil {
return nil, errors.Wrap(err, "rotate cells and proofs")
}
maxIdx := params.BeaconConfig().NumberOfColumns
roSidecars := make([]blocks.RODataColumn, 0, maxIdx)
for idx := range maxIdx {
sidecar := &ethpb.DataColumnSidecar{
Index: idx,
Column: cells[idx],
KzgProofs: proofs[idx],
}
if err := src.Populate(sidecar); err != nil {
return nil, errors.Wrap(err, "column field setter set")
}
if len(sidecar.KzgCommitments) != len(sidecar.Column) || len(sidecar.KzgCommitments) != len(sidecar.KzgProofs) {
return nil, ErrSizeMismatch
}
roSidecar, err := blocks.NewRODataColumnWithRoot(sidecar, src.Root())
if err != nil {
return nil, errors.Wrap(err, "new ro data column")
}
roSidecars = append(roSidecars, roSidecar)
}
dataColumnComputationTime.Observe(float64(time.Since(start).Milliseconds()))
return roSidecars, nil
}
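// exampleConstruct is an illustrative sketch (the function name is hypothetical,
// not part of this change) of the two population paths: PopulateFromBlock when a
// full block is at hand, and PopulateFromSidecar when only a previously verified
// column sidecar is available.
func exampleConstruct(rob blocks.ROBlock, seed blocks.RODataColumn, cellsAndProofs []kzg.CellsAndProofs) error {
fromBlock, err := ConstructDataColumnSidecar(cellsAndProofs, PopulateFromBlock(rob))
if err != nil {
return errors.Wrap(err, "construct from block")
}
fromSidecar, err := ConstructDataColumnSidecar(cellsAndProofs, PopulateFromSidecar(seed))
if err != nil {
return errors.Wrap(err, "construct from sidecar")
}
// Both paths emit one sidecar per column, sharing the same signed header,
// KZG commitments and inclusion proof.
_, _ = fromBlock, fromSidecar
return nil
}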
// rotateRowsToCols takes the per-blob cells and proofs, where x is rows (blobs) and y is columns,
// and returns two 2D slices where x is columns and y is rows.
func rotateRowsToCols(rows []kzg.CellsAndProofs, numCols uint64) ([][][]byte, [][][]byte, error) {
if len(rows) == 0 {
return nil, nil, nil
}
cellCols := make([][][]byte, numCols)
proofCols := make([][][]byte, numCols)
for i, cp := range rows {
if uint64(len(cp.Cells)) != numCols {
return nil, nil, errors.Wrap(ErrNotEnoughDataColumnSidecars, "not enough cells")
}
if len(cp.Cells) != len(cp.Proofs) {
return nil, nil, errors.Wrap(ErrNotEnoughDataColumnSidecars, "not enough proofs")
}
for j := uint64(0); j < numCols; j++ {
if i == 0 {
cellCols[j] = make([][]byte, len(rows))
proofCols[j] = make([][]byte, len(rows))
}
cellCols[j][i] = cp.Cells[j][:]
proofCols[j][i] = cp.Proofs[j][:]
}
}
return cellCols, proofCols, nil
}
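// For example, with two blobs the rotation maps rows[0].Cells[j] to cellCols[j][0]
// and rows[1].Cells[j] to cellCols[j][1], so cellCols[j] and proofCols[j] are exactly
// the Column and KzgProofs fields of the j-th sidecar.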
// ConstructionPopulator is satisfied by types that use data from a source such as a
// DataColumnSidecar or a BeaconBlock to set the fields of a data column sidecar that cannot
// be obtained from the engine API.
type ConstructionPopulator interface {
Populate(*ethpb.DataColumnSidecar) error
Slot() primitives.Slot
Root() [32]byte
Commitments() [][]byte
Type() string
}
// PopulateFromSidecar returns a ConstructionPopulator backed by an existing RO data column sidecar.
func PopulateFromSidecar(sidecar blocks.RODataColumn) *SidecarReconstructionSource {
return &SidecarReconstructionSource{RODataColumn: sidecar}
}
type SidecarReconstructionSource struct {
blocks.RODataColumn
}
func (s *SidecarReconstructionSource) Populate(dc *ethpb.DataColumnSidecar) error {
dc.SignedBlockHeader = s.SignedBlockHeader
dc.KzgCommitments = s.KzgCommitments
dc.KzgCommitmentsInclusionProof = s.KzgCommitmentsInclusionProof
return nil
}
func (s *SidecarReconstructionSource) Root() [32]byte {
return s.BlockRoot()
}
func (s *SidecarReconstructionSource) Commitments() [][]byte {
return s.KzgCommitments
}
func (s *SidecarReconstructionSource) Type() string {
return "DataColumnSidecar"
}
var _ ConstructionPopulator = (*SidecarReconstructionSource)(nil)
// PopulateFromBlock returns a ConstructionPopulator backed by an RO block.
func PopulateFromBlock(block blocks.ROBlock) *BlockReconstructionSource {
return &BlockReconstructionSource{ROBlock: block}
}
type BlockReconstructionSource struct {
blocks.ROBlock
}
func (b *BlockReconstructionSource) Populate(dc *ethpb.DataColumnSidecar) error {
block := b.Block()
blockBody := block.Body()
blobKzgCommitments, err := blockBody.BlobKzgCommitments()
if err != nil {
return errors.Wrap(err, "blob KZG commitments")
}
dc.KzgCommitments = blobKzgCommitments
dc.SignedBlockHeader, err = b.Header()
if err != nil {
return errors.Wrap(err, "signed block header")
}
dc.KzgCommitmentsInclusionProof, err = blocks.MerkleProofKZGCommitments(blockBody)
if err != nil {
return errors.Wrap(err, "merkle proof KZG commitments")
}
return nil
}
func (b *BlockReconstructionSource) Slot() primitives.Slot {
return b.Block().Slot()
}
func (b *BlockReconstructionSource) Commitments() [][]byte {
c, err := b.Block().Body().BlobKzgCommitments()
if err != nil {
log.WithField("root", b.Root()).Trace("Unable to get kzg commitments from block")
}
return c
}
func (b *BlockReconstructionSource) Type() string {
return "BeaconBlock"
}
var _ ConstructionPopulator = (*BlockReconstructionSource)(nil)

View File

@@ -3,11 +3,15 @@ package peerdas_test
import (
"testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
)
func TestValidatorsCustodyRequirement(t *testing.T) {
@@ -53,3 +57,248 @@ func TestValidatorsCustodyRequirement(t *testing.T) {
})
}
}
func TestDataColumnSidecarsFromBlock(t *testing.T) {
t.Run("sizes mismatch", func(t *testing.T) {
// Create a protobuf signed beacon block.
signedBeaconBlockPb := util.NewBeaconBlockDeneb()
// Create a signed beacon block from the protobuf.
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
require.NoError(t, err)
// Create cells and proofs.
cellsAndProofs := []kzg.CellsAndProofs{
{
Cells: make([]kzg.Cell, params.BeaconConfig().NumberOfColumns),
Proofs: make([]kzg.Proof, params.BeaconConfig().NumberOfColumns),
},
}
rob, err := blocks.NewROBlock(signedBeaconBlock)
require.NoError(t, err)
_, err = peerdas.ConstructDataColumnSidecar(cellsAndProofs, peerdas.PopulateFromBlock(rob))
require.ErrorIs(t, err, peerdas.ErrSizeMismatch)
})
t.Run("cells array too short for column index", func(t *testing.T) {
// Create a Fulu block with a blob commitment.
signedBeaconBlockPb := util.NewBeaconBlockFulu()
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, 48)}
// Create a signed beacon block from the protobuf.
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
require.NoError(t, err)
// Create cells and proofs with insufficient cells for the number of columns.
// This simulates a scenario where cellsAndProofs has fewer cells than expected columns.
cellsAndProofs := []kzg.CellsAndProofs{
{
Cells: make([]kzg.Cell, 10), // Only 10 cells
Proofs: make([]kzg.Proof, 10), // Only 10 proofs
},
}
// This should fail because the function will try to access columns up to NumberOfColumns
// but we only have 10 cells/proofs.
rob, err := blocks.NewROBlock(signedBeaconBlock)
require.NoError(t, err)
_, err = peerdas.ConstructDataColumnSidecar(cellsAndProofs, peerdas.PopulateFromBlock(rob))
require.ErrorIs(t, err, peerdas.ErrNotEnoughDataColumnSidecars)
})
t.Run("proofs array too short for column index", func(t *testing.T) {
// Create a Fulu block with a blob commitment.
signedBeaconBlockPb := util.NewBeaconBlockFulu()
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, 48)}
// Create a signed beacon block from the protobuf.
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
require.NoError(t, err)
// Create cells and proofs with sufficient cells but insufficient proofs.
numberOfColumns := params.BeaconConfig().NumberOfColumns
cellsAndProofs := []kzg.CellsAndProofs{
{
Cells: make([]kzg.Cell, numberOfColumns),
Proofs: make([]kzg.Proof, 5), // Only 5 proofs, less than columns
},
}
// This should fail when trying to access proof beyond index 4.
rob, err := blocks.NewROBlock(signedBeaconBlock)
require.NoError(t, err)
_, err = peerdas.ConstructDataColumnSidecar(cellsAndProofs, peerdas.PopulateFromBlock(rob))
require.ErrorIs(t, err, peerdas.ErrNotEnoughDataColumnSidecars)
require.ErrorContains(t, "not enough proofs", err)
})
t.Run("nominal", func(t *testing.T) {
// Create a Fulu block with blob commitments.
signedBeaconBlockPb := util.NewBeaconBlockFulu()
commitment1 := make([]byte, 48)
commitment2 := make([]byte, 48)
// Set different values to distinguish commitments
commitment1[0] = 0x01
commitment2[0] = 0x02
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = [][]byte{commitment1, commitment2}
// Create a signed beacon block from the protobuf.
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
require.NoError(t, err)
// Create cells and proofs with correct dimensions.
numberOfColumns := params.BeaconConfig().NumberOfColumns
cellsAndProofs := []kzg.CellsAndProofs{
{
Cells: make([]kzg.Cell, numberOfColumns),
Proofs: make([]kzg.Proof, numberOfColumns),
},
{
Cells: make([]kzg.Cell, numberOfColumns),
Proofs: make([]kzg.Proof, numberOfColumns),
},
}
// Set distinct values in cells and proofs for testing
for i := range numberOfColumns {
cellsAndProofs[0].Cells[i][0] = byte(i)
cellsAndProofs[0].Proofs[i][0] = byte(i)
cellsAndProofs[1].Cells[i][0] = byte(i + 128)
cellsAndProofs[1].Proofs[i][0] = byte(i + 128)
}
rob, err := blocks.NewROBlock(signedBeaconBlock)
require.NoError(t, err)
sidecars, err := peerdas.ConstructDataColumnSidecar(cellsAndProofs, peerdas.PopulateFromBlock(rob))
require.NoError(t, err)
require.NotNil(t, sidecars)
require.Equal(t, int(numberOfColumns), len(sidecars))
// Verify each sidecar has the expected structure
for i, sidecar := range sidecars {
require.Equal(t, uint64(i), sidecar.Index)
require.Equal(t, 2, len(sidecar.Column))
require.Equal(t, 2, len(sidecar.KzgCommitments))
require.Equal(t, 2, len(sidecar.KzgProofs))
// Verify commitments match what we set
require.DeepEqual(t, commitment1, sidecar.KzgCommitments[0])
require.DeepEqual(t, commitment2, sidecar.KzgCommitments[1])
// Verify column data comes from the correct cells
require.Equal(t, byte(i), sidecar.Column[0][0])
require.Equal(t, byte(i+128), sidecar.Column[1][0])
// Verify proofs come from the correct proofs
require.Equal(t, byte(i), sidecar.KzgProofs[0][0])
require.Equal(t, byte(i+128), sidecar.KzgProofs[1][0])
}
})
}
func TestDataColumnSidecarsFromColumnSidecar(t *testing.T) {
// Create KZG commitments for 2 blobs
commitment1 := make([]byte, 48)
commitment2 := make([]byte, 48)
commitment1[0] = 0x01
commitment2[0] = 0x02
kzgCommitments := [][]byte{commitment1, commitment2}
// Create column data for 2 blobs
columnData := make([][]byte, 2)
columnData[0] = make([]byte, kzg.BytesPerCell)
columnData[1] = make([]byte, kzg.BytesPerCell)
columnData[0][0] = 0x11 // Distinct values
columnData[1][0] = 0x22
// Create KZG proofs for 2 blobs
kzgProofs := make([][]byte, 2)
kzgProofs[0] = make([]byte, 48)
kzgProofs[1] = make([]byte, 48)
kzgProofs[0][0] = 0x33
kzgProofs[1][0] = 0x44
// Create inclusion proof
inclusionProof := make([][]byte, 4)
for i := range inclusionProof {
inclusionProof[i] = make([]byte, 32)
inclusionProof[i][0] = byte(i + 0x50)
}
// Create the input VerifiedRODataColumn sidecar using test utility
_, verifiedSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{
{
Index: 5, // Column index 5
Column: columnData,
KzgCommitments: kzgCommitments,
KzgProofs: kzgProofs,
KzgCommitmentsInclusionProof: inclusionProof,
Slot: 42,
ProposerIndex: 7,
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
},
})
require.Equal(t, 1, len(verifiedSidecars))
verifiedInputSidecar := verifiedSidecars[0]
// Create cells and proofs with correct dimensions for 2 blobs
numberOfColumns := params.BeaconConfig().NumberOfColumns
cellsAndProofs := []kzg.CellsAndProofs{
{
Cells: make([]kzg.Cell, numberOfColumns),
Proofs: make([]kzg.Proof, numberOfColumns),
},
{
Cells: make([]kzg.Cell, numberOfColumns),
Proofs: make([]kzg.Proof, numberOfColumns),
},
}
// Set distinct values in cells and proofs for testing
for i := range numberOfColumns {
cellsAndProofs[0].Cells[i][0] = byte(i)
cellsAndProofs[0].Proofs[i][0] = byte(i + 10)
cellsAndProofs[1].Cells[i][0] = byte(i + 128)
cellsAndProofs[1].Proofs[i][0] = byte(i + 138)
}
// Call the function
sidecars, err := peerdas.ConstructDataColumnSidecar(cellsAndProofs, peerdas.PopulateFromSidecar(verifiedInputSidecar.RODataColumn))
require.NoError(t, err)
require.NotNil(t, sidecars)
require.Equal(t, int(numberOfColumns), len(sidecars))
// Verify each sidecar has the expected structure
for i, sidecar := range sidecars {
require.Equal(t, uint64(i), sidecar.Index)
require.Equal(t, 2, len(sidecar.Column))
require.Equal(t, 2, len(sidecar.KzgCommitments))
require.Equal(t, 2, len(sidecar.KzgProofs))
// Verify commitments match input
require.DeepEqual(t, commitment1, sidecar.KzgCommitments[0])
require.DeepEqual(t, commitment2, sidecar.KzgCommitments[1])
// Verify column data comes from the correct cells
require.Equal(t, byte(i), sidecar.Column[0][0])
require.Equal(t, byte(i+128), sidecar.Column[1][0])
// Verify proofs come from the correct proofs
require.Equal(t, byte(i+10), sidecar.KzgProofs[0][0])
require.Equal(t, byte(i+138), sidecar.KzgProofs[1][0])
// Verify inclusion proof is preserved
require.Equal(t, len(inclusionProof), len(sidecar.KzgCommitmentsInclusionProof))
for j, proof := range sidecar.KzgCommitmentsInclusionProof {
require.Equal(t, byte(j+0x50), proof[0])
}
// Verify signed block header is preserved
require.Equal(t, primitives.Slot(42), sidecar.SignedBlockHeader.Header.Slot)
require.Equal(t, primitives.ValidatorIndex(7), sidecar.SignedBlockHeader.Header.ProposerIndex)
}
}

View File

@@ -25,6 +25,7 @@ go_library(
"//testing/spectest:__subpackages__",
],
deps = [
"//beacon-chain/blockchain/kzg:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositsnapshot:go_default_library",
"//beacon-chain/core/altair:go_default_library",
@@ -103,6 +104,7 @@ go_test(
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/filesystem:go_default_library",

View File

@@ -7,6 +7,7 @@ import (
"strings"
"time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/execution/types"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
@@ -123,7 +124,7 @@ type Reconstructor interface {
ctx context.Context, blindedBlocks []interfaces.ReadOnlySignedBeaconBlock,
) ([]interfaces.SignedBeaconBlock, error)
ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte, hi func(uint64) bool) ([]blocks.VerifiedROBlob, error)
ReconstructDataColumnSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte) ([]blocks.VerifiedRODataColumn, error)
ConstructDataColumnSidecars(ctx context.Context, populator peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, error)
}
// EngineCaller defines a client that can interact with an Ethereum
@@ -651,22 +652,41 @@ func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.
return verifiedBlobs, nil
}
// ConstructDataColumnSidecars constructs the verified data column sidecars for the block referenced by the populator.
// It retrieves the KZG commitments from the populator, fetches the associated blobs and cell proofs from the EL,
// and constructs the corresponding verified read-only data column sidecars.
func (s *Service) ReconstructDataColumnSidecars(ctx context.Context, signedROBlock interfaces.ReadOnlySignedBeaconBlock, blockRoot [fieldparams.RootLength]byte) ([]blocks.VerifiedRODataColumn, error) {
block := signedROBlock.Block()
func (s *Service) ConstructDataColumnSidecars(ctx context.Context, populator peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, error) {
root := populator.Root()
log := log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", blockRoot),
"slot": block.Slot(),
"root": fmt.Sprintf("%#x", root),
"slot": populator.Slot(),
})
kzgCommitments, err := block.Body().BlobKzgCommitments()
// Fetch cells and proofs from the execution client using the KZG commitments from the populator.
cellsAndProofs, err := s.cellsAndProofsForCommitments(ctx, populator.Commitments())
if err != nil {
return nil, wrapWithBlockRoot(err, blockRoot, "blob KZG commitments")
return nil, wrapWithBlockRoot(err, root, "fetch cells and proofs from execution client")
}
// Return early if nothing is returned from the EL.
if len(cellsAndProofs) == 0 {
return nil, nil
}
// Construct data column sidecars from the populator and the cells and proofs.
roSidecars, err := peerdas.ConstructDataColumnSidecar(cellsAndProofs, populator)
if err != nil {
return nil, wrapWithBlockRoot(err, populator.Root(), "construct data column sidecars")
}
// Upgrade the sidecars to verified sidecars.
// We trust the execution layer we are connected to, so we can upgrade the sidecar into a verified one.
verifiedROSidecars := upgradeSidecarsToVerifiedSidecars(roSidecars)
log.WithField("sourceType", populator.Type()).Debug("Data columns sidecars constructed from the execution client")
return verifiedROSidecars, nil
}
// cellsAndProofsForCommitments fetches cells and proofs from the execution client (using the engine_getBlobsV2 execution API method).
func (s *Service) cellsAndProofsForCommitments(ctx context.Context, kzgCommitments [][]byte) ([]kzg.CellsAndProofs, error) {
// Collect KZG hashes for all blobs.
versionedHashes := make([]common.Hash, 0, len(kzgCommitments))
for _, commitment := range kzgCommitments {
@@ -677,12 +697,11 @@ func (s *Service) ReconstructDataColumnSidecars(ctx context.Context, signedROBlo
// Fetch all blobsAndCellsProofs from the execution client.
blobAndProofV2s, err := s.GetBlobsV2(ctx, versionedHashes)
if err != nil {
return nil, wrapWithBlockRoot(err, blockRoot, "get blobs V2")
return nil, errors.Wrap(err, "get blobs V2")
}
// Return early if nothing is returned from the EL.
if len(blobAndProofV2s) == 0 {
log.Debug("No blobs returned from execution client")
return nil, nil
}
@@ -690,34 +709,31 @@ func (s *Service) ReconstructDataColumnSidecars(ctx context.Context, signedROBlo
blobs, cellProofs := make([][]byte, 0, len(blobAndProofV2s)), make([][]byte, 0, len(blobAndProofV2s))
for _, blobsAndProofs := range blobAndProofV2s {
if blobsAndProofs == nil {
return nil, wrapWithBlockRoot(errMissingBlobsAndProofsFromEL, blockRoot, "")
return nil, errMissingBlobsAndProofsFromEL
}
blobs, cellProofs = append(blobs, blobsAndProofs.Blob), append(cellProofs, blobsAndProofs.KzgProofs...)
blobs = append(blobs, blobsAndProofs.Blob)
cellProofs = append(cellProofs, blobsAndProofs.KzgProofs...)
}
// Construct the data column sidecars from the blobs and cell proofs provided by the execution client.
dataColumnSidecars, err := peerdas.ConstructDataColumnSidecars(signedROBlock, blobs, cellProofs)
// Compute cells and proofs from the blobs and cell proofs.
cellsAndProofs, err := peerdas.ComputeCellsAndProofs(blobs, cellProofs)
if err != nil {
return nil, wrapWithBlockRoot(err, blockRoot, "construct data column sidecars")
return nil, errors.Wrap(err, "compute cells and proofs")
}
// Finally, construct verified RO data column sidecars.
// We trust the execution layer we are connected to, so we can upgrade the read only data column sidecar into a verified one.
verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(dataColumnSidecars))
for _, dataColumnSidecar := range dataColumnSidecars {
roDataColumn, err := blocks.NewRODataColumnWithRoot(dataColumnSidecar, blockRoot)
if err != nil {
return nil, wrapWithBlockRoot(err, blockRoot, "new read-only data column with root")
}
return cellsAndProofs, nil
}
verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
// upgradeSidecarsToVerifiedSidecars upgrades a list of data column sidecars into verified data column sidecars.
func upgradeSidecarsToVerifiedSidecars(roSidecars []blocks.RODataColumn) []blocks.VerifiedRODataColumn {
verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(roSidecars))
for _, roSidecar := range roSidecars {
verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roSidecar)
verifiedRODataColumns = append(verifiedRODataColumns, verifiedRODataColumn)
}
log.Debug("Data columns successfully reconstructed from the execution client")
return verifiedRODataColumns, nil
return verifiedRODataColumns
}
func fullPayloadFromPayloadBody(
@@ -1009,6 +1025,6 @@ func toBlockNumArg(number *big.Int) string {
}
// wrapWithBlockRoot returns a new error with the given block root.
func wrapWithBlockRoot(err error, blockRoot [32]byte, message string) error {
func wrapWithBlockRoot(err error, blockRoot [fieldparams.RootLength]byte, message string) error {
return errors.Wrap(err, fmt.Sprintf("%s for block %#x", message, blockRoot))
}
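// sketchConstructColumns is an illustrative sketch (names hypothetical, not part
// of this change): after the refactor, both the block path and the sidecar path
// funnel through the same populator-driven ConstructDataColumnSidecars API,
// assuming the connected EL supports engine_getBlobsV2.
func sketchConstructColumns(ctx context.Context, svc *Service, rob blocks.ROBlock, seed blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error) {
columns, err := svc.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(rob))
if err != nil {
return nil, errors.Wrap(err, "construct from block")
}
if len(columns) > 0 {
return columns, nil
}
// The EL returned nothing for the block path; retry from a verified sidecar.
return svc.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromSidecar(seed))
}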

View File

@@ -14,6 +14,7 @@ import (
"testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
mocks "github.com/OffchainLabs/prysm/v6/beacon-chain/execution/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
@@ -2556,7 +2557,7 @@ func TestReconstructBlobSidecars(t *testing.T) {
})
}
func TestReconstructDataColumnSidecars(t *testing.T) {
func TestConstructDataColumnSidecarsFromBlock(t *testing.T) {
// Start the trusted setup.
err := kzg.Start()
require.NoError(t, err)
@@ -2580,11 +2581,14 @@ func TestReconstructDataColumnSidecars(t *testing.T) {
sb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
roBlock, err := blocks.NewROBlockWithRoot(sb, r)
require.NoError(t, err)
ctx := context.Background()
t.Run("GetBlobsV2 is not supported", func(t *testing.T) {
_, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
require.ErrorContains(t, "get blobs V2 for block", err)
_, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
require.ErrorContains(t, "engine_getBlobsV2 is not supported", err)
})
t.Run("nothing received", func(t *testing.T) {
@@ -2594,7 +2598,7 @@ func TestReconstructDataColumnSidecars(t *testing.T) {
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
defer rpcClient.Close()
dataColumns, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
dataColumns, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
require.NoError(t, err)
require.Equal(t, 0, len(dataColumns))
})
@@ -2607,7 +2611,7 @@ func TestReconstructDataColumnSidecars(t *testing.T) {
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
defer rpcClient.Close()
dataColumns, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
dataColumns, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
require.NoError(t, err)
require.Equal(t, 128, len(dataColumns))
})
@@ -2620,10 +2624,134 @@ func TestReconstructDataColumnSidecars(t *testing.T) {
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
defer rpcClient.Close()
dataColumns, err := client.ReconstructDataColumnSidecars(ctx, sb, r)
_, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
require.ErrorContains(t, errMissingBlobsAndProofsFromEL.Error(), err)
})
}
func TestConstructDataColumnSidecarsFromSidecar(t *testing.T) {
// Start the trusted setup.
err := kzg.Start()
require.NoError(t, err)
// Set up the fork epochs so the test slot lands in the Fulu epoch.
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.CapellaForkEpoch = 1
cfg.DenebForkEpoch = 2
cfg.ElectraForkEpoch = 3
cfg.FuluForkEpoch = 4
params.OverrideBeaconConfig(cfg)
client := &Service{capabilityCache: &capabilityCache{}}
// Create KZG commitments for the sidecar
kzgCommitments := createRandomKzgCommitments(t, 3)
// Create a test verified data column sidecar
columnData := make([][]byte, 3)
for i := range columnData {
columnData[i] = make([]byte, kzg.BytesPerCell)
columnData[i][0] = byte(i + 0x10)
}
kzgProofs := make([][]byte, 3)
for i := range kzgProofs {
kzgProofs[i] = make([]byte, 48)
kzgProofs[i][0] = byte(i + 0x20)
}
inclusionProof := make([][]byte, 4)
for i := range inclusionProof {
inclusionProof[i] = make([]byte, 32)
inclusionProof[i][0] = byte(i + 0x30)
}
// Create the input VerifiedRODataColumn sidecar using test utility
_, verifiedSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{
{
Index: 7, // Column index 7
Column: columnData,
KzgCommitments: kzgCommitments,
KzgProofs: kzgProofs,
KzgCommitmentsInclusionProof: inclusionProof,
Slot: 4 * params.BeaconConfig().SlotsPerEpoch, // Fulu epoch
ProposerIndex: 9,
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
},
})
require.Equal(t, 1, len(verifiedSidecars))
sidecar := verifiedSidecars[0]
ctx := context.Background()
t.Run("GetBlobsV2 is not supported", func(t *testing.T) {
_, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromSidecar(sidecar.RODataColumn))
require.NotNil(t, err)
require.ErrorContains(t, "engine_getBlobsV2 is not supported", err)
})
t.Run("nothing received", func(t *testing.T) {
srv := createBlobServerV2(t, 0, []bool{})
defer srv.Close()
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
defer rpcClient.Close()
dataColumns, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromSidecar(sidecar.RODataColumn))
require.NoError(t, err)
require.Equal(t, 0, len(dataColumns))
})
t.Run("receiving all blobs", func(t *testing.T) {
blobMasks := []bool{true, true, true}
srv := createBlobServerV2(t, 3, blobMasks)
defer srv.Close()
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
defer rpcClient.Close()
dataColumns, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromSidecar(sidecar.RODataColumn))
require.NoError(t, err)
require.Equal(t, 128, len(dataColumns)) // NumberOfColumns
// Verify each sidecar has expected structure
for i, sidecar := range dataColumns {
require.Equal(t, uint64(i), sidecar.Index)
require.Equal(t, 3, len(sidecar.Column)) // 3 blobs
require.Equal(t, 3, len(sidecar.KzgCommitments)) // 3 commitments
require.Equal(t, 3, len(sidecar.KzgProofs)) // 3 proofs per column
// Verify commitments are preserved
for j, commitment := range sidecar.KzgCommitments {
require.DeepEqual(t, kzgCommitments[j], commitment)
}
// Verify inclusion proof is preserved
require.Equal(t, len(inclusionProof), len(sidecar.KzgCommitmentsInclusionProof))
for j, proof := range sidecar.KzgCommitmentsInclusionProof {
require.Equal(t, byte(j+0x30), proof[0])
}
// Verify signed block header is preserved
require.Equal(t, primitives.Slot(4*params.BeaconConfig().SlotsPerEpoch), sidecar.SignedBlockHeader.Header.Slot)
require.Equal(t, primitives.ValidatorIndex(9), sidecar.SignedBlockHeader.Header.ProposerIndex)
}
})
t.Run("missing some blobs", func(t *testing.T) {
blobMasks := []bool{false, true, true}
srv := createBlobServerV2(t, 3, blobMasks)
defer srv.Close()
rpcClient, client := setupRpcClientV2(t, srv.URL, client)
defer rpcClient.Close()
_, err := client.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromSidecar(sidecar.RODataColumn))
require.ErrorContains(t, errMissingBlobsAndProofsFromEL.Error(), err)
})
}
func createRandomKzgCommitments(t *testing.T, num int) [][]byte {

View File

@@ -14,6 +14,7 @@ go_library(
],
deps = [
"//async/event:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/execution/types:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",

View File

@@ -4,6 +4,7 @@ import (
"context"
"math/big"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
@@ -116,7 +117,8 @@ func (e *EngineClient) ReconstructBlobSidecars(context.Context, interfaces.ReadO
return e.BlobSidecars, e.ErrorBlobSidecars
}
func (e *EngineClient) ReconstructDataColumnSidecars(context.Context, interfaces.ReadOnlySignedBeaconBlock, [fieldparams.RootLength]byte) ([]blocks.VerifiedRODataColumn, error) {
// ConstructDataColumnSidecars is a mock implementation of the ConstructDataColumnSidecars method.
func (e *EngineClient) ConstructDataColumnSidecars(context.Context, peerdas.ConstructionPopulator) ([]blocks.VerifiedRODataColumn, error) {
return e.DataColumnSidecars, e.ErrorDataColumnSidecars
}

View File

@@ -941,6 +941,7 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
FinalizationFetcher: chainService,
BlockReceiver: chainService,
BlobReceiver: chainService,
DataColumnReceiver: chainService,
AttestationReceiver: chainService,
GenesisTimeFetcher: chainService,
GenesisFetcher: chainService,

View File

@@ -60,6 +60,7 @@ go_library(
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/wrapper:go_default_library",

View File

@@ -11,6 +11,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/crypto/hash"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing"
@@ -308,19 +309,13 @@ func (s *Service) BroadcastLightClientFinalityUpdate(ctx context.Context, update
// BroadcastDataColumnSidecar broadcasts a data column sidecar to the p2p network. The message is
// broadcast on the current fork digest and the given column subnet.
func (s *Service) BroadcastDataColumnSidecar(
root [fieldparams.RootLength]byte,
dataColumnSubnet uint64,
dataColumnSidecar *ethpb.DataColumnSidecar,
dataColumnSidecar blocks.VerifiedRODataColumn,
) error {
// Add tracing to the function.
ctx, span := trace.StartSpan(s.ctx, "p2p.BroadcastDataColumnSidecar")
defer span.End()
// Ensure the data column sidecar is not nil.
if dataColumnSidecar == nil {
return errors.Errorf("attempted to broadcast nil data column sidecar at subnet %d", dataColumnSubnet)
}
// Retrieve the current fork digest.
forkDigest, err := s.currentForkDigest()
if err != nil {
@@ -330,16 +325,15 @@ func (s *Service) BroadcastDataColumnSidecar(
}
// Non-blocking broadcast, with attempts to discover a column subnet peer if none available.
go s.internalBroadcastDataColumnSidecar(ctx, root, dataColumnSubnet, dataColumnSidecar, forkDigest)
go s.internalBroadcastDataColumnSidecar(ctx, dataColumnSubnet, dataColumnSidecar, forkDigest)
return nil
}
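// Call-site shape after this change (mirroring the broadcast test below): the explicit
// root argument and the nil check are gone, since a VerifiedRODataColumn always
// carries its block root and cannot be nil.
//
//	err := service.BroadcastDataColumnSidecar(subnet, verifiedRoSidecar)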
func (s *Service) internalBroadcastDataColumnSidecar(
ctx context.Context,
root [fieldparams.RootLength]byte,
columnSubnet uint64,
dataColumnSidecar *ethpb.DataColumnSidecar,
dataColumnSidecar blocks.VerifiedRODataColumn,
forkDigest [fieldparams.VersionLength]byte,
) {
// Add tracing to the function.
@@ -385,7 +379,7 @@ func (s *Service) internalBroadcastDataColumnSidecar(
log.WithFields(logrus.Fields{
"slot": slot,
"timeSinceSlotStart": time.Since(slotStartTime),
"root": fmt.Sprintf("%#x", root),
"root": fmt.Sprintf("%#x", dataColumnSidecar.BlockRoot()),
"columnSubnet": columnSubnet,
}).Debug("Broadcasted data column sidecar")

View File

@@ -711,13 +711,8 @@ func TestService_BroadcastDataColumn(t *testing.T) {
subnet := peerdas.ComputeSubnetForDataColumnSidecar(columnIndex)
topic := fmt.Sprintf(topicFormat, digest, subnet) + service.Encoding().ProtocolSuffix()
roSidecars, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: columnIndex}})
sidecar := roSidecars[0].DataColumnSidecar
// Attempt to broadcast nil object should fail.
var emptyRoot [fieldparams.RootLength]byte
err = service.BroadcastDataColumnSidecar(emptyRoot, subnet, nil)
require.ErrorContains(t, "attempted to broadcast nil", err)
_, verifiedRoSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: columnIndex}})
verifiedRoSidecar := verifiedRoSidecars[0]
// Subscribe to the topic.
sub, err := p2.SubscribeToTopic(topic)
@@ -727,7 +722,7 @@ func TestService_BroadcastDataColumn(t *testing.T) {
time.Sleep(50 * time.Millisecond)
// Broadcast to peers and wait.
err = service.BroadcastDataColumnSidecar(emptyRoot, subnet, sidecar)
err = service.BroadcastDataColumnSidecar(subnet, verifiedRoSidecar)
require.NoError(t, err)
// Receive the message.
@@ -739,5 +734,5 @@ func TestService_BroadcastDataColumn(t *testing.T) {
var result ethpb.DataColumnSidecar
require.NoError(t, service.Encoding().DecodeGossip(msg.Data, &result))
require.DeepEqual(t, &result, sidecar)
require.DeepEqual(t, &result, verifiedRoSidecar)
}

View File

@@ -155,6 +155,7 @@ func (s *Service) custodyGroupCountFromPeerENR(pid peer.ID) uint64 {
log := log.WithFields(logrus.Fields{
"peerID": pid,
"defaultValue": custodyRequirement,
"agent": agentString(pid, s.Host()),
})
// Retrieve the ENR of the peer.

View File

@@ -8,6 +8,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
testp2p "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/consensus-types/wrapper"
@@ -269,6 +270,7 @@ func TestCustodyGroupCountFromPeer(t *testing.T) {
service := &Service{
peers: peers,
metaData: tc.metadata,
host: testp2p.NewTestP2P(t).Host(),
}
// Retrieve the custody count from the remote peer.
@@ -329,6 +331,7 @@ func TestCustodyGroupCountFromPeerENR(t *testing.T) {
service := &Service{
peers: peers,
host: testp2p.NewTestP2P(t).Host(),
}
actual := service.custodyGroupCountFromPeerENR(pid)

View File

@@ -684,7 +684,7 @@ func (s *Service) filterPeer(node *enode.Node) bool {
peerData, multiAddrs, err := convertToAddrInfo(node)
if err != nil {
log.WithError(err).Debug("Could not convert to peer data")
log.WithError(err).WithField("node", node.String()).Debug("Could not convert to peer data")
return false
}
@@ -851,7 +851,7 @@ func convertToMultiAddr(nodes []*enode.Node) []ma.Multiaddr {
func convertToAddrInfo(node *enode.Node) (*peer.AddrInfo, []ma.Multiaddr, error) {
multiAddrs, err := retrieveMultiAddrsFromNode(node)
if err != nil {
return nil, nil, err
return nil, nil, errors.Wrap(err, "retrieve multiaddrs from node")
}
if len(multiAddrs) == 0 {

View File

@@ -6,6 +6,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
@@ -51,7 +52,7 @@ type (
BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.BlobSidecar) error
BroadcastLightClientOptimisticUpdate(ctx context.Context, update interfaces.LightClientOptimisticUpdate) error
BroadcastLightClientFinalityUpdate(ctx context.Context, update interfaces.LightClientFinalityUpdate) error
BroadcastDataColumnSidecar(root [fieldparams.RootLength]byte, columnSubnet uint64, dataColumnSidecar *ethpb.DataColumnSidecar) error
BroadcastDataColumnSidecar(columnSubnet uint64, dataColumnSidecar blocks.VerifiedRODataColumn) error
}
// SetStreamHandler configures p2p to handle streams of a certain topic ID.

View File

@@ -25,6 +25,7 @@ go_library(
"//beacon-chain/p2p/peers/scorers:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",

View File

@@ -6,6 +6,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
@@ -168,7 +169,7 @@ func (*FakeP2P) BroadcastLightClientFinalityUpdate(_ context.Context, _ interfac
}
// BroadcastDataColumnSidecar -- fake.
func (*FakeP2P) BroadcastDataColumnSidecar(_ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar) error {
func (*FakeP2P) BroadcastDataColumnSidecar(_ uint64, _ blocks.VerifiedRODataColumn) error {
return nil
}

View File

@@ -5,7 +5,7 @@ import (
"sync"
"sync/atomic"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"google.golang.org/protobuf/proto"
@@ -63,7 +63,7 @@ func (m *MockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
}
// BroadcastDataColumnSidecar broadcasts a data column for mock.
func (m *MockBroadcaster) BroadcastDataColumnSidecar([fieldparams.RootLength]byte, uint64, *ethpb.DataColumnSidecar) error {
func (m *MockBroadcaster) BroadcastDataColumnSidecar(uint64, blocks.VerifiedRODataColumn) error {
m.BroadcastCalled.Store(true)
return nil
}

View File

@@ -17,6 +17,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
@@ -63,6 +64,7 @@ type TestP2P struct {
custodyInfoMut sync.RWMutex // protects custodyGroupCount and earliestAvailableSlot
earliestAvailableSlot primitives.Slot
custodyGroupCount uint64
enr *enr.Record
}
// NewTestP2P initializes a new p2p test service.
@@ -103,6 +105,7 @@ func NewTestP2P(t *testing.T, userOptions ...config.Option) *TestP2P {
pubsub: ps,
joinedTopics: map[string]*pubsub.Topic{},
peers: peerStatuses,
enr: new(enr.Record),
}
}
@@ -229,7 +232,7 @@ func (p *TestP2P) BroadcastLightClientFinalityUpdate(_ context.Context, _ interf
}
// BroadcastDataColumnSidecar broadcasts a data column for mock.
func (p *TestP2P) BroadcastDataColumnSidecar([fieldparams.RootLength]byte, uint64, *ethpb.DataColumnSidecar) error {
func (p *TestP2P) BroadcastDataColumnSidecar(uint64, blocks.VerifiedRODataColumn) error {
p.BroadcastCalled.Store(true)
return nil
}
@@ -310,8 +313,8 @@ func (p *TestP2P) Host() host.Host {
}
// ENR returns the enr of the local peer.
func (*TestP2P) ENR() *enr.Record {
return new(enr.Record)
func (p *TestP2P) ENR() *enr.Record {
return p.enr
}
// NodeID returns the node id of the local peer.

View File

@@ -18,6 +18,7 @@ go_library(
"//api/server:go_default_library",
"//api/server/structs:go_default_library",
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/blockchain/kzg:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositsnapshot:go_default_library",
"//beacon-chain/core/altair:go_default_library",
@@ -60,7 +61,6 @@ go_library(
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ethereum_go_ethereum//crypto/kzg4844:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
@@ -84,6 +84,7 @@ go_test(
"//api:go_default_library",
"//api/server:go_default_library",
"//api/server/structs:go_default_library",
"//beacon-chain/blockchain/kzg:go_default_library",
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/cache/depositsnapshot:go_default_library",
"//beacon-chain/core/signing:go_default_library",
@@ -124,7 +125,6 @@ go_test(
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",

View File

@@ -13,6 +13,7 @@ import (
"github.com/OffchainLabs/prysm/v6/api"
"github.com/OffchainLabs/prysm/v6/api/server/structs"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache/depositsnapshot"
corehelpers "github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
@@ -32,7 +33,6 @@ import (
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto/kzg4844"
"github.com/pkg/errors"
ssz "github.com/prysmaticlabs/fastssz"
"github.com/sirupsen/logrus"
@@ -942,14 +942,13 @@ func decodePhase0JSON(body []byte) (*eth.GenericSignedBeaconBlock, error) {
// broadcastSidecarsIfSupported broadcasts blob sidecars when an equivocated block occurs.
func broadcastSidecarsIfSupported(ctx context.Context, s *Server, b interfaces.SignedBeaconBlock, gb *eth.GenericSignedBeaconBlock, versionHeader string) error {
switch versionHeader {
case version.String(version.Fulu):
return s.broadcastSeenBlockSidecars(ctx, b, gb.GetFulu().Blobs, gb.GetFulu().KzgProofs)
case version.String(version.Electra):
return s.broadcastSeenBlockSidecars(ctx, b, gb.GetElectra().Blobs, gb.GetElectra().KzgProofs)
case version.String(version.Deneb):
return s.broadcastSeenBlockSidecars(ctx, b, gb.GetDeneb().Blobs, gb.GetDeneb().KzgProofs)
default:
// other forks before Deneb do not support blob sidecars
// Forks after Fulu do not support blob sidecars either; they use data column sidecars instead, so there is no need to rebroadcast.
return nil
}
}
@@ -1053,7 +1052,7 @@ func (s *Server) validateConsensus(ctx context.Context, b *eth.GenericSignedBeac
return nil
}
if err := s.validateBlobSidecars(blk, blobs, proofs); err != nil {
if err := s.validateBlobs(blk, blobs, proofs); err != nil {
return err
}
@@ -1067,23 +1066,41 @@ func (s *Server) validateEquivocation(blk interfaces.ReadOnlyBeaconBlock) error
return nil
}
func (s *Server) validateBlobSidecars(blk interfaces.SignedBeaconBlock, blobs [][]byte, proofs [][]byte) error {
func (s *Server) validateBlobs(blk interfaces.SignedBeaconBlock, blobs [][]byte, proofs [][]byte) error {
if blk.Version() < version.Deneb {
return nil
}
kzgs, err := blk.Block().Body().BlobKzgCommitments()
numberOfColumns := params.BeaconConfig().NumberOfColumns
commitments, err := blk.Block().Body().BlobKzgCommitments()
if err != nil {
return errors.Wrap(err, "could not get blob kzg commitments")
}
if len(blobs) != len(proofs) || len(blobs) != len(kzgs) {
return errors.New("number of blobs, proofs, and commitments do not match")
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(blk.Block().Slot())
if len(blobs) > maxBlobsPerBlock {
return fmt.Errorf("number of blobs over max, %d > %d", len(blobs), maxBlobsPerBlock)
}
for i, blob := range blobs {
b := kzg4844.Blob(blob)
if err := kzg4844.VerifyBlobProof(&b, kzg4844.Commitment(kzgs[i]), kzg4844.Proof(proofs[i])); err != nil {
return errors.Wrap(err, "could not verify blob proof")
if blk.Version() >= version.Fulu {
// For Fulu blocks, proofs are cell proofs (blobs * numberOfColumns)
expectedProofsCount := uint64(len(blobs)) * numberOfColumns
if uint64(len(proofs)) != expectedProofsCount || len(blobs) != len(commitments) {
return fmt.Errorf("number of blobs (%d), cell proofs (%d), and commitments (%d) do not match (expected %d cell proofs)", len(blobs), len(proofs), len(commitments), expectedProofsCount)
}
// For Fulu blocks, proofs are cell proofs from execution client's BlobsBundleV2
// Verify cell proofs directly without reconstructing data column sidecars
if err := kzg.VerifyCellKZGProofBatchFromBlobData(blobs, commitments, proofs, numberOfColumns); err != nil {
return errors.Wrap(err, "could not verify cell proofs")
}
} else {
// For pre-Fulu blocks, proofs are blob proofs (1:1 with blobs)
if len(blobs) != len(proofs) || len(blobs) != len(commitments) {
return errors.Errorf("number of blobs (%d), proofs (%d), and commitments (%d) do not match", len(blobs), len(proofs), len(commitments))
}
// Use batch verification for better performance
if err := kzg.VerifyBlobKZGProofBatch(blobs, commitments, proofs); err != nil {
return errors.Wrap(err, "could not verify blob proofs")
}
}
return nil
}
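Reviewer note: the count checks above are easy to get wrong, so here is the arithmetic in isolation. A sketch assuming the standard 128-column PeerDAS configuration (the helper and numbers are illustrative, not part of the PR):

package example

import "fmt"

// expectedProofs sketches the proof-count rule enforced by validateBlobs:
// pre-Fulu blocks carry one blob proof per blob, while Fulu blocks carry one
// cell proof per (blob, column) pair.
func expectedProofs(blobCount, numberOfColumns uint64, isFulu bool) uint64 {
	if isFulu {
		return blobCount * numberOfColumns
	}
	return blobCount
}

func main() {
	// With 3 blobs and 128 columns, a Fulu block must ship 384 cell proofs,
	// while a Deneb/Electra block ships exactly 3 blob proofs.
	fmt.Println(expectedProofs(3, 128, true))  // 384
	fmt.Println(expectedProofs(3, 128, false)) // 3
}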
@@ -1627,6 +1644,8 @@ func (s *Server) broadcastSeenBlockSidecars(
if err != nil {
return err
}
// Broadcast blob sidecars with forkchoice checking
for _, sc := range scs {
r, err := sc.SignedBlockHeader.Header.HashTreeRoot()
if err != nil {

View File

@@ -14,6 +14,7 @@ import (
"github.com/OffchainLabs/prysm/v6/api"
"github.com/OffchainLabs/prysm/v6/api/server/structs"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
chainMock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache/depositsnapshot"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
@@ -40,7 +41,6 @@ import (
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
"github.com/OffchainLabs/prysm/v6/time/slots"
GoKZG "github.com/crate-crypto/go-kzg-4844"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
ssz "github.com/prysmaticlabs/fastssz"
@@ -4781,25 +4781,329 @@ func TestServer_broadcastBlobSidecars(t *testing.T) {
require.LogsContain(t, hook, "Broadcasted blob sidecar for already seen block")
}
func Test_validateBlobSidecars(t *testing.T) {
func Test_validateBlobs(t *testing.T) {
require.NoError(t, kzg.Start())
blob := util.GetRandBlob(123)
commitment := GoKZG.KZGCommitment{180, 218, 156, 194, 59, 20, 10, 189, 186, 254, 132, 93, 7, 127, 104, 172, 238, 240, 237, 70, 83, 89, 1, 152, 99, 0, 165, 65, 143, 62, 20, 215, 230, 14, 205, 95, 28, 245, 54, 25, 160, 16, 178, 31, 232, 207, 38, 85}
proof := GoKZG.KZGProof{128, 110, 116, 170, 56, 111, 126, 87, 229, 234, 211, 42, 110, 150, 129, 206, 73, 142, 167, 243, 90, 149, 240, 240, 236, 204, 143, 182, 229, 249, 81, 27, 153, 171, 83, 70, 144, 250, 42, 1, 188, 215, 71, 235, 30, 7, 175, 86}
// Generate proper commitment and proof for the blob
var kzgBlob kzg.Blob
copy(kzgBlob[:], blob[:])
commitment, err := kzg.BlobToKZGCommitment(&kzgBlob)
require.NoError(t, err)
proof, err := kzg.ComputeBlobKZGProof(&kzgBlob, commitment)
require.NoError(t, err)
blk := util.NewBeaconBlockDeneb()
blk.Block.Body.BlobKzgCommitments = [][]byte{commitment[:]}
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
s := &Server{}
require.NoError(t, s.validateBlobSidecars(b, [][]byte{blob[:]}, [][]byte{proof[:]}))
require.NoError(t, s.validateBlobs(b, [][]byte{blob[:]}, [][]byte{proof[:]}))
require.ErrorContains(t, "number of blobs, proofs, and commitments do not match", s.validateBlobSidecars(b, [][]byte{blob[:]}, [][]byte{}))
require.ErrorContains(t, "number of blobs (1), proofs (0), and commitments (1) do not match", s.validateBlobs(b, [][]byte{blob[:]}, [][]byte{}))
sk, err := bls.RandKey()
require.NoError(t, err)
blk.Block.Body.BlobKzgCommitments = [][]byte{sk.PublicKey().Marshal()}
b, err = blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.ErrorContains(t, "could not verify blob proof: can't verify opening proof", s.validateBlobSidecars(b, [][]byte{blob[:]}, [][]byte{proof[:]}))
require.ErrorContains(t, "could not verify blob proofs", s.validateBlobs(b, [][]byte{blob[:]}, [][]byte{proof[:]}))
blobs := [][]byte{}
commitments := [][]byte{}
proofs := [][]byte{}
for i := 0; i < 10; i++ {
blobs = append(blobs, blob[:])
commitments = append(commitments, commitment[:])
proofs = append(proofs, proof[:])
}
t.Run("pre-Deneb block should return early", func(t *testing.T) {
// Create a pre-Deneb block (e.g., Capella)
blk := util.NewBeaconBlockCapella()
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
s := &Server{}
// Should return nil for pre-Deneb blocks regardless of blobs
require.NoError(t, s.validateBlobs(b, [][]byte{}, [][]byte{}))
require.NoError(t, s.validateBlobs(b, blobs[:1], proofs[:1]))
})
t.Run("Deneb block with valid single blob", func(t *testing.T) {
blk := util.NewBeaconBlockDeneb()
blk.Block.Body.BlobKzgCommitments = [][]byte{commitment[:]}
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
s := &Server{}
require.NoError(t, s.validateBlobs(b, [][]byte{blob[:]}, [][]byte{proof[:]}))
})
t.Run("Deneb block with max blobs (6)", func(t *testing.T) {
cfg := params.BeaconConfig().Copy()
defer params.OverrideBeaconConfig(cfg)
testCfg := params.BeaconConfig().Copy()
testCfg.DenebForkEpoch = 0
testCfg.ElectraForkEpoch = 100
testCfg.DeprecatedMaxBlobsPerBlock = 6
params.OverrideBeaconConfig(testCfg)
blk := util.NewBeaconBlockDeneb()
blk.Block.Slot = 10 // Deneb slot
blk.Block.Body.BlobKzgCommitments = commitments[:6]
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
s := &Server{}
// Should pass with exactly 6 blobs
require.NoError(t, s.validateBlobs(b, blobs[:6], proofs[:6]))
})
t.Run("Deneb block exceeding max blobs", func(t *testing.T) {
cfg := params.BeaconConfig().Copy()
defer params.OverrideBeaconConfig(cfg)
testCfg := params.BeaconConfig().Copy()
testCfg.DenebForkEpoch = 0
testCfg.ElectraForkEpoch = 100
testCfg.DeprecatedMaxBlobsPerBlock = 6
params.OverrideBeaconConfig(testCfg)
blk := util.NewBeaconBlockDeneb()
blk.Block.Slot = 10 // Deneb slot
blk.Block.Body.BlobKzgCommitments = commitments[:7]
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
s := &Server{}
// Should fail with 7 blobs when max is 6
err = s.validateBlobs(b, blobs[:7], proofs[:7])
require.ErrorContains(t, "number of blobs over max, 7 > 6", err)
})
t.Run("Electra block with valid blobs", func(t *testing.T) {
cfg := params.BeaconConfig().Copy()
defer params.OverrideBeaconConfig(cfg)
// Set up Electra config with max 9 blobs
testCfg := params.BeaconConfig().Copy()
testCfg.DenebForkEpoch = 0
testCfg.ElectraForkEpoch = 5
testCfg.DeprecatedMaxBlobsPerBlock = 6
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
params.OverrideBeaconConfig(testCfg)
blk := util.NewBeaconBlockElectra()
blk.Block.Slot = 160 // Electra slot (epoch 5+)
blk.Block.Body.BlobKzgCommitments = commitments[:9]
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
s := &Server{}
// Should pass with 9 blobs in Electra
require.NoError(t, s.validateBlobs(b, blobs[:9], proofs[:9]))
})
t.Run("Electra block exceeding max blobs", func(t *testing.T) {
cfg := params.BeaconConfig().Copy()
defer params.OverrideBeaconConfig(cfg)
// Set up Electra config with max 9 blobs
testCfg := params.BeaconConfig().Copy()
testCfg.DenebForkEpoch = 0
testCfg.ElectraForkEpoch = 5
testCfg.DeprecatedMaxBlobsPerBlock = 6
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
params.OverrideBeaconConfig(testCfg)
blk := util.NewBeaconBlockElectra()
blk.Block.Slot = 160 // Electra slot
blk.Block.Body.BlobKzgCommitments = commitments[:10]
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
s := &Server{}
// Should fail with 10 blobs when max is 9
err = s.validateBlobs(b, blobs[:10], proofs[:10])
require.ErrorContains(t, "number of blobs over max, 10 > 9", err)
})
t.Run("Fulu block with valid cell proofs", func(t *testing.T) {
cfg := params.BeaconConfig().Copy()
defer params.OverrideBeaconConfig(cfg)
testCfg := params.BeaconConfig().Copy()
testCfg.DenebForkEpoch = 0
testCfg.ElectraForkEpoch = 5
testCfg.FuluForkEpoch = 10
testCfg.DeprecatedMaxBlobsPerBlock = 6
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
testCfg.NumberOfColumns = 128 // Standard PeerDAS configuration
params.OverrideBeaconConfig(testCfg)
// Create Fulu block with proper cell proofs
blk := util.NewBeaconBlockFulu()
blk.Block.Slot = 320 // Epoch 10 (Fulu fork)
// Generate valid commitments and cell proofs for testing
blobCount := 2
commitments := make([][]byte, blobCount)
fuluBlobs := make([][]byte, blobCount)
var kzgBlobs []kzg.Blob
for i := 0; i < blobCount; i++ {
blob := util.GetRandBlob(int64(i))
fuluBlobs[i] = blob[:]
var kzgBlob kzg.Blob
copy(kzgBlob[:], blob[:])
kzgBlobs = append(kzgBlobs, kzgBlob)
// Generate commitment
commitment, err := kzg.BlobToKZGCommitment(&kzgBlob)
require.NoError(t, err)
commitments[i] = commitment[:]
}
blk.Block.Body.BlobKzgCommitments = commitments
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
// Generate cell proofs for the blobs (flattened format like execution client)
numberOfColumns := params.BeaconConfig().NumberOfColumns
cellProofs := make([][]byte, uint64(blobCount)*numberOfColumns)
for blobIdx := 0; blobIdx < blobCount; blobIdx++ {
cellsAndProofs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlobs[blobIdx])
require.NoError(t, err)
for colIdx := uint64(0); colIdx < numberOfColumns; colIdx++ {
cellProofIdx := uint64(blobIdx)*numberOfColumns + colIdx
cellProofs[cellProofIdx] = cellsAndProofs.Proofs[colIdx][:]
}
}
s := &Server{}
// Should use cell batch verification for Fulu blocks
require.NoError(t, s.validateBlobs(b, fuluBlobs, cellProofs))
})
t.Run("Fulu block with invalid cell proof count", func(t *testing.T) {
cfg := params.BeaconConfig().Copy()
defer params.OverrideBeaconConfig(cfg)
testCfg := params.BeaconConfig().Copy()
testCfg.DenebForkEpoch = 0
testCfg.ElectraForkEpoch = 5
testCfg.FuluForkEpoch = 10
testCfg.NumberOfColumns = 128
params.OverrideBeaconConfig(testCfg)
blk := util.NewBeaconBlockFulu()
blk.Block.Slot = 320 // Epoch 10 (Fulu fork)
// Create valid commitments but wrong number of cell proofs
blobCount := 2
commitments := make([][]byte, blobCount)
fuluBlobs := make([][]byte, blobCount)
for i := 0; i < blobCount; i++ {
blob := util.GetRandBlob(int64(i))
fuluBlobs[i] = blob[:]
var kzgBlob kzg.Blob
copy(kzgBlob[:], blob[:])
commitment, err := kzg.BlobToKZGCommitment(&kzgBlob)
require.NoError(t, err)
commitments[i] = commitment[:]
}
blk.Block.Body.BlobKzgCommitments = commitments
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
// Wrong number of cell proofs (should be blobCount * numberOfColumns)
wrongCellProofs := make([][]byte, 10) // Too few proofs
s := &Server{}
err = s.validateBlobs(b, fuluBlobs, wrongCellProofs)
require.ErrorContains(t, "do not match", err)
})
t.Run("Deneb block with invalid blob proof", func(t *testing.T) {
blob := util.GetRandBlob(123)
invalidProof := make([]byte, 48) // All zeros - invalid proof
sk, err := bls.RandKey()
require.NoError(t, err)
blk := util.NewBeaconBlockDeneb()
blk.Block.Body.BlobKzgCommitments = [][]byte{sk.PublicKey().Marshal()}
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
s := &Server{}
err = s.validateBlobs(b, [][]byte{blob[:]}, [][]byte{invalidProof})
require.ErrorContains(t, "could not verify blob proofs", err)
})
t.Run("empty blobs and proofs should pass", func(t *testing.T) {
blk := util.NewBeaconBlockDeneb()
blk.Block.Body.BlobKzgCommitments = [][]byte{}
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
s := &Server{}
require.NoError(t, s.validateBlobs(b, [][]byte{}, [][]byte{}))
})
t.Run("BlobSchedule with progressive increases (BPO)", func(t *testing.T) {
cfg := params.BeaconConfig().Copy()
defer params.OverrideBeaconConfig(cfg)
// Set up config with BlobSchedule (BPO - Blob Parameter Only forks)
testCfg := params.BeaconConfig().Copy()
testCfg.DenebForkEpoch = 0
testCfg.ElectraForkEpoch = 100
testCfg.FuluForkEpoch = 200
testCfg.DeprecatedMaxBlobsPerBlock = 6
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
// Define blob schedule with progressive increases
testCfg.BlobSchedule = []params.BlobScheduleEntry{
{Epoch: 0, MaxBlobsPerBlock: 3}, // Start with 3 blobs
{Epoch: 10, MaxBlobsPerBlock: 5}, // Increase to 5 at epoch 10
{Epoch: 20, MaxBlobsPerBlock: 7}, // Increase to 7 at epoch 20
{Epoch: 30, MaxBlobsPerBlock: 9}, // Increase to 9 at epoch 30
}
params.OverrideBeaconConfig(testCfg)
s := &Server{}
// Test epoch 0-9: max 3 blobs
t.Run("epoch 0-9: max 3 blobs", func(t *testing.T) {
blk := util.NewBeaconBlockDeneb()
blk.Block.Slot = 5 // Epoch 0
blk.Block.Body.BlobKzgCommitments = commitments[:3]
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, s.validateBlobs(b, blobs[:3], proofs[:3]))
// Should fail with 4 blobs
blk.Block.Body.BlobKzgCommitments = commitments[:4]
b, err = blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
err = s.validateBlobs(b, blobs[:4], proofs[:4])
require.ErrorContains(t, "number of blobs over max, 4 > 3", err)
})
// Test epoch 30+: max 9 blobs
t.Run("epoch 30+: max 9 blobs", func(t *testing.T) {
blk := util.NewBeaconBlockDeneb()
blk.Block.Slot = 960 // Epoch 30
blk.Block.Body.BlobKzgCommitments = commitments[:9]
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, s.validateBlobs(b, blobs[:9], proofs[:9]))
// Should fail with 10 blobs
blk.Block.Body.BlobKzgCommitments = commitments[:10]
b, err = blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
err = s.validateBlobs(b, blobs[:10], proofs[:10])
require.ErrorContains(t, "number of blobs over max, 10 > 9", err)
})
})
}
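Reviewer note: the schedule resolution these sub-tests assert — the last entry whose activation epoch has been reached wins — can be sketched as follows. This is an assumption about MaxBlobsPerBlock's lookup behavior, consistent with the expected values and errors asserted above:

package example

type entry struct {
	epoch    uint64
	maxBlobs uint64
}

// maxBlobsAt sketches the BlobSchedule lookup the test relies on: entries are
// ordered by activation epoch and the last entry whose epoch has been reached
// wins, e.g. 3 blobs at epochs 0-9, 5 at 10-19, 7 at 20-29, 9 from 30 on.
func maxBlobsAt(schedule []entry, epoch uint64) uint64 {
	var current uint64
	for _, e := range schedule {
		if e.epoch <= epoch {
			current = e.maxBlobs
		}
	}
	return current
}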
func TestGetPendingConsolidations(t *testing.T) {

View File

@@ -1029,8 +1029,8 @@ func (s *Server) GetProposerDuties(w http.ResponseWriter, r *http.Request) {
httputil.HandleError(w, fmt.Sprintf("Could not get head state: %v ", err), http.StatusInternalServerError)
return
}
// Advance state with empty transitions up to the requested epoch start slot for pre fulu state only. Fulu state utilizes proposer look ahead field.
if st.Slot() < epochStartSlot && st.Version() != version.Fulu {
// Notice that even for Fulu requests for the next epoch, we are only advancing the state to the start of the current epoch.
if st.Slot() < epochStartSlot {
headRoot, err := s.HeadFetcher.HeadRoot(ctx)
if err != nil {
httputil.HandleError(w, fmt.Sprintf("Could not get head root: %v ", err), http.StatusInternalServerError)

View File

@@ -2645,78 +2645,6 @@ func TestGetProposerDuties(t *testing.T) {
})
}
func TestGetProposerDuties_FuluState(t *testing.T) {
helpers.ClearCache()
// Create a Fulu state with slot 0 (before epoch 1 start slot which is 32)
fuluState, err := util.NewBeaconStateFulu()
require.NoError(t, err)
require.NoError(t, fuluState.SetSlot(0)) // Set to slot 0
// Create some validators for the test
depChainStart := params.BeaconConfig().MinGenesisActiveValidatorCount
deposits, _, err := util.DeterministicDepositsAndKeys(depChainStart)
require.NoError(t, err)
validators := make([]*ethpbalpha.Validator, len(deposits))
for i, deposit := range deposits {
validators[i] = &ethpbalpha.Validator{
PublicKey: deposit.Data.PublicKey,
ActivationEpoch: 0,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
WithdrawalCredentials: make([]byte, 32),
}
}
require.NoError(t, fuluState.SetValidators(validators))
// Set up block roots
genesis := util.NewBeaconBlock()
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
roots := make([][]byte, fieldparams.BlockRootsLength)
roots[0] = genesisRoot[:]
require.NoError(t, fuluState.SetBlockRoots(roots))
chainSlot := primitives.Slot(0)
chain := &mockChain.ChainService{
State: fuluState, Root: genesisRoot[:], Slot: &chainSlot,
}
db := dbutil.SetupDB(t)
require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), genesisRoot))
s := &Server{
Stater: &testutil.MockStater{StatesBySlot: map[primitives.Slot]state.BeaconState{0: fuluState}},
HeadFetcher: chain,
TimeFetcher: chain,
OptimisticModeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
PayloadIDCache: cache.NewPayloadIDCache(),
TrackedValidatorsCache: cache.NewTrackedValidatorsCache(),
BeaconDB: db,
}
// Request epoch 1 duties, which should require advancing from slot 0 to slot 32
// But for Fulu state, this advancement should be skipped
request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/proposer/{epoch}", nil)
request.SetPathValue("epoch", "1")
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetProposerDuties(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
// Verify the state was not advanced - it should still be at slot 0
// This is the key assertion for the regression test
assert.Equal(t, primitives.Slot(0), fuluState.Slot(), "Fulu state should not have been advanced")
resp := &structs.GetProposerDutiesResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
// Should still return proposer duties despite not advancing the state
assert.Equal(t, true, len(resp.Data) > 0, "Should return proposer duties even without state advancement")
}
func TestGetSyncCommitteeDuties(t *testing.T) {
helpers.ClearCache()
params.SetupTestConfigCleanup(t)

View File

@@ -220,16 +220,13 @@ func TestGetBlob(t *testing.T) {
cellsAndProofsList = append(cellsAndProofsList, cellsAndProogs)
}
dataColumnSidecarPb, err := peerdas.DataColumnSidecars(fuluBlock, cellsAndProofsList)
roDataColumnSidecars, err := peerdas.ConstructDataColumnSidecar(cellsAndProofsList, peerdas.PopulateFromBlock(fuluBlock))
require.NoError(t, err)
verifiedRoDataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, len(dataColumnSidecarPb))
for _, sidecarPb := range dataColumnSidecarPb {
roDataColumn, err := blocks.NewRODataColumnWithRoot(sidecarPb, fuluBlockRoot)
require.NoError(t, err)
verifiedRoDataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
verifiedRoDataColumnSidecars = append(verifiedRoDataColumnSidecars, verifiedRoDataColumn)
verifiedRoDataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, len(roDataColumnSidecars))
for _, roDataColumnSidecar := range roDataColumnSidecars {
verifiedRoDataColumnSidecar := blocks.NewVerifiedRODataColumn(roDataColumnSidecar)
verifiedRoDataColumnSidecars = append(verifiedRoDataColumnSidecars, verifiedRoDataColumnSidecar)
}
err = db.SaveBlock(t.Context(), fuluBlock)

View File

@@ -39,6 +39,7 @@ go_library(
"//api/client/builder:go_default_library",
"//async/event:go_default_library",
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/blockchain/kzg:go_default_library",
"//beacon-chain/builder:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositsnapshot:go_default_library",
@@ -49,6 +50,7 @@ go_library(
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition:go_default_library",

View File

@@ -15,9 +15,11 @@ import (
blockfeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/block"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/operation"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/kv"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
@@ -58,28 +60,31 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
if err != nil {
log.WithError(err).Error("Could not convert slot to time")
}
log.WithFields(logrus.Fields{
"slot": req.Slot,
"sinceSlotStartTime": time.Since(t),
}).Info("Begin building block")
log := log.WithField("slot", req.Slot)
log.WithField("sinceSlotStartTime", time.Since(t)).Info("Begin building block")
// A syncing validator should not produce a block.
if vs.SyncChecker.Syncing() {
log.Error("Fail to build block: node is syncing")
return nil, status.Error(codes.Unavailable, "Syncing to latest head, not ready to respond")
}
// An optimistic validator MUST NOT produce a block (i.e., sign across the DOMAIN_BEACON_PROPOSER domain).
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().BellatrixForkEpoch {
if err := vs.optimisticStatus(ctx); err != nil {
log.WithError(err).Error("Fail to build block: node is optimistic")
return nil, status.Errorf(codes.Unavailable, "Validator is not ready to propose: %v", err)
}
}
head, parentRoot, err := vs.getParentState(ctx, req.Slot)
if err != nil {
log.WithError(err).Error("Fail to build block: could not get parent state")
return nil, err
}
sBlk, err := getEmptyBlock(req.Slot)
if err != nil {
log.WithError(err).Error("Fail to build block: could not get empty block")
return nil, status.Errorf(codes.Internal, "Could not prepare block: %v", err)
}
// Set slot, graffiti, randao reveal, and parent root.
@@ -101,8 +106,7 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
}
resp, err := vs.BuildBlockParallel(ctx, sBlk, head, req.SkipMevBoost, builderBoostFactor)
log := log.WithFields(logrus.Fields{
"slot": req.Slot,
log = log.WithFields(logrus.Fields{
"sinceSlotStartTime": time.Since(t),
"validator": sBlk.Block().ProposerIndex(),
})
@@ -275,6 +279,11 @@ func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.Signed
//
// ProposeBeaconBlock handles the proposal of beacon blocks.
func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSignedBeaconBlock) (*ethpb.ProposeResponse, error) {
var (
blobSidecars []*ethpb.BlobSidecar
dataColumnSidecars []blocks.RODataColumn
)
ctx, span := trace.StartSpan(ctx, "ProposerServer.ProposeBeaconBlock")
defer span.End()
@@ -300,11 +309,11 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
return &ethpb.ProposeResponse{BlockRoot: root[:]}, nil
}
var sidecars []*ethpb.BlobSidecar
rob, err := blocks.NewROBlockWithRoot(block, root)
if block.IsBlinded() {
block, sidecars, err = vs.handleBlindedBlock(ctx, block)
block, blobSidecars, err = vs.handleBlindedBlock(ctx, block)
} else if block.Version() >= version.Deneb {
sidecars, err = vs.blobSidecarsFromUnblindedBlock(block, req)
blobSidecars, dataColumnSidecars, err = vs.handleUnblindedBlock(rob, req)
}
if err != nil {
return nil, status.Errorf(codes.Internal, "%s: %v", "handle block failed", err)
@@ -323,10 +332,9 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
errChan <- nil
}()
if err := vs.broadcastAndReceiveBlobs(ctx, sidecars, root); err != nil {
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive blobs: %v", err)
if err := vs.broadcastAndReceiveSidecars(ctx, block, root, blobSidecars, dataColumnSidecars); err != nil {
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive sidecars: %v", err)
}
wg.Wait()
if err := <-errChan; err != nil {
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive block: %v", err)
@@ -335,12 +343,35 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
return &ethpb.ProposeResponse{BlockRoot: root[:]}, nil
}
// broadcastAndReceiveSidecars broadcasts and receives sidecars.
func (vs *Server) broadcastAndReceiveSidecars(
ctx context.Context,
block interfaces.SignedBeaconBlock,
root [fieldparams.RootLength]byte,
blobSidecars []*ethpb.BlobSidecar,
dataColumnSidecars []blocks.RODataColumn,
) error {
if block.Version() >= version.Fulu {
if err := vs.broadcastAndReceiveDataColumns(ctx, dataColumnSidecars, root); err != nil {
return errors.Wrap(err, "broadcast and receive data columns")
}
return nil
}
if err := vs.broadcastAndReceiveBlobs(ctx, blobSidecars, root); err != nil {
return errors.Wrap(err, "broadcast and receive blobs")
}
return nil
}
// handleBlindedBlock processes blinded beacon blocks (pre-Fulu only).
// Post-Fulu blinded blocks are handled directly in ProposeBeaconBlock.
func (vs *Server) handleBlindedBlock(ctx context.Context, block interfaces.SignedBeaconBlock) (interfaces.SignedBeaconBlock, []*ethpb.BlobSidecar, error) {
if block.Version() < version.Bellatrix {
return nil, nil, errors.New("pre-Bellatrix blinded block")
}
if vs.BlockBuilder == nil || !vs.BlockBuilder.Configured() {
return nil, nil, errors.New("unconfigured block builder")
}
@@ -367,16 +398,41 @@ func (vs *Server) handleBlindedBlock(ctx context.Context, block interfaces.Signe
return copiedBlock, sidecars, nil
}
func (vs *Server) blobSidecarsFromUnblindedBlock(block interfaces.SignedBeaconBlock, req *ethpb.GenericSignedBeaconBlock) ([]*ethpb.BlobSidecar, error) {
func (vs *Server) handleUnblindedBlock(
block blocks.ROBlock,
req *ethpb.GenericSignedBeaconBlock,
) ([]*ethpb.BlobSidecar, []blocks.RODataColumn, error) {
rawBlobs, proofs, err := blobsAndProofs(req)
if err != nil {
return nil, err
return nil, nil, err
}
return BuildBlobSidecars(block, rawBlobs, proofs)
if block.Version() >= version.Fulu {
// Compute cells and proofs from the blobs and cell proofs.
cellsAndProofs, err := peerdas.ComputeCellsAndProofs(rawBlobs, proofs)
if err != nil {
return nil, nil, errors.Wrap(err, "compute cells and proofs")
}
// Construct data column sidecars from the signed block and cells and proofs.
roDataColumnSidecars, err := peerdas.ConstructDataColumnSidecar(cellsAndProofs, peerdas.PopulateFromBlock(block))
if err != nil {
return nil, nil, errors.Wrap(err, "data column sidcars")
}
return nil, roDataColumnSidecars, nil
}
blobSidecars, err := BuildBlobSidecars(block, rawBlobs, proofs)
if err != nil {
return nil, nil, errors.Wrap(err, "build blob sidecars")
}
return blobSidecars, nil, nil
}
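Reviewer note: the proofs handed to peerdas.ComputeCellsAndProofs above arrive in the flattened BlobsBundleV2 layout, one cell proof per (blob, column) pair, which is also what the tests later in this diff construct. A sketch of the indexing assumed here (the helper name is illustrative):

package example

// cellProofAt sketches how a flattened cell-proof slice from the execution
// client's BlobsBundleV2 is indexed: proofs for blob b occupy the contiguous
// range [b*numberOfColumns, (b+1)*numberOfColumns).
func cellProofAt(proofs [][]byte, blobIdx, colIdx, numberOfColumns uint64) []byte {
	return proofs[blobIdx*numberOfColumns+colIdx]
}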
// broadcastReceiveBlock broadcasts a block and handles its reception.
func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, root [32]byte) error {
func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, root [fieldparams.RootLength]byte) error {
protoBlock, err := block.Proto()
if err != nil {
return errors.Wrap(err, "protobuf conversion failed")
@@ -392,18 +448,14 @@ func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.Si
}
// broadcastAndReceiveBlobs handles the broadcasting and reception of blob sidecars.
func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethpb.BlobSidecar, root [32]byte) error {
func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethpb.BlobSidecar, root [fieldparams.RootLength]byte) error {
eg, eCtx := errgroup.WithContext(ctx)
for i, sc := range sidecars {
// Copy the iteration instance to a local variable to give each go-routine its own copy to play with.
// See https://golang.org/doc/faq#closures_and_goroutines for more details.
subIdx := i
sCar := sc
for subIdx, sc := range sidecars {
eg.Go(func() error {
if err := vs.P2P.BroadcastBlob(eCtx, uint64(subIdx), sCar); err != nil {
if err := vs.P2P.BroadcastBlob(eCtx, uint64(subIdx), sc); err != nil {
return errors.Wrap(err, "broadcast blob failed")
}
readOnlySc, err := blocks.NewROBlobWithRoot(sCar, root)
readOnlySc, err := blocks.NewROBlobWithRoot(sc, root)
if err != nil {
return errors.Wrap(err, "ROBlob creation failed")
}
@@ -421,6 +473,48 @@ func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethp
return eg.Wait()
}
// broadcastAndReceiveDataColumns handles the broadcasting and reception of data columns sidecars.
func (vs *Server) broadcastAndReceiveDataColumns(
ctx context.Context,
roSidecars []blocks.RODataColumn,
root [fieldparams.RootLength]byte,
) error {
verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(roSidecars))
eg, _ := errgroup.WithContext(ctx)
for _, roSidecar := range roSidecars {
// We build this block ourselves, so we can upgrade the read only data column sidecar into a verified one.
verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roSidecar)
verifiedRODataColumns = append(verifiedRODataColumns, verifiedRODataColumn)
eg.Go(func() error {
// Compute the subnet index based on the column index.
subnet := peerdas.ComputeSubnetForDataColumnSidecar(roSidecar.Index)
if err := vs.P2P.BroadcastDataColumnSidecar(subnet, verifiedRODataColumn); err != nil {
return errors.Wrap(err, "broadcast data column")
}
return nil
})
}
if err := eg.Wait(); err != nil {
return errors.Wrap(err, "wait for data columns to be broadcasted")
}
if err := vs.DataColumnReceiver.ReceiveDataColumns(verifiedRODataColumns); err != nil {
return errors.Wrap(err, "receive data column")
}
for _, verifiedRODataColumn := range verifiedRODataColumns {
vs.OperationNotifier.OperationFeed().Send(&feed.Event{
Type: operation.DataColumnSidecarReceived,
Data: &operation.DataColumnSidecarReceivedData{DataColumn: &verifiedRODataColumn}, // #nosec G601
})
}
return nil
}
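Reviewer note: for reference, ComputeSubnetForDataColumnSidecar follows the consensus-spec mapping of column index to gossip subnet; a minimal sketch under that assumption:

package example

// computeSubnet mirrors the spec's compute_subnet_for_data_column_sidecar:
// a column index maps onto a gossip subnet by simple modulo. With the default
// 128 columns and 128 subnets the mapping happens to be the identity.
func computeSubnet(columnIndex, dataColumnSidecarSubnetCount uint64) uint64 {
	return columnIndex % dataColumnSidecarSubnetCount
}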
// Deprecated: The gRPC API will remain the default and fully supported through v8 (expected in 2026) but will eventually be removed in favor of the REST API.
//
// PrepareBeaconProposer caches and updates the fee recipient for the given proposer.

View File

@@ -10,7 +10,7 @@ import (
)
// BuildBlobSidecars given a block, builds the blob sidecars for the block.
func BuildBlobSidecars(blk interfaces.SignedBeaconBlock, blobs [][]byte, kzgProofs [][]byte) ([]*ethpb.BlobSidecar, error) {
func BuildBlobSidecars(blk interfaces.ReadOnlySignedBeaconBlock, blobs [][]byte, kzgProofs [][]byte) ([]*ethpb.BlobSidecar, error) {
if blk.Version() < version.Deneb {
return nil, nil // No blobs before deneb.
}

View File

@@ -6,6 +6,7 @@ import (
"testing"
"time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/builder"
builderTest "github.com/OffchainLabs/prysm/v6/beacon-chain/builder/testing"
@@ -894,6 +895,9 @@ func injectSlashings(t *testing.T, st state.BeaconState, keys []bls.SecretKey, s
}
func TestProposer_ProposeBlock_OK(t *testing.T) {
// Initialize KZG for Fulu blocks
require.NoError(t, kzg.Start())
tests := []struct {
name string
block func([32]byte) *ethpb.GenericSignedBeaconBlock
@@ -1098,6 +1102,131 @@ func TestProposer_ProposeBlock_OK(t *testing.T) {
},
err: "blob KZG commitments don't match number of blobs or KZG proofs",
},
{
name: "fulu block no blob",
block: func(parent [32]byte) *ethpb.GenericSignedBeaconBlock {
sb := &ethpb.SignedBeaconBlockContentsFulu{
Block: &ethpb.SignedBeaconBlockFulu{
Block: &ethpb.BeaconBlockElectra{Slot: 5, ParentRoot: parent[:], Body: util.HydrateBeaconBlockBodyElectra(&ethpb.BeaconBlockBodyElectra{})},
},
}
blk := &ethpb.GenericSignedBeaconBlock_Fulu{Fulu: sb}
return &ethpb.GenericSignedBeaconBlock{Block: blk, IsBlinded: false}
},
},
{
name: "fulu block with single blob and cell proofs",
block: func(parent [32]byte) *ethpb.GenericSignedBeaconBlock {
numberOfColumns := uint64(128)
// For Fulu, we have cell proofs (blobs * numberOfColumns)
cellProofs := make([][]byte, numberOfColumns)
for i := uint64(0); i < numberOfColumns; i++ {
cellProofs[i] = bytesutil.PadTo([]byte{byte(i)}, 48)
}
// Blob must be exactly 131072 bytes
blob := make([]byte, 131072)
blob[0] = 0x01
sb := &ethpb.SignedBeaconBlockContentsFulu{
Block: &ethpb.SignedBeaconBlockFulu{
Block: &ethpb.BeaconBlockElectra{
Slot: 5, ParentRoot: parent[:],
Body: util.HydrateBeaconBlockBodyElectra(&ethpb.BeaconBlockBodyElectra{
BlobKzgCommitments: [][]byte{bytesutil.PadTo([]byte("kc"), 48)},
}),
},
},
KzgProofs: cellProofs,
Blobs: [][]byte{blob},
}
blk := &ethpb.GenericSignedBeaconBlock_Fulu{Fulu: sb}
return &ethpb.GenericSignedBeaconBlock{Block: blk, IsBlinded: false}
},
},
{
name: "fulu block with multiple blobs and cell proofs",
block: func(parent [32]byte) *ethpb.GenericSignedBeaconBlock {
numberOfColumns := uint64(128)
blobCount := 3
// For Fulu, we have cell proofs (blobs * numberOfColumns)
cellProofs := make([][]byte, uint64(blobCount)*numberOfColumns)
for i := range cellProofs {
cellProofs[i] = bytesutil.PadTo([]byte{byte(i % 256)}, 48)
}
// Create properly sized blobs (131072 bytes each)
blobs := make([][]byte, blobCount)
for i := 0; i < blobCount; i++ {
blob := make([]byte, 131072)
blob[0] = byte(i + 1)
blobs[i] = blob
}
sb := &ethpb.SignedBeaconBlockContentsFulu{
Block: &ethpb.SignedBeaconBlockFulu{
Block: &ethpb.BeaconBlockElectra{
Slot: 5, ParentRoot: parent[:],
Body: util.HydrateBeaconBlockBodyElectra(&ethpb.BeaconBlockBodyElectra{
BlobKzgCommitments: [][]byte{
bytesutil.PadTo([]byte("kc"), 48),
bytesutil.PadTo([]byte("kc1"), 48),
bytesutil.PadTo([]byte("kc2"), 48),
},
}),
},
},
KzgProofs: cellProofs,
Blobs: blobs,
}
blk := &ethpb.GenericSignedBeaconBlock_Fulu{Fulu: sb}
return &ethpb.GenericSignedBeaconBlock{Block: blk, IsBlinded: false}
},
},
{
name: "fulu block wrong cell proof count (should be blobs * 128)",
block: func(parent [32]byte) *ethpb.GenericSignedBeaconBlock {
// Wrong number of cell proofs - should be 2 * 128 = 256, but providing only 2
// Create properly sized blobs
blob1 := make([]byte, 131072)
blob1[0] = 0x01
blob2 := make([]byte, 131072)
blob2[0] = 0x02
sb := &ethpb.SignedBeaconBlockContentsFulu{
Block: &ethpb.SignedBeaconBlockFulu{
Block: &ethpb.BeaconBlockElectra{
Slot: 5, ParentRoot: parent[:],
Body: util.HydrateBeaconBlockBodyElectra(&ethpb.BeaconBlockBodyElectra{
BlobKzgCommitments: [][]byte{
bytesutil.PadTo([]byte("kc"), 48),
bytesutil.PadTo([]byte("kc1"), 48),
},
}),
},
},
KzgProofs: [][]byte{{0x01}, {0x02}}, // Wrong: should be 256 cell proofs
Blobs: [][]byte{blob1, blob2},
}
blk := &ethpb.GenericSignedBeaconBlock_Fulu{Fulu: sb}
return &ethpb.GenericSignedBeaconBlock{Block: blk, IsBlinded: false}
},
err: "blobs and cells proofs mismatch",
},
{
name: "blind fulu block with blob commitments",
block: func(parent [32]byte) *ethpb.GenericSignedBeaconBlock {
blockToPropose := util.NewBlindedBeaconBlockFulu()
blockToPropose.Message.Slot = 5
blockToPropose.Message.ParentRoot = parent[:]
txRoot, err := ssz.TransactionsRoot([][]byte{})
require.NoError(t, err)
withdrawalsRoot, err := ssz.WithdrawalSliceRoot([]*enginev1.Withdrawal{}, fieldparams.MaxWithdrawalsPerPayload)
require.NoError(t, err)
blockToPropose.Message.Body.ExecutionPayloadHeader.TransactionsRoot = txRoot[:]
blockToPropose.Message.Body.ExecutionPayloadHeader.WithdrawalsRoot = withdrawalsRoot[:]
blockToPropose.Message.Body.BlobKzgCommitments = [][]byte{bytesutil.PadTo([]byte{0x01}, 48)}
blk := &ethpb.GenericSignedBeaconBlock_BlindedFulu{BlindedFulu: blockToPropose}
return &ethpb.GenericSignedBeaconBlock{Block: blk}
},
useBuilder: true,
err: "commitment value doesn't match block", // Known issue with mock builder cell proof mismatch
},
}
for _, tt := range tests {
@@ -1111,15 +1240,29 @@ func TestProposer_ProposeBlock_OK(t *testing.T) {
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
db := dbutil.SetupDB(t)
// Create cell proofs for Fulu blocks (128 proofs per blob)
numberOfColumns := uint64(128)
cellProofs := make([][]byte, numberOfColumns)
for i := uint64(0); i < numberOfColumns; i++ {
cellProofs[i] = bytesutil.PadTo([]byte{byte(i)}, 48)
}
// Create properly sized blob for mock builder
mockBlob := make([]byte, 131072)
mockBlob[0] = 0x03
// Use the same commitment as in the blind block test
mockCommitment := bytesutil.PadTo([]byte{0x01}, 48)
proposerServer := &Server{
BlockReceiver: c,
BlockNotifier: c.BlockNotifier(),
P2P: mockp2p.NewTestP2P(t),
BlockBuilder: &builderTest.MockBuilderService{HasConfigured: tt.useBuilder, PayloadCapella: emptyPayloadCapella(), PayloadDeneb: emptyPayloadDeneb(),
BlobBundle: &enginev1.BlobsBundle{KzgCommitments: [][]byte{bytesutil.PadTo([]byte{0x01}, 48)}, Proofs: [][]byte{{0x02}}, Blobs: [][]byte{{0x03}}}},
BeaconDB: db,
BlobReceiver: c,
OperationNotifier: c.OperationNotifier(),
BlobBundle: &enginev1.BlobsBundle{KzgCommitments: [][]byte{mockCommitment}, Proofs: [][]byte{{0x02}}, Blobs: [][]byte{{0x03}}},
BlobBundleV2: &enginev1.BlobsBundleV2{KzgCommitments: [][]byte{mockCommitment}, Proofs: cellProofs, Blobs: [][]byte{mockBlob}}},
BeaconDB: db,
BlobReceiver: c,
DataColumnReceiver: c, // Add DataColumnReceiver for Fulu blocks
OperationNotifier: c.OperationNotifier(),
}
blockToPropose := tt.block(bsRoot)
res, err := proposerServer.ProposeBeaconBlock(t.Context(), blockToPropose)

View File

@@ -69,6 +69,7 @@ type Server struct {
SyncCommitteePool synccommittee.Pool
BlockReceiver blockchain.BlockReceiver
BlobReceiver blockchain.BlobReceiver
DataColumnReceiver blockchain.DataColumnReceiver
MockEth1Votes bool
Eth1BlockFetcher execution.POWBlockFetcher
PendingDepositsFetcher depositsnapshot.PendingDepositsFetcher

View File

@@ -89,6 +89,7 @@ type Config struct {
AttestationReceiver blockchain.AttestationReceiver
BlockReceiver blockchain.BlockReceiver
BlobReceiver blockchain.BlobReceiver
DataColumnReceiver blockchain.DataColumnReceiver
ExecutionChainService execution.Chain
ChainStartFetcher execution.ChainStartFetcher
ExecutionChainInfoFetcher execution.ChainInfoFetcher
@@ -238,6 +239,7 @@ func NewService(ctx context.Context, cfg *Config) *Service {
P2P: s.cfg.Broadcaster,
BlockReceiver: s.cfg.BlockReceiver,
BlobReceiver: s.cfg.BlobReceiver,
DataColumnReceiver: s.cfg.DataColumnReceiver,
MockEth1Votes: s.cfg.MockEth1Votes,
Eth1BlockFetcher: s.cfg.ExecutionChainService,
PendingDepositsFetcher: s.cfg.PendingDepositFetcher,

View File

@@ -8,6 +8,7 @@ go_library(
"mock_blocker.go",
"mock_exec_chain_info_fetcher.go",
"mock_genesis_timefetcher.go",
"mock_sidecars.go",
"mock_stater.go",
],
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/testutil",

View File

@@ -0,0 +1,44 @@
package testutil
import ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
// CreateDataColumnSidecar generates a filled dummy data column sidecar
func CreateDataColumnSidecar(index uint64, data []byte) *ethpb.DataColumnSidecar {
return &ethpb.DataColumnSidecar{
Index: index,
Column: [][]byte{data},
SignedBlockHeader: &ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
Slot: 1,
ProposerIndex: 1,
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
},
Signature: make([]byte, 96),
},
KzgCommitments: [][]byte{make([]byte, 48)},
KzgProofs: [][]byte{make([]byte, 48)},
KzgCommitmentsInclusionProof: [][]byte{make([]byte, 32)},
}
}
// CreateBlobSidecar generates a filled dummy blob sidecar
func CreateBlobSidecar(index uint64, blob []byte) *ethpb.BlobSidecar {
return &ethpb.BlobSidecar{
Index: index,
Blob: blob,
SignedBlockHeader: &ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
Slot: 1,
ProposerIndex: 1,
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
},
Signature: make([]byte, 96),
},
KzgCommitment: make([]byte, 48),
KzgProof: make([]byte, 48),
}
}

View File

@@ -155,6 +155,7 @@ go_library(
"@com_github_trailofbits_go_mutexasserts//:go_default_library",
"@io_opentelemetry_go_otel_trace//:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
"@org_golang_x_sync//singleflight:go_default_library",
],
)

View File

@@ -29,13 +29,14 @@ import (
// DataColumnSidecarsParams stores the common parameters needed to
// fetch data column sidecars from peers.
type DataColumnSidecarsParams struct {
Ctx context.Context // Context
Tor blockchain.TemporalOracle // Temporal oracle, useful to get the current slot
P2P prysmP2P.P2P // P2P network interface
RateLimiter *leakybucket.Collector // Rate limiter for outgoing requests
CtxMap ContextByteVersions // Context map, useful to know if a message is mapped to the correct fork
Storage filesystem.DataColumnStorageReader // Data columns storage
NewVerifier verification.NewDataColumnsVerifier // Data columns verifier to check to conformity of incoming data column sidecars
Ctx context.Context // Context
Tor blockchain.TemporalOracle // Temporal oracle, useful to get the current slot
P2P prysmP2P.P2P // P2P network interface
RateLimiter *leakybucket.Collector // Rate limiter for outgoing requests
CtxMap ContextByteVersions // Context map, useful to know if a message is mapped to the correct fork
Storage filesystem.DataColumnStorageReader // Data columns storage
NewVerifier verification.NewDataColumnsVerifier // Data columns verifier to check the conformity of incoming data column sidecars
DownscorePeerOnRPCFault bool // Downscore a peer if it commits an RPC fault. Not responding with any sidecars is considered a fault.
}
// FetchDataColumnSidecars retrieves data column sidecars from storage and peers for the given

View File

@@ -25,15 +25,14 @@ const (
// all data column sidecars. Then, it saves missing sidecars to the store.
// After a delay, it broadcasts in the background the reconstructed
// sidecars that were not seen via gossip.
func (s *Service) reconstructSaveBroadcastDataColumnSidecars(
ctx context.Context,
slot primitives.Slot,
proposerIndex primitives.ValidatorIndex,
root [fieldparams.RootLength]byte,
) error {
func (s *Service) reconstructSaveBroadcastDataColumnSidecars(ctx context.Context, sidecar blocks.VerifiedRODataColumn) error {
startTime := time.Now()
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
root := sidecar.BlockRoot()
slot := sidecar.Slot()
proposerIndex := sidecar.ProposerIndex()
// Lock to prevent concurrent reconstructions.
s.reconstructionLock.Lock()
defer s.reconstructionLock.Unlock()
@@ -198,7 +197,7 @@ func (s *Service) broadcastMissingDataColumnSidecars(
subnet := peerdas.ComputeSubnetForDataColumnSidecar(verifiedRODataColumn.Index)
// Broadcast the missing data column.
if err := s.cfg.p2p.BroadcastDataColumnSidecar(root, subnet, verifiedRODataColumn.DataColumnSidecar); err != nil {
if err := s.cfg.p2p.BroadcastDataColumnSidecar(subnet, verifiedRODataColumn); err != nil {
log.WithError(err).Error("Broadcast data column")
}
@@ -220,3 +219,203 @@ func (s *Service) broadcastMissingDataColumnSidecars(
return nil
}
// processDataColumnSidecarsFromExecutionFromBlock retrieves (if available) data column sidecar data from the execution client,
// builds the corresponding sidecars, saves them to storage, and broadcasts them over P2P if necessary.
func (s *Service) processDataColumnSidecarsFromExecutionFromBlock(ctx context.Context, roBlock blocks.ROBlock) error {
const delay = 250 * time.Millisecond
secondsPerHalfSlot := time.Duration(params.BeaconConfig().SecondsPerSlot/2) * time.Second
numberOfColumns := params.BeaconConfig().NumberOfColumns
root := roBlock.Root()
block := roBlock.Block()
slot := block.Slot()
proposerIndex := block.ProposerIndex()
commitments, err := block.Body().BlobKzgCommitments()
if err != nil {
return errors.Wrap(err, "blob kzg commitments")
}
ctx, cancel := context.WithTimeout(ctx, secondsPerHalfSlot)
defer cancel()
for {
// Check if some data column sidecars to custody are missing.
missingIndices, err := s.missingDataColumnSidecars(root, commitments)
if err != nil {
return errors.Wrap(err, "missing data column sidecars")
}
// Return early if all needed data column sidecars are already available in storage.
if len(missingIndices) == 0 {
return nil
}
// Try to reconstruct data column sidecars from the execution client.
sidecars, err := s.cfg.executionReconstructor.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromBlock(roBlock))
if err != nil {
return errors.Wrap(err, "reconstruct data column sidecars")
}
// No sidecars were retrieved from the EL; retry later.
sidecarCount := uint64(len(sidecars))
if sidecarCount == 0 {
if ctx.Err() != nil {
return ctx.Err()
}
time.Sleep(delay)
continue
}
// Boundary check.
if sidecarCount != numberOfColumns {
return errors.Errorf("reconstruct data column sidecars returned %d sidecars, expected %d - should never happen", sidecarCount, numberOfColumns)
}
// Broadcast and save data column sidecars to custody but not yet received.
for index := range missingIndices {
if index >= sidecarCount {
return errors.Errorf("data column index %d >= sidecar count %d - should never happen", index, sidecarCount)
}
// This sidecar has been received in the meantime, skip it.
if s.hasSeenDataColumnIndex(slot, proposerIndex, index) {
continue
}
sidecar := sidecars[index]
// Compute the subnet from the column index, matching the gossip topic mapping used elsewhere.
subnet := peerdas.ComputeSubnetForDataColumnSidecar(sidecar.Index)
if err := s.cfg.p2p.BroadcastDataColumnSidecar(subnet, sidecar); err != nil {
return errors.Wrap(err, "broadcast data column sidecar")
}
if err := s.receiveDataColumnSidecar(ctx, sidecar); err != nil {
return errors.Wrap(err, "receive data column sidecar")
}
}
return nil
}
}
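Reviewer note: both processDataColumnSidecarsFromExecution* functions share the same poll-until-timeout skeleton. Stripped to its bones, and with illustrative names, it looks like this:

package example

import (
	"context"
	"time"
)

// pollUntil sketches the retry loop used above: call fn every delay until it
// reports success, returns an error, or the half-slot deadline expires.
func pollUntil(ctx context.Context, timeout, delay time.Duration, fn func(context.Context) (done bool, err error)) error {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	for {
		done, err := fn(ctx)
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		if ctx.Err() != nil {
			return ctx.Err()
		}
		time.Sleep(delay)
	}
}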
func (s *Service) processDataColumnSidecarsFromExecutionFromColumnSidecar(ctx context.Context, sidecar blocks.VerifiedRODataColumn) error {
const delay = 250 * time.Millisecond
secondsPerHalfSlot := time.Duration(params.BeaconConfig().SecondsPerSlot/2) * time.Second
numberOfColumns := params.BeaconConfig().NumberOfColumns
commitments := sidecar.KzgCommitments
root := sidecar.BlockRoot()
ctx, cancel := context.WithTimeout(ctx, secondsPerHalfSlot)
defer cancel()
for {
// Check if some data column sidecars to custody are missing.
missingIndices, err := s.missingDataColumnSidecars(root, commitments)
if err != nil {
return errors.Wrap(err, "missing data column sidecars")
}
// Return early if all needed data column sidecars are already available in storage.
if len(missingIndices) == 0 {
return nil
}
// Try to reconstruct data column sidecars from the execution client.
sidecars, err := s.cfg.executionReconstructor.ConstructDataColumnSidecars(ctx, peerdas.PopulateFromSidecar(sidecar.RODataColumn))
if err != nil {
return errors.Wrap(err, "reconstruct data column sidecars")
}
// No sidecars were retrieved from the EL; retry later.
sidecarCount := uint64(len(sidecars))
if sidecarCount == 0 {
if ctx.Err() != nil {
return ctx.Err()
}
time.Sleep(delay)
continue
}
// Boundary check.
if sidecarCount != numberOfColumns {
return errors.Errorf("reconstruct data column sidecars returned %d sidecars, expected %d - should never happen", sidecarCount, numberOfColumns)
}
blockSlot, proposerIndex := sidecar.Slot(), sidecar.ProposerIndex()
// Broadcast and save data column sidecars to custody but not yet received.
for index := range missingIndices {
log := log.WithField("columnIndex", index)
if index >= sidecarCount {
return errors.Errorf("data column index %d >= sidecar count %d - should never happen", index, sidecarCount)
}
// This sidecar has been received in the meantime, skip it.
if s.hasSeenDataColumnIndex(blockSlot, proposerIndex, index) {
continue
}
sidecar := sidecars[index]
// Compute the subnet from the column index, matching the gossip topic mapping used elsewhere.
subnet := peerdas.ComputeSubnetForDataColumnSidecar(sidecar.Index)
if err := s.cfg.p2p.BroadcastDataColumnSidecar(subnet, sidecar); err != nil {
log.WithError(err).Error("Failed to broadcast data column")
}
if err := s.receiveDataColumnSidecar(ctx, sidecar); err != nil {
log.WithError(err).Error("Failed to receive data column")
}
}
return nil
}
}
// missingDataColumnSidecars returns the data column indices we should custody and that are missing in our storage
// for the given block root and blob KZG commitments.
func (s *Service) missingDataColumnSidecars(root [fieldparams.RootLength]byte, commitments [][]byte) (map[uint64]bool, error) {
// Return early if there are no commitments.
if len(commitments) == 0 {
return nil, nil
}
// Retrieve our node ID.
nodeID := s.cfg.p2p.NodeID()
// Get the custody group sampling size for the node.
custodyGroupCount, err := s.cfg.p2p.CustodyGroupCount()
if err != nil {
return nil, errors.Wrap(err, "custody group count")
}
// Compute the sampling size.
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/das-core.md#custody-sampling
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
samplingSize := max(samplesPerSlot, custodyGroupCount)
// Get the peer info for the node.
peerInfo, _, err := peerdas.Info(nodeID, samplingSize)
if err != nil {
return nil, errors.Wrap(err, "peer info")
}
// Get the indices of the data column sidecars we have in the store.
storedIndices := s.cfg.dataColumnStorage.Summary(root).Stored()
// List indices we should custody and that are missing in our storage.
missingIndices := make(map[uint64]bool, samplingSize)
for index := range peerInfo.CustodyColumns {
if !storedIndices[index] {
missingIndices[index] = true
}
}
return missingIndices, nil
}
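Reviewer note: a worked example of the sampling-size rule above. Per the linked das-core custody-sampling spec, a node samples at least samplesPerSlot custody groups even when its custody requirement is lower (the helper below is illustrative):

package example

import "fmt"

// samplingSize mirrors the rule above: sample at least samplesPerSlot custody
// groups, more if the node's custody group count is higher.
func samplingSize(samplesPerSlot, custodyGroupCount uint64) uint64 {
	return max(samplesPerSlot, custodyGroupCount)
}

func main() {
	fmt.Println(samplingSize(8, 4))   // 8: the floor is samplesPerSlot
	fmt.Println(samplingSize(8, 128)) // 128: a supernode custodies everything
}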

View File

@@ -4,10 +4,14 @@ import (
"testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
chainMock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
mockChain "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
mockExecution "github.com/OffchainLabs/prysm/v6/beacon-chain/execution/testing"
mockp2p "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/testing/require"
@@ -27,9 +31,6 @@ func TestReconstructDataColumns(t *testing.T) {
roBlock, _, verifiedRoDataColumns := util.GenerateTestFuluBlockWithSidecars(t, blobCount)
require.Equal(t, numberOfColumns, uint64(len(verifiedRoDataColumns)))
root, block := roBlock.Root(), roBlock.Block()
slot, proposerIndex := block.Slot(), block.ProposerIndex()
minimumCount := peerdas.MinimumColumnCountToReconstruct()
t.Run("not enough stored sidecars", func(t *testing.T) {
@@ -38,7 +39,7 @@ func TestReconstructDataColumns(t *testing.T) {
require.NoError(t, err)
service := NewService(ctx, WithP2P(p2ptest.NewTestP2P(t)), WithDataColumnStorage(storage))
err = service.reconstructSaveBroadcastDataColumnSidecars(ctx, slot, proposerIndex, root)
err = service.reconstructSaveBroadcastDataColumnSidecars(ctx, verifiedRoDataColumns[0])
require.NoError(t, err)
})
@@ -48,7 +49,7 @@ func TestReconstructDataColumns(t *testing.T) {
require.NoError(t, err)
service := NewService(ctx, WithP2P(p2ptest.NewTestP2P(t)), WithDataColumnStorage(storage))
err = service.reconstructSaveBroadcastDataColumnSidecars(ctx, slot, proposerIndex, root)
err = service.reconstructSaveBroadcastDataColumnSidecars(ctx, verifiedRoDataColumns[0])
require.NoError(t, err)
})
@@ -72,7 +73,7 @@ func TestReconstructDataColumns(t *testing.T) {
WithChainService(&mockChain.ChainService{}),
)
err = service.reconstructSaveBroadcastDataColumnSidecars(ctx, slot, proposerIndex, root)
err = service.reconstructSaveBroadcastDataColumnSidecars(ctx, verifiedRoDataColumns[0])
require.NoError(t, err)
expected := make(map[uint64]bool, minimumCount+cgc)
@@ -85,7 +86,7 @@ func TestReconstructDataColumns(t *testing.T) {
expected[i] = true
}
summary := storage.Summary(root)
summary := storage.Summary(roBlock.Root())
actual := summary.Stored()
require.Equal(t, len(expected), len(actual))
@@ -175,3 +176,111 @@ func TestBroadcastMissingDataColumnSidecars(t *testing.T) {
require.Equal(t, true, p2p.BroadcastCalled.Load())
})
}
func TestMissingDataColumnSidecars(t *testing.T) {
ctx := t.Context()
// Start the trusted setup.
err := kzg.Start()
require.NoError(t, err)
t.Run("no commitments", func(t *testing.T) {
service := NewService(ctx, WithP2P(p2ptest.NewTestP2P(t)))
root := [fieldparams.RootLength]byte{0x01, 0x02, 0x03} // Some test root
commitments := [][]byte{}
missing, err := service.missingDataColumnSidecars(root, commitments)
require.NoError(t, err)
require.Equal(t, 0, len(missing))
})
t.Run("some sidecars missing", func(t *testing.T) {
const (
blobCount = 2
cgc = 8 // custody group count
)
// Generate test data
roBlock, _, verifiedRoDataColumns := util.GenerateTestFuluBlockWithSidecars(t, blobCount)
root := roBlock.Root()
// Create commitments from the block
commitments, err := roBlock.Block().Body().BlobKzgCommitments()
require.NoError(t, err)
// Setup storage with only some of the sidecars
storage := filesystem.NewEphemeralDataColumnStorage(t)
p2p := p2ptest.NewTestP2P(t)
service := NewService(ctx, WithP2P(p2p), WithDataColumnStorage(storage))
// Update custody info to set custody group count
_, _, err = service.cfg.p2p.UpdateCustodyInfo(0, cgc)
require.NoError(t, err)
// Save only some of the sidecars that the node should custody
// The node should custody indices: [1, 17, 19, 42, 75, 87, 102, 117]
// Save only indices 1, 42, and 102
storedIndices := []uint64{1, 42, 102}
toSave := make([]blocks.VerifiedRODataColumn, 0, len(storedIndices))
for _, index := range storedIndices {
toSave = append(toSave, verifiedRoDataColumns[index])
}
err = storage.Save(toSave)
require.NoError(t, err)
// Test function
missing, err := service.missingDataColumnSidecars(root, commitments)
require.NoError(t, err)
// Should be missing indices: 17, 19, 75, 87, 117
expectedMissing := map[uint64]bool{17: true, 19: true, 75: true, 87: true, 117: true}
require.Equal(t, len(expectedMissing), len(missing))
for index := range expectedMissing {
require.Equal(t, true, missing[index], "Index %d should be missing", index)
}
// Should NOT be missing stored indices
for _, storedIndex := range storedIndices {
require.Equal(t, false, missing[storedIndex], "Index %d should not be missing", storedIndex)
}
})
}
func TestProcessDataColumnSidecarsFromExecutionFromColumnSidecar(t *testing.T) {
const (
earliestAvailableSlot = 0
custodyGroupCount = 8
blobCount = 2
)
// Start the trusted setup.
err := kzg.Start()
require.NoError(t, err)
_, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, blobCount)
p2p := mockp2p.NewTestP2P(t)
_, _, err = p2p.UpdateCustodyInfo(0, custodyGroupCount)
require.NoError(t, err)
executionReconstructor := &mockExecution.EngineClient{
DataColumnSidecars: verifiedRoSidecars,
}
chain := new(chainMock.ChainService)
service := Service{
cfg: &config{
p2p: p2p,
dataColumnStorage: filesystem.NewEphemeralDataColumnStorage(t),
executionReconstructor: executionReconstructor,
chain: chain,
operationNotifier: new(chainMock.MockOperationNotifier),
},
seenDataColumnCache: newSlotAwareCache(1),
}
err = service.processDataColumnSidecarsFromExecutionFromColumnSidecar(t.Context(), verifiedRoSidecars[0])
require.NoError(t, err)
require.Equal(t, custodyGroupCount, len(chain.DataColumns))
}


@@ -34,6 +34,7 @@ go_library(
"//beacon-chain/verification:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
@@ -108,7 +109,9 @@ go_test(
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
"@com_github_libp2p_go_libp2p//:go_default_library",
"@com_github_libp2p_go_libp2p//core:go_default_library",
"@com_github_libp2p_go_libp2p//core/crypto:go_default_library",
"@com_github_libp2p_go_libp2p//core/network:go_default_library",
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_paulbellamy_ratecounter//:go_default_library",


@@ -22,6 +22,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/crypto/rand"
@@ -209,6 +210,8 @@ func (s *Service) Start() {
// fetchOriginSidecars fetches origin sidecars
func (s *Service) fetchOriginSidecars(peers []peer.ID) error {
const delay = 10 * time.Second // The delay between each attempt to fetch origin data column sidecars
blockRoot, err := s.cfg.DB.OriginCheckpointBlockRoot(s.ctx)
if errors.Is(err, db.ErrNotFoundOriginBlockRoot) {
return nil
@@ -234,7 +237,7 @@ func (s *Service) fetchOriginSidecars(peers []peer.ID) error {
blockVersion := roBlock.Version()
if blockVersion >= version.Fulu {
if err := s.fetchOriginColumns(peers, roBlock); err != nil {
if err := s.fetchOriginColumns(roBlock, delay); err != nil {
return errors.Wrap(err, "fetch origin columns")
}
return nil
@@ -391,7 +394,11 @@ func (s *Service) fetchOriginBlobs(pids []peer.ID, rob blocks.ROBlock) error {
return fmt.Errorf("no connected peer able to provide blobs for checkpoint sync block %#x", r)
}
func (s *Service) fetchOriginColumns(pids []peer.ID, roBlock blocks.ROBlock) error {
func (s *Service) fetchOriginColumns(roBlock blocks.ROBlock, delay time.Duration) error {
const (
errorMessage = "Failed to fetch origin data column sidecars"
warningIteration = 10
)
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
// Return early if the origin block has no blob commitments.
@@ -420,21 +427,40 @@ func (s *Service) fetchOriginColumns(pids []peer.ID, roBlock blocks.ROBlock) err
root := roBlock.Root()
params := sync.DataColumnSidecarsParams{
Ctx: s.ctx,
Tor: s.clock,
P2P: s.cfg.P2P,
CtxMap: s.ctxMap,
Storage: s.cfg.DataColumnStorage,
NewVerifier: s.newDataColumnsVerifier,
Ctx: s.ctx,
Tor: s.clock,
P2P: s.cfg.P2P,
CtxMap: s.ctxMap,
Storage: s.cfg.DataColumnStorage,
NewVerifier: s.newDataColumnsVerifier,
DownscorePeerOnRPCFault: true,
}
verfifiedRoDataColumnsByRoot, err := sync.FetchDataColumnSidecars(params, []blocks.ROBlock{roBlock}, info.CustodyColumns)
if err != nil {
return errors.Wrap(err, "fetch data column sidecars")
var verifiedRoDataColumnsByRoot map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn
for attempt := uint64(0); ; attempt++ {
verifiedRoDataColumnsByRoot, err = sync.FetchDataColumnSidecars(params, []blocks.ROBlock{roBlock}, info.CustodyColumns)
if err == nil {
break
}
log := log.WithError(err).WithFields(logrus.Fields{
"attempt": attempt,
"delay": delay,
})
if attempt%warningIteration == 0 && attempt > 0 {
log.Warning(errorMessage)
time.Sleep(delay)
continue
}
log.Debug(errorMessage)
time.Sleep(delay)
}
// Save origin data columns to disk.
verifiedRoDataColumnsSidecars, ok := verfifiedRoDataColumnsByRoot[root]
verifiedRoDataColumnsSidecars, ok := verifiedRoDataColumnsByRoot[root]
if !ok {
return fmt.Errorf("cannot extract origins data column sidecars for block root %#x - should never happen", root)
}
@@ -447,7 +473,7 @@ func (s *Service) fetchOriginColumns(pids []peer.ID, roBlock blocks.ROBlock) err
"blockRoot": fmt.Sprintf("%#x", roBlock.Root()),
"blobCount": len(commitments),
"columnCount": len(verifiedRoDataColumnsSidecars),
}).Info("Successfully downloaded data columns for checkpoint sync block")
}).Info("Successfully downloaded data column sidecars for checkpoint sync block")
return nil
}
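The retry loop above uses a log-escalation idiom: every failed attempt is debug-logged, and every warningIteration-th attempt is promoted to a warning so a persistent failure stays visible without flooding the logs. A standalone sketch of the idiom, with fetch as a hypothetical stand-in:

package sketch

import (
    "log"
    "time"
)

// fetchWithEscalatingLogs retries fetch forever, debug-logging each failure
// and promoting every warningIteration-th attempt to a warning, mirroring
// the loop above.
func fetchWithEscalatingLogs(fetch func() error, delay time.Duration) {
    const warningIteration = 10
    for attempt := 0; ; attempt++ {
        if err := fetch(); err == nil {
            return
        }
        if attempt > 0 && attempt%warningIteration == 0 {
            log.Printf("WARN: fetch failed (attempt %d), retrying in %s", attempt, delay)
        } else {
            log.Printf("DEBUG: fetch failed (attempt %d), retrying in %s", attempt, delay)
        }
        time.Sleep(delay)
    }
}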


@@ -2,6 +2,7 @@ package initialsync
import (
"context"
"fmt"
"sync"
"testing"
"time"
@@ -13,8 +14,12 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/kv"
dbtest "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
testp2p "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
prysmSync "github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
@@ -22,10 +27,14 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/libp2p/go-libp2p"
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/paulbellamy/ratecounter"
logTest "github.com/sirupsen/logrus/hooks/test"
@@ -663,3 +672,147 @@ func TestFetchOriginSidecars(t *testing.T) {
}
})
}
func TestFetchOriginColumns(t *testing.T) {
// Load the trusted setup.
err := kzg.Start()
require.NoError(t, err)
// Setup test environment
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.FuluForkEpoch = 0
params.OverrideBeaconConfig(cfg)
const (
delay = 0
blobCount = 1
)
t.Run("block has no commitments", func(t *testing.T) {
service := new(Service)
// Create a block with no blob commitments
block := util.NewBeaconBlockFulu()
signedBlock, err := blocks.NewSignedBeaconBlock(block)
require.NoError(t, err)
roBlock, err := blocks.NewROBlock(signedBlock)
require.NoError(t, err)
err = service.fetchOriginColumns(roBlock, delay)
require.NoError(t, err)
})
t.Run("FetchDataColumnSidecars succeeds immediately", func(t *testing.T) {
storage := filesystem.NewEphemeralDataColumnStorage(t)
p2p := p2ptest.NewTestP2P(t)
service := &Service{
cfg: &Config{
P2P: p2p,
DataColumnStorage: storage,
},
}
// Create a block with blob commitments and sidecars
roBlock, _, verifiedSidecars := util.GenerateTestFuluBlockWithSidecars(t, blobCount)
// Store all sidecars in advance so FetchDataColumnSidecars succeeds immediately
err := storage.Save(verifiedSidecars)
require.NoError(t, err)
err = service.fetchOriginColumns(roBlock, delay)
require.NoError(t, err)
})
t.Run("first attempt to FetchDataColumnSidecars fails but second attempt succeeds", func(t *testing.T) {
numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups
storage := filesystem.NewEphemeralDataColumnStorage(t)
// With this private key and a custody group count of 4, the custody columns are: 31, 81, 97, 105.
privateKeyBytes := [32]byte{1}
privateKey, err := crypto.UnmarshalSecp256k1PrivateKey(privateKeyBytes[:])
require.NoError(t, err)
protocol := fmt.Sprintf("%s/ssz_snappy", p2p.RPCDataColumnSidecarsByRangeTopicV1)
p2p, other := testp2p.NewTestP2P(t), testp2p.NewTestP2P(t, libp2p.Identity(privateKey))
p2p.Peers().SetConnectionState(other.PeerID(), peers.Connected)
p2p.Connect(other)
p2p.Peers().SetChainState(other.PeerID(), &ethpb.StatusV2{
HeadSlot: 5,
})
other.ENR().Set(peerdas.Cgc(numberOfCustodyGroups))
p2p.Peers().UpdateENR(other.ENR(), other.PeerID())
expectedRequest := &ethpb.DataColumnSidecarsByRangeRequest{
StartSlot: 0,
Count: 1,
Columns: []uint64{1, 17, 19, 42, 75, 87, 102, 117},
}
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
gs := startup.NewClockSynchronizer()
err = gs.SetClock(startup.NewClock(time.Unix(4113849600, 0), [fieldparams.RootLength]byte{}))
require.NoError(t, err)
waiter := verification.NewInitializerWaiter(gs, nil, nil)
initializer, err := waiter.WaitForInitializer(t.Context())
require.NoError(t, err)
newDataColumnsVerifier := newDataColumnsVerifierFromInitializer(initializer)
// Create a block with blob commitments and sidecars
roBlock, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, blobCount)
ctxMap, err := prysmSync.ContextByteVersionsForValRoot(params.BeaconConfig().GenesisValidatorsRoot)
require.NoError(t, err)
service := &Service{
ctx: t.Context(),
clock: clock,
newDataColumnsVerifier: newDataColumnsVerifier,
cfg: &Config{
P2P: p2p,
DataColumnStorage: storage,
},
ctxMap: ctxMap,
}
// Do not respond with any sidecars on the first attempt, and respond with everything requested on the second.
firstAttempt := true
other.SetStreamHandler(protocol, func(stream network.Stream) {
actualRequest := new(ethpb.DataColumnSidecarsByRangeRequest)
err := other.Encoding().DecodeWithMaxLength(stream, actualRequest)
assert.NoError(t, err)
assert.DeepEqual(t, expectedRequest, actualRequest)
if firstAttempt {
firstAttempt = false
err = stream.CloseWrite()
assert.NoError(t, err)
return
}
for _, column := range actualRequest.Columns {
err = prysmSync.WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), verifiedRoSidecars[column].DataColumnSidecar)
assert.NoError(t, err)
}
err = stream.CloseWrite()
assert.NoError(t, err)
})
err = service.fetchOriginColumns(roBlock, delay)
require.NoError(t, err)
// Check all corresponding sidecars are saved in the store.
summary := storage.Summary(roBlock.Root())
for _, index := range expectedRequest.Columns {
require.Equal(t, true, summary.HasIndex(index))
}
})
}


@@ -453,6 +453,10 @@ func SendDataColumnSidecarsByRangeRequest(
// Send the request.
stream, err := p.P2P.Send(p.Ctx, request, topic, pid)
if err != nil {
if p.DownscorePeerOnRPCFault {
downscorePeer(p.P2P, pid, "cannotSendDataColumnSidecarsByRangeRequest")
}
return nil, errors.Wrap(err, "p2p send")
}
defer closeStream(stream, log)
@@ -467,6 +471,10 @@ func SendDataColumnSidecarsByRangeRequest(
validatorSlotWithinBounds, err := isSidecarSlotWithinBounds(request)
if err != nil {
if p.DownscorePeerOnRPCFault {
downscorePeer(p.P2P, pid, "servedSidecarSlotOutOfBounds")
}
return nil, errors.Wrap(err, "is sidecar slot within bounds")
}
@@ -476,9 +484,17 @@ func SendDataColumnSidecarsByRangeRequest(
isSidecarIndexRequested(request),
)
if errors.Is(err, io.EOF) {
if p.DownscorePeerOnRPCFault && len(roDataColumns) == 0 {
downscorePeer(p.P2P, pid, "noReturnedSidecar")
}
return roDataColumns, nil
}
if err != nil {
if p.DownscorePeerOnRPCFault {
downscorePeer(p.P2P, pid, "readChunkedDataColumnSidecarError")
}
return nil, errors.Wrap(err, "read chunked data column sidecar")
}
@@ -491,6 +507,10 @@ func SendDataColumnSidecarsByRangeRequest(
// All requested sidecars were delivered by the peer. Expecting EOF.
if _, err := readChunkedDataColumnSidecar(stream, p.P2P, p.CtxMap); !errors.Is(err, io.EOF) {
if p.DownscorePeerOnRPCFault {
downscorePeer(p.P2P, pid, "tooManyResponseDataColumnSidecars")
}
return nil, errors.Wrapf(errMaxResponseDataColumnSidecarsExceeded, "requestedCount=%d", totalCount)
}
@@ -528,7 +548,8 @@ func isSidecarIndexRequested(request *ethpb.DataColumnSidecarsByRangeRequest) Da
return func(sidecar blocks.RODataColumn) error {
columnIndex := sidecar.Index
if !requestedIndices[columnIndex] {
return errors.Errorf("data column sidecar index %d not found in requested indices", columnIndex)
requested := sortedSliceFromMap(requestedIndices)
return errors.Errorf("data column sidecar index %d returned by the peer but not found in requested indices %v", columnIndex, requested)
}
return nil
@@ -566,6 +587,10 @@ func SendDataColumnSidecarsByRootRequest(p DataColumnSidecarsParams, peer goPeer
// Send the request to the peer.
stream, err := p.P2P.Send(p.Ctx, identifiers, topic, peer)
if err != nil {
if p.DownscorePeerOnRPCFault {
downscorePeer(p.P2P, peer, "cannotSendDataColumnSidecarsByRootRequest")
}
return nil, errors.Wrap(err, "p2p api send")
}
defer closeStream(stream, log)
@@ -577,9 +602,17 @@ func SendDataColumnSidecarsByRootRequest(p DataColumnSidecarsParams, peer goPeer
for range count {
roDataColumn, err := readChunkedDataColumnSidecar(stream, p.P2P, p.CtxMap, isSidecarIndexRootRequested(identifiers))
if errors.Is(err, io.EOF) {
if p.DownscorePeerOnRPCFault && len(roDataColumns) == 0 {
downscorePeer(p.P2P, peer, "noReturnedSidecar")
}
return roDataColumns, nil
}
if err != nil {
if p.DownscorePeerOnRPCFault {
downscorePeer(p.P2P, peer, "readChunkedDataColumnSidecarError")
}
return nil, errors.Wrap(err, "read chunked data column sidecar")
}
@@ -592,6 +625,10 @@ func SendDataColumnSidecarsByRootRequest(p DataColumnSidecarsParams, peer goPeer
// All requested sidecars were delivered by the peer. Expecting EOF.
if _, err := readChunkedDataColumnSidecar(stream, p.P2P, p.CtxMap); !errors.Is(err, io.EOF) {
if p.DownscorePeerOnRPCFault {
downscorePeer(p.P2P, peer, "tooManyResponseDataColumnSidecars")
}
return nil, errors.Wrapf(errMaxResponseDataColumnSidecarsExceeded, "requestedCount=%d", count)
}
@@ -689,3 +726,13 @@ func readChunkedDataColumnSidecar(
return &roDataColumn, nil
}
func downscorePeer(p2p p2p.P2P, peerID peer.ID, reason string, fields ...logrus.Fields) {
log := log
for _, field := range fields {
log = log.WithFields(field)
}
newScore := p2p.Peers().Scorers().BadResponsesScorer().Increment(peerID)
log.WithFields(logrus.Fields{"peerID": peerID, "reason": reason, "newScore": newScore}).Debug("Downscore peer")
}
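The improved error message in the hunk above relies on a sortedSliceFromMap helper whose definition is elided from this diff. A plausible shape, assuming the map[uint64]bool index sets used throughout this file; this is a guess at the helper, not its actual source:

package sketch

import "slices"

// sortedSliceFromMap returns the keys of an index set in ascending order so
// the "requested indices" in the error message print deterministically.
// (The slices package needs Go 1.21+.)
func sortedSliceFromMap(m map[uint64]bool) []uint64 {
    out := make([]uint64, 0, len(m))
    for k := range m {
        out = append(out, k)
    }
    slices.Sort(out)
    return out
}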


@@ -50,6 +50,7 @@ import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/trailofbits/go-mutexasserts"
"golang.org/x/sync/singleflight"
)
var _ runtime.Service = (*Service)(nil)
@@ -169,6 +170,7 @@ type Service struct {
verifierWaiter *verification.InitializerWaiter
newBlobVerifier verification.NewBlobVerifier
newColumnsVerifier verification.NewDataColumnsVerifier
columnSidecarsExecSingleFlight singleflight.Group
availableBlocker coverage.AvailableBlocker
reconstructionLock sync.Mutex
reconstructionRandGen *rand.Rand


@@ -7,7 +7,6 @@ import (
"path"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition/interop"
"github.com/OffchainLabs/prysm/v6/config/features"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
@@ -15,7 +14,7 @@ import (
"github.com/OffchainLabs/prysm/v6/io/file"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/sirupsen/logrus"
"github.com/pkg/errors"
"google.golang.org/protobuf/proto"
)
@@ -37,7 +36,11 @@ func (s *Service) beaconBlockSubscriber(ctx context.Context, msg proto.Message)
return err
}
go s.processSidecarsFromExecution(ctx, signed)
roBlock, err := blocks.NewROBlockWithRoot(signed, root)
if err != nil {
return errors.Wrap(err, "new roblock with root")
}
go s.processSidecarsFromExecutionFromBlock(ctx, roBlock)
if err := s.cfg.chain.ReceiveBlock(ctx, signed, root, nil); err != nil {
if blockchain.IsInvalidBlock(err) {
@@ -61,11 +64,23 @@ func (s *Service) beaconBlockSubscriber(ctx context.Context, msg proto.Message)
return err
}
// processSidecarsFromExecution retrieves (if available) sidecars data from the execution client,
// processSidecarsFromExecutionFromBlock retrieves (if available) sidecar data from the execution client,
// builds the corresponding sidecars, saves them to storage, and broadcasts them over P2P if necessary.
func (s *Service) processSidecarsFromExecution(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) {
func (s *Service) processSidecarsFromExecutionFromBlock(ctx context.Context, block blocks.ROBlock) {
if block.Version() >= version.Fulu {
s.processDataColumnSidecarsFromExecution(ctx, block)
key := fmt.Sprintf("%#x", block.Root())
if _, err, _ := s.columnSidecarsExecSingleFlight.Do(key, func() (interface{}, error) {
if err := s.processDataColumnSidecarsFromExecutionFromBlock(ctx, block); err != nil {
return nil, err
}
return nil, nil
}); err != nil {
log.WithError(err).Error("Failed to process data column sidecars from execution")
return
}
return
}
@@ -75,92 +90,6 @@ func (s *Service) processSidecarsFromExecution(ctx context.Context, block interf
}
}
// processDataColumnSidecarsFromExecution retrieves (if available) data column sidecar data from the execution client,
// builds the corresponding sidecars, saves them to storage, and broadcasts them over P2P if necessary.
func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, roSignedBlock interfaces.ReadOnlySignedBeaconBlock) {
block := roSignedBlock.Block()
log := log.WithFields(logrus.Fields{
"slot": block.Slot(),
"proposerIndex": block.ProposerIndex(),
})
kzgCommitments, err := block.Body().BlobKzgCommitments()
if err != nil {
log.WithError(err).Error("Failed to read commitments from block")
return
}
if len(kzgCommitments) == 0 {
// No blobs to reconstruct.
return
}
blockRoot, err := block.HashTreeRoot()
if err != nil {
log.WithError(err).Error("Failed to calculate block root")
return
}
log = log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot))
if s.cfg.dataColumnStorage == nil {
log.Warning("Data column storage is not enabled, skip saving data column, but continue to reconstruct and broadcast data column")
}
// When this function is called, it's from the time when the block is received, so in almost all situations we need to get the data column from EL instead of the blob storage.
sidecars, err := s.cfg.executionReconstructor.ReconstructDataColumnSidecars(ctx, roSignedBlock, blockRoot)
if err != nil {
log.WithError(err).Debug("Cannot reconstruct data column sidecars after receiving the block")
return
}
// Return early if no blobs are retrieved from the EL.
if len(sidecars) == 0 {
return
}
nodeID := s.cfg.p2p.NodeID()
custodyGroupCount, err := s.cfg.p2p.CustodyGroupCount()
if err != nil {
log.WithError(err).Error("Failed to get custody group count")
return
}
info, _, err := peerdas.Info(nodeID, custodyGroupCount)
if err != nil {
log.WithError(err).Error("Failed to get peer info")
return
}
blockSlot := block.Slot()
proposerIndex := block.ProposerIndex()
// Broadcast and save the data column sidecars we should custody but have not yet received.
sidecarCount := uint64(len(sidecars))
for columnIndex := range info.CustodyColumns {
log := log.WithField("columnIndex", columnIndex)
if columnIndex >= sidecarCount {
log.Error("Column custody index out of range - should never happen")
continue
}
if s.hasSeenDataColumnIndex(blockSlot, proposerIndex, columnIndex) {
continue
}
sidecar := sidecars[columnIndex]
if err := s.cfg.p2p.BroadcastDataColumnSidecar(blockRoot, sidecar.Index, sidecar.DataColumnSidecar); err != nil {
log.WithError(err).Error("Failed to broadcast data column")
}
if err := s.receiveDataColumnSidecar(ctx, sidecar); err != nil {
log.WithError(err).Error("Failed to receive data column")
}
}
}
// processBlobSidecarsFromExecution retrieves (if available) blob sidecar data from the execution client,
// builds the corresponding sidecars, saves them to storage, and broadcasts them over P2P if necessary.
func (s *Service) processBlobSidecarsFromExecution(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) {


@@ -135,7 +135,7 @@ func TestService_BeaconBlockSubscribe_UndefinedEeError(t *testing.T) {
require.Equal(t, 1, len(s.seenBlockCache.Keys()))
}
func TestReconstructAndBroadcastBlobs(t *testing.T) {
func TestProcessSidecarsFromExecutionFromBlock(t *testing.T) {
t.Run("blobs", func(t *testing.T) {
rob, err := blocks.NewROBlob(
&ethpb.BlobSidecar{
@@ -192,7 +192,9 @@ func TestReconstructAndBroadcastBlobs(t *testing.T) {
},
seenBlobCache: lruwrpr.New(1),
}
s.processSidecarsFromExecution(context.Background(), sb)
rob, err := blocks.NewROBlock(sb)
require.NoError(t, err)
s.processSidecarsFromExecutionFromBlock(context.Background(), rob)
require.Equal(t, tt.expectedBlobCount, len(chainService.Blobs))
})
}
@@ -253,7 +255,7 @@ func TestReconstructAndBroadcastBlobs(t *testing.T) {
name: "Constructed 128 data columns with all blobs",
blobCount: 1,
dataColumnSidecars: allColumns,
expectedDataColumnCount: 4, // default is 4
expectedDataColumnCount: 8,
},
}
@@ -261,10 +263,10 @@ func TestReconstructAndBroadcastBlobs(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
s := Service{
cfg: &config{
p2p: mockp2p.NewTestP2P(t),
chain: chainService,
clock: startup.NewClock(time.Now(), [32]byte{}),
blobStorage: filesystem.NewEphemeralBlobStorage(t),
p2p: mockp2p.NewTestP2P(t),
chain: chainService,
clock: startup.NewClock(time.Now(), [32]byte{}),
dataColumnStorage: filesystem.NewEphemeralDataColumnStorage(t),
executionReconstructor: &mockExecution.EngineClient{
DataColumnSidecars: tt.dataColumnSidecars,
},
@@ -288,10 +290,11 @@ func TestReconstructAndBroadcastBlobs(t *testing.T) {
sb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
s.processSidecarsFromExecution(context.Background(), sb)
rob, err := blocks.NewROBlock(sb)
require.NoError(t, err)
s.processSidecarsFromExecutionFromBlock(t.Context(), rob)
require.Equal(t, tt.expectedDataColumnCount, len(chainService.DataColumns))
})
}
})
}


@@ -21,14 +21,21 @@ func (s *Service) dataColumnSubscriber(ctx context.Context, msg proto.Message) e
return errors.Wrap(err, "receive data column sidecar")
}
slot := sidecar.Slot()
proposerIndex := sidecar.ProposerIndex()
root := sidecar.BlockRoot()
if err := s.reconstructSaveBroadcastDataColumnSidecars(ctx, slot, proposerIndex, root); err != nil {
if err := s.reconstructSaveBroadcastDataColumnSidecars(ctx, sidecar); err != nil {
return errors.Wrap(err, "reconstruct/save/broadcast data column sidecars")
}
key := fmt.Sprintf("%#x", sidecar.BlockRoot())
if _, err, _ := s.columnSidecarsExecSingleFlight.Do(key, func() (interface{}, error) {
if err := s.processDataColumnSidecarsFromExecutionFromColumnSidecar(ctx, sidecar); err != nil {
return nil, err
}
return nil, nil
}); err != nil {
return errors.Wrap(err, "process data column sidecars from execution from sidecar")
}
return nil
}
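Both this subscriber and the block subscriber funnel execution-layer reconstruction through columnSidecarsExecSingleFlight keyed by block root, so a block and its first column sidecar arriving concurrently trigger only one GetBlobsV2 round-trip. A minimal standalone sketch of the singleflight idiom (reconstructOnce and process are hypothetical stand-ins):

package sketch

import (
    "fmt"

    "golang.org/x/sync/singleflight"
)

var group singleflight.Group

// reconstructOnce deduplicates concurrent reconstruction attempts for the
// same block root: the first caller runs process, later callers with the
// same key block until it finishes and share its error.
func reconstructOnce(root [32]byte, process func() error) error {
    key := fmt.Sprintf("%#x", root)
    _, err, _ := group.Do(key, func() (interface{}, error) {
        return nil, process()
    })
    return err
}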


@@ -30,17 +30,10 @@ func GenerateTestDataColumns(t *testing.T, parent [fieldparams.RootLength]byte,
}
cellsAndProofs := util.GenerateCellsAndProofs(t, blobs)
dataColumnSidecars, err := peerdas.DataColumnSidecars(roBlock, cellsAndProofs)
roDataColumnSidecars, err := peerdas.ConstructDataColumnSidecar(cellsAndProofs, peerdas.PopulateFromBlock(roBlock))
require.NoError(t, err)
roDataColumns := make([]blocks.RODataColumn, 0, len(dataColumnSidecars))
for i := range dataColumnSidecars {
roDataColumn, err := blocks.NewRODataColumn(dataColumnSidecars[i])
require.NoError(t, err)
roDataColumns = append(roDataColumns, roDataColumn)
}
return roDataColumns
return roDataColumnSidecars
}
func TestColumnSatisfyRequirement(t *testing.T) {


@@ -0,0 +1,3 @@
### Changed
- Updated consensus spec from v1.6.0-alpha.5 to v1.6.0-alpha.6


@@ -0,0 +1,3 @@
### Added
- Fulu block proposal changes for the beacon API and gRPC.


@@ -0,0 +1,2 @@
### Added
- Retry fetching origin data column sidecars when starting from a checkpoint.

changelog/pop_fix-bug.md

@@ -0,0 +1,2 @@
### Fixed
- Fix bug where the arguments of fillInForkChoiceMissingBlocks were passed in the wrong order.


@@ -0,0 +1,2 @@
### Ignored
- Fix error message.


@@ -0,0 +1,3 @@
### Fixed
- Fix next epoch proposer duties in Fulu by advancing the state to the beginning of the current epoch.


@@ -0,0 +1,2 @@
### Changed
- Start from justified checkpoint by default.


@@ -0,0 +1,3 @@
### Changed
- Changed legacy sync/atomic calls to the typed atomic.Uint64/atomic.Int64 for safer and clearer code.


@@ -0,0 +1,3 @@
### Added
- Add retry logic when GetBlobsV2 is called.
- Call GetBlobsV2 as soon as we receive the first data column sidecar or block.

View File

@@ -24,8 +24,8 @@ import (
// These are variables that we don't use in Prysm. (i.e. future hardfork, light client... etc)
// IMPORTANT: Use one field per line and sort these alphabetically to reduce conflicts.
var placeholderFields = []string{
"AGGREGRATE_DUE_BPS",
"AGGREGRATE_DUE_BPS_GLOAS",
"AGGREGATE_DUE_BPS",
"AGGREGATE_DUE_BPS_GLOAS",
"ATTESTATION_DEADLINE",
"ATTESTATION_DUE_BPS",
"ATTESTATION_DUE_BPS_GLOAS",
@@ -42,6 +42,8 @@ var placeholderFields = []string{
"EIP7732_FORK_VERSION",
"EIP7805_FORK_EPOCH",
"EIP7805_FORK_VERSION",
"EIP7928_FORK_EPOCH",
"EIP7928_FORK_VERSION",
"EPOCHS_PER_SHUFFLING_PHASE",
"GLOAS_FORK_EPOCH",
"GLOAS_FORK_VERSION",


@@ -24,10 +24,10 @@ func ErrNotSupported(funcName string, ver int) error {
// ThreadSafeEnumerator is a thread-safe counter of all objects created since the node's start.
type ThreadSafeEnumerator struct {
counter uint64
counter atomic.Uint64
}
// Inc increments the enumerator and returns the new object count.
func (c *ThreadSafeEnumerator) Inc() uint64 {
return atomic.AddUint64(&c.counter, 1)
return c.counter.Add(1)
}
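For contrast, a small sketch of why the typed counter is the safer spelling: with a plain uint64 nothing stops a non-atomic access from slipping in elsewhere, while atomic.Uint64 forces every read and write through its methods and needs no manual 64-bit alignment care on 32-bit platforms:

package sketch

import "sync/atomic"

func counters() (uint64, uint64) {
    // Old style: nothing prevents an accidental plain `legacy++` elsewhere,
    // and &legacy must be 64-bit aligned on 32-bit platforms.
    var legacy uint64
    atomic.AddUint64(&legacy, 1)

    // New style: every access goes through atomic methods by construction.
    var typed atomic.Uint64
    typed.Add(1)
    return atomic.LoadUint64(&legacy), typed.Load()
}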


@@ -10,18 +10,18 @@ import (
// Arbitrary start time.
var start = time.Date(1990, 1, 2, 0, 0, 0, 0, time.UTC).Round(0)
var elapsed int64
var elapsed atomic.Int64
// We provide atomic access to elapsed to avoid data races between multiple
// concurrent goroutines during the tests.
func getElapsed() time.Duration {
return time.Duration(atomic.LoadInt64(&elapsed))
return time.Duration(elapsed.Load())
}
func setElapsed(v time.Duration) {
atomic.StoreInt64(&elapsed, int64(v))
elapsed.Store(int64(v))
}
func addToElapsed(v time.Duration) {
atomic.AddInt64(&elapsed, int64(v))
elapsed.Add(int64(v))
}
func reset(t *testing.T, c *Collector) {


@@ -1,4 +1,4 @@
version: v1.6.0-alpha.5
version: v1.6.0-alpha.6
style: full
specrefs:
@@ -61,7 +61,7 @@ exceptions:
configs:
# Not implemented (placeholders)
- AGGREGRATE_DUE_BPS#phase0
- AGGREGATE_DUE_BPS#phase0
- ATTESTATION_DUE_BPS#phase0
- CONTRIBUTION_DUE_BPS#altair
- PROPOSER_REORG_CUTOFF_BPS#phase0
@@ -69,7 +69,7 @@ exceptions:
- SYNC_MESSAGE_DUE_BPS#altair
# Not implemented: gloas (future fork)
- AGGREGRATE_DUE_BPS_GLOAS#gloas
- AGGREGATE_DUE_BPS_GLOAS#gloas
- ATTESTATION_DUE_BPS_GLOAS#gloas
- CONTRIBUTION_DUE_BPS_GLOAS#gloas
- GLOAS_FORK_EPOCH#gloas
@@ -168,6 +168,7 @@ exceptions:
# Not implemented: phase0
- calculate_committee_fraction#phase0
- compute_fork_version#phase0
- compute_pulled_up_tip#phase0
- filter_block_tree#phase0
- get_aggregate_and_proof#phase0


@@ -18,11 +18,11 @@
ALTAIR_FORK_VERSION: Version = '0x01000000'
</spec>
- name: AGGREGRATE_DUE_BPS
- name: AGGREGATE_DUE_BPS
sources: []
spec: |
<spec config_var="AGGREGRATE_DUE_BPS" fork="phase0" hash="74073466">
AGGREGRATE_DUE_BPS: uint64 = 6667
<spec config_var="AGGREGATE_DUE_BPS" fork="phase0" hash="7eaa811a">
AGGREGATE_DUE_BPS: uint64 = 6667
</spec>
- name: ATTESTATION_PROPAGATION_SLOT_RANGE


@@ -510,37 +510,42 @@
- file: beacon-chain/core/signing/signing_root.go
search: func ComputeForkDigest(
spec: |
<spec fn="compute_fork_digest" fork="phase0" hash="8b33f64d">
def compute_fork_digest(current_version: Version, genesis_validators_root: Root) -> ForkDigest:
"""
Return the 4-byte fork digest for the ``current_version`` and ``genesis_validators_root``.
This is a digest primarily used for domain separation on the p2p layer.
4-bytes suffices for practical separation of forks/chains.
"""
return ForkDigest(compute_fork_data_root(current_version, genesis_validators_root)[:4])
</spec>
- name: compute_fork_digest#fulu
sources: []
spec: |
<spec fn="compute_fork_digest" fork="fulu" hash="24737097">
<spec fn="compute_fork_digest" fork="phase0" hash="e206968f">
def compute_fork_digest(
genesis_validators_root: Root,
# [New in Fulu:EIP7892]
epoch: Epoch,
) -> ForkDigest:
"""
Return the 4-byte fork digest for the ``version`` and ``genesis_validators_root``
XOR'd with the hash of the blob parameters for ``epoch``.
Return the 4-byte fork digest for the ``genesis_validators_root`` at a given ``epoch``.
This is a digest primarily used for domain separation on the p2p layer.
4-bytes suffices for practical separation of forks/chains.
"""
fork_version = compute_fork_version(epoch)
base_digest = compute_fork_data_root(fork_version, genesis_validators_root)
blob_parameters = get_blob_parameters(epoch)
return ForkDigest(base_digest[:4])
</spec>
- name: compute_fork_digest#fulu
sources: []
spec: |
<spec fn="compute_fork_digest" fork="fulu" hash="e916a595">
def compute_fork_digest(
genesis_validators_root: Root,
epoch: Epoch,
) -> ForkDigest:
"""
Return the 4-byte fork digest for the ``genesis_validators_root`` at a given ``epoch``.
This is a digest primarily used for domain separation on the p2p layer.
4-bytes suffices for practical separation of forks/chains.
"""
fork_version = compute_fork_version(epoch)
base_digest = compute_fork_data_root(fork_version, genesis_validators_root)
# [Modified in Fulu:EIP7892]
# Bitmask digest with hash of blob parameters
blob_parameters = get_blob_parameters(epoch)
return ForkDigest(
bytes(
xor(
@@ -2071,8 +2076,8 @@
- name: get_data_column_sidecars
sources:
- file: beacon-chain/core/peerdas/das_core.go
search: func DataColumnSidecars(
- file: beacon-chain/core/peerdas/validator.go
search: func dataColumnSidecars(
spec: |
<spec fn="get_data_column_sidecars" fork="fulu" hash="317fc596">
def get_data_column_sidecars(
@@ -2110,8 +2115,8 @@
- name: get_data_column_sidecars_from_block
sources:
- file: beacon-chain/core/peerdas/das_core.go
search: func dataColumnsSidecars(
- file: beacon-chain/core/peerdas/validator.go
search: func DataColumnSidecarsFromBlock(
spec: |
<spec fn="get_data_column_sidecars_from_block" fork="fulu" hash="02ffae23">
def get_data_column_sidecars_from_block(
@@ -2139,7 +2144,9 @@
</spec>
- name: get_data_column_sidecars_from_column_sidecar
sources: []
sources:
- file: beacon-chain/core/peerdas/validator.go
search: func DataColumnSidecarsFromColumnSidecar(
spec: |
<spec fn="get_data_column_sidecars_from_column_sidecar" fork="fulu" hash="4304cdec">
def get_data_column_sidecars_from_column_sidecar(
@@ -6088,7 +6095,7 @@
- file: beacon-chain/core/blocks/payload.go
search: func ProcessPayload(
spec: |
<spec fn="process_execution_payload" fork="fulu" hash="cc18d6ef">
<spec fn="process_execution_payload" fork="fulu" hash="9a872dd6">
def process_execution_payload(
state: BeaconState, body: BeaconBlockBody, execution_engine: ExecutionEngine
) -> None:
@@ -6100,7 +6107,8 @@
assert payload.prev_randao == get_randao_mix(state, get_current_epoch(state))
# Verify timestamp
assert payload.timestamp == compute_time_at_slot(state, state.slot)
# [Modified in Fulu:EIP7892] Verify commitments are under limit
# [Modified in Fulu:EIP7892]
# Verify commitments are under limit
assert (
len(body.blob_kzg_commitments)
<= get_blob_parameters(get_current_epoch(state)).max_blobs_per_block
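A hedged Go rendering of the commitment-limit assertion quoted in the spec block above; maxBlobsPerBlock stands in for get_blob_parameters(get_current_epoch(state)).max_blobs_per_block, which EIP-7892 makes epoch-dependent:

package sketch

import "fmt"

// checkCommitmentLimit mirrors the Fulu process_execution_payload assertion:
// the number of blob KZG commitments must not exceed the epoch's limit.
func checkCommitmentLimit(commitments [][]byte, maxBlobsPerBlock uint64) error {
    if uint64(len(commitments)) > maxBlobsPerBlock {
        return fmt.Errorf("%d blob KZG commitments exceed the per-block limit of %d",
            len(commitments), maxBlobsPerBlock)
    }
    return nil
}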


@@ -8,12 +8,11 @@ import (
"strings"
"testing"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/require"
@@ -40,6 +39,7 @@ func RunBlockProcessingTest(t *testing.T, config, folderPath string) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.BlobSchedule = []params.BlobScheduleEntry{{MaxBlobsPerBlock: 9}}
cfg.FuluForkEpoch = 0 // assume the state tests run in post-Fulu epochs
params.OverrideBeaconConfig(cfg)
helpers.ClearCache()


@@ -159,15 +159,13 @@ func GenerateTestFuluBlockWithSidecars(t *testing.T, blobCount int, options ...F
cellsAndProofs := GenerateCellsAndProofs(t, blobs)
sidecars, err := peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs)
rob, err := blocks.NewROBlockWithRoot(signedBeaconBlock, root)
require.NoError(t, err)
roSidecars, err := peerdas.ConstructDataColumnSidecar(cellsAndProofs, peerdas.PopulateFromBlock(rob))
require.NoError(t, err)
roSidecars := make([]blocks.RODataColumn, 0, len(sidecars))
verifiedRoSidecars := make([]blocks.VerifiedRODataColumn, 0, len(sidecars))
for _, sidecar := range sidecars {
roSidecar, err := blocks.NewRODataColumnWithRoot(sidecar, root)
require.NoError(t, err)
verifiedRoSidecars := make([]blocks.VerifiedRODataColumn, 0, len(roSidecars))
for _, roSidecar := range roSidecars {
roVerifiedSidecar := blocks.NewVerifiedRODataColumn(roSidecar)
roSidecars = append(roSidecars, roSidecar)


@@ -3,7 +3,6 @@ package client
import (
"fmt"
"strconv"
"sync/atomic"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
@@ -138,9 +137,11 @@ func (v *validator) LogSubmittedAtts(slot primitives.Slot) {
// LogSubmittedSyncCommitteeMessages logs info about submitted sync committee messages.
func (v *validator) LogSubmittedSyncCommitteeMessages() {
if v.syncCommitteeStats.totalMessagesSubmitted > 0 {
log.WithField("messages", v.syncCommitteeStats.totalMessagesSubmitted).Debug("Submitted sync committee messages successfully to beacon node")
if count := v.syncCommitteeStats.totalMessagesSubmitted.Load(); count > 0 {
log.WithField("messages", count).
Debug("Submitted sync committee messages successfully to beacon node")
// Reset the amount.
atomic.StoreUint64(&v.syncCommitteeStats.totalMessagesSubmitted, 0)
v.syncCommitteeStats.totalMessagesSubmitted.Store(0)
}
}
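One hedged observation on the reset above: Load followed by Store(0) is two separate atomic operations, so an increment landing between them would be silently dropped. atomic.Uint64.Swap performs the read and the reset in a single step; a standalone sketch of that alternative (not part of this change):

package sketch

import (
    "log"
    "sync/atomic"
)

type stats struct{ total atomic.Uint64 }

// logAndReset uses Swap, which returns the old value and stores zero in one
// atomic step, so no increment can land between the read and the reset.
func logAndReset(s *stats) {
    if count := s.total.Swap(0); count > 0 {
        log.Printf("submitted %d sync committee messages", count)
    }
}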


@@ -3,7 +3,6 @@ package client
import (
"context"
"fmt"
"sync/atomic"
"time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/altair"
@@ -94,7 +93,7 @@ func (v *validator) SubmitSyncCommitteeMessage(ctx context.Context, slot primiti
"blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(msg.BlockRoot)),
"validatorIndex": msg.ValidatorIndex,
}).Info("Submitted new sync message")
atomic.AddUint64(&v.syncCommitteeStats.totalMessagesSubmitted, 1)
v.syncCommitteeStats.totalMessagesSubmitted.Add(1)
}
// SubmitSignedContributionAndProof submits the signed sync committee contribution and proof to the beacon chain.


@@ -13,6 +13,7 @@ import (
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/OffchainLabs/prysm/v6/api/client"
@@ -1571,5 +1572,5 @@ type voteStats struct {
// This tracks all validators' submissions for sync committees.
type syncCommitteeStats struct {
totalMessagesSubmitted uint64
totalMessagesSubmitted atomic.Uint64
}