mirror of https://github.com/OffchainLabs/prysm.git
synced 2026-01-09 13:28:01 -05:00

Compare commits: d929e1dcaa ... backfill-d
13 Commits

| Author | SHA1 | Date | |
|---|---|---|---|
| | a8cd221a48 | | |
| | d222b02b91 | | |
| | 09634d2eb5 | | |
| | 65bd75e2dc | | |
| | ccee2e4df4 | | |
| | bb9bb5e63c | | |
| | 278b796e43 | | |
| | 8e52d0c3c6 | | |
| | d339e09509 | | |
| | 8ec460223c | | |
| | 349d9d2fd0 | | |
| | e0aecb9c32 | | |
| | 4a1ab70929 | | |
@@ -102,7 +102,6 @@ func VerifyCellKZGProofBatch(commitmentsBytes []Bytes48, cellIndices []uint64, c
	for i := range cells {
		ckzgCells[i] = ckzg4844.Cell(cells[i])
	}

	return ckzg4844.VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, ckzgCells, proofsBytes)
}

@@ -1,10 +1,30 @@
package kzg

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	GoKZG "github.com/crate-crypto/go-kzg-4844"
	ckzg4844 "github.com/ethereum/c-kzg-4844/v2/bindings/go"
	"github.com/pkg/errors"
)

func bytesToBlob(blob []byte) *GoKZG.Blob {
	var ret GoKZG.Blob
	copy(ret[:], blob)
	return &ret
}

func bytesToCommitment(commitment []byte) (ret GoKZG.KZGCommitment) {
	copy(ret[:], commitment)
	return
}

func bytesToKZGProof(proof []byte) (ret GoKZG.KZGProof) {
	copy(ret[:], proof)
	return
}

// Verify performs single or batch verification of commitments depending on the number of given BlobSidecars.
func Verify(blobSidecars ...blocks.ROBlob) error {
	if len(blobSidecars) == 0 {
@@ -27,18 +47,121 @@ func Verify(blobSidecars ...blocks.ROBlob) error {
	return kzgContext.VerifyBlobKZGProofBatch(blobs, cmts, proofs)
}

func bytesToBlob(blob []byte) *GoKZG.Blob {
	var ret GoKZG.Blob
	copy(ret[:], blob)
	return &ret

// VerifyBlobKZGProofBatch verifies KZG proofs for multiple blobs using batch verification.
// This is more efficient than verifying each blob individually when len(blobs) > 1.
// For single blob verification, it uses the optimized single verification path.
func VerifyBlobKZGProofBatch(blobs [][]byte, commitments [][]byte, proofs [][]byte) error {
	if len(blobs) != len(commitments) || len(blobs) != len(proofs) {
		return errors.Errorf("number of blobs (%d), commitments (%d), and proofs (%d) must match", len(blobs), len(commitments), len(proofs))
	}

	if len(blobs) == 0 {
		return nil
	}

	// Optimize for single blob case - use single verification to avoid batch overhead
	if len(blobs) == 1 {
		return kzgContext.VerifyBlobKZGProof(
			bytesToBlob(blobs[0]),
			bytesToCommitment(commitments[0]),
			bytesToKZGProof(proofs[0]))
	}

	// Use batch verification for multiple blobs
	ckzgBlobs := make([]ckzg4844.Blob, len(blobs))
	ckzgCommitments := make([]ckzg4844.Bytes48, len(commitments))
	ckzgProofs := make([]ckzg4844.Bytes48, len(proofs))

	for i := range blobs {
		if len(blobs[i]) != len(ckzg4844.Blob{}) {
			return fmt.Errorf("blobs len (%d) differs from expected (%d)", len(blobs[i]), len(ckzg4844.Blob{}))
		}
		if len(commitments[i]) != len(ckzg4844.Bytes48{}) {
			return fmt.Errorf("commitments len (%d) differs from expected (%d)", len(commitments[i]), len(ckzg4844.Bytes48{}))
		}
		if len(proofs[i]) != len(ckzg4844.Bytes48{}) {
			return fmt.Errorf("proofs len (%d) differs from expected (%d)", len(proofs[i]), len(ckzg4844.Bytes48{}))
		}
		ckzgBlobs[i] = ckzg4844.Blob(blobs[i])
		ckzgCommitments[i] = ckzg4844.Bytes48(commitments[i])
		ckzgProofs[i] = ckzg4844.Bytes48(proofs[i])
	}

	valid, err := ckzg4844.VerifyBlobKZGProofBatch(ckzgBlobs, ckzgCommitments, ckzgProofs)
	if err != nil {
		return errors.Wrap(err, "batch verification")
	}
	if !valid {
		return errors.New("batch KZG proof verification failed")
	}

	return nil
}

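As a quick illustration of the API above (not part of the change itself), a caller holding typed go-kzg values can flatten them to byte slices and batch-verify them in one call; the helper name below is made up for the sketch and it assumes the trusted setup was already loaded via Start().

// Sketch only: batch-verify a bundle of blobs whose commitments and proofs
// arrive as typed go-kzg values. The three slices are assumed to have equal
// length; VerifyBlobKZGProofBatch re-checks this and reports a mismatch.
func verifyBundleSketch(blobs []GoKZG.Blob, commitments []GoKZG.KZGCommitment, proofs []GoKZG.KZGProof) error {
	rawBlobs := make([][]byte, len(blobs))
	rawCommitments := make([][]byte, len(commitments))
	rawProofs := make([][]byte, len(proofs))
	for i := range blobs {
		rawBlobs[i] = blobs[i][:]
		rawCommitments[i] = commitments[i][:]
		rawProofs[i] = proofs[i][:]
	}
	// A single blob takes the fast single-proof path; two or more go through
	// the c-kzg batch verifier.
	return VerifyBlobKZGProofBatch(rawBlobs, rawCommitments, rawProofs)
}
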
func bytesToCommitment(commitment []byte) (ret GoKZG.KZGCommitment) {
	copy(ret[:], commitment)
	return
}

// VerifyCellKZGProofBatchFromBlobData verifies cell KZG proofs in batch format directly from blob data.
// This is more efficient than reconstructing data column sidecars when you have the raw blob data and cell proofs.
// For PeerDAS/Fulu, the execution client provides cell proofs in flattened format via BlobsBundleV2.
// For single blob verification, it optimizes by computing cells once and verifying efficiently.
func VerifyCellKZGProofBatchFromBlobData(blobs [][]byte, commitments [][]byte, cellProofs [][]byte, numberOfColumns uint64) error {
	blobCount := uint64(len(blobs))
	expectedCellProofs := blobCount * numberOfColumns

func bytesToKZGProof(proof []byte) (ret GoKZG.KZGProof) {
	copy(ret[:], proof)
	return

	if uint64(len(cellProofs)) != expectedCellProofs {
		return errors.Errorf("expected %d cell proofs, got %d", expectedCellProofs, len(cellProofs))
	}

	if len(commitments) != len(blobs) {
		return errors.Errorf("number of commitments (%d) must match number of blobs (%d)", len(commitments), len(blobs))
	}

	if blobCount == 0 {
		return nil
	}

	// Handle multiple blobs - compute cells for all blobs
	allCells := make([]Cell, 0, expectedCellProofs)
	allCommitments := make([]Bytes48, 0, expectedCellProofs)
	allIndices := make([]uint64, 0, expectedCellProofs)
	allProofs := make([]Bytes48, 0, expectedCellProofs)

	for blobIndex := range blobs {
		if len(blobs[blobIndex]) != len(Blob{}) {
			return fmt.Errorf("blobs len (%d) differs from expected (%d)", len(blobs[blobIndex]), len(Blob{}))
		}
		// Convert blob to kzg.Blob type
		blob := Blob(blobs[blobIndex])

		// Compute cells for this blob
		cells, err := ComputeCells(&blob)
		if err != nil {
			return errors.Wrapf(err, "failed to compute cells for blob %d", blobIndex)
		}

		// Add cells and corresponding data for each column
		for columnIndex := range numberOfColumns {
			cellProofIndex := uint64(blobIndex)*numberOfColumns + columnIndex
			if len(commitments[blobIndex]) != len(Bytes48{}) {
				return fmt.Errorf("commitments len (%d) differs from expected (%d)", len(commitments[blobIndex]), len(Bytes48{}))
			}
			if len(cellProofs[cellProofIndex]) != len(Bytes48{}) {
				return fmt.Errorf("proofs len (%d) differs from expected (%d)", len(cellProofs[cellProofIndex]), len(Bytes48{}))
			}
			allCells = append(allCells, cells[columnIndex])
			allCommitments = append(allCommitments, Bytes48(commitments[blobIndex]))
			allIndices = append(allIndices, columnIndex)

			allProofs = append(allProofs, Bytes48(cellProofs[cellProofIndex]))
		}
	}

	// Batch verify all cells
	valid, err := VerifyCellKZGProofBatch(allCommitments, allIndices, allCells, allProofs)
	if err != nil {
		return errors.Wrap(err, "cell batch verification")
	}
	if !valid {
		return errors.New("cell KZG proof batch verification failed")
	}

	return nil
}

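For reference, the flattened layout expected above places the proof for blob b and column c at index b*numberOfColumns + c, matching the execution client's BlobsBundleV2 ordering. A hedged sketch of building and verifying such a bundle from raw blob bytes (the helper name is illustrative):

// Sketch only: recompute cell proofs per blob, flatten them in the layout
// expected by VerifyCellKZGProofBatchFromBlobData, and verify the bundle.
func verifyCellBundleSketch(blobs [][]byte, commitments [][]byte, numberOfColumns uint64) error {
	cellProofs := make([][]byte, 0, uint64(len(blobs))*numberOfColumns)
	for i := range blobs {
		if len(blobs[i]) != len(Blob{}) {
			return fmt.Errorf("blob %d has length %d, expected %d", i, len(blobs[i]), len(Blob{}))
		}
		blob := Blob(blobs[i])
		cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob)
		if err != nil {
			return errors.Wrapf(err, "compute cells and proofs for blob %d", i)
		}
		// Proof for (blob i, column c) lands at index i*numberOfColumns + c.
		for c := range numberOfColumns {
			cellProofs = append(cellProofs, cellsAndProofs.Proofs[c][:])
		}
	}
	return VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
}
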
@@ -37,6 +37,7 @@ func TestBytesToAny(t *testing.T) {
}

func TestGenerateCommitmentAndProof(t *testing.T) {
	require.NoError(t, Start())
	blob := random.GetRandBlob(123)
	commitment, proof, err := GenerateCommitmentAndProof(blob)
	require.NoError(t, err)
@@ -45,3 +46,432 @@ func TestGenerateCommitmentAndProof(t *testing.T) {
	require.Equal(t, expectedCommitment, commitment)
	require.Equal(t, expectedProof, proof)
}

func TestVerifyBlobKZGProofBatch(t *testing.T) {
	// Initialize KZG for testing
	require.NoError(t, Start())

	t.Run("valid single blob batch", func(t *testing.T) {
		blob := random.GetRandBlob(123)
		commitment, proof, err := GenerateCommitmentAndProof(blob)
		require.NoError(t, err)

		blobs := [][]byte{blob[:]}
		commitments := [][]byte{commitment[:]}
		proofs := [][]byte{proof[:]}

		err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
		require.NoError(t, err)
	})

	t.Run("valid multiple blob batch", func(t *testing.T) {
		blobCount := 3
		blobs := make([][]byte, blobCount)
		commitments := make([][]byte, blobCount)
		proofs := make([][]byte, blobCount)

		for i := 0; i < blobCount; i++ {
			blob := random.GetRandBlob(int64(i))
			commitment, proof, err := GenerateCommitmentAndProof(blob)
			require.NoError(t, err)

			blobs[i] = blob[:]
			commitments[i] = commitment[:]
			proofs[i] = proof[:]
		}

		err := VerifyBlobKZGProofBatch(blobs, commitments, proofs)
		require.NoError(t, err)
	})

	t.Run("empty inputs should pass", func(t *testing.T) {
		err := VerifyBlobKZGProofBatch([][]byte{}, [][]byte{}, [][]byte{})
		require.NoError(t, err)
	})

	t.Run("mismatched input lengths", func(t *testing.T) {
		blob := random.GetRandBlob(123)
		commitment, proof, err := GenerateCommitmentAndProof(blob)
		require.NoError(t, err)

		// Test different mismatch scenarios
		err = VerifyBlobKZGProofBatch(
			[][]byte{blob[:]},
			[][]byte{},
			[][]byte{proof[:]},
		)
		require.ErrorContains(t, "number of blobs (1), commitments (0), and proofs (1) must match", err)

		err = VerifyBlobKZGProofBatch(
			[][]byte{blob[:], blob[:]},
			[][]byte{commitment[:]},
			[][]byte{proof[:], proof[:]},
		)
		require.ErrorContains(t, "number of blobs (2), commitments (1), and proofs (2) must match", err)
	})

	t.Run("invalid commitment should fail", func(t *testing.T) {
		blob := random.GetRandBlob(123)
		_, proof, err := GenerateCommitmentAndProof(blob)
		require.NoError(t, err)

		// Use a different blob's commitment (mismatch)
		differentBlob := random.GetRandBlob(456)
		wrongCommitment, _, err := GenerateCommitmentAndProof(differentBlob)
		require.NoError(t, err)

		blobs := [][]byte{blob[:]}
		commitments := [][]byte{wrongCommitment[:]}
		proofs := [][]byte{proof[:]}

		err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
		// Single blob optimization uses different error message
		require.ErrorContains(t, "can't verify opening proof", err)
	})

	t.Run("invalid proof should fail", func(t *testing.T) {
		blob := random.GetRandBlob(123)
		commitment, _, err := GenerateCommitmentAndProof(blob)
		require.NoError(t, err)

		// Use wrong proof
		invalidProof := make([]byte, 48) // All zeros

		blobs := [][]byte{blob[:]}
		commitments := [][]byte{commitment[:]}
		proofs := [][]byte{invalidProof}

		err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
		require.ErrorContains(t, "short buffer", err)
	})

	t.Run("mixed valid and invalid proofs should fail", func(t *testing.T) {
		// First blob - valid
		blob1 := random.GetRandBlob(123)
		commitment1, proof1, err := GenerateCommitmentAndProof(blob1)
		require.NoError(t, err)

		// Second blob - invalid proof
		blob2 := random.GetRandBlob(456)
		commitment2, _, err := GenerateCommitmentAndProof(blob2)
		require.NoError(t, err)
		invalidProof := make([]byte, 48) // All zeros

		blobs := [][]byte{blob1[:], blob2[:]}
		commitments := [][]byte{commitment1[:], commitment2[:]}
		proofs := [][]byte{proof1[:], invalidProof}

		err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
		require.ErrorContains(t, "batch verification", err)
	})

	t.Run("batch KZG proof verification failed", func(t *testing.T) {
		// Create multiple blobs with mismatched commitments and proofs to trigger batch verification failure
		blob1 := random.GetRandBlob(123)
		blob2 := random.GetRandBlob(456)

		// Generate valid proof for blob1
		commitment1, proof1, err := GenerateCommitmentAndProof(blob1)
		require.NoError(t, err)

		// Generate valid proof for blob2 but use wrong commitment (from blob1)
		_, proof2, err := GenerateCommitmentAndProof(blob2)
		require.NoError(t, err)

		// Use blob2 data with blob1's commitment and blob2's proof - this should cause batch verification to fail
		blobs := [][]byte{blob1[:], blob2[:]}
		commitments := [][]byte{commitment1[:], commitment1[:]} // Wrong commitment for blob2
		proofs := [][]byte{proof1[:], proof2[:]}

		err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
		require.ErrorContains(t, "batch KZG proof verification failed", err)
	})
}

func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) {
	// Initialize KZG for testing
	require.NoError(t, Start())

	t.Run("valid single blob cell verification", func(t *testing.T) {
		numberOfColumns := uint64(128)

		// Generate blob and commitment
		randBlob := random.GetRandBlob(123)
		var blob Blob
		copy(blob[:], randBlob[:])
		commitment, err := BlobToKZGCommitment(&blob)
		require.NoError(t, err)

		// Compute cells and proofs
		cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob)
		require.NoError(t, err)

		// Create flattened cell proofs (like execution client format)
		cellProofs := make([][]byte, numberOfColumns)
		for i := range numberOfColumns {
			cellProofs[i] = cellsAndProofs.Proofs[i][:]
		}

		blobs := [][]byte{blob[:]}
		commitments := [][]byte{commitment[:]}

		err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
		require.NoError(t, err)
	})

	t.Run("valid multiple blob cell verification", func(t *testing.T) {
		numberOfColumns := uint64(128)
		blobCount := 2

		blobs := make([][]byte, blobCount)
		commitments := make([][]byte, blobCount)
		var allCellProofs [][]byte

		for i := range blobCount {
			// Generate blob and commitment
			randBlob := random.GetRandBlob(int64(i))
			var blob Blob
			copy(blob[:], randBlob[:])
			commitment, err := BlobToKZGCommitment(&blob)
			require.NoError(t, err)

			// Compute cells and proofs
			cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob)
			require.NoError(t, err)

			blobs[i] = blob[:]
			commitments[i] = commitment[:]

			// Add cell proofs for this blob
			for j := range numberOfColumns {
				allCellProofs = append(allCellProofs, cellsAndProofs.Proofs[j][:])
			}
		}

		err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, allCellProofs, numberOfColumns)
		require.NoError(t, err)
	})

	t.Run("empty inputs should pass", func(t *testing.T) {
		err := VerifyCellKZGProofBatchFromBlobData([][]byte{}, [][]byte{}, [][]byte{}, 128)
		require.NoError(t, err)
	})

	t.Run("mismatched blob and commitment count", func(t *testing.T) {
		randBlob := random.GetRandBlob(123)
		var blob Blob
		copy(blob[:], randBlob[:])

		err := VerifyCellKZGProofBatchFromBlobData(
			[][]byte{blob[:]},
			[][]byte{}, // Empty commitments
			[][]byte{},
			128,
		)
		require.ErrorContains(t, "expected 128 cell proofs", err)
	})

	t.Run("wrong cell proof count", func(t *testing.T) {
		numberOfColumns := uint64(128)

		randBlob := random.GetRandBlob(123)
		var blob Blob
		copy(blob[:], randBlob[:])
		commitment, err := BlobToKZGCommitment(&blob)
		require.NoError(t, err)

		blobs := [][]byte{blob[:]}
		commitments := [][]byte{commitment[:]}

		// Wrong number of cell proofs - should be 128 for 1 blob, but provide 10
		wrongCellProofs := make([][]byte, 10)

		err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, wrongCellProofs, numberOfColumns)
		require.ErrorContains(t, "expected 128 cell proofs, got 10", err)
	})

	t.Run("invalid cell proofs should fail", func(t *testing.T) {
		numberOfColumns := uint64(128)

		randBlob := random.GetRandBlob(123)
		var blob Blob
		copy(blob[:], randBlob[:])
		commitment, err := BlobToKZGCommitment(&blob)
		require.NoError(t, err)

		blobs := [][]byte{blob[:]}
		commitments := [][]byte{commitment[:]}

		// Create invalid cell proofs (all zeros)
		invalidCellProofs := make([][]byte, numberOfColumns)
		for i := range numberOfColumns {
			invalidCellProofs[i] = make([]byte, 48) // All zeros
		}

		err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, invalidCellProofs, numberOfColumns)
		require.ErrorContains(t, "cell batch verification", err)
	})

	t.Run("mismatched commitment should fail", func(t *testing.T) {
		numberOfColumns := uint64(128)

		// Generate blob and correct cell proofs
		randBlob := random.GetRandBlob(123)
		var blob Blob
		copy(blob[:], randBlob[:])
		cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob)
		require.NoError(t, err)

		// Generate wrong commitment from different blob
		randBlob2 := random.GetRandBlob(456)
		var differentBlob Blob
		copy(differentBlob[:], randBlob2[:])
		wrongCommitment, err := BlobToKZGCommitment(&differentBlob)
		require.NoError(t, err)

		cellProofs := make([][]byte, numberOfColumns)
		for i := range numberOfColumns {
			cellProofs[i] = cellsAndProofs.Proofs[i][:]
		}

		blobs := [][]byte{blob[:]}
		commitments := [][]byte{wrongCommitment[:]}

		err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
		require.ErrorContains(t, "cell KZG proof batch verification failed", err)
	})

	t.Run("invalid blob data that should cause ComputeCells to fail", func(t *testing.T) {
		numberOfColumns := uint64(128)

		// Create invalid blob (not properly formatted)
		invalidBlobData := make([]byte, 10) // Too short
		commitment := make([]byte, 48)      // Dummy commitment
		cellProofs := make([][]byte, numberOfColumns)
		for i := range numberOfColumns {
			cellProofs[i] = make([]byte, 48)
		}

		blobs := [][]byte{invalidBlobData}
		commitments := [][]byte{commitment}

		err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
		require.NotNil(t, err)
		require.ErrorContains(t, "blobs len (10) differs from expected (131072)", err)
	})

	t.Run("invalid commitment size should fail", func(t *testing.T) {
		numberOfColumns := uint64(128)

		randBlob := random.GetRandBlob(123)
		var blob Blob
		copy(blob[:], randBlob[:])

		// Create invalid commitment (wrong size)
		invalidCommitment := make([]byte, 32) // Should be 48 bytes
		cellProofs := make([][]byte, numberOfColumns)
		for i := range numberOfColumns {
			cellProofs[i] = make([]byte, 48)
		}

		blobs := [][]byte{blob[:]}
		commitments := [][]byte{invalidCommitment}

		err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
		require.ErrorContains(t, "commitments len (32) differs from expected (48)", err)
	})

	t.Run("invalid cell proof size should fail", func(t *testing.T) {
		numberOfColumns := uint64(128)

		randBlob := random.GetRandBlob(123)
		var blob Blob
		copy(blob[:], randBlob[:])
		commitment, err := BlobToKZGCommitment(&blob)
		require.NoError(t, err)

		// Create invalid cell proofs (wrong size)
		invalidCellProofs := make([][]byte, numberOfColumns)
		for i := range numberOfColumns {
			if i == 0 {
				invalidCellProofs[i] = make([]byte, 32) // Wrong size - should be 48
			} else {
				invalidCellProofs[i] = make([]byte, 48)
			}
		}

		blobs := [][]byte{blob[:]}
		commitments := [][]byte{commitment[:]}

		err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, invalidCellProofs, numberOfColumns)
		require.ErrorContains(t, "proofs len (32) differs from expected (48)", err)
	})

	t.Run("multiple blobs with mixed invalid commitments", func(t *testing.T) {
		numberOfColumns := uint64(128)
		blobCount := 2

		blobs := make([][]byte, blobCount)
		commitments := make([][]byte, blobCount)
		var allCellProofs [][]byte

		// First blob - valid
		randBlob1 := random.GetRandBlob(123)
		var blob1 Blob
		copy(blob1[:], randBlob1[:])
		commitment1, err := BlobToKZGCommitment(&blob1)
		require.NoError(t, err)
		blobs[0] = blob1[:]
		commitments[0] = commitment1[:]

		// Second blob - use invalid commitment size
		randBlob2 := random.GetRandBlob(456)
		var blob2 Blob
		copy(blob2[:], randBlob2[:])
		blobs[1] = blob2[:]
		commitments[1] = make([]byte, 32) // Wrong size

		// Add cell proofs for both blobs
		for i := 0; i < blobCount; i++ {
			for j := uint64(0); j < numberOfColumns; j++ {
				allCellProofs = append(allCellProofs, make([]byte, 48))
			}
		}

		err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, allCellProofs, numberOfColumns)
		require.ErrorContains(t, "commitments len (32) differs from expected (48)", err)
	})

	t.Run("multiple blobs with mixed invalid cell proof sizes", func(t *testing.T) {
		numberOfColumns := uint64(128)
		blobCount := 2

		blobs := make([][]byte, blobCount)
		commitments := make([][]byte, blobCount)
		var allCellProofs [][]byte

		for i := 0; i < blobCount; i++ {
			randBlob := random.GetRandBlob(int64(i))
			var blob Blob
			copy(blob[:], randBlob[:])
			commitment, err := BlobToKZGCommitment(&blob)
			require.NoError(t, err)

			blobs[i] = blob[:]
			commitments[i] = commitment[:]

			// Add cell proofs - make some invalid in the second blob
			for j := uint64(0); j < numberOfColumns; j++ {
				if i == 1 && j == 64 {
					// Invalid proof size in middle of second blob's proofs
					allCellProofs = append(allCellProofs, make([]byte, 20))
				} else {
					allCellProofs = append(allCellProofs, make([]byte, 48))
				}
			}
		}

		err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, allCellProofs, numberOfColumns)
		require.ErrorContains(t, "proofs len (20) differs from expected (48)", err)
	})
}

@@ -133,7 +133,7 @@ func getStateVersionAndPayload(st state.BeaconState) (int, interfaces.ExecutionD
	return preStateVersion, preStateHeader, nil
}

func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlock, avs das.AvailabilityStore) error {
func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlock, avs das.AvailabilityChecker) error {
	ctx, span := trace.StartSpan(ctx, "blockChain.onBlockBatch")
	defer span.End()

@@ -159,7 +159,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
	}

	// Fill in missing blocks
	if err := s.fillInForkChoiceMissingBlocks(ctx, blks[0], preState.CurrentJustifiedCheckpoint(), preState.FinalizedCheckpoint()); err != nil {
	if err := s.fillInForkChoiceMissingBlocks(ctx, blks[0], preState.FinalizedCheckpoint(), preState.CurrentJustifiedCheckpoint()); err != nil {
		return errors.Wrap(err, "could not fill in missing blocks to forkchoice")
	}

@@ -309,7 +309,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
	return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
}

func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.AvailabilityStore, roBlock consensusblocks.ROBlock) error {
func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.AvailabilityChecker, roBlock consensusblocks.ROBlock) error {
	blockVersion := roBlock.Version()
	block := roBlock.Block()
	slot := block.Slot()

@@ -30,6 +30,10 @@ import (
	"github.com/sirupsen/logrus"
)

// ErrInvalidCheckpointArgs may be returned when the finalized checkpoint has an epoch greater than the justified checkpoint epoch.
// If you are seeing this error, make sure you haven't mixed up the order of the arguments in the method you are calling.
var ErrInvalidCheckpointArgs = errors.New("finalized checkpoint cannot be greater than justified checkpoint")

// CurrentSlot returns the current slot based on time.
func (s *Service) CurrentSlot() primitives.Slot {
	return slots.CurrentSlot(s.genesisTime)
@@ -454,6 +458,9 @@ func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot primitives.
// This is useful for block tree visualizer and additional vote accounting.
func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock,
	fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
	if fCheckpoint.Epoch > jCheckpoint.Epoch {
		return ErrInvalidCheckpointArgs
	}
	pendingNodes := make([]*forkchoicetypes.BlockAndCheckpoints, 0)

	// Fork choice only matters from last finalized slot.

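For illustration, the intended call shape mirrors the corrected onBlockBatch call site earlier in this diff: finalized checkpoint first, justified checkpoint second. A swapped order with a finalized epoch above the justified epoch now surfaces as ErrInvalidCheckpointArgs. A minimal hedged sketch (the wrapper name is made up):

// Sketch only: pass the finalized checkpoint before the justified one.
func fillMissingBlocksSketch(ctx context.Context, s *Service, blk interfaces.ReadOnlySignedBeaconBlock, finalized, justified *ethpb.Checkpoint) error {
	if err := s.fillInForkChoiceMissingBlocks(ctx, blk, finalized, justified); err != nil {
		// A transposed argument order would be reported here as ErrInvalidCheckpointArgs.
		return errors.Wrap(err, "could not fill in missing blocks to forkchoice")
	}
	return nil
}
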
@@ -375,6 +375,81 @@ func TestFillForkChoiceMissingBlocks_FinalizedSibling(t *testing.T) {
	require.Equal(t, ErrNotDescendantOfFinalized.Error(), err.Error())
}

func TestFillForkChoiceMissingBlocks_ErrorCases(t *testing.T) {
	tests := []struct {
		name           string
		finalizedEpoch primitives.Epoch
		justifiedEpoch primitives.Epoch
		expectedError  error
	}{
		{
			name:           "finalized epoch greater than justified epoch",
			finalizedEpoch: 5,
			justifiedEpoch: 3,
			expectedError:  ErrInvalidCheckpointArgs,
		},
		{
			name:           "valid case - finalized equal to justified",
			finalizedEpoch: 3,
			justifiedEpoch: 3,
			expectedError:  nil,
		},
		{
			name:           "valid case - finalized less than justified",
			finalizedEpoch: 2,
			justifiedEpoch: 3,
			expectedError:  nil,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			service, tr := minimalTestService(t)
			ctx, beaconDB := tr.ctx, tr.db

			st, _ := util.DeterministicGenesisState(t, 64)
			require.NoError(t, service.saveGenesisData(ctx, st))

			// Create a simple block for testing
			blk := util.NewBeaconBlock()
			blk.Block.Slot = 10
			blk.Block.ParentRoot = service.originBlockRoot[:]
			wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
			require.NoError(t, err)
			util.SaveBlock(t, ctx, beaconDB, blk)

			// Create checkpoints with test case epochs
			finalizedCheckpoint := &ethpb.Checkpoint{
				Epoch: tt.finalizedEpoch,
				Root:  service.originBlockRoot[:],
			}
			justifiedCheckpoint := &ethpb.Checkpoint{
				Epoch: tt.justifiedEpoch,
				Root:  service.originBlockRoot[:],
			}

			// Set up forkchoice store to avoid other errors
			fcp := &ethpb.Checkpoint{Epoch: 0, Root: service.originBlockRoot[:]}
			state, blkRoot, err := prepareForkchoiceState(ctx, 0, service.originBlockRoot, service.originBlockRoot, [32]byte{}, fcp, fcp)
			require.NoError(t, err)
			require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))

			err = service.fillInForkChoiceMissingBlocks(
				t.Context(), wsb, finalizedCheckpoint, justifiedCheckpoint)

			if tt.expectedError != nil {
				require.ErrorIs(t, err, tt.expectedError)
			} else {
				// For valid cases, we might get other errors (like block not being descendant of finalized)
				// but we shouldn't get the checkpoint validation error
				if err != nil && errors.Is(err, tt.expectedError) {
					t.Errorf("Unexpected checkpoint validation error: %v", err)
				}
			}
		})
	}
}

// blockTree1 constructs the following tree:
//
//	/- B1
@@ -2132,13 +2207,13 @@ func TestNoViableHead_Reboot(t *testing.T) {

	// Forkchoice has the genesisRoot loaded at startup
	require.Equal(t, genesisRoot, service.ensureRootNotZeros(service.cfg.ForkChoiceStore.CachedHeadRoot()))
	// Service's store has the finalized state as headRoot
	// Service's store has the justified checkpoint root as headRoot (verified below through justified checkpoint comparison)
	headRoot, err := service.HeadRoot(ctx)
	require.NoError(t, err)
	require.Equal(t, genesisRoot, bytesutil.ToBytes32(headRoot))
	require.NotEqual(t, bytesutil.ToBytes32(params.BeaconConfig().ZeroHash[:]), bytesutil.ToBytes32(headRoot)) // Ensure head is not zero
	optimistic, err := service.IsOptimistic(ctx)
	require.NoError(t, err)
	require.Equal(t, false, optimistic)
	require.Equal(t, true, optimistic) // Head is now optimistic when starting from justified checkpoint

	// Check that the node's justified checkpoint does not agree with the
	// last valid state's justified checkpoint

@@ -40,8 +40,8 @@ var epochsSinceFinalityExpandCache = primitives.Epoch(4)

// BlockReceiver interface defines the methods of chain service for receiving and processing new blocks.
type BlockReceiver interface {
	ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityStore) error
	ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityStore) error
	ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityChecker) error
	ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityChecker) error
	HasBlock(ctx context.Context, root [32]byte) bool
	RecentBlockSlot(root [32]byte) (primitives.Slot, error)
	BlockBeingSynced([32]byte) bool
@@ -70,7 +70,7 @@ type SlashingReceiver interface {
// 1. Validate block, apply state transition and update checkpoints
// 2. Apply fork choice to the processed block
// 3. Save latest head info
func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityStore) error {
func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityChecker) error {
	ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlock")
	defer span.End()
	// Return early if the block is blacklisted
@@ -244,7 +244,7 @@ func (s *Service) handleDA(
	ctx context.Context,
	block interfaces.SignedBeaconBlock,
	blockRoot [fieldparams.RootLength]byte,
	avs das.AvailabilityStore,
	avs das.AvailabilityChecker,
) (elapsed time.Duration, err error) {
	defer func(start time.Time) {
		elapsed = time.Since(start)
@@ -333,7 +333,7 @@ func (s *Service) executePostFinalizationTasks(ctx context.Context, finalizedSta
// ReceiveBlockBatch processes the whole block batch at once, assuming the block batch is linear, transitioning
// the state, performing batch verification of all collected signatures and then performing the appropriate
// actions for a block post-transition.
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityStore) error {
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityChecker) error {
	ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlockBatch")
	defer span.End()

@@ -20,7 +20,7 @@ func (s *Service) setupForkchoice(st state.BeaconState) error {
		return errors.Wrap(err, "could not set up forkchoice checkpoints")
	}
	if err := s.setupForkchoiceTree(st); err != nil {
		return errors.Wrap(err, "could not set up forkchoice root")
		return errors.Wrap(err, "could not set up forkchoice tree")
	}
	if err := s.initializeHead(s.ctx, st); err != nil {
		return errors.Wrap(err, "could not initialize head from db")
@@ -30,24 +30,24 @@ func (s *Service) setupForkchoice(st state.BeaconState) error {

func (s *Service) startupHeadRoot() [32]byte {
	headStr := features.Get().ForceHead
	cp := s.FinalizedCheckpt()
	fRoot := s.ensureRootNotZeros([32]byte(cp.Root))
	jp := s.CurrentJustifiedCheckpt()
	jRoot := s.ensureRootNotZeros([32]byte(jp.Root))
	if headStr == "" {
		return fRoot
		return jRoot
	}
	if headStr == "head" {
		root, err := s.cfg.BeaconDB.HeadBlockRoot()
		if err != nil {
			log.WithError(err).Error("Could not get head block root, starting with finalized block as head")
			return fRoot
			log.WithError(err).Error("Could not get head block root, starting with justified block as head")
			return jRoot
		}
		log.Infof("Using Head root of %#x", root)
		return root
	}
	root, err := bytesutil.DecodeHexWithLength(headStr, 32)
	if err != nil {
		log.WithError(err).Error("Could not parse head root, starting with finalized block as head")
		return fRoot
		log.WithError(err).Error("Could not parse head root, starting with justified block as head")
		return jRoot
	}
	return [32]byte(root)
}

@@ -32,7 +32,7 @@ func Test_startupHeadRoot(t *testing.T) {
		})
		defer resetCfg()
		require.Equal(t, service.startupHeadRoot(), gr)
		require.LogsContain(t, hook, "Could not get head block root, starting with finalized block as head")
		require.LogsContain(t, hook, "Could not get head block root, starting with justified block as head")
	})

	st, _ := util.DeterministicGenesisState(t, 64)

@@ -275,7 +275,7 @@ func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block interf
}

// ReceiveBlockBatch processes blocks in batches from initial-sync.
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBlock, _ das.AvailabilityStore) error {
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBlock, _ das.AvailabilityChecker) error {
	if s.State == nil {
		return ErrNilState
	}
@@ -305,7 +305,7 @@ func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBl
}

// ReceiveBlock mocks ReceiveBlock method in chain service.
func (s *ChainService) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, _ [32]byte, _ das.AvailabilityStore) error {
func (s *ChainService) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, _ [32]byte, _ das.AvailabilityChecker) error {
	if s.ReceiveBlockMockErr != nil {
		return s.ReceiveBlockMockErr
	}

@@ -317,23 +317,15 @@ func ProposerAssignments(ctx context.Context, state state.BeaconState, epoch pri
	}

	proposerAssignments := make(map[primitives.ValidatorIndex][]primitives.Slot)

	originalStateSlot := state.Slot()

	for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ {
		// Skip proposer assignment for genesis slot.
		if slot == 0 {
			continue
		}
		// Set the state's current slot.
		if err := state.SetSlot(slot); err != nil {
			return nil, err
		}

		// Determine the proposer index for the current slot.
		i, err := BeaconProposerIndex(ctx, state)
		i, err := BeaconProposerIndexAtSlot(ctx, state, slot)
		if err != nil {
			return nil, errors.Wrapf(err, "could not check proposer at slot %d", state.Slot())
			return nil, errors.Wrapf(err, "could not check proposer at slot %d", slot)
		}

		// Append the slot to the proposer's assignments.
@@ -342,12 +334,6 @@ func ProposerAssignments(ctx context.Context, state state.BeaconState, epoch pri
		}
		proposerAssignments[i] = append(proposerAssignments[i], slot)
	}

	// Reset state back to its original slot.
	if err := state.SetSlot(originalStateSlot); err != nil {
		return nil, err
	}

	return proposerAssignments, nil
}

@@ -309,23 +309,29 @@ func beaconProposerIndexAtSlotFulu(state state.ReadOnlyBeaconState, slot primiti
	if err != nil {
		return 0, errors.Wrap(err, "could not get proposer lookahead")
	}
	spe := params.BeaconConfig().SlotsPerEpoch
	if e == stateEpoch {
		return lookAhead[slot%params.BeaconConfig().SlotsPerEpoch], nil
		return lookAhead[slot%spe], nil
	}
	// The caller is requesting the proposer for the next epoch
	return lookAhead[slot%params.BeaconConfig().SlotsPerEpoch+params.BeaconConfig().SlotsPerEpoch], nil
	return lookAhead[spe+slot%spe], nil
}

// BeaconProposerIndexAtSlot returns proposer index at the given slot from the
// point of view of the given state as head state
func BeaconProposerIndexAtSlot(ctx context.Context, state state.ReadOnlyBeaconState, slot primitives.Slot) (primitives.ValidatorIndex, error) {
	if state.Version() >= version.Fulu {
		return beaconProposerIndexAtSlotFulu(state, slot)
	}
	e := slots.ToEpoch(slot)
	stateEpoch := slots.ToEpoch(state.Slot())
	// Even if the state is post Fulu, we may request a past proposer index.
	if state.Version() >= version.Fulu && e >= params.BeaconConfig().FuluForkEpoch {
		// We can use the cached lookahead only for the current and the next epoch.
		if e == stateEpoch || e == stateEpoch+1 {
			return beaconProposerIndexAtSlotFulu(state, slot)
		}
	}
	// The cache uses the state root of the previous epoch - minimum_seed_lookahead last slot as key. (e.g. Starting epoch 1, slot 32, the key would be block root at slot 31)
	// For simplicity, the node will skip caching of genesis epoch.
	if e > params.BeaconConfig().GenesisEpoch+params.BeaconConfig().MinSeedLookahead {
	// For simplicity, the node will skip caching of genesis epoch. If the passed state has not yet reached this slot then we do not check the cache.
	if e <= stateEpoch && e > params.BeaconConfig().GenesisEpoch+params.BeaconConfig().MinSeedLookahead {
		s, err := slots.EpochEnd(e - 1)
		if err != nil {
			return 0, err

@@ -1161,6 +1161,10 @@ func TestValidatorMaxEffectiveBalance(t *testing.T) {
}

func TestBeaconProposerIndexAtSlotFulu(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	cfg.FuluForkEpoch = 1
	params.OverrideBeaconConfig(cfg)
	lookahead := make([]uint64, 64)
	lookahead[0] = 15
	lookahead[1] = 16
@@ -1180,8 +1184,4 @@ func TestBeaconProposerIndexAtSlotFulu(t *testing.T) {
	idx, err = helpers.BeaconProposerIndexAtSlot(t.Context(), st, 130)
	require.NoError(t, err)
	require.Equal(t, primitives.ValidatorIndex(42), idx)
	_, err = helpers.BeaconProposerIndexAtSlot(t.Context(), st, 95)
	require.ErrorContains(t, "slot 95 is not in the current epoch 3 or the next epoch", err)
	_, err = helpers.BeaconProposerIndexAtSlot(t.Context(), st, 160)
	require.ErrorContains(t, "slot 160 is not in the current epoch 3 or the next epoch", err)
}

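A small worked sketch of the lookahead indexing above, assuming 32 slots per epoch: the cached lookahead holds two epochs of proposer indices, the state's current epoch at offsets [0, 32) and the next epoch at offsets [32, 64). The helper below is illustrative, not part of the change.

// Sketch: compute the offset into the two-epoch proposer lookahead.
func lookaheadOffsetSketch(slot, spe primitives.Slot, sameEpoch bool) primitives.Slot {
	if sameEpoch {
		return slot % spe // e.g. slot 100 in the state's own epoch -> offset 4
	}
	return spe + slot%spe // e.g. slot 130 one epoch ahead -> offset 32 + 2 = 34
}
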
@@ -107,3 +107,104 @@ func computeInfoCacheKey(nodeID enode.ID, custodyGroupCount uint64) [nodeInfoCac

	return key
}

// ColumnIndices is a map of column indices where the key is the column index and the value is a boolean.
// The boolean could indicate different things, e.g. whether the column is needed (in the context of satisfying custody requirements)
// or present (in the context of a custody check on disk or in cache).
type ColumnIndices map[uint64]bool

func (ci ColumnIndices) Has(index uint64) bool {
	_, ok := ci[index]
	return ok
}

func (ci ColumnIndices) Count() int {
	return len(ci)
}

func (ci ColumnIndices) Set(index uint64) {
	ci[index] = true
}

func (ci ColumnIndices) Unset(index uint64) {
	delete(ci, index)
}

func (ci ColumnIndices) Copy() ColumnIndices {
	newCi := make(ColumnIndices, len(ci))
	for index, set := range ci {
		newCi[index] = set
	}
	return newCi
}

func (ci ColumnIndices) Intersection(other ColumnIndices) ColumnIndices {
	result := make(ColumnIndices)
	for index := range ci {
		if other.Has(index) {
			result.Set(index)
		}
	}
	return result
}

// Union mutates the receiver so that any index that is set in either of
// the two ColumnIndices is set in the receiver after the function finishes.
// It does not mutate the other ColumnIndices given as a function argument.
func (ci ColumnIndices) Union(other ColumnIndices) {
	for index := range other {
		ci.Set(index)
	}
}

func (ci ColumnIndices) ToMap() map[uint64]bool {
	m := make(map[uint64]bool, len(ci))
	for index, set := range ci {
		m[index] = set
	}
	return m
}

// ToSlice converts a ColumnIndices into a slice of uint64 indices.
func (ci ColumnIndices) ToSlice() []uint64 {
	indices := make([]uint64, 0, len(ci))
	for index := range ci {
		indices = append(indices, index)
	}
	return indices
}

func MapFromColumnIndices(ci ColumnIndices) map[uint64]bool {
	m := make(map[uint64]bool, len(ci))
	for index, set := range ci {
		m[index] = set
	}
	return m
}

// NewColumnIndicesFromSlice creates a ColumnIndices from a slice of uint64.
// Unlike the untyped map, this explicitly indicates that the boolean value is meaningful.
func NewColumnIndicesFromSlice(indices []uint64) ColumnIndices {
	ci := make(ColumnIndices, len(indices))
	for _, index := range indices {
		ci[index] = true
	}
	return ci
}

// NewColumnIndicesFromMap creates a ColumnIndices from a map of uint64 to bool.
// Unlike the untyped map, this explicitly indicates that the boolean value is meaningful.
func NewColumnIndicesFromMap(indices map[uint64]bool) ColumnIndices {
	ci := make(ColumnIndices, len(indices))
	for index, set := range indices {
		if !set {
			continue
		}
		ci[index] = true
	}
	return ci
}

func NewColumnIndices() ColumnIndices {
	return make(ColumnIndices)
}

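A short caller-side sketch of how these set helpers compose (the index values are made up):

// Illustrative only: combine a custody requirement with what is already on disk.
func missingColumnsSketch() peerdas.ColumnIndices {
	needed := peerdas.NewColumnIndicesFromSlice([]uint64{1, 5, 9}) // custody requirement
	stored := peerdas.NewColumnIndicesFromSlice([]uint64{5})       // already persisted

	missing := needed.Copy() // keep the requirement itself untouched
	for idx := range missing.Intersection(stored) {
		missing.Unset(idx) // drop anything already persisted
	}
	// missing now holds {1, 9}; Union could fold in requirements from other blocks.
	return missing
}
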
@@ -4,6 +4,8 @@ go_library(
    name = "go_default_library",
    srcs = [
        "availability_blobs.go",
        "availability_columns.go",
        "bisect.go",
        "blob_cache.go",
        "data_column_cache.go",
        "iface.go",
@@ -12,6 +14,7 @@ go_library(
    importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/das",
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/core/peerdas:go_default_library",
        "//beacon-chain/db/filesystem:go_default_library",
        "//beacon-chain/verification:go_default_library",
        "//config/fieldparams:go_default_library",
@@ -21,6 +24,7 @@ go_library(
        "//runtime/logging:go_default_library",
        "//runtime/version:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
    ],
@@ -30,11 +34,13 @@ go_test(
    name = "go_default_test",
    srcs = [
        "availability_blobs_test.go",
        "availability_columns_test.go",
        "blob_cache_test.go",
        "data_column_cache_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//beacon-chain/core/peerdas:go_default_library",
        "//beacon-chain/db/filesystem:go_default_library",
        "//beacon-chain/verification:go_default_library",
        "//config/fieldparams:go_default_library",
@@ -45,6 +51,7 @@ go_test(
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
    ],
)

@@ -29,7 +29,7 @@ type LazilyPersistentStoreBlob struct {
	verifier BlobBatchVerifier
}

var _ AvailabilityStore = &LazilyPersistentStoreBlob{}
var _ AvailabilityChecker = &LazilyPersistentStoreBlob{}

// BlobBatchVerifier enables LazyAvailabilityStore to manage the verification process
// going from ROBlob->VerifiedROBlob, while avoiding the decision of which individual verifications
@@ -81,7 +81,16 @@ func (s *LazilyPersistentStoreBlob) Persist(current primitives.Slot, sidecars ..

// IsDataAvailable returns nil if all the commitments in the given block are persisted to the db and have been verified.
// BlobSidecars already in the db are assumed to have been previously verified against the block.
func (s *LazilyPersistentStoreBlob) IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
func (s *LazilyPersistentStoreBlob) IsDataAvailable(ctx context.Context, current primitives.Slot, blks ...blocks.ROBlock) error {
	for _, b := range blks {
		if err := s.checkOne(ctx, current, b); err != nil {
			return err
		}
	}
	return nil
}

func (s *LazilyPersistentStoreBlob) checkOne(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
	blockCommitments, err := commitmentsToCheck(b, current)
	if err != nil {
		return errors.Wrapf(err, "could not check data availability for block %#x", b.Root())

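A minimal sketch of the new variadic usage, assuming the AvailabilityChecker interface exposes the IsDataAvailable signature implemented above (the wrapper name is illustrative):

// Sketch: the availability checker is now consulted for a whole batch at once
// during batch sync rather than block by block; the blob-backed implementation
// simply loops over the blocks via checkOne.
func checkBatchSketch(ctx context.Context, avs AvailabilityChecker, current primitives.Slot, blks []blocks.ROBlock) error {
	return avs.IsDataAvailable(ctx, current, blks...)
}
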
beacon-chain/das/availability_columns.go (new file, 229 lines)
@@ -0,0 +1,229 @@
package das

import (
	"context"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/time/slots"
	"github.com/ethereum/go-ethereum/p2p/enode"
	errors "github.com/pkg/errors"
	log "github.com/sirupsen/logrus"
)

// LazilyPersistentStoreColumn is an implementation of AvailabilityStore to be used when batch syncing data columns.
// This implementation will hold any data columns passed to Persist until IsDataAvailable is called for their
// block, at which time they will undergo full verification and be saved to the disk.
type LazilyPersistentStoreColumn struct {
	store                  *filesystem.DataColumnStorage
	cache                  *dataColumnCache
	newDataColumnsVerifier verification.NewDataColumnsVerifier
	custody                *custodyRequirement
	bisector               Bisector
}

var _ AvailabilityChecker = &LazilyPersistentStoreColumn{}

// DataColumnsVerifier enables LazilyPersistentStoreColumn to manage the verification process
// going from RODataColumn->VerifiedRODataColumn, while avoiding the decision of which individual verifications
// to run and in what order. Since LazilyPersistentStoreColumn always tries to verify and save data columns only when
// they are all available, the interface takes a slice of data column sidecars.
type DataColumnsVerifier interface {
	VerifiedRODataColumns(ctx context.Context, blk blocks.ROBlock, scs []blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error)
}

// NewLazilyPersistentStoreColumn creates a new LazilyPersistentStoreColumn.
// WARNING: The resulting LazilyPersistentStoreColumn is NOT thread-safe.
func NewLazilyPersistentStoreColumn(
	store *filesystem.DataColumnStorage,
	newDataColumnsVerifier verification.NewDataColumnsVerifier,
	nodeID enode.ID,
	cgc uint64,
	bisector Bisector,
) *LazilyPersistentStoreColumn {
	return &LazilyPersistentStoreColumn{
		store:                  store,
		cache:                  newDataColumnCache(),
		newDataColumnsVerifier: newDataColumnsVerifier,
		custody:                &custodyRequirement{nodeID: nodeID, cgc: cgc},
		bisector:               bisector,
	}
}

// Persist adds columns to the working column cache. Columns stored in this cache will be persisted
// for at least as long as the node is running. Once IsDataAvailable succeeds, all columns referenced
// by the given block are guaranteed to be persisted for the remainder of the retention period.
func (s *LazilyPersistentStoreColumn) Persist(current primitives.Slot, sidecars ...blocks.RODataColumn) error {
	currentEpoch := slots.ToEpoch(current)
	for _, sidecar := range sidecars {
		if !params.WithinDAPeriod(slots.ToEpoch(sidecar.Slot()), currentEpoch) {
			continue
		}
		if err := s.cache.stash(sidecar); err != nil {
			return errors.Wrap(err, "stash DataColumnSidecar")
		}
	}

	return nil
}

// IsDataAvailable returns nil if all the commitments in the given block are persisted to the db and have been verified.
// DataColumnSidecars already in the db are assumed to have been previously verified against the block.
func (s *LazilyPersistentStoreColumn) IsDataAvailable(ctx context.Context, current primitives.Slot, blks ...blocks.ROBlock) error {
	currentEpoch := slots.ToEpoch(current)

	toVerify := make([]blocks.RODataColumn, 0)
	for _, block := range blks {
		indices, err := s.required(block, currentEpoch)
		if err != nil {
			return errors.Wrapf(err, "full commitments to check with block root `%#x` and current slot `%d`", block.Root(), current)
		}
		if indices.Count() == 0 {
			continue
		}

		key := cacheKey{slot: block.Block().Slot(), root: block.Root()}
		entry := s.cache.ensure(key)
		toVerify, err = entry.append(toVerify, block.Root(), indicesNotStored(s.store.Summary(block.Root()), indices))
		if err != nil {
			return errors.Wrap(err, "entry filter")
		}
	}

	if err := s.verifyAndSave(toVerify); err != nil {
		log.Warn("Batch verification failed, bisecting columns by peer")
		return s.bisectVerification(toVerify)
	}

	s.cache.cleanup(blks...)
	return nil
}

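A hedged sketch of the intended call flow during batch sync (argument values are illustrative; the nil Bisector only matters once batch verification actually fails, at which point bisectVerification reports it as uninitialized):

// Sketch only: stash the columns that arrived with a batch, then require that
// every block's custody columns are verified and saved before proceeding.
func columnAvailabilitySketch(ctx context.Context, store *filesystem.DataColumnStorage, newVerifier verification.NewDataColumnsVerifier, nodeID enode.ID, current primitives.Slot, sidecars []blocks.RODataColumn, blks []blocks.ROBlock) error {
	avs := NewLazilyPersistentStoreColumn(store, newVerifier, nodeID, 4 /* illustrative custody group count */, nil)
	if err := avs.Persist(current, sidecars...); err != nil {
		return err
	}
	return avs.IsDataAvailable(ctx, current, blks...)
}
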
// required returns the set of column indices to check for a given block.
|
||||
func (s *LazilyPersistentStoreColumn) required(block blocks.ROBlock, current primitives.Epoch) (peerdas.ColumnIndices, error) {
|
||||
eBlk := slots.ToEpoch(block.Block().Slot())
|
||||
eFulu := params.BeaconConfig().FuluForkEpoch
|
||||
if current < eFulu || eBlk < eFulu || !params.WithinDAPeriod(eBlk, current) {
|
||||
return peerdas.NewColumnIndices(), nil
|
||||
}
|
||||
|
||||
commitments, err := block.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "blob KZG commitments")
|
||||
}
|
||||
// No DA check needed if the block has no blobs.
|
||||
if len(commitments) == 0 {
|
||||
return peerdas.NewColumnIndices(), nil
|
||||
}
|
||||
|
||||
return s.custody.required(current)
|
||||
}
|
||||
|
||||
func (s *LazilyPersistentStoreColumn) verifyAndSave(columns []blocks.RODataColumn) error {
|
||||
verified, err := s.verifyColumns(columns)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "verify columns")
|
||||
}
|
||||
if err := s.store.Save(verified); err != nil {
|
||||
return errors.Wrap(err, "save data column sidecars")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *LazilyPersistentStoreColumn) verifyColumns(columns []blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error) {
|
||||
verifier := s.newDataColumnsVerifier(columns, verification.ByRangeRequestDataColumnSidecarRequirements)
|
||||
if err := verifier.ValidFields(); err != nil {
|
||||
return nil, errors.Wrap(err, "valid fields")
|
||||
}
|
||||
if err := verifier.SidecarInclusionProven(); err != nil {
|
||||
return nil, errors.Wrap(err, "sidecar inclusion proven")
|
||||
}
|
||||
if err := verifier.SidecarKzgProofVerified(); err != nil {
|
||||
return nil, errors.Wrap(err, "sidecar KZG proof verified")
|
||||
}
|
||||
|
||||
return verifier.VerifiedRODataColumns()
|
||||
}
|
||||
|
||||
func (s *LazilyPersistentStoreColumn) bisectVerification(columns []blocks.RODataColumn) error {
|
||||
if len(columns) == 0 {
|
||||
return nil
|
||||
}
|
||||
if s.bisector == nil {
|
||||
return errors.New("bisector not initialized")
|
||||
}
|
||||
if err := s.bisector.Bisect(columns); err != nil {
|
||||
return errors.Wrap(err, "Bisector.Bisect")
|
||||
}
|
||||
for columns, err := s.bisector.Next(); columns != nil; columns, err = s.bisector.Next() {
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
if err := s.verifyAndSave(s.columnsNotStored(columns)); err != nil {
|
||||
s.bisector.OnError(err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
return s.bisector.Error()
|
||||
}
|
||||
|
||||
// columnsNotStored filters the list of ROColumnSidecars to only include those that are not found in the storage summary.
|
||||
func (s *LazilyPersistentStoreColumn) columnsNotStored(sidecars []blocks.RODataColumn) []blocks.RODataColumn {
|
||||
// We use this method to filter a set of sidecars that were previously seen to be unavailable on disk. So our base assumption
|
||||
// is that they are still available and we don't need to copy the list. Instead we make a slice of any indices that are unexpectedly
|
||||
// stored and only when we find that the storage view has changed do we need to create a new slice.
|
||||
stored := make(map[int]struct{}, 0)
|
||||
lastRoot := [32]byte{}
|
||||
var sum filesystem.DataColumnStorageSummary
|
||||
for i, sc := range sidecars {
|
||||
if sc.BlockRoot() != lastRoot {
|
||||
sum = s.store.Summary(sc.BlockRoot())
|
||||
}
|
||||
if sum.HasIndex(sc.Index) {
|
||||
stored[i] = struct{}{}
|
||||
}
|
||||
}
|
||||
// If the view on storage hasn't changed, return the original list.
|
||||
if len(stored) == 0 {
|
||||
return sidecars
|
||||
}
|
||||
shift := 0
|
||||
for i := range sidecars {
|
||||
if _, ok := stored[i]; ok {
|
||||
// If the index is stored, skip it; a later unstored sidecar will be shifted down over this slot.
// Track how many positions subsequent unstored sidecars must shift down.
|
||||
shift++
|
||||
continue
|
||||
}
|
||||
if shift > 0 {
|
||||
// If the index is not stored and we have seen stored indices,
|
||||
// we need to shift the current index down.
|
||||
sidecars[i-shift] = sidecars[i]
|
||||
}
|
||||
}
|
||||
return sidecars[:len(sidecars)-shift]
|
||||
}
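For readers skimming the diff, the filtering above relies on an in-place shift-compaction idiom: stored indices are skipped and later elements are moved down over them, so no new slice is allocated unless the storage view actually changed. A minimal, self-contained sketch of the same pattern follows (plain int slice and a hypothetical compact helper, not Prysm code):

package main

import "fmt"

// compact removes the elements whose indices appear in skip, shifting the
// remaining elements down in place and returning the shortened slice.
// Illustrative helper only; it mirrors the columnsNotStored logic above.
func compact(xs []int, skip map[int]struct{}) []int {
	if len(skip) == 0 {
		return xs // nothing to drop: return the original slice untouched
	}
	shift := 0
	for i := range xs {
		if _, ok := skip[i]; ok {
			shift++ // this slot will be overwritten by a later element
			continue
		}
		if shift > 0 {
			xs[i-shift] = xs[i]
		}
	}
	return xs[:len(xs)-shift]
}

func main() {
	fmt.Println(compact([]int{10, 11, 12, 13}, map[int]struct{}{1: {}, 3: {}})) // [10 12]
}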
|
||||
|
||||
type custodyRequirement struct {
|
||||
nodeID enode.ID
|
||||
cgc uint64 // custody group count
|
||||
current primitives.Epoch
|
||||
requirement peerdas.ColumnIndices
|
||||
}
|
||||
|
||||
func (c *custodyRequirement) required(current primitives.Epoch) (peerdas.ColumnIndices, error) {
|
||||
if c.current != current {
|
||||
peerInfo, _, err := peerdas.Info(c.nodeID, max(c.cgc, params.BeaconConfig().SamplesPerSlot))
|
||||
if err != nil {
|
||||
return peerdas.NewColumnIndices(), errors.Wrap(err, "peer info")
|
||||
}
|
||||
c.requirement = peerdas.NewColumnIndicesFromMap(peerInfo.CustodyColumns)
|
||||
c.current = current
|
||||
}
|
||||
return c.requirement, nil
|
||||
}
|
||||
302  beacon-chain/das/availability_columns_test.go  Normal file
@@ -0,0 +1,302 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
)
|
||||
|
||||
var commitments = [][]byte{
|
||||
bytesutil.PadTo([]byte("a"), 48),
|
||||
bytesutil.PadTo([]byte("b"), 48),
|
||||
bytesutil.PadTo([]byte("c"), 48),
|
||||
bytesutil.PadTo([]byte("d"), 48),
|
||||
}
|
||||
|
||||
func TestPersist(t *testing.T) {
|
||||
t.Run("no sidecars", func(t *testing.T) {
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, nil, enode.ID{}, 0, nil)
|
||||
err := lazilyPersistentStoreColumns.Persist(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
|
||||
})
|
||||
|
||||
t.Run("mixed roots", func(t *testing.T) {
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
|
||||
dataColumnParamsByBlockRoot := []util.DataColumnParam{
|
||||
{Slot: 1, Index: 1},
|
||||
{Slot: 2, Index: 2},
|
||||
}
|
||||
|
||||
roSidecars, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, nil, enode.ID{}, 0, nil)
|
||||
|
||||
err := lazilyPersistentStoreColumns.Persist(0, roSidecars...)
|
||||
require.ErrorIs(t, err, errMixedRoots)
|
||||
require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
|
||||
})
|
||||
|
||||
t.Run("outside DA period", func(t *testing.T) {
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
|
||||
dataColumnParamsByBlockRoot := []util.DataColumnParam{
|
||||
{Slot: 1, Index: 1},
|
||||
}
|
||||
|
||||
roSidecars, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, nil, enode.ID{}, 0, nil)
|
||||
|
||||
err := lazilyPersistentStoreColumns.Persist(1_000_000, roSidecars...)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
|
||||
})
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
const slot = 42
|
||||
store := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
|
||||
dataColumnParamsByBlockRoot := []util.DataColumnParam{
|
||||
{Slot: slot, Index: 1},
|
||||
{Slot: slot, Index: 5},
|
||||
}
|
||||
|
||||
roSidecars, roDataColumns := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)
|
||||
avs := NewLazilyPersistentStoreColumn(store, nil, enode.ID{}, 0, nil)
|
||||
|
||||
err := avs.Persist(slot, roSidecars...)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(avs.cache.entries))
|
||||
|
||||
key := cacheKey{slot: slot, root: roDataColumns[0].BlockRoot()}
|
||||
entry, ok := avs.cache.entries[key]
|
||||
require.Equal(t, true, ok)
|
||||
summary := store.Summary(key.root)
|
||||
// A call to Persist does NOT save the sidecars to disk.
|
||||
require.Equal(t, uint64(0), summary.Count())
|
||||
|
||||
require.DeepSSZEqual(t, roDataColumns[0], entry.scs[1])
|
||||
require.DeepSSZEqual(t, roDataColumns[1], entry.scs[5])
|
||||
|
||||
for i, roDataColumn := range entry.scs {
|
||||
if map[uint64]bool{1: true, 5: true}[i] {
|
||||
continue
|
||||
}
|
||||
|
||||
require.IsNil(t, roDataColumn)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestIsDataAvailable(t *testing.T) {
|
||||
newDataColumnsVerifier := func(dataColumnSidecars []blocks.RODataColumn, _ []verification.Requirement) verification.DataColumnsVerifier {
|
||||
return &mockDataColumnsVerifier{t: t, dataColumnSidecars: dataColumnSidecars}
|
||||
}
|
||||
|
||||
ctx := t.Context()
|
||||
|
||||
t.Run("without commitments", func(t *testing.T) {
|
||||
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
|
||||
signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)
|
||||
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, newDataColumnsVerifier, enode.ID{}, 0, nil)
|
||||
|
||||
err := lazilyPersistentStoreColumns.IsDataAvailable(ctx, 0 /*current slot*/, signedRoBlock)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("with commitments", func(t *testing.T) {
|
||||
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
|
||||
signedBeaconBlockFulu.Block.Body.BlobKzgCommitments = commitments
|
||||
signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)
|
||||
block := signedRoBlock.Block()
|
||||
slot := block.Slot()
|
||||
proposerIndex := block.ProposerIndex()
|
||||
parentRoot := block.ParentRoot()
|
||||
stateRoot := block.StateRoot()
|
||||
bodyRoot, err := block.Body().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
root := signedRoBlock.Root()
|
||||
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, newDataColumnsVerifier, enode.ID{}, 0, nil)
|
||||
|
||||
indices := [...]uint64{1, 17, 19, 42, 75, 87, 102, 117}
|
||||
dataColumnsParams := make([]util.DataColumnParam, 0, len(indices))
|
||||
for _, index := range indices {
|
||||
dataColumnParams := util.DataColumnParam{
|
||||
Index: index,
|
||||
KzgCommitments: commitments,
|
||||
|
||||
Slot: slot,
|
||||
ProposerIndex: proposerIndex,
|
||||
ParentRoot: parentRoot[:],
|
||||
StateRoot: stateRoot[:],
|
||||
BodyRoot: bodyRoot[:],
|
||||
}
|
||||
|
||||
dataColumnsParams = append(dataColumnsParams, dataColumnParams)
|
||||
}
|
||||
|
||||
_, verifiedRoDataColumns := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnsParams)
|
||||
|
||||
key := cacheKey{root: root}
|
||||
entry := lazilyPersistentStoreColumns.cache.ensure(key)
|
||||
defer lazilyPersistentStoreColumns.cache.delete(key)
|
||||
|
||||
for _, verifiedRoDataColumn := range verifiedRoDataColumns {
|
||||
err := entry.stash(verifiedRoDataColumn.RODataColumn)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
err = lazilyPersistentStoreColumns.IsDataAvailable(ctx, slot, signedRoBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
actual, err := dataColumnStorage.Get(root, indices[:])
|
||||
require.NoError(t, err)
|
||||
|
||||
summary := dataColumnStorage.Summary(root)
|
||||
require.Equal(t, uint64(len(indices)), summary.Count())
|
||||
require.DeepSSZEqual(t, verifiedRoDataColumns, actual)
|
||||
})
|
||||
}
|
||||
|
||||
func TestFullCommitmentsToCheck(t *testing.T) {
|
||||
windowSlots, err := slots.EpochEnd(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
|
||||
require.NoError(t, err)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
commitments [][]byte
|
||||
block func(*testing.T) blocks.ROBlock
|
||||
slot primitives.Slot
|
||||
}{
|
||||
{
|
||||
name: "Pre-Fulu block",
|
||||
block: func(t *testing.T) blocks.ROBlock {
|
||||
return newSignedRoBlock(t, util.NewBeaconBlockElectra())
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Commitments outside data availability window",
|
||||
block: func(t *testing.T) blocks.ROBlock {
|
||||
beaconBlockElectra := util.NewBeaconBlockElectra()
|
||||
|
||||
// Block is from slot 0, "current slot" is window size +1 (so outside the window)
|
||||
beaconBlockElectra.Block.Body.BlobKzgCommitments = commitments
|
||||
|
||||
return newSignedRoBlock(t, beaconBlockElectra)
|
||||
},
|
||||
slot: windowSlots + 1,
|
||||
},
|
||||
{
|
||||
name: "Commitments within data availability window",
|
||||
block: func(t *testing.T) blocks.ROBlock {
|
||||
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
|
||||
signedBeaconBlockFulu.Block.Body.BlobKzgCommitments = commitments
|
||||
signedBeaconBlockFulu.Block.Slot = 100
|
||||
|
||||
return newSignedRoBlock(t, signedBeaconBlockFulu)
|
||||
},
|
||||
commitments: commitments,
|
||||
slot: 100,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
b := tc.block(t)
|
||||
s := NewLazilyPersistentStoreColumn(nil, nil, enode.ID{}, numberOfColumns, nil)
|
||||
|
||||
commitmentsArray, err := s.required(b, slots.ToEpoch(tc.slot))
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, commitments := range commitmentsArray {
|
||||
require.DeepEqual(t, tc.commitments, commitments)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func newSignedRoBlock(t *testing.T, signedBeaconBlock interface{}) blocks.ROBlock {
|
||||
sb, err := blocks.NewSignedBeaconBlock(signedBeaconBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
rb, err := blocks.NewROBlock(sb)
|
||||
require.NoError(t, err)
|
||||
|
||||
return rb
|
||||
}
|
||||
|
||||
type mockDataColumnsVerifier struct {
|
||||
t *testing.T
|
||||
dataColumnSidecars []blocks.RODataColumn
|
||||
validCalled, SidecarInclusionProvenCalled, SidecarKzgProofVerifiedCalled bool
|
||||
}
|
||||
|
||||
var _ verification.DataColumnsVerifier = &mockDataColumnsVerifier{}
|
||||
|
||||
func (m *mockDataColumnsVerifier) VerifiedRODataColumns() ([]blocks.VerifiedRODataColumn, error) {
|
||||
require.Equal(m.t, true, m.validCalled && m.SidecarInclusionProvenCalled && m.SidecarKzgProofVerifiedCalled)
|
||||
|
||||
verifiedDataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, len(m.dataColumnSidecars))
|
||||
for _, dataColumnSidecar := range m.dataColumnSidecars {
|
||||
verifiedDataColumnSidecar := blocks.NewVerifiedRODataColumn(dataColumnSidecar)
|
||||
verifiedDataColumnSidecars = append(verifiedDataColumnSidecars, verifiedDataColumnSidecar)
|
||||
}
|
||||
|
||||
return verifiedDataColumnSidecars, nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) SatisfyRequirement(verification.Requirement) {}
|
||||
|
||||
func (m *mockDataColumnsVerifier) ValidFields() error {
|
||||
m.validCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) CorrectSubnet(dataColumnSidecarSubTopic string, expectedTopics []string) error {
|
||||
return nil
|
||||
}
|
||||
func (m *mockDataColumnsVerifier) NotFromFutureSlot() error { return nil }
|
||||
func (m *mockDataColumnsVerifier) SlotAboveFinalized() error { return nil }
|
||||
func (m *mockDataColumnsVerifier) ValidProposerSignature(ctx context.Context) error { return nil }
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarParentSeen(parentSeen func([fieldparams.RootLength]byte) bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarParentValid(badParent func([fieldparams.RootLength]byte) bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarParentSlotLower() error { return nil }
|
||||
func (m *mockDataColumnsVerifier) SidecarDescendsFromFinalized() error { return nil }
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarInclusionProven() error {
|
||||
m.SidecarInclusionProvenCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarKzgProofVerified() error {
|
||||
m.SidecarKzgProofVerifiedCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarProposerExpected(ctx context.Context) error { return nil }
|
||||
15  beacon-chain/das/bisect.go  Normal file
@@ -0,0 +1,15 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var ErrBisectInconsistent = errors.New("state of bisector inconsistent with columns to bisect")
|
||||
|
||||
type Bisector interface {
|
||||
Bisect([]blocks.RODataColumn) error
|
||||
Next() ([]blocks.RODataColumn, error)
|
||||
OnError(error)
|
||||
Error() error
|
||||
}
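To make the Bisector contract concrete, here is a deliberately trivial sketch of an implementation that yields the whole batch once and records the first error reported via OnError. It only illustrates the call sequence bisectVerification relies on (Bisect, then a Next loop, then Error); the real bisector wired into backfill may split batches very differently. It assumes the same imports as bisect.go.

// wholeBatchBisector is an illustrative sketch, not the implementation used by Prysm.
type wholeBatchBisector struct {
	pending [][]blocks.RODataColumn
	err     error
}

func (b *wholeBatchBisector) Bisect(cols []blocks.RODataColumn) error {
	b.pending = append(b.pending, cols)
	return nil
}

func (b *wholeBatchBisector) Next() ([]blocks.RODataColumn, error) {
	if len(b.pending) == 0 {
		return nil, nil // a nil slice tells the caller to stop iterating
	}
	next := b.pending[0]
	b.pending = b.pending[1:]
	return next, nil
}

func (b *wholeBatchBisector) OnError(err error) {
	if b.err == nil {
		b.err = err // keep only the first failure
	}
}

func (b *wholeBatchBisector) Error() error { return b.err }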
|
||||
@@ -1,9 +1,7 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"slices"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
@@ -29,103 +27,75 @@ func newDataColumnCache() *dataColumnCache {
|
||||
func (c *dataColumnCache) ensure(key cacheKey) *dataColumnCacheEntry {
|
||||
entry, ok := c.entries[key]
|
||||
if !ok {
|
||||
entry = &dataColumnCacheEntry{}
|
||||
entry = newDataColumnCacheEntry()
|
||||
c.entries[key] = entry
|
||||
}
|
||||
|
||||
return entry
|
||||
}
|
||||
|
||||
func (c *dataColumnCache) cleanup(blks ...blocks.ROBlock) {
|
||||
for _, block := range blks {
|
||||
key := cacheKey{slot: block.Block().Slot(), root: block.Root()}
|
||||
c.delete(key)
|
||||
}
|
||||
}
|
||||
|
||||
// delete removes the cache entry from the cache.
|
||||
func (c *dataColumnCache) delete(key cacheKey) {
|
||||
delete(c.entries, key)
|
||||
}
|
||||
|
||||
// dataColumnCacheEntry holds a fixed-length cache of BlobSidecars.
|
||||
type dataColumnCacheEntry struct {
|
||||
scs [fieldparams.NumberOfColumns]*blocks.RODataColumn
|
||||
diskSummary filesystem.DataColumnStorageSummary
|
||||
func (c *dataColumnCache) stash(sc blocks.RODataColumn) error {
|
||||
key := cacheKey{slot: sc.Slot(), root: sc.BlockRoot()}
|
||||
entry := c.ensure(key)
|
||||
return entry.stash(sc)
|
||||
}
|
||||
|
||||
func (e *dataColumnCacheEntry) setDiskSummary(sum filesystem.DataColumnStorageSummary) {
|
||||
e.diskSummary = sum
|
||||
func newDataColumnCacheEntry() *dataColumnCacheEntry {
|
||||
return &dataColumnCacheEntry{scs: make(map[uint64]blocks.RODataColumn)}
|
||||
}
|
||||
|
||||
// dataColumnCacheEntry holds a fixed-length cache of BlobSidecars.
|
||||
type dataColumnCacheEntry struct {
|
||||
scs map[uint64]blocks.RODataColumn
|
||||
}
|
||||
|
||||
// stash adds an item to the in-memory cache of DataColumnSidecars.
|
||||
// Only the first DataColumnSidecar of a given Index will be kept in the cache.
|
||||
// stash will return an error if the given data colunn is already in the cache, or if the Index is out of bounds.
|
||||
func (e *dataColumnCacheEntry) stash(sc *blocks.RODataColumn) error {
|
||||
// stash will return an error if the given data column is already in the cache, or if the Index is out of bounds.
|
||||
func (e *dataColumnCacheEntry) stash(sc blocks.RODataColumn) error {
|
||||
if sc.Index >= fieldparams.NumberOfColumns {
|
||||
return errors.Wrapf(errColumnIndexTooHigh, "index=%d", sc.Index)
|
||||
}
|
||||
|
||||
if e.scs[sc.Index] != nil {
|
||||
return errors.Wrapf(ErrDuplicateSidecar, "root=%#x, index=%d, commitment=%#x", sc.BlockRoot(), sc.Index, sc.KzgCommitments)
|
||||
}
|
||||
|
||||
e.scs[sc.Index] = sc
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *dataColumnCacheEntry) filter(root [32]byte, commitmentsArray *safeCommitmentsArray) ([]blocks.RODataColumn, error) {
|
||||
nonEmptyIndices := commitmentsArray.nonEmptyIndices()
|
||||
if e.diskSummary.AllAvailable(nonEmptyIndices) {
|
||||
return nil, nil
|
||||
// append appends the cached sidecars for the requested root and indices to the given sidecars slice and returns the result.
// If any of the given indices are missing, an error will be returned and the sidecars slice will be unchanged.
|
||||
func (e *dataColumnCacheEntry) append(sidecars []blocks.RODataColumn, root [32]byte, indices peerdas.ColumnIndices) ([]blocks.RODataColumn, error) {
|
||||
needed := indices.ToMap()
|
||||
for col := range needed {
|
||||
_, ok := e.scs[col]
|
||||
if !ok {
|
||||
return nil, errors.Wrapf(errMissingSidecar, "root=%#x, index=%#x", root, col)
|
||||
}
|
||||
}
|
||||
|
||||
commitmentsCount := commitmentsArray.count()
|
||||
sidecars := make([]blocks.RODataColumn, 0, commitmentsCount)
|
||||
|
||||
for i := range nonEmptyIndices {
|
||||
if e.diskSummary.HasIndex(i) {
|
||||
continue
|
||||
}
|
||||
|
||||
if e.scs[i] == nil {
|
||||
return nil, errors.Wrapf(errMissingSidecar, "root=%#x, index=%#x", root, i)
|
||||
}
|
||||
|
||||
if !sliceBytesEqual(commitmentsArray[i], e.scs[i].KzgCommitments) {
|
||||
return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, block commitment=%#x", root, i, e.scs[i].KzgCommitments, commitmentsArray[i])
|
||||
}
|
||||
|
||||
sidecars = append(sidecars, *e.scs[i])
|
||||
// Loop twice so we can avoid touching the slice if any of the columns are missing.
|
||||
for col := range needed {
|
||||
sidecars = append(sidecars, e.scs[col])
|
||||
}
|
||||
|
||||
return sidecars, nil
|
||||
}
|
||||
|
||||
// safeCommitmentsArray is a fixed size array of commitments.
|
||||
// This is helpful for avoiding gratuitous bounds checks.
|
||||
type safeCommitmentsArray [fieldparams.NumberOfColumns][][]byte
|
||||
|
||||
// count returns the number of commitments in the array.
|
||||
func (s *safeCommitmentsArray) count() int {
|
||||
count := 0
|
||||
|
||||
for i := range s {
|
||||
if s[i] != nil {
|
||||
count++
|
||||
// indicesNotStored filters the list of indices to only include those that are not found in the storage summary.
|
||||
func indicesNotStored(sum filesystem.DataColumnStorageSummary, indices peerdas.ColumnIndices) peerdas.ColumnIndices {
|
||||
indices = indices.Copy()
|
||||
for col := range indices {
|
||||
if sum.HasIndex(col) {
|
||||
indices.Unset(col)
|
||||
}
|
||||
}
|
||||
|
||||
return count
|
||||
}
|
||||
|
||||
// nonEmptyIndices returns a map of indices that are non-nil in the array.
|
||||
func (s *safeCommitmentsArray) nonEmptyIndices() map[uint64]bool {
|
||||
columns := make(map[uint64]bool)
|
||||
|
||||
for i := range s {
|
||||
if s[i] != nil {
|
||||
columns[uint64(i)] = true
|
||||
}
|
||||
}
|
||||
|
||||
return columns
|
||||
}
|
||||
|
||||
func sliceBytesEqual(a, b [][]byte) bool {
|
||||
return slices.EqualFunc(a, b, bytes.Equal)
|
||||
return indices
|
||||
}
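The cache entry's append and indicesNotStored work together as a check-then-copy pattern: first drop indices that are already on disk, then verify every remaining index is in the cache, and only then grow the output slice so a partial result never leaks out. A self-contained sketch of the same idea using plain maps (hypothetical appendMissing helper, not Prysm code):

package main

import (
	"errors"
	"fmt"
)

// appendMissing appends the cached values for every index in need that is not
// already marked stored. If any needed index is absent from the cache, it
// returns an error without touching out.
func appendMissing(out []string, cache map[uint64]string, stored map[uint64]bool, need []uint64) ([]string, error) {
	// First pass: drop already-stored indices and verify the rest are cached.
	missing := make([]uint64, 0, len(need))
	for _, idx := range need {
		if stored[idx] {
			continue
		}
		if _, ok := cache[idx]; !ok {
			return nil, errors.New("missing sidecar in cache")
		}
		missing = append(missing, idx)
	}
	// Second pass: only now is it safe to grow the output slice.
	for _, idx := range missing {
		out = append(out, cache[idx])
	}
	return out, nil
}

func main() {
	cache := map[uint64]string{1: "col-1", 3: "col-3"}
	stored := map[uint64]bool{1: true}
	got, err := appendMissing(nil, cache, stored, []uint64{1, 3})
	fmt.Println(got, err) // [col-3] <nil>
}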
|
||||
|
||||
@@ -3,6 +3,7 @@ package das
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
@@ -16,10 +17,8 @@ func TestEnsureDeleteSetDiskSummary(t *testing.T) {
|
||||
entry := c.ensure(key)
|
||||
require.DeepEqual(t, dataColumnCacheEntry{}, *entry)
|
||||
|
||||
diskSummary := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{true})
|
||||
entry.setDiskSummary(diskSummary)
|
||||
entry = c.ensure(key)
|
||||
require.DeepEqual(t, dataColumnCacheEntry{diskSummary: diskSummary}, *entry)
|
||||
require.DeepEqual(t, dataColumnCacheEntry{}, *entry)
|
||||
|
||||
c.delete(key)
|
||||
entry = c.ensure(key)
|
||||
@@ -28,10 +27,10 @@ func TestEnsureDeleteSetDiskSummary(t *testing.T) {
|
||||
|
||||
func TestStash(t *testing.T) {
|
||||
t.Run("Index too high", func(t *testing.T) {
|
||||
roDataColumns, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: 10_000}})
|
||||
columns, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: 10_000}})
|
||||
|
||||
var entry dataColumnCacheEntry
|
||||
err := entry.stash(&roDataColumns[0])
|
||||
err := entry.stash(columns[0])
|
||||
require.NotNil(t, err)
|
||||
})
|
||||
|
||||
@@ -39,98 +38,47 @@ func TestStash(t *testing.T) {
|
||||
roDataColumns, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: 1}})
|
||||
|
||||
var entry dataColumnCacheEntry
|
||||
err := entry.stash(&roDataColumns[0])
|
||||
err := entry.stash(roDataColumns[0])
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepEqual(t, roDataColumns[0], entry.scs[1])
|
||||
|
||||
err = entry.stash(&roDataColumns[0])
|
||||
err = entry.stash(roDataColumns[0])
|
||||
require.NotNil(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestFilterDataColumns(t *testing.T) {
|
||||
func TestAppendDataColumns(t *testing.T) {
|
||||
t.Run("All available", func(t *testing.T) {
|
||||
commitmentsArray := safeCommitmentsArray{nil, [][]byte{[]byte{1}}, nil, [][]byte{[]byte{3}}}
|
||||
|
||||
diskSummary := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{false, true, false, true})
|
||||
|
||||
dataColumnCacheEntry := dataColumnCacheEntry{diskSummary: diskSummary}
|
||||
|
||||
actual, err := dataColumnCacheEntry.filter([fieldparams.RootLength]byte{}, &commitmentsArray)
|
||||
sum := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{false, true, false, true})
|
||||
notStored := indicesNotStored(sum, peerdas.NewColumnIndicesFromSlice([]uint64{1, 3}))
|
||||
actual, err := newDataColumnCacheEntry().append([]blocks.RODataColumn{}, [fieldparams.RootLength]byte{}, notStored)
|
||||
require.NoError(t, err)
|
||||
require.IsNil(t, actual)
|
||||
})
|
||||
|
||||
t.Run("Some scs missing", func(t *testing.T) {
|
||||
commitmentsArray := safeCommitmentsArray{nil, [][]byte{[]byte{1}}}
|
||||
sum := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{})
|
||||
|
||||
diskSummary := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{})
|
||||
|
||||
dataColumnCacheEntry := dataColumnCacheEntry{diskSummary: diskSummary}
|
||||
|
||||
_, err := dataColumnCacheEntry.filter([fieldparams.RootLength]byte{}, &commitmentsArray)
|
||||
require.NotNil(t, err)
|
||||
})
|
||||
|
||||
t.Run("Commitments not equal", func(t *testing.T) {
|
||||
commitmentsArray := safeCommitmentsArray{nil, [][]byte{[]byte{1}}}
|
||||
|
||||
roDataColumns, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: 1}})
|
||||
|
||||
var scs [fieldparams.NumberOfColumns]*blocks.RODataColumn
|
||||
scs[1] = &roDataColumns[0]
|
||||
|
||||
dataColumnCacheEntry := dataColumnCacheEntry{scs: scs}
|
||||
|
||||
_, err := dataColumnCacheEntry.filter(roDataColumns[0].BlockRoot(), &commitmentsArray)
|
||||
notStored := indicesNotStored(sum, peerdas.NewColumnIndicesFromSlice([]uint64{1}))
|
||||
actual, err := newDataColumnCacheEntry().append([]blocks.RODataColumn{}, [fieldparams.RootLength]byte{}, notStored)
|
||||
require.Equal(t, 0, len(actual))
|
||||
require.NotNil(t, err)
|
||||
})
|
||||
|
||||
t.Run("Nominal", func(t *testing.T) {
|
||||
commitmentsArray := safeCommitmentsArray{nil, [][]byte{[]byte{1}}, nil, [][]byte{[]byte{3}}}
|
||||
diskSummary := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{false, true})
|
||||
indices := peerdas.NewColumnIndicesFromSlice([]uint64{1, 3})
|
||||
expected, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: 3, KzgCommitments: [][]byte{[]byte{3}}}})
|
||||
|
||||
var scs [fieldparams.NumberOfColumns]*blocks.RODataColumn
|
||||
scs[3] = &expected[0]
|
||||
scs := map[uint64]blocks.RODataColumn{
|
||||
3: expected[0],
|
||||
}
|
||||
sum := filesystem.NewDataColumnStorageSummary(42, [fieldparams.NumberOfColumns]bool{false, true})
|
||||
entry := dataColumnCacheEntry{scs: scs}
|
||||
|
||||
dataColumnCacheEntry := dataColumnCacheEntry{scs: scs, diskSummary: diskSummary}
|
||||
|
||||
actual, err := dataColumnCacheEntry.filter(expected[0].BlockRoot(), &commitmentsArray)
|
||||
actual, err := entry.append([]blocks.RODataColumn{}, expected[0].BlockRoot(), indicesNotStored(sum, indices))
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepEqual(t, expected, actual)
|
||||
})
|
||||
}
|
||||
|
||||
func TestCount(t *testing.T) {
|
||||
s := safeCommitmentsArray{nil, [][]byte{[]byte{1}}, nil, [][]byte{[]byte{3}}}
|
||||
require.Equal(t, 2, s.count())
|
||||
}
|
||||
|
||||
func TestNonEmptyIndices(t *testing.T) {
|
||||
s := safeCommitmentsArray{nil, [][]byte{[]byte{10}}, nil, [][]byte{[]byte{20}}}
|
||||
actual := s.nonEmptyIndices()
|
||||
require.DeepEqual(t, map[uint64]bool{1: true, 3: true}, actual)
|
||||
}
|
||||
|
||||
func TestSliceBytesEqual(t *testing.T) {
|
||||
t.Run("Different lengths", func(t *testing.T) {
|
||||
a := [][]byte{[]byte{1, 2, 3}}
|
||||
b := [][]byte{[]byte{1, 2, 3}, []byte{4, 5, 6}}
|
||||
require.Equal(t, false, sliceBytesEqual(a, b))
|
||||
})
|
||||
|
||||
t.Run("Same length but different content", func(t *testing.T) {
|
||||
a := [][]byte{[]byte{1, 2, 3}, []byte{4, 5, 6}}
|
||||
b := [][]byte{[]byte{1, 2, 3}, []byte{4, 5, 7}}
|
||||
require.Equal(t, false, sliceBytesEqual(a, b))
|
||||
})
|
||||
|
||||
t.Run("Equal slices", func(t *testing.T) {
|
||||
a := [][]byte{[]byte{1, 2, 3}, []byte{4, 5, 6}}
|
||||
b := [][]byte{[]byte{1, 2, 3}, []byte{4, 5, 6}}
|
||||
require.Equal(t, true, sliceBytesEqual(a, b))
|
||||
})
|
||||
}
|
||||
|
||||
@@ -14,6 +14,12 @@ import (
|
||||
// IsDataAvailable guarantees that all blobs committed to in the block have been
|
||||
// durably persisted before returning a non-error value.
|
||||
type AvailabilityStore interface {
|
||||
IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
|
||||
AvailabilityChecker
|
||||
Persist(current primitives.Slot, blobSidecar ...blocks.ROBlob) error
|
||||
}
|
||||
|
||||
// AvailabilityChecker is the minimum interface needed to check if data is available for a block.
|
||||
// We should prefer this interface over AvailabilityStore in places where we don't need to persist blob data.
|
||||
type AvailabilityChecker interface {
|
||||
IsDataAvailable(ctx context.Context, current primitives.Slot, b ...blocks.ROBlock) error
|
||||
}
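As the comment suggests, components that only need to check availability should depend on the narrower interface. A hypothetical consumer might look like the sketch below; waitForData is illustrative and not part of the codebase, and the import paths follow the das package layout shown in this diff.

// waitForData depends only on AvailabilityChecker, so it can be wired to any
// availability implementation (blob-based or data-column-based) in tests and production.
func waitForData(ctx context.Context, checker das.AvailabilityChecker, current primitives.Slot, blk blocks.ROBlock) error {
	return checker.IsDataAvailable(ctx, current, blk)
}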
|
||||
|
||||
@@ -9,16 +9,16 @@ import (
|
||||
|
||||
// MockAvailabilityStore is an implementation of AvailabilityStore that can be used by other packages in tests.
|
||||
type MockAvailabilityStore struct {
|
||||
VerifyAvailabilityCallback func(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
|
||||
VerifyAvailabilityCallback func(ctx context.Context, current primitives.Slot, b ...blocks.ROBlock) error
|
||||
PersistBlobsCallback func(current primitives.Slot, blobSidecar ...blocks.ROBlob) error
|
||||
}
|
||||
|
||||
var _ AvailabilityStore = &MockAvailabilityStore{}
|
||||
var _ AvailabilityChecker = &MockAvailabilityStore{}
|
||||
|
||||
// IsDataAvailable satisfies the corresponding method of the AvailabilityStore interface in a way that is useful for tests.
|
||||
func (m *MockAvailabilityStore) IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
|
||||
func (m *MockAvailabilityStore) IsDataAvailable(ctx context.Context, current primitives.Slot, b ...blocks.ROBlock) error {
|
||||
if m.VerifyAvailabilityCallback != nil {
|
||||
return m.VerifyAvailabilityCallback(ctx, current, b)
|
||||
return m.VerifyAvailabilityCallback(ctx, current, b...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -941,6 +941,7 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
|
||||
FinalizationFetcher: chainService,
|
||||
BlockReceiver: chainService,
|
||||
BlobReceiver: chainService,
|
||||
DataColumnReceiver: chainService,
|
||||
AttestationReceiver: chainService,
|
||||
GenesisTimeFetcher: chainService,
|
||||
GenesisFetcher: chainService,
|
||||
@@ -1110,7 +1111,7 @@ func (b *BeaconNode) registerPrunerService(cliCtx *cli.Context) error {
|
||||
|
||||
func (b *BeaconNode) RegisterBackfillService(cliCtx *cli.Context, bfs *backfill.Store) error {
|
||||
pa := peers.NewAssigner(b.fetchP2P().Peers(), b.forkChoicer)
|
||||
bf, err := backfill.NewService(cliCtx.Context, bfs, b.BlobStorage, b.clockWaiter, b.fetchP2P(), pa, b.BackfillOpts...)
|
||||
bf, err := backfill.NewService(cliCtx.Context, bfs, b.BlobStorage, b.DataColumnStorage, b.clockWaiter, b.fetchP2P(), pa, b.BackfillOpts...)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error initializing backfill service")
|
||||
}
|
||||
|
||||
@@ -155,6 +155,7 @@ func (s *Service) custodyGroupCountFromPeerENR(pid peer.ID) uint64 {
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"peerID": pid,
|
||||
"defaultValue": custodyRequirement,
|
||||
"agent": agentString(pid, s.Host()),
|
||||
})
|
||||
|
||||
// Retrieve the ENR of the peer.
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
|
||||
testp2p "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/wrapper"
|
||||
@@ -269,6 +270,7 @@ func TestCustodyGroupCountFromPeer(t *testing.T) {
|
||||
service := &Service{
|
||||
peers: peers,
|
||||
metaData: tc.metadata,
|
||||
host: testp2p.NewTestP2P(t).Host(),
|
||||
}
|
||||
|
||||
// Retrieve the custody count from the remote peer.
|
||||
@@ -329,6 +331,7 @@ func TestCustodyGroupCountFromPeerENR(t *testing.T) {
|
||||
|
||||
service := &Service{
|
||||
peers: peers,
|
||||
host: testp2p.NewTestP2P(t).Host(),
|
||||
}
|
||||
|
||||
actual := service.custodyGroupCountFromPeerENR(pid)
|
||||
|
||||
@@ -684,7 +684,7 @@ func (s *Service) filterPeer(node *enode.Node) bool {
|
||||
|
||||
peerData, multiAddrs, err := convertToAddrInfo(node)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Could not convert to peer data")
|
||||
log.WithError(err).WithField("node", node.String()).Debug("Could not convert to peer data")
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -851,7 +851,7 @@ func convertToMultiAddr(nodes []*enode.Node) []ma.Multiaddr {
|
||||
func convertToAddrInfo(node *enode.Node) (*peer.AddrInfo, []ma.Multiaddr, error) {
|
||||
multiAddrs, err := retrieveMultiAddrsFromNode(node)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, nil, errors.Wrap(err, "retrieve multiaddrs from node")
|
||||
}
|
||||
|
||||
if len(multiAddrs) == 0 {
|
||||
|
||||
@@ -42,7 +42,7 @@ func (a *Assigner) freshPeers() ([]peer.ID, error) {
|
||||
if flags.Get().MinimumSyncPeers < required {
|
||||
required = flags.Get().MinimumSyncPeers
|
||||
}
|
||||
_, peers := a.ps.BestFinalized(params.BeaconConfig().MaxPeersToSync, a.fc.FinalizedCheckpoint().Epoch)
|
||||
_, peers := a.ps.BestFinalized(-1, a.fc.FinalizedCheckpoint().Epoch)
|
||||
if len(peers) < required {
|
||||
log.WithFields(logrus.Fields{
|
||||
"suitable": len(peers),
|
||||
@@ -52,27 +52,33 @@ func (a *Assigner) freshPeers() ([]peer.ID, error) {
|
||||
return peers, nil
|
||||
}
|
||||
|
||||
type AssignmentFilter func([]peer.ID) []peer.ID
|
||||
|
||||
// Assign uses the "BestFinalized" method to select the best peers that agree on a canonical block
|
||||
// for the configured finalized epoch, then applies the given filter to the result. A filter such as
// NotBusy can be used to exclude peers we don't want to use right now, for instance to limit
// the number of outbound requests to each peer from a given component.
|
||||
func (a *Assigner) Assign(busy map[peer.ID]bool, n int) ([]peer.ID, error) {
|
||||
func (a *Assigner) Assign(filter AssignmentFilter) ([]peer.ID, error) {
|
||||
best, err := a.freshPeers()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return pickBest(busy, n, best), nil
|
||||
return filter(best), nil
|
||||
}
|
||||
|
||||
func pickBest(busy map[peer.ID]bool, n int, best []peer.ID) []peer.ID {
|
||||
ps := make([]peer.ID, 0, n)
|
||||
for _, p := range best {
|
||||
if len(ps) == n {
|
||||
return ps
|
||||
}
|
||||
if !busy[p] {
|
||||
ps = append(ps, p)
|
||||
// NotBusy is a filter that returns a list of peer.IDs with len() <= n, none of which are in the `busy` map.
// n <= 0 will return all peers that are not busy.
|
||||
func NotBusy(busy map[peer.ID]bool, n int) AssignmentFilter {
|
||||
return func(peers []peer.ID) []peer.ID {
|
||||
ps := make([]peer.ID, 0)
|
||||
for _, p := range peers {
|
||||
if n > 0 && len(ps) == n {
|
||||
return ps
|
||||
}
|
||||
if !busy[p] {
|
||||
ps = append(ps, p)
|
||||
}
|
||||
}
|
||||
return ps
|
||||
}
|
||||
return ps
|
||||
}
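A sketch of how a sync component might call the new filter-based Assign, assuming the filter lives in the peers package as this diff suggests; requestPeers, busy, and fetchFrom are hypothetical names used for illustration only.

// requestPeers asks the assigner for up to 4 suitable peers that we are not
// already downloading from, then dispatches work to each of them.
func requestPeers(assigner *peers.Assigner, busy map[peer.ID]bool) error {
	assigned, err := assigner.Assign(peers.NotBusy(busy, 4))
	if err != nil {
		return err
	}
	for _, pid := range assigned {
		go fetchFrom(pid) // fetchFrom is a hypothetical worker dispatch
	}
	return nil
}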
|
||||
|
||||
@@ -18,8 +18,9 @@ func TestPickBest(t *testing.T) {
|
||||
expected []peer.ID
|
||||
}{
|
||||
{
|
||||
name: "",
|
||||
n: 0,
|
||||
name: "don't limit",
|
||||
n: 0,
|
||||
expected: best,
|
||||
},
|
||||
{
|
||||
name: "none busy",
|
||||
@@ -88,7 +89,8 @@ func TestPickBest(t *testing.T) {
|
||||
if c.best == nil {
|
||||
c.best = best
|
||||
}
|
||||
pb := pickBest(c.busy, c.n, c.best)
|
||||
filt := NotBusy(c.busy, c.n)
|
||||
pb := filt(c.best)
|
||||
require.Equal(t, len(c.expected), len(pb))
|
||||
for i := range c.expected {
|
||||
require.Equal(t, c.expected[i], pb[i])
|
||||
|
||||
@@ -705,76 +705,59 @@ func (p *Status) deprecatedPrune() {
|
||||
p.tallyIPTracker()
|
||||
}
|
||||
|
||||
// BestFinalized returns the highest finalized epoch equal to or higher than `ourFinalizedEpoch`
|
||||
// that is agreed upon by the majority of peers, and the peers agreeing on this finalized epoch.
|
||||
// This method may not return the absolute highest finalized epoch, but the finalized epoch in which
|
||||
// most peers can serve blocks (plurality voting). Ideally, all peers would be reporting the same
|
||||
// finalized epoch but some may be behind due to their own latency, or because of their finalized
|
||||
// epoch at the time we queried them.
|
||||
func (p *Status) BestFinalized(maxPeers int, ourFinalizedEpoch primitives.Epoch) (primitives.Epoch, []peer.ID) {
|
||||
// Retrieve all connected peers.
|
||||
// BestFinalized groups all peers by their last known finalized epoch
|
||||
// and selects the epoch of the largest group as best.
|
||||
// Any peer with a finalized epoch < ourFinalizedEpoch is excluded from consideration.
|
||||
// In the event of a tie in largest group size, the higher epoch is the tie breaker.
|
||||
// The selected epoch is returned, along with a list of peers with a finalized epoch >= the selected epoch.
|
||||
// `maxPeers` > 0: truncate the []peer.ID result at maxPeers, ie best[:maxPeers]
|
||||
// `maxPeers` <= 0: return all peers in agreement, ie best[:]
|
||||
func (p *Status) BestFinalized(maxPeers int, ourFinalized primitives.Epoch) (primitives.Epoch, []peer.ID) {
|
||||
connected := p.Connected()
|
||||
pids := make([]peer.ID, 0, len(connected))
|
||||
views := make(map[peer.ID]*pb.StatusV2, len(connected))
|
||||
|
||||
// key: finalized epoch, value: number of peers that support this finalized epoch.
|
||||
finalizedEpochVotes := make(map[primitives.Epoch]uint64)
|
||||
|
||||
// key: peer ID, value: finalized epoch of the peer.
|
||||
pidEpoch := make(map[peer.ID]primitives.Epoch, len(connected))
|
||||
|
||||
// key: peer ID, value: head slot of the peer.
|
||||
pidHead := make(map[peer.ID]primitives.Slot, len(connected))
|
||||
|
||||
potentialPIDs := make([]peer.ID, 0, len(connected))
|
||||
votes := make(map[primitives.Epoch]uint64)
|
||||
winner := primitives.Epoch(0)
|
||||
for _, pid := range connected {
|
||||
peerChainState, err := p.ChainState(pid)
|
||||
|
||||
// Skip if the peer's finalized epoch is not defined, or if the peer's finalized epoch is
|
||||
// lower than ours.
|
||||
if err != nil || peerChainState == nil || peerChainState.FinalizedEpoch < ourFinalizedEpoch {
|
||||
view, err := p.ChainState(pid)
|
||||
if err != nil || view == nil || view.FinalizedEpoch < ourFinalized {
|
||||
continue
|
||||
}
|
||||
pids = append(pids, pid)
|
||||
views[pid] = view
|
||||
|
||||
finalizedEpochVotes[peerChainState.FinalizedEpoch]++
|
||||
|
||||
pidEpoch[pid] = peerChainState.FinalizedEpoch
|
||||
pidHead[pid] = peerChainState.HeadSlot
|
||||
|
||||
potentialPIDs = append(potentialPIDs, pid)
|
||||
}
|
||||
|
||||
// Select the target epoch, which is the epoch most peers agree upon.
|
||||
// If there is a tie, select the highest epoch.
|
||||
targetEpoch, mostVotes := primitives.Epoch(0), uint64(0)
|
||||
for epoch, count := range finalizedEpochVotes {
|
||||
if count > mostVotes || (count == mostVotes && epoch > targetEpoch) {
|
||||
mostVotes = count
|
||||
targetEpoch = epoch
|
||||
votes[view.FinalizedEpoch]++
|
||||
if winner == 0 {
|
||||
winner = view.FinalizedEpoch
|
||||
continue
|
||||
}
|
||||
e, v := view.FinalizedEpoch, votes[view.FinalizedEpoch]
|
||||
if v > votes[winner] || (v == votes[winner] && e > winner) {
|
||||
winner = e
|
||||
}
|
||||
}
|
||||
|
||||
// Sort PIDs by finalized (epoch, head), in decreasing order.
|
||||
sort.Slice(potentialPIDs, func(i, j int) bool {
|
||||
if pidEpoch[potentialPIDs[i]] == pidEpoch[potentialPIDs[j]] {
|
||||
return pidHead[potentialPIDs[i]] > pidHead[potentialPIDs[j]]
|
||||
// Descending sort by (finalized, head).
|
||||
sort.Slice(pids, func(i, j int) bool {
|
||||
iv, jv := views[pids[i]], views[pids[j]]
|
||||
if iv.FinalizedEpoch == jv.FinalizedEpoch {
|
||||
return iv.HeadSlot > jv.HeadSlot
|
||||
}
|
||||
|
||||
return pidEpoch[potentialPIDs[i]] > pidEpoch[potentialPIDs[j]]
|
||||
return iv.FinalizedEpoch > jv.FinalizedEpoch
|
||||
})
|
||||
|
||||
// Trim potential peers to those on or after target epoch.
|
||||
for i, pid := range potentialPIDs {
|
||||
if pidEpoch[pid] < targetEpoch {
|
||||
potentialPIDs = potentialPIDs[:i]
|
||||
break
|
||||
}
|
||||
}
|
||||
// Find the first peer with finalized epoch < winner; trim it and all following (lower) peers.
|
||||
trim := sort.Search(len(pids), func(i int) bool {
|
||||
return views[pids[i]].FinalizedEpoch < winner
|
||||
})
|
||||
pids = pids[:trim]
|
||||
|
||||
// Trim potential peers to at most maxPeers.
|
||||
if len(potentialPIDs) > maxPeers {
|
||||
potentialPIDs = potentialPIDs[:maxPeers]
|
||||
if maxPeers > 0 && len(pids) > maxPeers {
|
||||
return winner, pids[:maxPeers]
|
||||
}
|
||||
|
||||
return targetEpoch, potentialPIDs
|
||||
return winner, pids
|
||||
}
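The vote counting and tie-breaking rule above can be hard to read inline, so here is a toy, self-contained illustration of the same selection logic (not Prysm code): the epoch with the most votes wins, and on a tie the higher epoch is preferred.

package main

import "fmt"

// pickWinner returns the finalized epoch reported by the largest group of
// peers, breaking ties in favor of the higher epoch.
func pickWinner(finalizedByPeer map[string]uint64) uint64 {
	votes := map[uint64]int{}
	winner := uint64(0)
	for _, e := range finalizedByPeer {
		votes[e]++
		if votes[e] > votes[winner] || (votes[e] == votes[winner] && e > winner) {
			winner = e
		}
	}
	return winner
}

func main() {
	// Two peers at epoch 9, two at epoch 10: tie on count, so the higher epoch wins.
	fmt.Println(pickWinner(map[string]uint64{"a": 9, "b": 9, "c": 10, "d": 10})) // 10
}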
|
||||
|
||||
// BestNonFinalized returns the highest known epoch, higher than ours,
|
||||
|
||||
@@ -63,6 +63,7 @@ type TestP2P struct {
|
||||
custodyInfoMut sync.RWMutex // protects custodyGroupCount and earliestAvailableSlot
|
||||
earliestAvailableSlot primitives.Slot
|
||||
custodyGroupCount uint64
|
||||
enr *enr.Record
|
||||
}
|
||||
|
||||
// NewTestP2P initializes a new p2p test service.
|
||||
@@ -103,6 +104,7 @@ func NewTestP2P(t *testing.T, userOptions ...config.Option) *TestP2P {
|
||||
pubsub: ps,
|
||||
joinedTopics: map[string]*pubsub.Topic{},
|
||||
peers: peerStatuses,
|
||||
enr: new(enr.Record),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -310,8 +312,8 @@ func (p *TestP2P) Host() host.Host {
|
||||
}
|
||||
|
||||
// ENR returns the enr of the local peer.
|
||||
func (*TestP2P) ENR() *enr.Record {
|
||||
return new(enr.Record)
|
||||
func (p *TestP2P) ENR() *enr.Record {
|
||||
return p.enr
|
||||
}
|
||||
|
||||
// NodeID returns the node id of the local peer.
|
||||
|
||||
@@ -18,6 +18,7 @@ go_library(
|
||||
"//api/server:go_default_library",
|
||||
"//api/server/structs:go_default_library",
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/cache/depositsnapshot:go_default_library",
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
@@ -60,7 +61,6 @@ go_library(
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//crypto/kzg4844:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
@@ -84,6 +84,7 @@ go_test(
|
||||
"//api:go_default_library",
|
||||
"//api/server:go_default_library",
|
||||
"//api/server/structs:go_default_library",
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/cache/depositsnapshot:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
@@ -124,7 +125,6 @@ go_test(
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_fastssz//:go_default_library",
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/api"
|
||||
"github.com/OffchainLabs/prysm/v6/api/server/structs"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache/depositsnapshot"
|
||||
corehelpers "github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
|
||||
@@ -32,7 +33,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/crypto/kzg4844"
|
||||
"github.com/pkg/errors"
|
||||
ssz "github.com/prysmaticlabs/fastssz"
|
||||
"github.com/sirupsen/logrus"
|
||||
@@ -942,14 +942,13 @@ func decodePhase0JSON(body []byte) (*eth.GenericSignedBeaconBlock, error) {
|
||||
// broadcastSidecarsIfSupported broadcasts blob sidecars when an equivocated block occurs.
|
||||
func broadcastSidecarsIfSupported(ctx context.Context, s *Server, b interfaces.SignedBeaconBlock, gb *eth.GenericSignedBeaconBlock, versionHeader string) error {
|
||||
switch versionHeader {
|
||||
case version.String(version.Fulu):
|
||||
return s.broadcastSeenBlockSidecars(ctx, b, gb.GetFulu().Blobs, gb.GetFulu().KzgProofs)
|
||||
case version.String(version.Electra):
|
||||
return s.broadcastSeenBlockSidecars(ctx, b, gb.GetElectra().Blobs, gb.GetElectra().KzgProofs)
|
||||
case version.String(version.Deneb):
|
||||
return s.broadcastSeenBlockSidecars(ctx, b, gb.GetDeneb().Blobs, gb.GetDeneb().KzgProofs)
|
||||
default:
|
||||
// Forks before Deneb do not support blob sidecars.
// Forks after Fulu use data columns instead of blob sidecars, so there is no need to rebroadcast.
|
||||
return nil
|
||||
}
|
||||
}
|
||||
@@ -1053,7 +1052,7 @@ func (s *Server) validateConsensus(ctx context.Context, b *eth.GenericSignedBeac
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := s.validateBlobSidecars(blk, blobs, proofs); err != nil {
|
||||
if err := s.validateBlobs(blk, blobs, proofs); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -1067,23 +1066,41 @@ func (s *Server) validateEquivocation(blk interfaces.ReadOnlyBeaconBlock) error
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Server) validateBlobSidecars(blk interfaces.SignedBeaconBlock, blobs [][]byte, proofs [][]byte) error {
|
||||
func (s *Server) validateBlobs(blk interfaces.SignedBeaconBlock, blobs [][]byte, proofs [][]byte) error {
|
||||
if blk.Version() < version.Deneb {
|
||||
return nil
|
||||
}
|
||||
kzgs, err := blk.Block().Body().BlobKzgCommitments()
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
commitments, err := blk.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get blob kzg commitments")
|
||||
}
|
||||
if len(blobs) != len(proofs) || len(blobs) != len(kzgs) {
|
||||
return errors.New("number of blobs, proofs, and commitments do not match")
|
||||
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(blk.Block().Slot())
|
||||
if len(blobs) > maxBlobsPerBlock {
|
||||
return fmt.Errorf("number of blobs over max, %d > %d", len(blobs), maxBlobsPerBlock)
|
||||
}
|
||||
for i, blob := range blobs {
|
||||
b := kzg4844.Blob(blob)
|
||||
if err := kzg4844.VerifyBlobProof(&b, kzg4844.Commitment(kzgs[i]), kzg4844.Proof(proofs[i])); err != nil {
|
||||
return errors.Wrap(err, "could not verify blob proof")
|
||||
if blk.Version() >= version.Fulu {
|
||||
// For Fulu blocks, proofs are cell proofs (blobs * numberOfColumns)
|
||||
expectedProofsCount := uint64(len(blobs)) * numberOfColumns
|
||||
if uint64(len(proofs)) != expectedProofsCount || len(blobs) != len(commitments) {
|
||||
return fmt.Errorf("number of blobs (%d), cell proofs (%d), and commitments (%d) do not match (expected %d cell proofs)", len(blobs), len(proofs), len(commitments), expectedProofsCount)
|
||||
}
|
||||
// For Fulu blocks, proofs are cell proofs from execution client's BlobsBundleV2
|
||||
// Verify cell proofs directly without reconstructing data column sidecars
|
||||
if err := kzg.VerifyCellKZGProofBatchFromBlobData(blobs, commitments, proofs, numberOfColumns); err != nil {
|
||||
return errors.Wrap(err, "could not verify cell proofs")
|
||||
}
|
||||
} else {
|
||||
// For pre-Fulu blocks, proofs are blob proofs (1:1 with blobs)
|
||||
if len(blobs) != len(proofs) || len(blobs) != len(commitments) {
|
||||
return errors.Errorf("number of blobs (%d), proofs (%d), and commitments (%d) do not match", len(blobs), len(proofs), len(commitments))
|
||||
}
|
||||
// Use batch verification for better performance
|
||||
if err := kzg.VerifyBlobKZGProofBatch(blobs, commitments, proofs); err != nil {
|
||||
return errors.Wrap(err, "could not verify blob proofs")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
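To make the Fulu proof-count arithmetic concrete: with the standard 128-column PeerDAS configuration assumed here, a block carrying 3 blobs must arrive with 3 * 128 = 384 cell proofs, laid out blob-major, so the proof for a given (blobIdx, colIdx) pair sits at blobIdx*128 + colIdx in the flattened slice. A small self-contained sketch (not Prysm code):

package main

import "fmt"

func main() {
	const numberOfColumns = 128 // standard PeerDAS configuration assumed for this example
	blobCount := 3
	fmt.Println("expected cell proofs:", blobCount*numberOfColumns) // 384

	// Flattened, blob-major layout used for the cell proofs.
	blobIdx, colIdx := 2, 5
	fmt.Println("flattened index:", blobIdx*numberOfColumns+colIdx) // 261
}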
|
||||
|
||||
@@ -1627,6 +1644,8 @@ func (s *Server) broadcastSeenBlockSidecars(
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Broadcast blob sidecars with forkchoice checking
|
||||
for _, sc := range scs {
|
||||
r, err := sc.SignedBlockHeader.Header.HashTreeRoot()
|
||||
if err != nil {
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/api"
|
||||
"github.com/OffchainLabs/prysm/v6/api/server/structs"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
chainMock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache/depositsnapshot"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
|
||||
@@ -40,7 +41,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
GoKZG "github.com/crate-crypto/go-kzg-4844"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/pkg/errors"
|
||||
ssz "github.com/prysmaticlabs/fastssz"
|
||||
@@ -4781,25 +4781,329 @@ func TestServer_broadcastBlobSidecars(t *testing.T) {
|
||||
require.LogsContain(t, hook, "Broadcasted blob sidecar for already seen block")
|
||||
}
|
||||
|
||||
func Test_validateBlobSidecars(t *testing.T) {
|
||||
func Test_validateBlobs(t *testing.T) {
|
||||
require.NoError(t, kzg.Start())
|
||||
|
||||
blob := util.GetRandBlob(123)
|
||||
commitment := GoKZG.KZGCommitment{180, 218, 156, 194, 59, 20, 10, 189, 186, 254, 132, 93, 7, 127, 104, 172, 238, 240, 237, 70, 83, 89, 1, 152, 99, 0, 165, 65, 143, 62, 20, 215, 230, 14, 205, 95, 28, 245, 54, 25, 160, 16, 178, 31, 232, 207, 38, 85}
|
||||
proof := GoKZG.KZGProof{128, 110, 116, 170, 56, 111, 126, 87, 229, 234, 211, 42, 110, 150, 129, 206, 73, 142, 167, 243, 90, 149, 240, 240, 236, 204, 143, 182, 229, 249, 81, 27, 153, 171, 83, 70, 144, 250, 42, 1, 188, 215, 71, 235, 30, 7, 175, 86}
|
||||
// Generate proper commitment and proof for the blob
|
||||
var kzgBlob kzg.Blob
|
||||
copy(kzgBlob[:], blob[:])
|
||||
commitment, err := kzg.BlobToKZGCommitment(&kzgBlob)
|
||||
require.NoError(t, err)
|
||||
proof, err := kzg.ComputeBlobKZGProof(&kzgBlob, commitment)
|
||||
require.NoError(t, err)
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Body.BlobKzgCommitments = [][]byte{commitment[:]}
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
require.NoError(t, s.validateBlobSidecars(b, [][]byte{blob[:]}, [][]byte{proof[:]}))
|
||||
require.NoError(t, s.validateBlobs(b, [][]byte{blob[:]}, [][]byte{proof[:]}))
|
||||
|
||||
require.ErrorContains(t, "number of blobs, proofs, and commitments do not match", s.validateBlobSidecars(b, [][]byte{blob[:]}, [][]byte{}))
|
||||
require.ErrorContains(t, "number of blobs (1), proofs (0), and commitments (1) do not match", s.validateBlobs(b, [][]byte{blob[:]}, [][]byte{}))
|
||||
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
blk.Block.Body.BlobKzgCommitments = [][]byte{sk.PublicKey().Marshal()}
|
||||
b, err = blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.ErrorContains(t, "could not verify blob proof: can't verify opening proof", s.validateBlobSidecars(b, [][]byte{blob[:]}, [][]byte{proof[:]}))
|
||||
require.ErrorContains(t, "could not verify blob proofs", s.validateBlobs(b, [][]byte{blob[:]}, [][]byte{proof[:]}))
|
||||
|
||||
blobs := [][]byte{}
|
||||
commitments := [][]byte{}
|
||||
proofs := [][]byte{}
|
||||
for i := 0; i < 10; i++ {
|
||||
blobs = append(blobs, blob[:])
|
||||
commitments = append(commitments, commitment[:])
|
||||
proofs = append(proofs, proof[:])
|
||||
}
|
||||
t.Run("pre-Deneb block should return early", func(t *testing.T) {
|
||||
// Create a pre-Deneb block (e.g., Capella)
|
||||
blk := util.NewBeaconBlockCapella()
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
// Should return nil for pre-Deneb blocks regardless of blobs
|
||||
require.NoError(t, s.validateBlobs(b, [][]byte{}, [][]byte{}))
|
||||
require.NoError(t, s.validateBlobs(b, blobs[:1], proofs[:1]))
|
||||
})
|
||||
|
||||
t.Run("Deneb block with valid single blob", func(t *testing.T) {
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Body.BlobKzgCommitments = [][]byte{commitment[:]}
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
require.NoError(t, s.validateBlobs(b, [][]byte{blob[:]}, [][]byte{proof[:]}))
|
||||
})
|
||||
|
||||
t.Run("Deneb block with max blobs (6)", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 100
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Slot = 10 // Deneb slot
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:6]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
// Should pass with exactly 6 blobs
|
||||
require.NoError(t, s.validateBlobs(b, blobs[:6], proofs[:6]))
|
||||
})
|
||||
|
||||
t.Run("Deneb block exceeding max blobs", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 100
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Slot = 10 // Deneb slot
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:7]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
// Should fail with 7 blobs when max is 6
|
||||
err = s.validateBlobs(b, blobs[:7], proofs[:7])
|
||||
require.ErrorContains(t, "number of blobs over max, 7 > 6", err)
|
||||
})
|
||||
|
||||
t.Run("Electra block with valid blobs", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Set up Electra config with max 9 blobs
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 5
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
blk := util.NewBeaconBlockElectra()
|
||||
blk.Block.Slot = 160 // Electra slot (epoch 5+)
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:9]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
// Should pass with 9 blobs in Electra
|
||||
require.NoError(t, s.validateBlobs(b, blobs[:9], proofs[:9]))
|
||||
})
|
||||
|
||||
t.Run("Electra block exceeding max blobs", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Set up Electra config with max 9 blobs
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 5
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
blk := util.NewBeaconBlockElectra()
|
||||
blk.Block.Slot = 160 // Electra slot
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:10]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
// Should fail with 10 blobs when max is 9
|
||||
err = s.validateBlobs(b, blobs[:10], proofs[:10])
|
||||
require.ErrorContains(t, "number of blobs over max, 10 > 9", err)
|
||||
})
|
||||
|
||||
t.Run("Fulu block with valid cell proofs", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 5
|
||||
testCfg.FuluForkEpoch = 10
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
|
||||
testCfg.NumberOfColumns = 128 // Standard PeerDAS configuration
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
// Create Fulu block with proper cell proofs
|
||||
blk := util.NewBeaconBlockFulu()
|
||||
blk.Block.Slot = 320 // Epoch 10 (Fulu fork)
|
||||
|
||||
// Generate valid commitments and cell proofs for testing
|
||||
blobCount := 2
|
||||
commitments := make([][]byte, blobCount)
|
||||
fuluBlobs := make([][]byte, blobCount)
|
||||
var kzgBlobs []kzg.Blob
|
||||
|
||||
for i := 0; i < blobCount; i++ {
|
||||
blob := util.GetRandBlob(int64(i))
|
||||
fuluBlobs[i] = blob[:]
|
||||
var kzgBlob kzg.Blob
|
||||
copy(kzgBlob[:], blob[:])
|
||||
kzgBlobs = append(kzgBlobs, kzgBlob)
|
||||
|
||||
// Generate commitment
|
||||
commitment, err := kzg.BlobToKZGCommitment(&kzgBlob)
|
||||
require.NoError(t, err)
|
||||
commitments[i] = commitment[:]
|
||||
}
|
||||
|
||||
blk.Block.Body.BlobKzgCommitments = commitments
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Generate cell proofs for the blobs (flattened format like execution client)
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
cellProofs := make([][]byte, uint64(blobCount)*numberOfColumns)
|
||||
for blobIdx := 0; blobIdx < blobCount; blobIdx++ {
|
||||
cellsAndProofs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlobs[blobIdx])
|
||||
require.NoError(t, err)
|
||||
|
||||
for colIdx := uint64(0); colIdx < numberOfColumns; colIdx++ {
|
||||
cellProofIdx := uint64(blobIdx)*numberOfColumns + colIdx
|
||||
cellProofs[cellProofIdx] = cellsAndProofs.Proofs[colIdx][:]
|
||||
}
|
||||
}
|
||||
|
||||
s := &Server{}
|
||||
// Should use cell batch verification for Fulu blocks
|
||||
require.NoError(t, s.validateBlobs(b, fuluBlobs, cellProofs))
|
||||
})
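The flattened cell-proof layout exercised in the subtest above is easy to get wrong: the execution client returns one proof per (blob, column) pair in row-major order, so proof i belongs to blob i / numberOfColumns and column i % numberOfColumns. The helper below is a minimal illustrative sketch of that indexing only; the function name is hypothetical and not part of Prysm.

// cellProofAt returns the proof for (blobIdx, colIdx) from a flattened,
// row-major slice of length blobCount*numberOfColumns, mirroring the
// cellProofIdx arithmetic in the test above. Hypothetical helper.
func cellProofAt(cellProofs [][]byte, numberOfColumns, blobIdx, colIdx uint64) []byte {
	return cellProofs[blobIdx*numberOfColumns+colIdx]
}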
|
||||
|
||||
t.Run("Fulu block with invalid cell proof count", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 5
|
||||
testCfg.FuluForkEpoch = 10
|
||||
testCfg.NumberOfColumns = 128
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
blk := util.NewBeaconBlockFulu()
|
||||
blk.Block.Slot = 320 // Epoch 10 (Fulu fork)
|
||||
|
||||
// Create valid commitments but wrong number of cell proofs
|
||||
blobCount := 2
|
||||
commitments := make([][]byte, blobCount)
|
||||
fuluBlobs := make([][]byte, blobCount)
|
||||
for i := 0; i < blobCount; i++ {
|
||||
blob := util.GetRandBlob(int64(i))
|
||||
fuluBlobs[i] = blob[:]
|
||||
|
||||
var kzgBlob kzg.Blob
|
||||
copy(kzgBlob[:], blob[:])
|
||||
commitment, err := kzg.BlobToKZGCommitment(&kzgBlob)
|
||||
require.NoError(t, err)
|
||||
commitments[i] = commitment[:]
|
||||
}
|
||||
|
||||
blk.Block.Body.BlobKzgCommitments = commitments
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Wrong number of cell proofs (should be blobCount * numberOfColumns)
|
||||
wrongCellProofs := make([][]byte, 10) // Too few proofs
|
||||
|
||||
s := &Server{}
|
||||
err = s.validateBlobs(b, fuluBlobs, wrongCellProofs)
|
||||
require.ErrorContains(t, "do not match", err)
|
||||
})
|
||||
|
||||
t.Run("Deneb block with invalid blob proof", func(t *testing.T) {
|
||||
blob := util.GetRandBlob(123)
|
||||
invalidProof := make([]byte, 48) // All zeros - invalid proof
|
||||
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Body.BlobKzgCommitments = [][]byte{sk.PublicKey().Marshal()}
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := &Server{}
|
||||
err = s.validateBlobs(b, [][]byte{blob[:]}, [][]byte{invalidProof})
|
||||
require.ErrorContains(t, "could not verify blob proofs", err)
|
||||
})
|
||||
|
||||
t.Run("empty blobs and proofs should pass", func(t *testing.T) {
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Body.BlobKzgCommitments = [][]byte{}
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := &Server{}
|
||||
require.NoError(t, s.validateBlobs(b, [][]byte{}, [][]byte{}))
|
||||
})
|
||||
|
||||
t.Run("BlobSchedule with progressive increases (BPO)", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Set up config with a BlobSchedule (BPO: blob-parameter-only fork schedule)
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 100
|
||||
testCfg.FuluForkEpoch = 200
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
|
||||
// Define blob schedule with progressive increases
|
||||
testCfg.BlobSchedule = []params.BlobScheduleEntry{
|
||||
{Epoch: 0, MaxBlobsPerBlock: 3}, // Start with 3 blobs
|
||||
{Epoch: 10, MaxBlobsPerBlock: 5}, // Increase to 5 at epoch 10
|
||||
{Epoch: 20, MaxBlobsPerBlock: 7}, // Increase to 7 at epoch 20
|
||||
{Epoch: 30, MaxBlobsPerBlock: 9}, // Increase to 9 at epoch 30
|
||||
}
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
s := &Server{}
|
||||
|
||||
// Test epoch 0-9: max 3 blobs
|
||||
t.Run("epoch 0-9: max 3 blobs", func(t *testing.T) {
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Slot = 5 // Epoch 0
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:3]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.validateBlobs(b, blobs[:3], proofs[:3]))
|
||||
|
||||
// Should fail with 4 blobs
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:4]
|
||||
b, err = blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
err = s.validateBlobs(b, blobs[:4], proofs[:4])
|
||||
require.ErrorContains(t, "number of blobs over max, 4 > 3", err)
|
||||
})
|
||||
|
||||
// Test epoch 30+: max 9 blobs
|
||||
t.Run("epoch 30+: max 9 blobs", func(t *testing.T) {
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Slot = 960 // Epoch 30
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:9]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.validateBlobs(b, blobs[:9], proofs[:9]))
|
||||
|
||||
// Should fail with 10 blobs
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:10]
|
||||
b, err = blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
err = s.validateBlobs(b, blobs[:10], proofs[:10])
|
||||
require.ErrorContains(t, "number of blobs over max, 10 > 9", err)
|
||||
})
|
||||
})
|
||||
}
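The BPO subtests above rely on the blob schedule being resolved by epoch. As a rough sketch of that lookup, assuming entries are sorted by ascending Epoch (as in the test), that the pre-schedule default applies before the first entry, and uint64 field types, a resolver could look like the following; maxBlobsAt is a hypothetical name, not Prysm's helper.

// maxBlobsAt walks a BlobSchedule sorted by ascending Epoch and returns the
// MaxBlobsPerBlock of the last entry whose Epoch is <= the requested epoch,
// falling back to the supplied default. Illustrative sketch only.
func maxBlobsAt(schedule []params.BlobScheduleEntry, epoch primitives.Epoch, fallback uint64) uint64 {
	maxBlobs := fallback
	for _, entry := range schedule {
		if entry.Epoch > epoch {
			break
		}
		maxBlobs = entry.MaxBlobsPerBlock
	}
	return maxBlobs
}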
|
||||
|
||||
func TestGetPendingConsolidations(t *testing.T) {
|
||||
|
||||
@@ -1029,8 +1029,8 @@ func (s *Server) GetProposerDuties(w http.ResponseWriter, r *http.Request) {
|
||||
httputil.HandleError(w, fmt.Sprintf("Could not get head state: %v ", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
// Advance the state with empty transitions up to the requested epoch start slot for pre-Fulu states only. Fulu states use the proposer lookahead field.
|
||||
if st.Slot() < epochStartSlot && st.Version() != version.Fulu {
|
||||
// Notice that even for Fulu requests for the next epoch, we are only advancing the state to the start of the current epoch.
|
||||
if st.Slot() < epochStartSlot {
|
||||
headRoot, err := s.HeadFetcher.HeadRoot(ctx)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, fmt.Sprintf("Could not get head root: %v ", err), http.StatusInternalServerError)
|
||||
|
||||
@@ -2645,78 +2645,6 @@ func TestGetProposerDuties(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetProposerDuties_FuluState(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
|
||||
// Create a Fulu state with slot 0 (before epoch 1 start slot which is 32)
|
||||
fuluState, err := util.NewBeaconStateFulu()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fuluState.SetSlot(0)) // Set to slot 0
|
||||
|
||||
// Create some validators for the test
|
||||
depChainStart := params.BeaconConfig().MinGenesisActiveValidatorCount
|
||||
deposits, _, err := util.DeterministicDepositsAndKeys(depChainStart)
|
||||
require.NoError(t, err)
|
||||
|
||||
validators := make([]*ethpbalpha.Validator, len(deposits))
|
||||
for i, deposit := range deposits {
|
||||
validators[i] = &ethpbalpha.Validator{
|
||||
PublicKey: deposit.Data.PublicKey,
|
||||
ActivationEpoch: 0,
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
}
|
||||
}
|
||||
require.NoError(t, fuluState.SetValidators(validators))
|
||||
|
||||
// Set up block roots
|
||||
genesis := util.NewBeaconBlock()
|
||||
genesisRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
roots := make([][]byte, fieldparams.BlockRootsLength)
|
||||
roots[0] = genesisRoot[:]
|
||||
require.NoError(t, fuluState.SetBlockRoots(roots))
|
||||
|
||||
chainSlot := primitives.Slot(0)
|
||||
chain := &mockChain.ChainService{
|
||||
State: fuluState, Root: genesisRoot[:], Slot: &chainSlot,
|
||||
}
|
||||
|
||||
db := dbutil.SetupDB(t)
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), genesisRoot))
|
||||
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{StatesBySlot: map[primitives.Slot]state.BeaconState{0: fuluState}},
|
||||
HeadFetcher: chain,
|
||||
TimeFetcher: chain,
|
||||
OptimisticModeFetcher: chain,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
PayloadIDCache: cache.NewPayloadIDCache(),
|
||||
TrackedValidatorsCache: cache.NewTrackedValidatorsCache(),
|
||||
BeaconDB: db,
|
||||
}
|
||||
|
||||
// Request epoch 1 duties, which should require advancing from slot 0 to slot 32
|
||||
// But for Fulu state, this advancement should be skipped
|
||||
request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/proposer/{epoch}", nil)
|
||||
request.SetPathValue("epoch", "1")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetProposerDuties(writer, request)
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
|
||||
// Verify the state was not advanced - it should still be at slot 0
|
||||
// This is the key assertion for the regression test
|
||||
assert.Equal(t, primitives.Slot(0), fuluState.Slot(), "Fulu state should not have been advanced")
|
||||
|
||||
resp := &structs.GetProposerDutiesResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
|
||||
// Should still return proposer duties despite not advancing the state
|
||||
assert.Equal(t, true, len(resp.Data) > 0, "Should return proposer duties even without state advancement")
|
||||
}
|
||||
|
||||
func TestGetSyncCommitteeDuties(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
params.SetupTestConfigCleanup(t)
|
||||
|
||||
@@ -39,6 +39,7 @@ go_library(
|
||||
"//api/client/builder:go_default_library",
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/builder:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/cache/depositsnapshot:go_default_library",
|
||||
@@ -49,6 +50,7 @@ go_library(
|
||||
"//beacon-chain/core/feed/operation:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
|
||||
@@ -15,9 +15,11 @@ import (
|
||||
blockfeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/block"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/operation"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/kv"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
@@ -58,28 +60,31 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not convert slot to time")
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": req.Slot,
|
||||
"sinceSlotStartTime": time.Since(t),
|
||||
}).Info("Begin building block")
|
||||
|
||||
log := log.WithField("slot", req.Slot)
|
||||
log.WithField("sinceSlotStartTime", time.Since(t)).Info("Begin building block")
|
||||
|
||||
// A syncing validator should not produce a block.
|
||||
if vs.SyncChecker.Syncing() {
|
||||
log.Error("Fail to build block: node is syncing")
|
||||
return nil, status.Error(codes.Unavailable, "Syncing to latest head, not ready to respond")
|
||||
}
|
||||
// An optimistic validator MUST NOT produce a block (i.e., sign across the DOMAIN_BEACON_PROPOSER domain).
|
||||
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().BellatrixForkEpoch {
|
||||
if err := vs.optimisticStatus(ctx); err != nil {
|
||||
log.WithError(err).Error("Fail to build block: node is optimistic")
|
||||
return nil, status.Errorf(codes.Unavailable, "Validator is not ready to propose: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
head, parentRoot, err := vs.getParentState(ctx, req.Slot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Fail to build block: could not get parent state")
|
||||
return nil, err
|
||||
}
|
||||
sBlk, err := getEmptyBlock(req.Slot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Fail to build block: could not get empty block")
|
||||
return nil, status.Errorf(codes.Internal, "Could not prepare block: %v", err)
|
||||
}
|
||||
// Set slot, graffiti, randao reveal, and parent root.
|
||||
@@ -101,8 +106,7 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
|
||||
}
|
||||
|
||||
resp, err := vs.BuildBlockParallel(ctx, sBlk, head, req.SkipMevBoost, builderBoostFactor)
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"slot": req.Slot,
|
||||
log = log.WithFields(logrus.Fields{
|
||||
"sinceSlotStartTime": time.Since(t),
|
||||
"validator": sBlk.Block().ProposerIndex(),
|
||||
})
|
||||
@@ -275,6 +279,11 @@ func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.Signed
|
||||
//
|
||||
// ProposeBeaconBlock handles the proposal of beacon blocks.
|
||||
func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSignedBeaconBlock) (*ethpb.ProposeResponse, error) {
|
||||
var (
|
||||
blobSidecars []*ethpb.BlobSidecar
|
||||
dataColumnSidecars []*ethpb.DataColumnSidecar
|
||||
)
|
||||
|
||||
ctx, span := trace.StartSpan(ctx, "ProposerServer.ProposeBeaconBlock")
|
||||
defer span.End()
|
||||
|
||||
@@ -300,11 +309,10 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
|
||||
return &ethpb.ProposeResponse{BlockRoot: root[:]}, nil
|
||||
}
|
||||
|
||||
var sidecars []*ethpb.BlobSidecar
|
||||
if block.IsBlinded() {
|
||||
block, sidecars, err = vs.handleBlindedBlock(ctx, block)
|
||||
block, blobSidecars, err = vs.handleBlindedBlock(ctx, block)
|
||||
} else if block.Version() >= version.Deneb {
|
||||
sidecars, err = vs.blobSidecarsFromUnblindedBlock(block, req)
|
||||
blobSidecars, dataColumnSidecars, err = vs.handleUnblindedBlock(block, req)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "%s: %v", "handle block failed", err)
|
||||
@@ -323,10 +331,9 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
|
||||
errChan <- nil
|
||||
}()
|
||||
|
||||
if err := vs.broadcastAndReceiveBlobs(ctx, sidecars, root); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive blobs: %v", err)
|
||||
if err := vs.broadcastAndReceiveSidecars(ctx, block, root, blobSidecars, dataColumnSidecars); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive sidecars: %v", err)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
if err := <-errChan; err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive block: %v", err)
|
||||
@@ -335,12 +342,35 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
|
||||
return &ethpb.ProposeResponse{BlockRoot: root[:]}, nil
|
||||
}
|
||||
|
||||
// broadcastAndReceiveSidecars broadcasts and receives sidecars.
|
||||
func (vs *Server) broadcastAndReceiveSidecars(
|
||||
ctx context.Context,
|
||||
block interfaces.SignedBeaconBlock,
|
||||
root [fieldparams.RootLength]byte,
|
||||
blobSidecars []*ethpb.BlobSidecar,
|
||||
dataColumnSideCars []*ethpb.DataColumnSidecar,
|
||||
) error {
|
||||
if block.Version() >= version.Fulu {
|
||||
if err := vs.broadcastAndReceiveDataColumns(ctx, dataColumnSideCars, root); err != nil {
|
||||
return errors.Wrap(err, "broadcast and receive data columns")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := vs.broadcastAndReceiveBlobs(ctx, blobSidecars, root); err != nil {
|
||||
return errors.Wrap(err, "broadcast and receive blobs")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// handleBlindedBlock processes blinded beacon blocks (pre-Fulu only).
|
||||
// Post-Fulu blinded blocks are handled directly in ProposeBeaconBlock.
|
||||
func (vs *Server) handleBlindedBlock(ctx context.Context, block interfaces.SignedBeaconBlock) (interfaces.SignedBeaconBlock, []*ethpb.BlobSidecar, error) {
|
||||
if block.Version() < version.Bellatrix {
|
||||
return nil, nil, errors.New("pre-Bellatrix blinded block")
|
||||
}
|
||||
|
||||
if vs.BlockBuilder == nil || !vs.BlockBuilder.Configured() {
|
||||
return nil, nil, errors.New("unconfigured block builder")
|
||||
}
|
||||
@@ -367,16 +397,34 @@ func (vs *Server) handleBlindedBlock(ctx context.Context, block interfaces.Signe
|
||||
return copiedBlock, sidecars, nil
|
||||
}
|
||||
|
||||
func (vs *Server) blobSidecarsFromUnblindedBlock(block interfaces.SignedBeaconBlock, req *ethpb.GenericSignedBeaconBlock) ([]*ethpb.BlobSidecar, error) {
|
||||
func (vs *Server) handleUnblindedBlock(
|
||||
block interfaces.SignedBeaconBlock,
|
||||
req *ethpb.GenericSignedBeaconBlock,
|
||||
) ([]*ethpb.BlobSidecar, []*ethpb.DataColumnSidecar, error) {
|
||||
rawBlobs, proofs, err := blobsAndProofs(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
return BuildBlobSidecars(block, rawBlobs, proofs)
|
||||
|
||||
if block.Version() >= version.Fulu {
|
||||
dataColumnSideCars, err := peerdas.ConstructDataColumnSidecars(block, rawBlobs, proofs)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "construct data column sidecars")
|
||||
}
|
||||
|
||||
return nil, dataColumnSideCars, nil
|
||||
}
|
||||
|
||||
blobSidecars, err := BuildBlobSidecars(block, rawBlobs, proofs)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "build blob sidecars")
|
||||
}
|
||||
|
||||
return blobSidecars, nil, nil
|
||||
}
|
||||
|
||||
// broadcastReceiveBlock broadcasts a block and handles its reception.
|
||||
func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, root [32]byte) error {
|
||||
func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, root [fieldparams.RootLength]byte) error {
|
||||
protoBlock, err := block.Proto()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "protobuf conversion failed")
|
||||
@@ -392,18 +440,14 @@ func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.Si
|
||||
}
|
||||
|
||||
// broadcastAndReceiveBlobs handles the broadcasting and reception of blob sidecars.
|
||||
func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethpb.BlobSidecar, root [32]byte) error {
|
||||
func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethpb.BlobSidecar, root [fieldparams.RootLength]byte) error {
|
||||
eg, eCtx := errgroup.WithContext(ctx)
|
||||
for i, sc := range sidecars {
|
||||
// Copy the iteration instance to a local variable to give each go-routine its own copy to play with.
|
||||
// See https://golang.org/doc/faq#closures_and_goroutines for more details.
|
||||
subIdx := i
|
||||
sCar := sc
|
||||
for subIdx, sc := range sidecars {
|
||||
eg.Go(func() error {
|
||||
if err := vs.P2P.BroadcastBlob(eCtx, uint64(subIdx), sCar); err != nil {
|
||||
if err := vs.P2P.BroadcastBlob(eCtx, uint64(subIdx), sc); err != nil {
|
||||
return errors.Wrap(err, "broadcast blob failed")
|
||||
}
|
||||
readOnlySc, err := blocks.NewROBlobWithRoot(sCar, root)
|
||||
readOnlySc, err := blocks.NewROBlobWithRoot(sc, root)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "ROBlob creation failed")
|
||||
}
|
||||
@@ -421,6 +465,53 @@ func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethp
|
||||
return eg.Wait()
|
||||
}
|
||||
|
||||
// broadcastAndReceiveDataColumns handles the broadcasting and reception of data columns sidecars.
|
||||
func (vs *Server) broadcastAndReceiveDataColumns(
|
||||
ctx context.Context,
|
||||
sidecars []*ethpb.DataColumnSidecar,
|
||||
root [fieldparams.RootLength]byte,
|
||||
) error {
|
||||
verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(sidecars))
|
||||
eg, _ := errgroup.WithContext(ctx)
|
||||
for _, sidecar := range sidecars {
|
||||
roDataColumn, err := blocks.NewRODataColumnWithRoot(sidecar, root)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "new read-only data column with root")
|
||||
}
|
||||
|
||||
// We build this block ourselves, so we can upgrade the read only data column sidecar into a verified one.
|
||||
verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
|
||||
verifiedRODataColumns = append(verifiedRODataColumns, verifiedRODataColumn)
|
||||
|
||||
eg.Go(func() error {
|
||||
// Compute the subnet index based on the column index.
|
||||
subnet := peerdas.ComputeSubnetForDataColumnSidecar(sidecar.Index)
|
||||
|
||||
if err := vs.P2P.BroadcastDataColumnSidecar(root, subnet, sidecar); err != nil {
|
||||
return errors.Wrap(err, "broadcast data column")
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if err := eg.Wait(); err != nil {
|
||||
return errors.Wrap(err, "wait for data columns to be broadcasted")
|
||||
}
|
||||
|
||||
if err := vs.DataColumnReceiver.ReceiveDataColumns(verifiedRODataColumns); err != nil {
|
||||
return errors.Wrap(err, "receive data column")
|
||||
}
|
||||
|
||||
for _, verifiedRODataColumn := range verifiedRODataColumns {
|
||||
vs.OperationNotifier.OperationFeed().Send(&feed.Event{
|
||||
Type: operation.DataColumnSidecarReceived,
|
||||
Data: &operation.DataColumnSidecarReceivedData{DataColumn: &verifiedRODataColumn}, // #nosec G601
|
||||
})
|
||||
}
|
||||
return nil
|
||||
}
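For context on ComputeSubnetForDataColumnSidecar above: in the PeerDAS spec the gossip subnet for a column is derived by reducing the column index modulo the configured data column sidecar subnet count. The sketch below restates that relationship with a placeholder parameter; it is not Prysm's implementation.

// computeSubnet restates the PeerDAS mapping from column index to gossip
// subnet: subnet_id = column_index % DATA_COLUMN_SIDECAR_SUBNET_COUNT.
// subnetCount stands in for the configured subnet count.
func computeSubnet(columnIndex, subnetCount uint64) uint64 {
	return columnIndex % subnetCount
}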
|
||||
|
||||
// Deprecated: The gRPC API will remain the default and fully supported through v8 (expected in 2026) but will be eventually removed in favor of REST API.
|
||||
//
|
||||
// PrepareBeaconProposer caches and updates the fee recipient for the given proposer.
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/builder"
|
||||
builderTest "github.com/OffchainLabs/prysm/v6/beacon-chain/builder/testing"
|
||||
@@ -894,6 +895,9 @@ func injectSlashings(t *testing.T, st state.BeaconState, keys []bls.SecretKey, s
|
||||
}
|
||||
|
||||
func TestProposer_ProposeBlock_OK(t *testing.T) {
|
||||
// Initialize KZG for Fulu blocks
|
||||
require.NoError(t, kzg.Start())
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
block func([32]byte) *ethpb.GenericSignedBeaconBlock
|
||||
@@ -1098,6 +1102,131 @@ func TestProposer_ProposeBlock_OK(t *testing.T) {
|
||||
},
|
||||
err: "blob KZG commitments don't match number of blobs or KZG proofs",
|
||||
},
|
||||
{
|
||||
name: "fulu block no blob",
|
||||
block: func(parent [32]byte) *ethpb.GenericSignedBeaconBlock {
|
||||
sb := &ethpb.SignedBeaconBlockContentsFulu{
Block: &ethpb.SignedBeaconBlockFulu{
Block: &ethpb.BeaconBlockElectra{Slot: 5, ParentRoot: parent[:], Body: util.HydrateBeaconBlockBodyElectra(&ethpb.BeaconBlockBodyElectra{})},
},
}
blk := &ethpb.GenericSignedBeaconBlock_Fulu{Fulu: sb}
return &ethpb.GenericSignedBeaconBlock{Block: blk, IsBlinded: false}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "fulu block with single blob and cell proofs",
|
||||
block: func(parent [32]byte) *ethpb.GenericSignedBeaconBlock {
|
||||
numberOfColumns := uint64(128)
|
||||
// For Fulu, we have cell proofs (blobs * numberOfColumns)
|
||||
cellProofs := make([][]byte, numberOfColumns)
|
||||
for i := uint64(0); i < numberOfColumns; i++ {
|
||||
cellProofs[i] = bytesutil.PadTo([]byte{byte(i)}, 48)
|
||||
}
|
||||
// Blob must be exactly 131072 bytes
|
||||
blob := make([]byte, 131072)
|
||||
blob[0] = 0x01
|
||||
sb := &ethpb.SignedBeaconBlockContentsFulu{
Block: &ethpb.SignedBeaconBlockFulu{
Block: &ethpb.BeaconBlockElectra{
Slot: 5, ParentRoot: parent[:],
Body: util.HydrateBeaconBlockBodyElectra(&ethpb.BeaconBlockBodyElectra{
BlobKzgCommitments: [][]byte{bytesutil.PadTo([]byte("kc"), 48)},
}),
},
},
KzgProofs: cellProofs,
Blobs: [][]byte{blob},
}
blk := &ethpb.GenericSignedBeaconBlock_Fulu{Fulu: sb}
return &ethpb.GenericSignedBeaconBlock{Block: blk, IsBlinded: false}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "fulu block with multiple blobs and cell proofs",
|
||||
block: func(parent [32]byte) *ethpb.GenericSignedBeaconBlock {
|
||||
numberOfColumns := uint64(128)
|
||||
blobCount := 3
|
||||
// For Fulu, we have cell proofs (blobs * numberOfColumns)
|
||||
cellProofs := make([][]byte, uint64(blobCount)*numberOfColumns)
|
||||
for i := range cellProofs {
|
||||
cellProofs[i] = bytesutil.PadTo([]byte{byte(i % 256)}, 48)
|
||||
}
|
||||
// Create properly sized blobs (131072 bytes each)
|
||||
blobs := make([][]byte, blobCount)
|
||||
for i := 0; i < blobCount; i++ {
|
||||
blob := make([]byte, 131072)
|
||||
blob[0] = byte(i + 1)
|
||||
blobs[i] = blob
|
||||
}
|
||||
sb := &ethpb.SignedBeaconBlockContentsFulu{
Block: &ethpb.SignedBeaconBlockFulu{
Block: &ethpb.BeaconBlockElectra{
Slot: 5, ParentRoot: parent[:],
Body: util.HydrateBeaconBlockBodyElectra(&ethpb.BeaconBlockBodyElectra{
BlobKzgCommitments: [][]byte{
bytesutil.PadTo([]byte("kc"), 48),
bytesutil.PadTo([]byte("kc1"), 48),
bytesutil.PadTo([]byte("kc2"), 48),
},
}),
},
},
KzgProofs: cellProofs,
Blobs: blobs,
}
blk := &ethpb.GenericSignedBeaconBlock_Fulu{Fulu: sb}
return &ethpb.GenericSignedBeaconBlock{Block: blk, IsBlinded: false}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "fulu block wrong cell proof count (should be blobs * 128)",
|
||||
block: func(parent [32]byte) *ethpb.GenericSignedBeaconBlock {
|
||||
// Wrong number of cell proofs - should be 2 * 128 = 256, but providing only 2
|
||||
// Create properly sized blobs
|
||||
blob1 := make([]byte, 131072)
|
||||
blob1[0] = 0x01
|
||||
blob2 := make([]byte, 131072)
|
||||
blob2[0] = 0x02
|
||||
sb := &ethpb.SignedBeaconBlockContentsFulu{
Block: &ethpb.SignedBeaconBlockFulu{
Block: &ethpb.BeaconBlockElectra{
Slot: 5, ParentRoot: parent[:],
Body: util.HydrateBeaconBlockBodyElectra(&ethpb.BeaconBlockBodyElectra{
BlobKzgCommitments: [][]byte{
bytesutil.PadTo([]byte("kc"), 48),
bytesutil.PadTo([]byte("kc1"), 48),
},
}),
},
},
KzgProofs: [][]byte{{0x01}, {0x02}}, // Wrong: should be 256 cell proofs
Blobs: [][]byte{blob1, blob2},
}
blk := &ethpb.GenericSignedBeaconBlock_Fulu{Fulu: sb}
return &ethpb.GenericSignedBeaconBlock{Block: blk, IsBlinded: false}
|
||||
},
|
||||
err: "blobs and cells proofs mismatch",
|
||||
},
|
||||
{
|
||||
name: "blind fulu block with blob commitments",
|
||||
block: func(parent [32]byte) *ethpb.GenericSignedBeaconBlock {
|
||||
blockToPropose := util.NewBlindedBeaconBlockFulu()
|
||||
blockToPropose.Message.Slot = 5
|
||||
blockToPropose.Message.ParentRoot = parent[:]
|
||||
txRoot, err := ssz.TransactionsRoot([][]byte{})
|
||||
require.NoError(t, err)
|
||||
withdrawalsRoot, err := ssz.WithdrawalSliceRoot([]*enginev1.Withdrawal{}, fieldparams.MaxWithdrawalsPerPayload)
|
||||
require.NoError(t, err)
|
||||
blockToPropose.Message.Body.ExecutionPayloadHeader.TransactionsRoot = txRoot[:]
|
||||
blockToPropose.Message.Body.ExecutionPayloadHeader.WithdrawalsRoot = withdrawalsRoot[:]
|
||||
blockToPropose.Message.Body.BlobKzgCommitments = [][]byte{bytesutil.PadTo([]byte{0x01}, 48)}
|
||||
blk := &ethpb.GenericSignedBeaconBlock_BlindedFulu{BlindedFulu: blockToPropose}
return &ethpb.GenericSignedBeaconBlock{Block: blk}
|
||||
},
|
||||
useBuilder: true,
|
||||
err: "commitment value doesn't match block", // Known issue with mock builder cell proof mismatch
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
@@ -1111,15 +1240,29 @@ func TestProposer_ProposeBlock_OK(t *testing.T) {
|
||||
|
||||
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
|
||||
db := dbutil.SetupDB(t)
|
||||
// Create cell proofs for Fulu blocks (128 proofs per blob)
|
||||
numberOfColumns := uint64(128)
|
||||
cellProofs := make([][]byte, numberOfColumns)
|
||||
for i := uint64(0); i < numberOfColumns; i++ {
|
||||
cellProofs[i] = bytesutil.PadTo([]byte{byte(i)}, 48)
|
||||
}
|
||||
// Create properly sized blob for mock builder
|
||||
mockBlob := make([]byte, 131072)
|
||||
mockBlob[0] = 0x03
|
||||
// Use the same commitment as in the blind block test
|
||||
mockCommitment := bytesutil.PadTo([]byte{0x01}, 48)
|
||||
|
||||
proposerServer := &Server{
|
||||
BlockReceiver: c,
|
||||
BlockNotifier: c.BlockNotifier(),
|
||||
P2P: mockp2p.NewTestP2P(t),
|
||||
BlockBuilder: &builderTest.MockBuilderService{HasConfigured: tt.useBuilder, PayloadCapella: emptyPayloadCapella(), PayloadDeneb: emptyPayloadDeneb(),
|
||||
BlobBundle: &enginev1.BlobsBundle{KzgCommitments: [][]byte{bytesutil.PadTo([]byte{0x01}, 48)}, Proofs: [][]byte{{0x02}}, Blobs: [][]byte{{0x03}}}},
|
||||
BeaconDB: db,
|
||||
BlobReceiver: c,
|
||||
OperationNotifier: c.OperationNotifier(),
|
||||
BlobBundle: &enginev1.BlobsBundle{KzgCommitments: [][]byte{mockCommitment}, Proofs: [][]byte{{0x02}}, Blobs: [][]byte{{0x03}}},
|
||||
BlobBundleV2: &enginev1.BlobsBundleV2{KzgCommitments: [][]byte{mockCommitment}, Proofs: cellProofs, Blobs: [][]byte{mockBlob}}},
|
||||
BeaconDB: db,
|
||||
BlobReceiver: c,
|
||||
DataColumnReceiver: c, // Add DataColumnReceiver for Fulu blocks
|
||||
OperationNotifier: c.OperationNotifier(),
|
||||
}
|
||||
blockToPropose := tt.block(bsRoot)
|
||||
res, err := proposerServer.ProposeBeaconBlock(t.Context(), blockToPropose)
|
||||
|
||||
@@ -69,6 +69,7 @@ type Server struct {
|
||||
SyncCommitteePool synccommittee.Pool
|
||||
BlockReceiver blockchain.BlockReceiver
|
||||
BlobReceiver blockchain.BlobReceiver
|
||||
DataColumnReceiver blockchain.DataColumnReceiver
|
||||
MockEth1Votes bool
|
||||
Eth1BlockFetcher execution.POWBlockFetcher
|
||||
PendingDepositsFetcher depositsnapshot.PendingDepositsFetcher
|
||||
|
||||
@@ -89,6 +89,7 @@ type Config struct {
|
||||
AttestationReceiver blockchain.AttestationReceiver
|
||||
BlockReceiver blockchain.BlockReceiver
|
||||
BlobReceiver blockchain.BlobReceiver
|
||||
DataColumnReceiver blockchain.DataColumnReceiver
|
||||
ExecutionChainService execution.Chain
|
||||
ChainStartFetcher execution.ChainStartFetcher
|
||||
ExecutionChainInfoFetcher execution.ChainInfoFetcher
|
||||
@@ -238,6 +239,7 @@ func NewService(ctx context.Context, cfg *Config) *Service {
|
||||
P2P: s.cfg.Broadcaster,
|
||||
BlockReceiver: s.cfg.BlockReceiver,
|
||||
BlobReceiver: s.cfg.BlobReceiver,
|
||||
DataColumnReceiver: s.cfg.DataColumnReceiver,
|
||||
MockEth1Votes: s.cfg.MockEth1Votes,
|
||||
Eth1BlockFetcher: s.cfg.ExecutionChainService,
|
||||
PendingDepositsFetcher: s.cfg.PendingDepositFetcher,
|
||||
|
||||
@@ -8,6 +8,7 @@ go_library(
|
||||
"mock_blocker.go",
|
||||
"mock_exec_chain_info_fetcher.go",
|
||||
"mock_genesis_timefetcher.go",
|
||||
"mock_sidecars.go",
|
||||
"mock_stater.go",
|
||||
],
|
||||
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/testutil",
|
||||
|
||||
beacon-chain/rpc/testutil/mock_sidecars.go (new file, 44 lines)
@@ -0,0 +1,44 @@
package testutil

import ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"

// CreateDataColumnSidecar generates a filled dummy data column sidecar.
func CreateDataColumnSidecar(index uint64, data []byte) *ethpb.DataColumnSidecar {
	return &ethpb.DataColumnSidecar{
		Index:  index,
		Column: [][]byte{data},
		SignedBlockHeader: &ethpb.SignedBeaconBlockHeader{
			Header: &ethpb.BeaconBlockHeader{
				Slot:          1,
				ProposerIndex: 1,
				ParentRoot:    make([]byte, 32),
				StateRoot:     make([]byte, 32),
				BodyRoot:      make([]byte, 32),
			},
			Signature: make([]byte, 96),
		},
		KzgCommitments:               [][]byte{make([]byte, 48)},
		KzgProofs:                    [][]byte{make([]byte, 48)},
		KzgCommitmentsInclusionProof: [][]byte{make([]byte, 32)},
	}
}

// CreateBlobSidecar generates a filled dummy blob sidecar.
func CreateBlobSidecar(index uint64, blob []byte) *ethpb.BlobSidecar {
	return &ethpb.BlobSidecar{
		Index: index,
		Blob:  blob,
		SignedBlockHeader: &ethpb.SignedBeaconBlockHeader{
			Header: &ethpb.BeaconBlockHeader{
				Slot:          1,
				ProposerIndex: 1,
				ParentRoot:    make([]byte, 32),
				StateRoot:     make([]byte, 32),
				BodyRoot:      make([]byte, 32),
			},
			Signature: make([]byte, 96),
		},
		KzgCommitment: make([]byte, 48),
		KzgProof:      make([]byte, 48),
	}
}
|
||||
@@ -8,6 +8,7 @@ go_library(
|
||||
"broadcast_bls_changes.go",
|
||||
"context.go",
|
||||
"custody.go",
|
||||
"data_column_assignment.go",
|
||||
"data_column_sidecars.go",
|
||||
"data_columns_reconstruct.go",
|
||||
"deadlines.go",
|
||||
@@ -137,6 +138,7 @@ go_library(
|
||||
"//time:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_hashicorp_golang_lru//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/host:go_default_library",
|
||||
|
||||
@@ -6,18 +6,22 @@ go_library(
|
||||
"batch.go",
|
||||
"batcher.go",
|
||||
"blobs.go",
|
||||
"columns.go",
|
||||
"fulu_transition.go",
|
||||
"log.go",
|
||||
"metrics.go",
|
||||
"pool.go",
|
||||
"service.go",
|
||||
"status.go",
|
||||
"verify.go",
|
||||
"verify_column.go",
|
||||
"worker.go",
|
||||
],
|
||||
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/sync/backfill",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/das:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
@@ -66,6 +70,7 @@ go_test(
|
||||
"//beacon-chain/das:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/p2p/peers:go_default_library",
|
||||
"//beacon-chain/p2p/testing:go_default_library",
|
||||
"//beacon-chain/startup:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
|
||||
@@ -6,9 +6,7 @@ import (
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/das"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
@@ -38,8 +36,10 @@ func (s batchState) String() string {
|
||||
return "import_complete"
|
||||
case batchEndSequence:
|
||||
return "end_sequence"
|
||||
case batchBlobSync:
|
||||
return "blob_sync"
|
||||
case batchSyncBlobs:
|
||||
return "sync_blobs"
|
||||
case batchSyncColumns:
|
||||
return "sync_columns"
|
||||
default:
|
||||
return "unknown"
|
||||
}
|
||||
@@ -50,7 +50,8 @@ const (
|
||||
batchInit
|
||||
batchSequenced
|
||||
batchErrRetryable
|
||||
batchBlobSync
|
||||
batchSyncBlobs
|
||||
batchSyncColumns
|
||||
batchImportable
|
||||
batchImportComplete
|
||||
batchEndSequence
|
||||
@@ -68,13 +69,14 @@ type batch struct {
|
||||
retryAfter time.Time
|
||||
begin primitives.Slot
|
||||
end primitives.Slot // half-open interval, [begin, end), ie >= start, < end.
|
||||
results verifiedROBlocks
|
||||
blocks verifiedROBlocks
|
||||
err error
|
||||
state batchState
|
||||
busy peer.ID
|
||||
peer peer.ID
|
||||
nextReqCols []uint64
|
||||
blockPid peer.ID
|
||||
blobPid peer.ID
|
||||
bs *blobSync
|
||||
blobs *blobSync
|
||||
columns *columnSync
|
||||
}
|
||||
|
||||
func (b batch) logFields() logrus.Fields {
|
||||
@@ -86,13 +88,24 @@ func (b batch) logFields() logrus.Fields {
|
||||
"retries": b.retries,
|
||||
"begin": b.begin,
|
||||
"end": b.end,
|
||||
"busyPid": b.busy,
|
||||
"busyPid": b.peer,
|
||||
"blockPid": b.blockPid,
|
||||
"blobPid": b.blobPid,
|
||||
}
|
||||
if b.blobs != nil {
|
||||
f["blobPid"] = b.blobs.pid
|
||||
}
|
||||
if b.columns != nil {
|
||||
f["colPid"] = b.columns.peer
|
||||
}
|
||||
if b.retries > 0 {
|
||||
f["retryAfter"] = b.retryAfter.String()
|
||||
}
|
||||
if b.state == batchSyncColumns {
|
||||
f["nextColumns"] = fmt.Sprintf("%v", b.nextReqCols)
|
||||
}
|
||||
if b.state == batchErrRetryable && b.blobs != nil {
|
||||
f["blobsMissing"] = b.blobs.needed()
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
@@ -114,7 +127,7 @@ func (b batch) id() batchId {
|
||||
}
|
||||
|
||||
func (b batch) ensureParent(expected [32]byte) error {
|
||||
tail := b.results[len(b.results)-1]
|
||||
tail := b.blocks[len(b.blocks)-1]
|
||||
if tail.Root() != expected {
|
||||
return errors.Wrapf(ErrChainBroken, "last parent_root=%#x, tail root=%#x", expected, tail.Root())
|
||||
}
|
||||
@@ -136,21 +149,15 @@ func (b batch) blobRequest() *eth.BlobSidecarsByRangeRequest {
|
||||
}
|
||||
}
|
||||
|
||||
func (b batch) withResults(results verifiedROBlocks, bs *blobSync) batch {
|
||||
b.results = results
|
||||
b.bs = bs
|
||||
if bs.blobsNeeded() > 0 {
|
||||
return b.withState(batchBlobSync)
|
||||
func (b batch) transitionToNext() batch {
|
||||
if len(b.blocks) == 0 {
|
||||
return b.withState(batchSequenced)
|
||||
}
|
||||
return b.withState(batchImportable)
|
||||
}
|
||||
|
||||
func (b batch) postBlobSync() batch {
|
||||
if b.blobsNeeded() > 0 {
|
||||
log.WithFields(b.logFields()).WithField("blobsMissing", b.blobsNeeded()).Error("Batch still missing blobs after downloading from peer")
|
||||
b.bs = nil
|
||||
b.results = []blocks.ROBlock{}
|
||||
return b.withState(batchErrRetryable)
|
||||
if len(b.columns.columnsNeeded()) > 0 {
|
||||
return b.withState(batchSyncColumns)
|
||||
}
|
||||
if b.blobs != nil && b.blobs.needed() > 0 {
|
||||
return b.withState(batchSyncBlobs)
|
||||
}
|
||||
return b.withState(batchImportable)
|
||||
}
|
||||
@@ -176,27 +183,22 @@ func (b batch) withState(s batchState) batch {
|
||||
return b
|
||||
}
|
||||
|
||||
func (b batch) withPeer(p peer.ID) batch {
|
||||
b.blockPid = p
|
||||
backfillBatchTimeWaiting.Observe(float64(time.Since(b.scheduled).Milliseconds()))
|
||||
return b
|
||||
}
|
||||
|
||||
func (b batch) withRetryableError(err error) batch {
|
||||
log.WithFields(b.logFields()).WithError(err).Warn("Could not proceed with batch processing due to error")
|
||||
b.err = err
|
||||
return b.withState(batchErrRetryable)
|
||||
}
|
||||
|
||||
func (b batch) blobsNeeded() int {
|
||||
return b.bs.blobsNeeded()
|
||||
}
|
||||
|
||||
func (b batch) blobResponseValidator() sync.BlobResponseValidation {
|
||||
return b.bs.validateNext
|
||||
}
|
||||
|
||||
func (b batch) availabilityStore() das.AvailabilityStore {
|
||||
return b.bs.store
|
||||
func (b batch) validatingColumnRequest(cb *columnBisector) *validatingColumnRequest {
|
||||
req := b.columns.request(b.nextReqCols)
|
||||
if req == nil {
|
||||
return nil
|
||||
}
|
||||
return &validatingColumnRequest{
|
||||
req: req,
|
||||
columnSync: b.columns,
|
||||
bisector: cb,
|
||||
}
|
||||
}
|
||||
|
||||
var batchBlockUntil = func(ctx context.Context, untilRetry time.Duration, b batch) error {
|
||||
@@ -223,6 +225,18 @@ func (b batch) waitUntilReady(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b batch) workComplete() bool {
|
||||
return b.state == batchImportable
|
||||
}
|
||||
|
||||
func (b batch) selectPeer(picker *sync.PeerPicker, busy map[peer.ID]bool) (peer.ID, []uint64, error) {
|
||||
if b.state == batchSyncColumns {
|
||||
return picker.ForColumns(b.columns.columnsNeeded(), busy)
|
||||
}
|
||||
peer, err := picker.ForBlocks(busy)
|
||||
return peer, nil, err
|
||||
}
|
||||
|
||||
func sortBatchDesc(bb []batch) {
|
||||
sort.Slice(bb, func(i, j int) bool {
|
||||
return bb[i].end > bb[j].end
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
@@ -48,17 +49,24 @@ func newBlobSync(current primitives.Slot, vbs verifiedROBlocks, cfg *blobSyncCon
|
||||
type blobVerifierMap map[[32]byte][]verification.BlobVerifier
|
||||
|
||||
type blobSync struct {
|
||||
store das.AvailabilityStore
|
||||
store *das.LazilyPersistentStoreBlob
|
||||
expected []blobSummary
|
||||
next int
|
||||
bbv *blobBatchVerifier
|
||||
current primitives.Slot
|
||||
pid peer.ID
|
||||
}
|
||||
|
||||
func (bs *blobSync) blobsNeeded() int {
|
||||
func (bs *blobSync) needed() int {
|
||||
return len(bs.expected) - bs.next
|
||||
}
|
||||
|
||||
// validateNext is given to the RPC request code as one of the validation callbacks.
|
||||
// It orchestrates setting up the batch verifier (blobBatchVerifier) and calls Persist on the
|
||||
// AvailabilityStore. This enables the rest of the code in between RPC and the AvailabilityStore
|
||||
// to stay decoupled from each other. The AvailabilityStore holds the blobs in memory between the
|
||||
// call to Persist, and the call to IsDataAvailable (where the blobs are actually written to disk
|
||||
// if successfully verified).
|
||||
func (bs *blobSync) validateNext(rb blocks.ROBlob) error {
|
||||
if bs.next >= len(bs.expected) {
|
||||
return errUnexpectedResponseSize
|
||||
@@ -102,6 +110,7 @@ func newBlobBatchVerifier(nbv verification.NewBlobVerifier) *blobBatchVerifier {
|
||||
return &blobBatchVerifier{newBlobVerifier: nbv, verifiers: make(blobVerifierMap)}
|
||||
}
|
||||
|
||||
// blobBatchVerifier implements the BlobBatchVerifier interface required by the das store.
|
||||
type blobBatchVerifier struct {
|
||||
newBlobVerifier verification.NewBlobVerifier
|
||||
verifiers blobVerifierMap
|
||||
@@ -117,6 +126,7 @@ func (bbv *blobBatchVerifier) newVerifier(rb blocks.ROBlob) verification.BlobVer
|
||||
return m[rb.Index]
|
||||
}
|
||||
|
||||
// VerifiedROBlobs satisfies the BlobBatchVerifier interface expected by the AvailabilityChecker
|
||||
func (bbv *blobBatchVerifier) VerifiedROBlobs(_ context.Context, blk blocks.ROBlock, _ []blocks.ROBlob) ([]blocks.VerifiedROBlob, error) {
|
||||
m, ok := bbv.verifiers[blk.Root()]
|
||||
if !ok {
|
||||
|
||||
beacon-chain/sync/backfill/columns.go (new file, 224 lines)
@@ -0,0 +1,224 @@
|
||||
package backfill
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/das"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type columnBatch struct {
|
||||
first primitives.Slot
|
||||
last primitives.Slot
|
||||
custodyGroups peerdas.ColumnIndices
|
||||
toDownload map[[32]byte]*toDownload
|
||||
}
|
||||
|
||||
type toDownload struct {
|
||||
remaining peerdas.ColumnIndices
|
||||
commitments [][]byte
|
||||
}
|
||||
|
||||
func (cs *columnBatch) needed() peerdas.ColumnIndices {
|
||||
// make a copy that we can modify to reduce search iterations.
|
||||
search := cs.custodyGroups.ToMap()
|
||||
ci := peerdas.ColumnIndices{}
|
||||
for _, v := range cs.toDownload {
|
||||
if len(search) == 0 {
|
||||
return ci
|
||||
}
|
||||
for col := range search {
|
||||
if v.remaining.Has(col) {
|
||||
ci.Set(col)
|
||||
// avoid iterating every single block+index by only searching for indices
|
||||
// we haven't found yet.
|
||||
delete(search, col)
|
||||
}
|
||||
}
|
||||
}
|
||||
return ci
|
||||
}
|
||||
|
||||
type columnSync struct {
|
||||
*columnBatch
|
||||
store *das.LazilyPersistentStoreColumn
|
||||
current primitives.Slot
|
||||
peer peer.ID
|
||||
bisector *columnBisector
|
||||
}
|
||||
|
||||
func newColumnSync(b batch, blks verifiedROBlocks, current primitives.Slot, p p2p.P2P, vbs verifiedROBlocks, cfg *workerCfg) (*columnSync, error) {
|
||||
cgc, err := p.CustodyGroupCount()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "custody group count")
|
||||
}
|
||||
cb, err := buildColumnBatch(b, blks, p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if cb == nil {
|
||||
return &columnSync{}, nil
|
||||
}
|
||||
|
||||
bisector := newColumnBisector(cfg.downscore)
|
||||
return &columnSync{
|
||||
columnBatch: cb,
|
||||
current: current,
|
||||
store: das.NewLazilyPersistentStoreColumn(cfg.colStore, cfg.newVC, p.NodeID(), cgc, bisector),
|
||||
bisector: bisector,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (cs *columnSync) blockColumns(root [32]byte) *toDownload {
|
||||
if cs.columnBatch == nil {
|
||||
return nil
|
||||
}
|
||||
return cs.columnBatch.toDownload[root]
|
||||
}
|
||||
|
||||
func (cs *columnSync) columnsNeeded() peerdas.ColumnIndices {
|
||||
if cs.columnBatch == nil {
|
||||
return peerdas.ColumnIndices{}
|
||||
}
|
||||
return cs.columnBatch.needed()
|
||||
}
|
||||
|
||||
func (cs *columnSync) request(reqCols []uint64) *ethpb.DataColumnSidecarsByRangeRequest {
|
||||
return sync.DataColumnSidecarsByRangeRequest(reqCols, cs.first, cs.last)
|
||||
}
|
||||
|
||||
type validatingColumnRequest struct {
|
||||
req *ethpb.DataColumnSidecarsByRangeRequest
|
||||
columnSync *columnSync
|
||||
bisector *columnBisector
|
||||
}
|
||||
|
||||
func (v *validatingColumnRequest) validate(cd blocks.RODataColumn) (err error) {
|
||||
defer func(validity string, start time.Time) {
|
||||
dataColumnSidecarVerifyMs.Observe(float64(time.Since(start).Milliseconds()))
|
||||
if err != nil {
|
||||
validity = "invalid"
|
||||
}
|
||||
dataColumnSidecarDownloadCount.WithLabelValues(fmt.Sprintf("%d", cd.Index), validity).Inc()
|
||||
dataColumnSidecarDownloadBytes.Add(float64(cd.SizeSSZ()))
|
||||
}("valid", time.Now())
|
||||
return v.countedValidation(cd)
|
||||
}
|
||||
|
||||
var (
|
||||
errInvalidDataColumnResponse = errors.New("invalid DataColumnSidecar response")
|
||||
errUnexpectedBlockRoot = errors.Wrap(errInvalidDataColumnResponse, "unexpected sidecar block root")
|
||||
errCommitmentLengthMismatch = errors.Wrap(errInvalidDataColumnResponse, "sidecar has different commitment count than block")
|
||||
errCommitmentValueMismatch = errors.Wrap(errInvalidDataColumnResponse, "sidecar commitments do not match block")
|
||||
)
|
||||
|
||||
// When we call Persist we'll get the verification checks that are provided by the availability store.
|
||||
// In addition to those checks, this function calls rpcValidity, which maintains a state machine across
|
||||
// response values to ensure that the response is valid in the context of the overall request,
|
||||
// like making sure that the block root is one of the ones we expect based on the blocks we used to
|
||||
// construct the request. It also does cheap sanity checks on the DataColumnSidecar values like
|
||||
// ensuring that the commitments line up with the block.
|
||||
func (v *validatingColumnRequest) countedValidation(cd blocks.RODataColumn) error {
|
||||
root := cd.BlockRoot()
|
||||
expected := v.columnSync.blockColumns(root)
|
||||
if expected == nil {
|
||||
return errors.Wrapf(errUnexpectedBlockRoot, "root=%#x, slot=%d", root, cd.Slot())
|
||||
}
|
||||
// We don't need this column, but we trust that the column state machine verified we asked for it as part of a range request.
|
||||
// So we can just skip over it and not try to persist it.
|
||||
if !expected.remaining.Has(cd.Index) {
|
||||
return nil
|
||||
}
|
||||
if len(cd.KzgCommitments) != len(expected.commitments) {
|
||||
return errors.Wrapf(errCommitmentLengthMismatch, "root=%#x, slot=%d, index=%d", root, cd.Slot(), cd.Index)
|
||||
}
|
||||
for i, cmt := range cd.KzgCommitments {
|
||||
if !bytes.Equal(cmt, expected.commitments[i]) {
|
||||
return errors.Wrapf(errCommitmentValueMismatch, "root=%#x, slot=%d, index=%d", root, cd.Slot(), cd.Index)
|
||||
}
|
||||
}
|
||||
if err := v.columnSync.store.Persist(v.columnSync.current, cd); err != nil {
|
||||
return errors.Wrap(err, "persisting data column")
|
||||
}
|
||||
v.bisector.addPeerColumns(v.columnSync.peer, cd)
|
||||
expected.remaining.Unset(cd.Index)
|
||||
return nil
|
||||
}
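The two-phase pattern described in the comment above (Persist while downloading, then verify and write to disk via the availability check at import time) can be summarized as follows. The interface and function names here are hypothetical simplifications; the real das types carry more detail.

// availabilityStore is a hypothetical, minimal interface capturing the
// Persist / IsDataAvailable split used by the backfill stores.
type availabilityStore interface {
	Persist(current primitives.Slot, sc blocks.RODataColumn) error
	IsDataAvailable(ctx context.Context, current primitives.Slot, blks ...blocks.ROBlock) error
}

// persistThenCheck buffers sidecars in the store as they are downloaded and
// validated, then verifies availability for the whole batch at import time.
func persistThenCheck(ctx context.Context, store availabilityStore, current primitives.Slot, sidecars []blocks.RODataColumn, blks []blocks.ROBlock) error {
	for _, sc := range sidecars {
		if err := store.Persist(current, sc); err != nil {
			return err
		}
	}
	return store.IsDataAvailable(ctx, current, blks...)
}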
|
||||
|
||||
func currentCustodiedColumns(p p2p.P2P) (peerdas.ColumnIndices, error) {
|
||||
cgc, err := p.CustodyGroupCount()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "custody group count")
|
||||
}
|
||||
|
||||
// Note that in the case where custody_group_count is the minimum CUSTODY_REQUIREMENT, we will
|
||||
// still download the extra columns dictated by SAMPLES_PER_SLOT. This is a hack to avoid complexity in the DA check.
|
||||
// We may want to revisit this to reduce bandwidth and storage for nodes with 0 validators attached.
|
||||
peerInfo, _, err := peerdas.Info(p.NodeID(), max(cgc, params.BeaconConfig().SamplesPerSlot))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "peer info")
|
||||
}
|
||||
return peerdas.NewColumnIndicesFromMap(peerInfo.CustodyColumns), nil
|
||||
}
|
||||
|
||||
func buildColumnBatch(b batch, fuluBlocks verifiedROBlocks, p p2p.P2P) (*columnBatch, error) {
|
||||
if len(fuluBlocks) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
fuluStart := params.BeaconConfig().FuluForkEpoch
|
||||
// If the batch end slot or the last result block is pre-Fulu, so are the rest.
|
||||
if slots.ToEpoch(b.end) < fuluStart || slots.ToEpoch(fuluBlocks[len(fuluBlocks)-1].Block().Slot()) < fuluStart {
|
||||
return nil, nil
|
||||
}
|
||||
// The last block in the batch is in fulu, but the first one is not.
|
||||
// Find the index of the first fulu block to exclude the pre-fulu blocks.
|
||||
if slots.ToEpoch(fuluBlocks[0].Block().Slot()) < fuluStart {
|
||||
fuluStart := sort.Search(len(fuluBlocks), func(i int) bool {
|
||||
return slots.ToEpoch(fuluBlocks[i].Block().Slot()) >= fuluStart
|
||||
})
|
||||
fuluBlocks = fuluBlocks[fuluStart:]
|
||||
}
|
||||
|
||||
indices, err := currentCustodiedColumns(p)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "current custodied columns")
|
||||
}
|
||||
|
||||
summary := &columnBatch{
|
||||
custodyGroups: indices,
|
||||
toDownload: make(map[[32]byte]*toDownload, len(fuluBlocks)),
|
||||
}
|
||||
for _, b := range fuluBlocks {
|
||||
cmts, err := b.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get blob kzg commitments")
|
||||
}
|
||||
if len(cmts) == 0 {
|
||||
continue
|
||||
}
|
||||
slot := b.Block().Slot()
|
||||
if len(summary.toDownload) == 0 {
|
||||
summary.first = slot
|
||||
}
|
||||
summary.toDownload[b.Root()] = &toDownload{
|
||||
remaining: indices.Copy(),
|
||||
commitments: cmts,
|
||||
}
|
||||
summary.last = slot
|
||||
}
|
||||
|
||||
return summary, nil
|
||||
}
|
||||
beacon-chain/sync/backfill/fulu_transition.go (new file, 70 lines)
@@ -0,0 +1,70 @@
|
||||
package backfill
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/das"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var errAvailabilityCheckerInvalid = errors.New("invalid availability checker state")
|
||||
|
||||
type multiStore struct {
|
||||
fuluStart primitives.Slot
|
||||
columnStore das.AvailabilityChecker
|
||||
blobStore das.AvailabilityChecker
|
||||
}
|
||||
|
||||
// multiStore implements das.AvailabilityChecker.
|
||||
var _ das.AvailabilityChecker = &multiStore{}
|
||||
|
||||
// IsDataAvailable implements the das.AvailabilityStore interface.
|
||||
func (m *multiStore) IsDataAvailable(ctx context.Context, current primitives.Slot, blks ...blocks.ROBlock) error {
|
||||
for i := range blks {
|
||||
// Slice the blocks and route to the appropriate store based on the fulu transition slot.
|
||||
if blks[i].Block().Slot() >= m.fuluStart {
|
||||
if err := m.checkAvailabilityWithFallback(ctx, m.columnStore, current, blks[i:]...); err != nil {
|
||||
return err
|
||||
}
|
||||
// If there were any pre-fulu blocks in the batch, route those to the blob store.
|
||||
if i > 0 {
|
||||
return m.checkAvailabilityWithFallback(ctx, m.blobStore, current, blks[:i]...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
// If we get here, all blocks are before the fulu transition.
|
||||
return m.checkAvailabilityWithFallback(ctx, m.blobStore, current, blks...)
|
||||
}
|
||||
|
||||
func (m *multiStore) checkAvailabilityWithFallback(ctx context.Context, ac das.AvailabilityChecker, current primitives.Slot, blks ...blocks.ROBlock) error {
|
||||
if ac != nil {
|
||||
return ac.IsDataAvailable(ctx, current, blks...)
|
||||
}
|
||||
// TODO: I think this was a hack and should not be necessary any longer.
|
||||
// Perhaps this could happen with lazy initialization of the availability stores
|
||||
// if the batch is pre-deneb or if there are no blobs in the batch?
|
||||
for _, blk := range blks {
|
||||
cmts, err := blk.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(cmts) > 0 {
|
||||
return errAvailabilityCheckerInvalid
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func newMultiStore(fuluStart primitives.Slot, b batch) *multiStore {
|
||||
s := &multiStore{fuluStart: fuluStart}
|
||||
if b.blobs != nil && b.blobs.store != nil {
|
||||
s.blobStore = b.blobs.store
|
||||
}
|
||||
if b.columns != nil && b.columns.store != nil {
|
||||
s.columnStore = b.columns.store
|
||||
}
|
||||
return s
|
||||
}
|
||||
@@ -1,5 +1,42 @@

package backfill

import "github.com/sirupsen/logrus"
import (
	"sync/atomic"
	"time"

	"github.com/sirupsen/logrus"
)

var log = logrus.WithField("prefix", "backfill")

// intervalLogger only logs once per interval. It customizes a single
// instance of the entry/logger and should only be used to control the logging rate for
// *one specific line of code*.
type intervalLogger struct {
	*logrus.Entry
	seconds int64         // seconds is the number of seconds per logging interval
	last    *atomic.Int64 // last is the quantized representation of the last time a log was emitted
}

func newIntervalLogger(base *logrus.Entry, secondsBetweenLogs int64) *intervalLogger {
	return &intervalLogger{
		Entry:   base,
		seconds: secondsBetweenLogs,
		last:    new(atomic.Int64),
	}
}

// Log overrides the Log() method of logrus.Entry, which is called under the hood
// when a log-level specific method (like Info(), Warn(), Error()) is invoked.
// By intercepting this call we can rate limit how often we log.
func (l *intervalLogger) Log(level logrus.Level, args ...interface{}) {
	// The current window is computed as the integer division of the current unix timestamp
	// by the number of seconds per interval.
	current := time.Now().Unix() / l.seconds
	// If Swap yields a different value, then we haven't yet logged within
	// the current window. Swap atomically sets the value so we can just
	// delegate the call and we're done.
	if l.last.Swap(current) != current {
		l.Logger.Log(level, args...)
	}
}
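A minimal usage sketch for the rate-limited logger above (not part of the diff; the five-second interval mirrors the peerFailLogger used by the worker pool below):

	failLog := newIntervalLogger(log, 5)
	// Called in a hot loop, at most one entry per 5-second window reaches the
	// underlying logger, because Log quantizes time.Now().Unix() into 5-second buckets.
	failLog.Log(logrus.ErrorLevel, "Failed to select peer for batch")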
@@ -21,40 +21,24 @@ var (
			Help: "Number of batches that are ready to be imported once they can be connected to the existing chain.",
		},
	)
	backfillRemainingBatches = promauto.NewGauge(
	batchesRemaining = promauto.NewGauge(
		prometheus.GaugeOpts{
			Name: "backfill_remaining_batches",
			Help: "Backfill remaining batches.",
		},
	)
	backfillBatchesImported = promauto.NewCounter(
	batchesImported = promauto.NewCounter(
		prometheus.CounterOpts{
			Name: "backfill_batches_imported",
			Help: "Number of backfill batches downloaded and imported.",
		},
	)
	backfillBlocksApproximateBytes = promauto.NewCounter(
		prometheus.CounterOpts{
			Name: "backfill_blocks_bytes_downloaded",
			Help: "BeaconBlock bytes downloaded from peers for backfill.",
		},
	)
	backfillBlobsApproximateBytes = promauto.NewCounter(
		prometheus.CounterOpts{
			Name: "backfill_blobs_bytes_downloaded",
			Help: "BlobSidecar bytes downloaded from peers for backfill.",
		},
	)
	backfillBlobsDownloadCount = promauto.NewCounter(
		prometheus.CounterOpts{
			Name: "backfill_blobs_download_count",
			Help: "Number of BlobSidecar values downloaded from peers for backfill.",
		},
	)
	backfillBlocksDownloadCount = promauto.NewCounter(
		prometheus.CounterOpts{
			Name: "backfill_blocks_download_count",
			Help: "Number of BeaconBlock values downloaded from peers for backfill.",

	backfillBatchTimeWaiting = promauto.NewHistogram(
		prometheus.HistogramOpts{
			Name:    "backfill_batch_time_waiting",
			Help:    "Time batch waited for a suitable peer.",
			Buckets: []float64{50, 100, 300, 1000, 2000},
		},
	)
	backfillBatchTimeRoundtrip = promauto.NewHistogram(
@@ -64,43 +48,90 @@ var (
			Buckets: []float64{400, 800, 1600, 3200, 6400, 12800},
		},
	)
	backfillBatchTimeWaiting = promauto.NewHistogram(
		prometheus.HistogramOpts{
			Name:    "backfill_batch_time_waiting",
			Help:    "Time batch waited for a suitable peer.",
			Buckets: []float64{50, 100, 300, 1000, 2000},

	blockDownloadCount = promauto.NewCounter(
		prometheus.CounterOpts{
			Name: "backfill_blocks_download_count",
			Help: "Number of BeaconBlock values downloaded from peers for backfill.",
		},
	)
	backfillBatchTimeDownloadingBlocks = promauto.NewHistogram(
	blockDownloadBytesApprox = promauto.NewCounter(
		prometheus.CounterOpts{
			Name: "backfill_blocks_bytes_downloaded",
			Help: "BeaconBlock bytes downloaded from peers for backfill.",
		},
	)
	blockDownloadMs = promauto.NewHistogram(
		prometheus.HistogramOpts{
			Name:    "backfill_batch_blocks_time_download",
			Help:    "Time, in milliseconds, batch spent downloading blocks from peer.",
			Help:    "BeaconBlock download time, in ms.",
			Buckets: []float64{100, 300, 1000, 2000, 4000, 8000},
		},
	)
	backfillBatchTimeDownloadingBlobs = promauto.NewHistogram(
		prometheus.HistogramOpts{
			Name:    "backfill_batch_blobs_time_download",
			Help:    "Time, in milliseconds, batch spent downloading blobs from peer.",
			Buckets: []float64{100, 300, 1000, 2000, 4000, 8000},
		},
	)
	backfillBatchTimeVerifying = promauto.NewHistogram(
	blockVerifyMs = promauto.NewHistogram(
		prometheus.HistogramOpts{
			Name:    "backfill_batch_time_verify",
			Help:    "Time batch spent downloading blocks from peer.",
			Help:    "BeaconBlock verification time, in ms.",
			Buckets: []float64{100, 300, 1000, 2000, 4000, 8000},
		},
	)

	blobSidecarDownloadCount = promauto.NewCounter(
		prometheus.CounterOpts{
			Name: "backfill_blobs_download_count",
			Help: "Number of BlobSidecar values downloaded from peers for backfill.",
		},
	)
	blobSidecarDownloadBytesApprox = promauto.NewCounter(
		prometheus.CounterOpts{
			Name: "backfill_blobs_bytes_downloaded",
			Help: "BlobSidecar bytes downloaded from peers for backfill.",
		},
	)
	blobSidecarDownloadMs = promauto.NewHistogram(
		prometheus.HistogramOpts{
			Name:    "backfill_batch_blobs_time_download",
			Help:    "BlobSidecar download time, in ms.",
			Buckets: []float64{100, 300, 1000, 2000, 4000, 8000},
		},
	)

	dataColumnSidecarDownloadCount = promauto.NewCounterVec(
		prometheus.CounterOpts{
			Name: "backfill_data_column_sidecar_downloaded",
			Help: "Number of DataColumnSidecar values downloaded from peers for backfill.",
		},
		[]string{"index", "validity"},
	)
	dataColumnSidecarDownloadBytes = promauto.NewCounter(
		prometheus.CounterOpts{
			Name: "backfill_data_column_sidecar_bytes_downloaded",
			Help: "DataColumnSidecar bytes downloaded from peers for backfill.",
		},
	)
	dataColumnSidecarDownloadMs = promauto.NewHistogram(
		prometheus.HistogramOpts{
			Name:    "backfill_batch_columns_time_download",
			Help:    "DataColumnSidecars download time, in ms.",
			Buckets: []float64{100, 300, 1000, 2000, 4000, 8000},
		},
	)
	dataColumnSidecarVerifyMs = promauto.NewHistogram(
		prometheus.HistogramOpts{
			Name:    "backfill_batch_columns_time_verify",
			Help:    "DataColumnSidecars verification time, in ms.",
			Buckets: []float64{100, 300, 1000, 2000, 4000, 8000},
		},
	)
)

func blobValidationMetrics(_ blocks.ROBlob) error {
	backfillBlobsDownloadCount.Inc()
	blobSidecarDownloadCount.Inc()
	return nil
}

func blockValidationMetrics(interfaces.ReadOnlySignedBeaconBlock) error {
	backfillBlocksDownloadCount.Inc()
	blockDownloadCount.Inc()
	return nil
}

@@ -2,22 +2,18 @@ package backfill

import (
	"context"
	"math"
	"time"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/pkg/errors"
)

type batchWorkerPool interface {
	spawn(ctx context.Context, n int, clock *startup.Clock, a PeerAssigner, v *verifier, cm sync.ContextByteVersions, blobVerifier verification.NewBlobVerifier, bfs *filesystem.BlobStorage)
	spawn(ctx context.Context, n int, a PeerAssigner, cfg *workerCfg)
	todo(b batch)
	complete() (batch, error)
}
@@ -26,25 +22,33 @@ type worker interface {
	run(context.Context)
}

type newWorker func(id workerId, in, out chan batch, c *startup.Clock, v *verifier, cm sync.ContextByteVersions, nbv verification.NewBlobVerifier, bfs *filesystem.BlobStorage) worker
type newWorker func(id workerId, in, out chan batch, cfg *workerCfg) worker

func defaultNewWorker(p p2p.P2P) newWorker {
	return func(id workerId, in, out chan batch, c *startup.Clock, v *verifier, cm sync.ContextByteVersions, nbv verification.NewBlobVerifier, bfs *filesystem.BlobStorage) worker {
		return newP2pWorker(id, p, in, out, c, v, cm, nbv, bfs)
	return func(id workerId, in, out chan batch, cfg *workerCfg) worker {
		return newP2pWorker(id, p, in, out, cfg)
	}
}

// minRequestInterval is the minimum amount of time between requests.
// i.e. a value of 1s means we'll make ~1 req/sec per peer.
const minReqInterval = time.Second

type p2pBatchWorkerPool struct {
	maxBatches int
	newWorker newWorker
	toWorkers chan batch
	fromWorkers chan batch
	toRouter chan batch
	fromRouter chan batch
	shutdownErr chan error
	endSeq []batch
	ctx context.Context
	cancel func()
	maxBatches     int
	newWorker      newWorker
	toWorkers      chan batch
	fromWorkers    chan batch
	toRouter       chan batch
	fromRouter     chan batch
	shutdownErr    chan error
	endSeq         []batch
	ctx            context.Context
	cancel         func()
	earliest       primitives.Slot
	peerCache      *sync.DASPeerCache
	p2p            p2p.P2P
	peerFailLogger *intervalLogger
}

var _ batchWorkerPool = &p2pBatchWorkerPool{}
@@ -52,21 +56,24 @@ var _ batchWorkerPool = &p2pBatchWorkerPool{}
func newP2PBatchWorkerPool(p p2p.P2P, maxBatches int) *p2pBatchWorkerPool {
	nw := defaultNewWorker(p)
	return &p2pBatchWorkerPool{
		newWorker: nw,
		toRouter: make(chan batch, maxBatches),
		fromRouter: make(chan batch, maxBatches),
		toWorkers: make(chan batch),
		fromWorkers: make(chan batch),
		maxBatches: maxBatches,
		shutdownErr: make(chan error),
		newWorker:      nw,
		toRouter:       make(chan batch, maxBatches),
		fromRouter:     make(chan batch, maxBatches),
		toWorkers:      make(chan batch),
		fromWorkers:    make(chan batch),
		maxBatches:     maxBatches,
		shutdownErr:    make(chan error),
		peerCache:      sync.NewDASPeerCache(p),
		p2p:            p,
		peerFailLogger: newIntervalLogger(log, 5),
	}
}

func (p *p2pBatchWorkerPool) spawn(ctx context.Context, n int, c *startup.Clock, a PeerAssigner, v *verifier, cm sync.ContextByteVersions, nbv verification.NewBlobVerifier, bfs *filesystem.BlobStorage) {
func (p *p2pBatchWorkerPool) spawn(ctx context.Context, n int, a PeerAssigner, cfg *workerCfg) {
	p.ctx, p.cancel = context.WithCancel(ctx)
	go p.batchRouter(a)
	for i := 0; i < n; i++ {
		go p.newWorker(workerId(i), p.toWorkers, p.fromWorkers, c, v, cm, nbv, bfs).run(p.ctx)
		go p.newWorker(workerId(i), p.toWorkers, p.fromWorkers, cfg).run(p.ctx)
	}
}

@@ -103,7 +110,6 @@ func (p *p2pBatchWorkerPool) batchRouter(pa PeerAssigner) {
	busy := make(map[peer.ID]bool)
	todo := make([]batch, 0)
	rt := time.NewTicker(time.Second)
	earliest := primitives.Slot(math.MaxUint64)
	for {
		select {
		case b := <-p.toRouter:
@@ -115,51 +121,89 @@ func (p *p2pBatchWorkerPool) batchRouter(pa PeerAssigner) {
			// This ticker exists to periodically break out of the channel select
			// to retry failed assignments.
		case b := <-p.fromWorkers:
			pid := b.busy
			busy[pid] = false
			if b.state == batchBlobSync {
				todo = append(todo, b)
				sortBatchDesc(todo)
			} else {
			pid := b.peer
			delete(busy, pid)
			if b.workComplete() {
				p.fromRouter <- b
				break
			}
			todo = append(todo, b)
			sortBatchDesc(todo)
		case <-p.ctx.Done():
			log.WithError(p.ctx.Err()).Info("p2pBatchWorkerPool context canceled, shutting down")
			p.shutdown(p.ctx.Err())
			return
		}
		if len(todo) == 0 {
			continue
		}
		// Try to assign as many outstanding batches as possible to peers and feed the assigned batches to workers.
		assigned, err := pa.Assign(busy, len(todo))
		var err error
		todo, err = p.processTodo(todo, pa, busy)
		if err != nil {
			if errors.Is(err, peers.ErrInsufficientSuitable) {
				// Transient error resulting from insufficient number of connected peers. Leave batches in
				// queue and get to them whenever the peer situation is resolved.
				continue
			}
			p.shutdown(err)
			return
		}
		for _, pid := range assigned {
			if err := todo[0].waitUntilReady(p.ctx); err != nil {
				log.WithError(p.ctx.Err()).Info("p2pBatchWorkerPool context canceled, shutting down")
				p.shutdown(p.ctx.Err())
				return
			}
			busy[pid] = true
			todo[0].busy = pid
			p.toWorkers <- todo[0].withPeer(pid)
			if todo[0].begin < earliest {
				earliest = todo[0].begin
				oldestBatch.Set(float64(earliest))
			}
			todo = todo[1:]
		}
	}
}

func (p *p2pBatchWorkerPool) processTodo(todo []batch, pa PeerAssigner, busy map[peer.ID]bool) ([]batch, error) {
	if len(todo) == 0 {
		return todo, nil
	}
	notBusy, err := pa.Assign(peers.NotBusy(busy, -1))
	if err != nil {
		if errors.Is(err, peers.ErrInsufficientSuitable) {
			// Transient error resulting from insufficient number of connected peers. Leave batches in
			// queue and get to them whenever the peer situation is resolved.
			return todo, nil
		}
		return nil, err
	}
	if len(notBusy) == 0 {
		log.Warn("No suitable peers available for batch assignment")
		return todo, nil
	}

	custodied, err := currentCustodiedColumns(p.p2p)
	if err != nil {
		return nil, errors.Wrap(err, "current custodied columns")
	}
	picker, err := p.peerCache.NewPicker(notBusy, custodied, minReqInterval)
	if err != nil {
		log.WithError(err).Error("Failed to compute column-weighted peer scores")
		return todo, nil
	}

	for i, b := range todo {
		if b.state == batchErrRetryable {
			// Columns can fail in a partial fashion, so we need to reset
			// components that track peer interactions for multiple columns
			// to enable partial retries.
			b = resetRetryableColumns(b)
			// Set the next correct state after retryable error
			b = b.transitionToNext()
		}
		pid, cols, err := b.selectPeer(picker, busy)
		if err != nil {
			p.peerFailLogger.WithField("notBusy", len(notBusy)).WithError(err).WithFields(b.logFields()).Error("Failed to select peer for batch")
			// Return the remaining todo items and allow the outer loop to control when we try again.
			return todo[i:], nil
		}
		busy[pid] = true
		b.peer = pid
		b.nextReqCols = cols
		// TODO: these metrics are all messed up
		backfillBatchTimeWaiting.Observe(float64(time.Since(b.scheduled).Milliseconds()))
		p.toWorkers <- b
		p.updateEarliest(b.begin)
	}
	return []batch{}, nil
}

func (p *p2pBatchWorkerPool) updateEarliest(current primitives.Slot) {
	if current >= p.earliest {
		return
	}
	p.earliest = current
	oldestBatch.Set(float64(p.earliest))
}

func (p *p2pBatchWorkerPool) shutdown(err error) {
	p.cancel()
	p.shutdownErr <- err

@@ -6,6 +6,7 @@ import (
	"time"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
	p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
@@ -24,7 +25,7 @@ type mockAssigner struct {

// Assign satisfies the PeerAssigner interface so that mockAssigner can be used in tests
// in place of the concrete p2p implementation of PeerAssigner.
func (m mockAssigner) Assign(busy map[peer.ID]bool, n int) ([]peer.ID, error) {
func (m mockAssigner) Assign(filter peers.AssignmentFilter) ([]peer.ID, error) {
	if m.err != nil {
		return nil, m.err
	}
@@ -53,7 +54,8 @@ func TestPoolDetectAllEnded(t *testing.T) {
	ctxMap, err := sync.ContextByteVersionsForValRoot(bytesutil.ToBytes32(st.GenesisValidatorsRoot()))
	require.NoError(t, err)
	bfs := filesystem.NewEphemeralBlobStorage(t)
	pool.spawn(ctx, nw, startup.NewClock(time.Now(), [32]byte{}), ma, v, ctxMap, mockNewBlobVerifier, bfs)
	wcfg := &workerCfg{clock: startup.NewClock(time.Now(), [32]byte{}), newVB: mockNewBlobVerifier, verifier: v, ctxMap: ctxMap, blobStore: bfs}
	pool.spawn(ctx, nw, ma, wcfg)
	br := batcher{min: 10, size: 10}
	endSeq := br.before(0)
	require.Equal(t, batchEndSequence, endSeq.state)
@@ -72,7 +74,7 @@ type mockPool struct {
	todoChan chan batch
}

func (m *mockPool) spawn(_ context.Context, _ int, _ *startup.Clock, _ PeerAssigner, _ *verifier, _ sync.ContextByteVersions, _ verification.NewBlobVerifier, _ *filesystem.BlobStorage) {
func (m *mockPool) spawn(_ context.Context, _ int, _ PeerAssigner, _ *workerCfg) {
}

func (m *mockPool) todo(b batch) {

@@ -6,9 +6,10 @@ import (
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
@@ -21,26 +22,26 @@ import (
)

type Service struct {
	ctx context.Context
	enabled bool // service is disabled by default while feature is experimental
	clock *startup.Clock
	store *Store
	ms minimumSlotter
	cw startup.ClockWaiter
	verifierWaiter InitializerWaiter
	newBlobVerifier verification.NewBlobVerifier
	nWorkers int
	batchSeq *batchSequencer
	batchSize uint64
	pool batchWorkerPool
	verifier *verifier
	ctxMap sync.ContextByteVersions
	p2p p2p.P2P
	pa PeerAssigner
	batchImporter batchImporter
	blobStore *filesystem.BlobStorage
	initSyncWaiter func() error
	complete chan struct{}
	ctx            context.Context
	enabled        bool // service is disabled by default while feature is experimental
	clock          *startup.Clock
	store          *Store
	ms             minimumSlotter
	cw             startup.ClockWaiter
	verifierWaiter InitializerWaiter
	nWorkers       int
	batchSeq       *batchSequencer
	batchSize      uint64
	pool           batchWorkerPool
	p2p            p2p.P2P
	pa             PeerAssigner
	batchImporter  batchImporter
	blobStore      *filesystem.BlobStorage
	dcStore        *filesystem.DataColumnStorage
	initSyncWaiter func() error
	complete       chan struct{}
	workerCfg      *workerCfg
	fuluStart      primitives.Slot
}

var _ runtime.Service = (*Service)(nil)
@@ -49,23 +50,13 @@ var _ runtime.Service = (*Service)(nil)
// to service an RPC blockRequest. The Assign method takes a map of peers that should be excluded,
// allowing the caller to avoid making multiple concurrent requests to the same peer.
type PeerAssigner interface {
	Assign(busy map[peer.ID]bool, n int) ([]peer.ID, error)
	//Assign(busy map[peer.ID]bool, n int) ([]peer.ID, error)
	Assign(filter peers.AssignmentFilter) ([]peer.ID, error)
}

type minimumSlotter func(primitives.Slot) primitives.Slot
type batchImporter func(ctx context.Context, current primitives.Slot, b batch, su *Store) (*dbval.BackfillStatus, error)

func defaultBatchImporter(ctx context.Context, current primitives.Slot, b batch, su *Store) (*dbval.BackfillStatus, error) {
	status := su.status()
	if err := b.ensureParent(bytesutil.ToBytes32(status.LowParentRoot)); err != nil {
		return status, err
	}
	// Import blocks to db and update db state to reflect the newly imported blocks.
	// Other parts of the beacon node may use the same StatusUpdater instance
	// via the coverage.AvailableBlocker interface to safely determine if a given slot has been backfilled.
	return su.fillBack(ctx, current, b.results, b.availabilityStore())
}

// ServiceOption represents a functional option for the backfill service constructor.
type ServiceOption func(*Service) error

@@ -140,46 +131,31 @@ func WithMinimumSlot(s primitives.Slot) ServiceOption {

// NewService initializes the backfill Service. Like all implementations of the Service interface,
// the service won't begin its runloop until Start() is called.
func NewService(ctx context.Context, su *Store, bStore *filesystem.BlobStorage, cw startup.ClockWaiter, p p2p.P2P, pa PeerAssigner, opts ...ServiceOption) (*Service, error) {
func NewService(ctx context.Context, su *Store, bStore *filesystem.BlobStorage, dcStore *filesystem.DataColumnStorage, cw startup.ClockWaiter, p p2p.P2P, pa PeerAssigner, opts ...ServiceOption) (*Service, error) {
	s := &Service{
		ctx: ctx,
		store: su,
		blobStore: bStore,
		cw: cw,
		ms: minimumBackfillSlot,
		p2p: p,
		pa: pa,
		batchImporter: defaultBatchImporter,
		complete: make(chan struct{}),
		ctx:       ctx,
		store:     su,
		blobStore: bStore,
		dcStore:   dcStore,
		cw:        cw,
		ms:        minimumBackfillSlot,
		p2p:       p,
		pa:        pa,
		complete:  make(chan struct{}),
		fuluStart: slots.SafeEpochStartOrMax(params.BeaconConfig().FuluForkEpoch),
	}
	s.batchImporter = s.defaultBatchImporter
	for _, o := range opts {
		if err := o(s); err != nil {
			return nil, err
		}
	}

	s.pool = newP2PBatchWorkerPool(p, s.nWorkers)

	return s, nil
}

func (s *Service) initVerifier(ctx context.Context) (*verifier, sync.ContextByteVersions, error) {
	cps, err := s.store.originState(ctx)
	if err != nil {
		return nil, nil, err
	}
	keys, err := cps.PublicKeys()
	if err != nil {
		return nil, nil, errors.Wrap(err, "unable to retrieve public keys for all validators in the origin state")
	}
	vr := cps.GenesisValidatorsRoot()
	ctxMap, err := sync.ContextByteVersionsForValRoot(bytesutil.ToBytes32(vr))
	if err != nil {
		return nil, nil, errors.Wrapf(err, "unable to initialize context version map using genesis validator root %#x", vr)
	}
	v, err := newBackfillVerifier(vr, keys)
	return v, ctxMap, err
}

func (s *Service) updateComplete() bool {
	b, err := s.pool.complete()
	if err != nil {
@@ -201,18 +177,18 @@ func (s *Service) importBatches(ctx context.Context) {
		if imported == 0 {
			return
		}
		backfillBatchesImported.Add(float64(imported))
		batchesImported.Add(float64(imported))
	}()
	current := s.clock.CurrentSlot()
	for i := range importable {
		ib := importable[i]
		if len(ib.results) == 0 {
		if len(ib.blocks) == 0 {
			log.WithFields(ib.logFields()).Error("Batch with no results, skipping importer")
		}
		_, err := s.batchImporter(ctx, current, ib, s.store)
		if err != nil {
			log.WithError(err).WithFields(ib.logFields()).Debug("Backfill batch failed to import")
			s.downscorePeer(ib.blockPid, "backfillBatchImportError")
			s.downscorePeer(ib.blockPid, "backfillBatchImportError", err)
			s.batchSeq.update(ib.withState(batchErrRetryable))
			// If a batch fails, the subsequent batches are no longer considered importable.
			break
@@ -227,7 +203,19 @@ func (s *Service) importBatches(ctx context.Context) {
		WithField("batchesRemaining", nt).
		Info("Backfill batches processed")

	backfillRemainingBatches.Set(float64(nt))
	batchesRemaining.Set(float64(nt))
}

func (s *Service) defaultBatchImporter(ctx context.Context, current primitives.Slot, b batch, su *Store) (*dbval.BackfillStatus, error) {
	status := su.status()
	if err := b.ensureParent(bytesutil.ToBytes32(status.LowParentRoot)); err != nil {
		return status, err
	}
	// Import blocks to db and update db state to reflect the newly imported blocks.
	// Other parts of the beacon node may use the same StatusUpdater instance
	// via the coverage.AvailableBlocker interface to safely determine if a given slot has been backfilled.

	return su.fillBack(ctx, current, b.blocks, newMultiStore(s.fuluStart, b))
}

func (s *Service) scheduleTodos() {
@@ -261,25 +249,19 @@ func (s *Service) Start() {
		log.Info("Backfill service is shutting down")
		cancel()
	}()
	clock, err := s.cw.WaitForClock(ctx)
	if err != nil {
		log.WithError(err).Error("Backfill service failed to start while waiting for genesis data")
		return
	}
	s.clock = clock
	v, err := s.verifierWaiter.WaitForInitializer(ctx)
	s.newBlobVerifier = newBlobVerifierFromInitializer(v)

	if err != nil {
		log.WithError(err).Error("Could not initialize blob verifier in backfill service")
		return
	}

	if s.store.isGenesisSync() {
		log.Info("Backfill short-circuit; node synced from genesis")
		s.markComplete()
		return
	}

	clock, err := s.cw.WaitForClock(ctx)
	if err != nil {
		log.WithError(err).Error("Backfill service failed to start while waiting for genesis data")
		return
	}
	s.clock = clock
	status := s.store.status()
	// Exit early if there aren't going to be any batches to backfill.
	if primitives.Slot(status.LowSlot) <= s.ms(s.clock.CurrentSlot()) {
@@ -289,11 +271,6 @@ func (s *Service) Start() {
		s.markComplete()
		return
	}
	s.verifier, s.ctxMap, err = s.initVerifier(ctx)
	if err != nil {
		log.WithError(err).Error("Unable to initialize backfill verifier")
		return
	}

	if s.initSyncWaiter != nil {
		log.Info("Backfill service waiting for initial-sync to reach head before starting")
@@ -302,7 +279,22 @@ func (s *Service) Start() {
			return
		}
	}
	s.pool.spawn(ctx, s.nWorkers, clock, s.pa, s.verifier, s.ctxMap, s.newBlobVerifier, s.blobStore)

	if s.workerCfg == nil {
		s.workerCfg = &workerCfg{
			clock:     s.clock,
			blobStore: s.blobStore,
			colStore:  s.dcStore,
			downscore: s.downscorePeer,
		}
		s.workerCfg, err = initWorkerCfg(ctx, s.workerCfg, s.verifierWaiter, s.store)
		if err != nil {
			log.WithError(err).Error("Could not initialize blob verifier in backfill service")
			return
		}
	}

	s.pool.spawn(ctx, s.nWorkers, s.pa, s.workerCfg)
	s.batchSeq = newBatchSequencer(s.nWorkers, s.ms(s.clock.CurrentSlot()), primitives.Slot(status.LowSlot), primitives.Slot(s.batchSize))
	if err = s.initBatches(); err != nil {
		log.WithError(err).Error("Non-recoverable error in backfill service")
@@ -367,6 +359,12 @@ func newBlobVerifierFromInitializer(ini *verification.Initializer) verification.
	}
}

func newDataColumnVerifierFromInitializer(ini *verification.Initializer) verification.NewDataColumnsVerifier {
	return func(cols []blocks.RODataColumn, reqs []verification.Requirement) verification.DataColumnsVerifier {
		return ini.NewDataColumnsVerifier(cols, reqs)
	}
}

func (s *Service) markComplete() {
	close(s.complete)
	log.Info("Backfill service marked as complete")
@@ -381,7 +379,11 @@ func (s *Service) WaitForCompletion() error {
	}
}

func (s *Service) downscorePeer(peerID peer.ID, reason string) {
func (s *Service) downscorePeer(peerID peer.ID, reason string, err error) {
	newScore := s.p2p.Peers().Scorers().BadResponsesScorer().Increment(peerID)
	log.WithFields(logrus.Fields{"peerID": peerID, "reason": reason, "newScore": newScore}).Debug("Downscore peer")
	logArgs := log.WithFields(logrus.Fields{"peerID": peerID, "reason": reason, "newScore": newScore})
	if err != nil {
		logArgs = logArgs.WithError(err)
	}
	logArgs.Debug("Downscore peer")
}

@@ -57,7 +57,8 @@ func TestServiceInit(t *testing.T) {
	pool := &mockPool{todoChan: make(chan batch, nWorkers), finishedChan: make(chan batch, nWorkers)}
	p2pt := p2ptest.NewTestP2P(t)
	bfs := filesystem.NewEphemeralBlobStorage(t)
	srv, err := NewService(ctx, su, bfs, cw, p2pt, &mockAssigner{},
	dcs := filesystem.NewEphemeralDataColumnStorage(t)
	srv, err := NewService(ctx, su, bfs, dcs, cw, p2pt, &mockAssigner{},
		WithBatchSize(batchSize), WithWorkerCount(nWorkers), WithEnableBackfill(true), WithVerifierWaiter(&mockInitalizerWaiter{}))
	require.NoError(t, err)
	srv.ms = mockMinimumSlotter{min: primitives.Slot(high - batchSize*uint64(nBatches))}.minimumSlot

@@ -74,7 +74,7 @@ func (s *Store) status() *dbval.BackfillStatus {

// fillBack saves the slice of blocks and updates the BackfillStatus LowSlot/Root/ParentRoot tracker to the values
// from the first block in the slice. This method assumes that the block slice has been fully validated and
// sorted in slot order by the calling function.
func (s *Store) fillBack(ctx context.Context, current primitives.Slot, blocks []blocks.ROBlock, store das.AvailabilityStore) (*dbval.BackfillStatus, error) {
func (s *Store) fillBack(ctx context.Context, current primitives.Slot, blocks []blocks.ROBlock, store das.AvailabilityChecker) (*dbval.BackfillStatus, error) {
	status := s.status()
	if len(blocks) == 0 {
		return status, nil
@@ -88,10 +88,8 @@ func (s *Store) fillBack(ctx context.Context, current primitives.Slot, blocks []
			status.LowParentRoot, highest.Root(), status.LowSlot, highest.Block().Slot())
	}

	for i := range blocks {
		if err := store.IsDataAvailable(ctx, current, blocks[i]); err != nil {
			return nil, err
		}
	if err := store.IsDataAvailable(ctx, current, blocks...); err != nil {
		return nil, err
	}

	if err := s.store.SaveROBlocks(ctx, blocks, false); err != nil {

@@ -5,7 +5,6 @@ import (
	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/crypto/bls"
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
@@ -22,18 +21,34 @@ var errUnknownDomain = errors.New("runtime error looking up signing domain for f
type verifiedROBlocks []blocks.ROBlock

func (v verifiedROBlocks) blobIdents(retentionStart primitives.Slot) ([]blobSummary, error) {
	// early return if the newest block is outside the retention window
	if len(v) > 0 && v[len(v)-1].Block().Slot() < retentionStart {
	if len(v) == 0 {
		return nil, nil
	}
	latest := v[len(v)-1].Block().Slot()
	// early return if the newest block is outside the retention window
	if latest < retentionStart {
		return nil, nil
	}
	fuluStart := params.BeaconConfig().FuluForkEpoch
	// If the batch end slot or last result block are pre-fulu, so are the rest.
	if slots.ToEpoch(latest) >= fuluStart {
		return nil, nil
	}

	bs := make([]blobSummary, 0)
	for i := range v {
		if v[i].Block().Slot() < retentionStart {
		slot := v[i].Block().Slot()
		if slot < retentionStart {
			continue
		}
		if v[i].Block().Version() < version.Deneb {
			continue
		}
		// Assuming blocks are sorted, as soon as we see 1 fulu block we know the rest will also be fulu.
		if slots.ToEpoch(slot) >= fuluStart {
			return bs, nil
		}

		c, err := v[i].Block().Body().BlobKzgCommitments()
		if err != nil {
			return nil, errors.Wrapf(err, "unexpected error checking commitments for block root %#x", v[i].Root())
@@ -56,37 +71,31 @@ type verifier struct {
	domain *domainCache
}

// TODO: rewrite this to use ROBlock.
func (vr verifier) verify(blks []interfaces.ReadOnlySignedBeaconBlock) (verifiedROBlocks, error) {
func (vr verifier) verify(blks []blocks.ROBlock) (verifiedROBlocks, error) {
	var err error
	result := make([]blocks.ROBlock, len(blks))
	sigSet := bls.NewSet()
	for i := range blks {
		result[i], err = blocks.NewROBlock(blks[i])
		if err != nil {
			return nil, err
		}
		if i > 0 && result[i-1].Root() != result[i].Block().ParentRoot() {
			p, b := result[i-1], result[i]
		if i > 0 && blks[i-1].Root() != blks[i].Block().ParentRoot() {
			p, b := blks[i-1], blks[i]
			return nil, errors.Wrapf(errInvalidBatchChain,
				"slot %d parent_root=%#x, slot %d root=%#x",
				b.Block().Slot(), b.Block().ParentRoot(),
				p.Block().Slot(), p.Root())
		}
		set, err := vr.blockSignatureBatch(result[i])
		set, err := vr.blockSignatureBatch(blks[i])
		if err != nil {
			return nil, err
			return nil, errors.Wrap(err, "block signature batch")
		}
		sigSet.Join(set)
	}
	v, err := sigSet.Verify()
	if err != nil {
		return nil, errors.Wrap(err, "block signature verification error")
		return nil, errors.Wrap(err, "SignatureBatch Verify")
	}
	if !v {
		return nil, errors.New("batch block signature verification failed")
		return nil, errors.New("SignatureBatch Verify invalid")
	}
	return result, nil
	return blks, nil
}

func (vr verifier) blockSignatureBatch(b blocks.ROBlock) (*bls.SignatureBatch, error) {

177
beacon-chain/sync/backfill/verify_column.go
Normal file
@@ -0,0 +1,177 @@
package backfill

import (
	"io"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/das"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/pkg/errors"
)

type columnBisector struct {
	rootKeys     map[[32]byte]rootKey
	pidKeys      map[peer.ID]pidKey
	columnSource map[rootKey]map[uint64]pidKey
	bisected     map[pidKey][]blocks.RODataColumn
	pidIter      []peer.ID
	current      int
	next         int
	downscore    peerDownscorer
	errs         []error
	failures     map[rootKey]peerdas.ColumnIndices
}

type pidKey *peer.ID
type rootKey *[32]byte

var ErrColumnVerification = errors.New("column verification failed")

// TODO: write a method that iterates through the failed columns in the bisector and
// enables the retry code to retry all the failed columns.

func (c *columnBisector) addPeerColumns(pid peer.ID, columns ...blocks.RODataColumn) {
	pk := c.peerIdKey(pid)
	for _, col := range columns {
		c.setColumnSource(c.rootKey(col.BlockRoot()), col.Index, pk)
	}
}

// failuresFor returns the set of column indices that failed verification
// for the given block root.
func (c *columnBisector) failuresFor(root [32]byte) peerdas.ColumnIndices {
	return c.failures[c.rootKey(root)]
}

func (c *columnBisector) failingRoots() [][32]byte {
	roots := make([][32]byte, 0, len(c.failures))
	for rk := range c.failures {
		roots = append(roots, *rk)
	}
	return roots
}

func (c *columnBisector) setColumnSource(rk rootKey, idx uint64, pk pidKey) {
	if c.columnSource == nil {
		c.columnSource = make(map[rootKey]map[uint64]pidKey)
	}
	if c.columnSource[rk] == nil {
		c.columnSource[rk] = make(map[uint64]pidKey)
	}
	c.columnSource[rk][idx] = pk
}

func (c *columnBisector) clearColumnSource(rk rootKey, idx uint64) {
	if c.columnSource == nil {
		return
	}
	if c.columnSource[rk] == nil {
		return
	}
	delete(c.columnSource[rk], idx)
	if len(c.columnSource[rk]) == 0 {
		delete(c.columnSource, rk)
	}
}

func (c *columnBisector) rootKey(root [32]byte) rootKey {
	ptr, ok := c.rootKeys[root]
	if ok {
		return ptr
	}
	c.rootKeys[root] = &root
	return c.rootKeys[root]
}

func (c *columnBisector) peerIdKey(pid peer.ID) pidKey {
	ptr, ok := c.pidKeys[pid]
	if ok {
		return ptr
	}
	c.pidKeys[pid] = &pid
	return c.pidKeys[pid]
}

func (c *columnBisector) peerFor(col blocks.RODataColumn) (pidKey, error) {
	r := c.columnSource[c.rootKey(col.BlockRoot())]
	if len(r) == 0 {
		return nil, errors.Wrap(das.ErrBisectInconsistent, "root not tracked")
	}
	if ptr, ok := r[col.Index]; ok {
		return ptr, nil
	}
	return nil, errors.Wrap(das.ErrBisectInconsistent, "index not tracked for root")
}

// reset prepares the columnBisector to be used to retry failed columns.
// it resets the peer sources of the failed columns and clears the failure records.
func (c *columnBisector) reset() {
	// reset all column sources for failed columns
	for rk, indices := range c.failures {
		for _, idx := range indices.ToSlice() {
			c.clearColumnSource(rk, idx)
		}
	}
	c.failures = make(map[rootKey]peerdas.ColumnIndices)
	c.errs = nil
}

// Bisect initializes columnBisector with the set of columns to bisect.
func (c *columnBisector) Bisect(columns []blocks.RODataColumn) error {
	for _, col := range columns {
		pid, err := c.peerFor(col)
		if err != nil {
			return errors.Wrap(err, "could not lookup peer for column")
		}
		c.bisected[pid] = append(c.bisected[pid], col)
	}
	c.pidIter = make([]peer.ID, 0, len(c.bisected))
	for pid := range c.bisected {
		c.pidIter = append(c.pidIter, *pid)
	}
	// The implementation of Next() assumes these are equal in
	// the base case.
	c.current, c.next = 0, 0
	return nil
}

// Next implements an iterator for the columnBisector.
// Each batch is from a single peer.
func (c *columnBisector) Next() ([]blocks.RODataColumn, error) {
	if c.next >= len(c.pidIter) {
		return nil, io.EOF
	}
	c.current = c.next
	pid := c.pidIter[c.current]
	cols := c.bisected[c.peerIdKey(pid)]
	c.next += 1
	return cols, nil
}

// Error implements das.Bisector.
func (c *columnBisector) Error() error {
	if len(c.errs) > 0 {
		return ErrColumnVerification
	}
	return nil
}

// OnError implements das.Bisector.
func (c *columnBisector) OnError(err error) {
	c.errs = append(c.errs, err)
	pid := c.pidIter[c.current]
	c.downscore(pid, "column verification error", err)
}

var _ das.Bisector = &columnBisector{}

func newColumnBisector(downscorer peerDownscorer) *columnBisector {
	return &columnBisector{
		rootKeys:     make(map[[32]byte]rootKey),
		pidKeys:      make(map[peer.ID]pidKey),
		columnSource: make(map[rootKey]map[uint64]pidKey),
		bisected:     make(map[pidKey][]blocks.RODataColumn),
		downscore:    downscorer,
	}
}
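A hedged sketch of how a das.Bisector like the one above might be driven by retry code (not part of the diff; verifyGroup, downscore, pid, cols, and failedCols are placeholders):

	cb := newColumnBisector(downscore)
	cb.addPeerColumns(pid, cols...) // record which peer served which columns
	if err := cb.Bisect(failedCols); err != nil {
		return err
	}
	for {
		group, err := cb.Next() // one group of columns per serving peer
		if err == io.EOF {
			break
		}
		if verifyErr := verifyGroup(group); verifyErr != nil {
			cb.OnError(verifyErr) // records the failure and downscores that peer
		}
	}
	return cb.Error() // ErrColumnVerification if any per-peer group failed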
@@ -8,7 +8,6 @@ import (
	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/crypto/bls"
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
@@ -70,12 +69,7 @@ func TestVerify(t *testing.T) {
	}
	v, err := newBackfillVerifier(vr, pubkeys)
	require.NoError(t, err)
	notrob := make([]interfaces.ReadOnlySignedBeaconBlock, len(blks))
	// We have to unwrap the ROBlocks for this code because that's what it expects (for now).
	for i := range blks {
		notrob[i] = blks[i].ReadOnlySignedBeaconBlock
	}
	vbs, err := v.verify(notrob)
	vbs, err := v.verify(blks)
	require.NoError(t, err)
	require.Equal(t, len(blks), len(vbs))
}

@@ -9,9 +9,54 @@ import (
	"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/pkg/errors"
)

type peerDownscorer func(peer.ID, string, error)

type workerCfg struct {
	clock     *startup.Clock
	verifier  *verifier
	ctxMap    sync.ContextByteVersions
	newVB     verification.NewBlobVerifier
	newVC     verification.NewDataColumnsVerifier
	blobStore *filesystem.BlobStorage
	colStore  *filesystem.DataColumnStorage
	downscore peerDownscorer
}

func initWorkerCfg(ctx context.Context, cfg *workerCfg, vw InitializerWaiter, store *Store) (*workerCfg, error) {
	vi, err := vw.WaitForInitializer(ctx)
	if err != nil {
		return nil, err
	}
	cps, err := store.originState(ctx)
	if err != nil {
		return nil, err
	}
	keys, err := cps.PublicKeys()
	if err != nil {
		return nil, errors.Wrap(err, "unable to retrieve public keys for all validators in the origin state")
	}
	vr := cps.GenesisValidatorsRoot()
	cm, err := sync.ContextByteVersionsForValRoot(bytesutil.ToBytes32(vr))
	if err != nil {
		return nil, errors.Wrapf(err, "unable to initialize context version map using genesis validator root %#x", vr)
	}
	v, err := newBackfillVerifier(vr, keys)
	if err != nil {
		return nil, errors.Wrapf(err, "newBackfillVerifier failed")
	}
	cfg.verifier = v
	cfg.ctxMap = cm
	cfg.newVB = newBlobVerifierFromInitializer(vi)
	cfg.newVC = newDataColumnVerifierFromInitializer(vi)
	return cfg, nil
}

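A short sketch of how this configuration is wired up (not part of the diff; it mirrors what Service.Start and the pool test do with these functions):

	wcfg := &workerCfg{
		clock:     s.clock,
		blobStore: s.blobStore,
		colStore:  s.dcStore,
		downscore: s.downscorePeer,
	}
	// initWorkerCfg fills in the remaining fields: verifier, ctxMap, newVB, newVC.
	wcfg, err := initWorkerCfg(ctx, wcfg, s.verifierWaiter, s.store)
	if err != nil {
		return err
	}
	s.pool.spawn(ctx, s.nWorkers, s.pa, wcfg)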
type workerId int

type p2pWorker struct {
@@ -19,23 +64,40 @@ type p2pWorker struct {
	todo chan batch
	done chan batch
	p2p  p2p.P2P
	v    *verifier
	c    *startup.Clock
	cm   sync.ContextByteVersions
	nbv  verification.NewBlobVerifier
	bfs  *filesystem.BlobStorage
	cfg  *workerCfg
}

func newP2pWorker(id workerId, p p2p.P2P, todo, done chan batch, cfg *workerCfg) *p2pWorker {
	return &p2pWorker{
		id:   id,
		todo: todo,
		done: done,
		p2p:  p,
		cfg:  cfg,
	}
}

func (w *p2pWorker) run(ctx context.Context) {
	for {
		select {
		case b := <-w.todo:
			log.WithFields(b.logFields()).WithField("backfillWorker", w.id).Debug("Backfill worker received batch")
			if b.state == batchBlobSync {
				w.done <- w.handleBlobs(ctx, b)
			} else {
				w.done <- w.handleBlocks(ctx, b)
			if err := b.waitUntilReady(ctx); err != nil {
				log.WithField("batchId", b.id()).WithError(ctx.Err()).Info("Worker context canceled while waiting to retry")
				continue
			}
			log.WithFields(b.logFields()).WithField("backfillWorker", w.id).Trace("Backfill worker received batch")
			switch b.state {
			case batchSyncBlobs:
				b = w.handleBlobs(ctx, b)
			case batchSyncColumns:
				b = w.handleColumns(ctx, b)
			case batchSequenced:
				b = w.handleBlocks(ctx, b)
			default:
				log.WithFields(b.logFields()).WithField("backfillWorker", w.id).Debug("batch in unhandled state")
				panic("unhandled batch state") // lint:nopanic -- TODO: this panic is temporary / for debugging.
			}
			w.done <- b
		case <-ctx.Done():
			log.WithField("backfillWorker", w.id).Info("Backfill worker exiting after context canceled")
			return
@@ -43,23 +105,51 @@ func (w *p2pWorker) run(ctx context.Context) {
	}
}

func resetRetryableColumns(b batch) batch {
	// return the given batch as-is if it isn't in a state that this func should handle.
	if b.columns == nil || b.columns.bisector == nil || len(b.columns.bisector.errs) == 0 {
		return b
	}
	bisector := b.columns.bisector
	roots := bisector.failingRoots()
	if len(roots) == 0 {
		return b
	}
	// Add all the failed columns back to the toDownload structure.
	for _, root := range roots {
		bc := b.columns.toDownload[root]
		bc.remaining.Union(bisector.failuresFor(root))
	}
	b.columns.bisector.reset()
	return b
}

func (w *p2pWorker) handleBlocks(ctx context.Context, b batch) batch {
	cs := w.c.CurrentSlot()
	blobRetentionStart, err := sync.BlobRPCMinValidSlot(cs)
	current := w.cfg.clock.CurrentSlot()
	// TODO: refactor all the blob and column setup stuff.
	// we know the slot when we first set up the batch, so we should be able to determine if we need the blob setup bits at all
	// before we fetch the blocks. Same goes for the column dependencies.
	blobRetentionStart, err := sync.BlobRPCMinValidSlot(current)
	if err != nil {
		return b.withRetryableError(errors.Wrap(err, "configuration issue, could not compute minimum blob retention slot"))
	}
	b.blockPid = b.busy
	b.blockPid = b.peer
	start := time.Now()
	results, err := sync.SendBeaconBlocksByRangeRequest(ctx, w.c, w.p2p, b.blockPid, b.blockRequest(), blockValidationMetrics)
	dlt := time.Now()
	backfillBatchTimeDownloadingBlocks.Observe(float64(dlt.Sub(start).Milliseconds()))
	results, err := sync.SendBeaconBlocksByRangeRequest(ctx, w.cfg.clock, w.p2p, b.blockPid, b.blockRequest(), blockValidationMetrics)
	if err != nil {
		log.WithError(err).WithFields(b.logFields()).Debug("Batch requesting failed")
		return b.withRetryableError(err)
	}
	vb, err := w.v.verify(results)
	backfillBatchTimeVerifying.Observe(float64(time.Since(dlt).Milliseconds()))
	dlt := time.Now()
	blockDownloadMs.Observe(float64(dlt.Sub(start).Milliseconds()))
	toVerify, err := blocks.NewROBlockSlice(results)
	if err != nil {
		log.WithError(err).WithFields(b.logFields()).Debug("Batch conversion to ROBlock failed")
		return b.withRetryableError(err)
	}

	vb, err := w.cfg.verifier.verify(toVerify)
	blockVerifyMs.Observe(float64(time.Since(dlt).Milliseconds()))
	if err != nil {
		log.WithError(err).WithFields(b.logFields()).Debug("Batch validation failed")
		return b.withRetryableError(err)
@@ -71,46 +161,85 @@ func (w *p2pWorker) handleBlocks(ctx context.Context, b batch) batch {
	for i := range vb {
		bdl += vb[i].SizeSSZ()
	}
	backfillBlocksApproximateBytes.Add(float64(bdl))
	blockDownloadBytesApprox.Add(float64(bdl))
	log.WithFields(b.logFields()).WithField("dlbytes", bdl).Debug("Backfill batch block bytes downloaded")
	bs, err := newBlobSync(cs, vb, &blobSyncConfig{retentionStart: blobRetentionStart, nbv: w.nbv, store: w.bfs})
	bscfg := &blobSyncConfig{retentionStart: blobRetentionStart, nbv: w.cfg.newVB, store: w.cfg.blobStore}
	bs, err := newBlobSync(current, vb, bscfg)
	if err != nil {
		return b.withRetryableError(err)
	}
	return b.withResults(vb, bs)
	cs, err := newColumnSync(b, vb, current, w.p2p, vb, w.cfg)
	if err != nil {
		return b.withRetryableError(err)
	}
	b.blocks = vb
	b.blobs = bs
	b.columns = cs
	return b.transitionToNext()
}

func (w *p2pWorker) handleBlobs(ctx context.Context, b batch) batch {
	b.blobPid = b.busy
	b.blobs.pid = b.peer
	start := time.Now()
	// we don't need to use the response for anything other than metrics, because blobResponseValidation
	// adds each of them to a batch AvailabilityStore once it is checked.
	blobs, err := sync.SendBlobsByRangeRequest(ctx, w.c, w.p2p, b.blobPid, w.cm, b.blobRequest(), b.blobResponseValidator(), blobValidationMetrics)
	blobs, err := sync.SendBlobsByRangeRequest(ctx, w.cfg.clock, w.p2p, b.blobs.pid, w.cfg.ctxMap, b.blobRequest(), b.blobs.validateNext, blobValidationMetrics)
	if err != nil {
		b.bs = nil
		b.blobs = nil
		return b.withRetryableError(err)
	}
	dlt := time.Now()
	backfillBatchTimeDownloadingBlobs.Observe(float64(dlt.Sub(start).Milliseconds()))
	blobSidecarDownloadMs.Observe(float64(dlt.Sub(start).Milliseconds()))
	if len(blobs) > 0 {
		// All blobs are the same size, so we can compute 1 and use it for all in the batch.
		sz := blobs[0].SizeSSZ() * len(blobs)
		backfillBlobsApproximateBytes.Add(float64(sz))
		blobSidecarDownloadBytesApprox.Add(float64(sz))
		log.WithFields(b.logFields()).WithField("dlbytes", sz).Debug("Backfill batch blob bytes downloaded")
	}
	return b.postBlobSync()
	if b.blobs.needed() > 0 {
		// If we are missing blobs after processing the blob step, this is an error and we need to scrap the batch and start over.
		b.blobs = nil
		b.blocks = []blocks.ROBlock{}
		return b.withRetryableError(errors.New("missing blobs after blob download"))
	}
	return b.transitionToNext()
}

func newP2pWorker(id workerId, p p2p.P2P, todo, done chan batch, c *startup.Clock, v *verifier, cm sync.ContextByteVersions, nbv verification.NewBlobVerifier, bfs *filesystem.BlobStorage) *p2pWorker {
	return &p2pWorker{
		id:   id,
		todo: todo,
		done: done,
		p2p:  p,
		v:    v,
		c:    c,
		cm:   cm,
		nbv:  nbv,
		bfs:  bfs,
func (w *p2pWorker) handleColumns(ctx context.Context, b batch) batch {
	start := time.Now()
	b.columns.peer = b.peer

	// Bisector is used to keep track of the peer that provided each column, for scoring purposes.
	// When verification of a batch of columns fails, bisector is used to retry verification with batches
	// grouped by peer, to figure out if the failure is due to a specific peer.
	vr := b.validatingColumnRequest(b.columns.bisector)
	// TODO: the upstream definition of SendDataColumnSidecarsByRangeRequest requires this params type
	// which has several ambiguously optional fields. The sidecar request functions should be refactored
	// to use a more explicit set of parameters. RateLimiter, Storage and NewVerifier are not used inside
	// SendDataColumnSidecarsByRangeRequest.
	p := sync.DataColumnSidecarsParams{
		Ctx: ctx,
		Tor: w.cfg.clock,
		P2P: w.p2p,
		//RateLimiter *leakybucket.Collector
		CtxMap: w.cfg.ctxMap,
		//Storage: w.cfg.cfs,
		//NewVerifier: vr.validate,
	}
	// The return is dropped because the validation code adds the columns
	// to the columnSync AvailabilityStore under the hood.
	_, err := sync.SendDataColumnSidecarsByRangeRequest(p, b.columns.peer, vr.req, vr.validate)
	if err != nil {
		if shouldDownscore(err) {
			w.cfg.downscore(b.columns.peer, "bad SendDataColumnSidecarsByRangeRequest response", err)
		}
		return b.withRetryableError(errors.Wrap(err, "failed to request data column sidecars"))
	}
	dataColumnSidecarDownloadMs.Observe(float64(time.Since(start).Milliseconds()))
	return b.transitionToNext()
}

func shouldDownscore(err error) bool {
	return errors.Is(err, errInvalidDataColumnResponse) ||
		errors.Is(err, sync.ErrInvalidFetchedData)
}

236
beacon-chain/sync/data_column_assignment.go
Normal file
@@ -0,0 +1,236 @@
package sync

import (
	"cmp"
	"math"
	"slices"
	"time"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/pkg/errors"
)

// DASPeerCache caches information about a set of peers' DAS peering decisions.
type DASPeerCache struct {
	p2pSvc p2p.P2P
	peers  map[peer.ID]*dasPeer
}

// dasPeer represents a peer's custody of columns and their coverage score.
type dasPeer struct {
	pid          peer.ID
	enid         enode.ID
	custodied    peerdas.ColumnIndices
	lastAssigned time.Time
}

// dasPeerScore is used to build a slice of peer+score pairs for ranking purposes.
type dasPeerScore struct {
	peer  *dasPeer
	score float64
}

// PeerPicker is a structure that maps out the intersection of peer custody and column indices
// to weight each peer based on the scarcity of the columns they custody. This allows us to prioritize
// requests for more scarce columns to peers that custody them, so that we don't waste our bandwidth allocation
// making requests for more common columns from peers that can provide the more scarce columns.
type PeerPicker struct {
	scores      []*dasPeerScore // scores is a set of generic scores, based on the full custody group set
	ranker      *rarityRanker
	custodians  map[uint64][]*dasPeer
	toCustody   peerdas.ColumnIndices // full set of columns this node will try to custody
	reqInterval time.Duration
}

// NewDASPeerCache initializes a DASPeerCache. This type is not currently thread safe.
func NewDASPeerCache(p2pSvc p2p.P2P) *DASPeerCache {
	return &DASPeerCache{
		peers:  make(map[peer.ID]*dasPeer),
		p2pSvc: p2pSvc,
	}
}

// NewPicker computes a column scarcity ranking based on the current view of columns custodied
// by the given set of peers. New PeerPickers should be created somewhat frequently, as the status of peers can
// change, including the set of columns each peer custodies.
// reqInterval sets the frequency that a peer can be picked in terms of time. A peer can be picked once per reqInterval,
// e.g. a value of time.Second would allow 1 request per second to the peer, or a value of 500 * time.Millisecond would allow
// 2 req/sec.
func (c *DASPeerCache) NewPicker(pids []peer.ID, toCustody peerdas.ColumnIndices, reqInterval time.Duration) (*PeerPicker, error) {
	// For each of the given peers, refresh the cache's view of their currently custodied columns.
	// Also populate 'custodians', which stores the set of peers that custody each column index.
	custodians := make(map[uint64][]*dasPeer, len(toCustody))
	scores := make([]*dasPeerScore, 0, len(pids))
	for _, pid := range pids {
		peer, err := c.refresh(pid)
		if err != nil {
			log.WithField("peerID", pid).WithError(err).Debug("Failed to convert peer ID to node ID.")
			continue
		}
		for col := range peer.custodied {
			if toCustody.Has(col) {
				custodians[col] = append(custodians[col], peer)
			}
		}
		// set score to math.MaxFloat64 so we can tell that it hasn't been initialized
		scores = append(scores, &dasPeerScore{peer: peer, score: math.MaxFloat64})
	}

	return &PeerPicker{
		toCustody:   toCustody,
		ranker:      newRarityRanker(toCustody, custodians),
		custodians:  custodians,
		scores:      scores,
		reqInterval: reqInterval,
	}, nil
}

// refresh supports NewPicker in getting the latest dasPeer view for the given peer.ID. It caches the result
|
||||
// of the enode.ID computation but refreshes the custody group count each time it is called, leveraging the
|
||||
// cache behind peerdas.Info.
|
||||
func (c *DASPeerCache) refresh(pid peer.ID) (*dasPeer, error) {
|
||||
// Computing the enode.ID seems to involve multiple parseing and validation steps followed by a
|
||||
// hash computation, so it seems worth trying to cache the result.
|
||||
p, ok := c.peers[pid]
|
||||
if !ok {
|
||||
nodeID, err := p2p.ConvertPeerIDToNodeID(pid)
|
||||
if err != nil {
|
||||
// If we can't convert the peer ID to a node ID, remove peer from the cache.
|
||||
delete(c.peers, pid)
|
||||
return nil, err
|
||||
}
|
||||
p = &dasPeer{enid: nodeID, pid: pid}
|
||||
}
|
||||
dasInfo, _, err := peerdas.Info(p.enid, c.p2pSvc.CustodyGroupCountFromPeer(pid))
|
||||
if err != nil {
|
||||
// If we can't get the peerDAS info, remove peer from the cache.
|
||||
delete(c.peers, pid)
|
||||
return nil, errors.Wrapf(err, "CustodyGroupCountFromPeer, peerID=%s, nodeID=%s", pid, p.enid)
|
||||
}
|
||||
p.custodied = peerdas.NewColumnIndicesFromMap(dasInfo.CustodyColumns)
|
||||
c.peers[pid] = p
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// ForColumns returns the best peer to request columns from, based on the scarcity of the columns needed.
|
||||
func (m *PeerPicker) ForColumns(needed peerdas.ColumnIndices, busy map[peer.ID]bool) (peer.ID, []uint64, error) {
|
||||
// - find the custodied column with the lowest frequency
|
||||
// - collect all the peers that have custody of that column
|
||||
// - score the peers by how many other of the needed columns they ave
|
||||
// -- or, score them by the rank of the columns they have??
|
||||
var best *dasPeer
|
||||
bestScore, bestCoverage := 0.0, []uint64{}
|
||||
for _, col := range m.ranker.ascendingRarity(needed) {
|
||||
for _, p := range m.custodians[col] {
|
||||
// enforce a minimum interval between requests to the same peer
|
||||
if p.lastAssigned.Add(m.reqInterval).After(time.Now()) {
|
||||
continue
|
||||
}
|
||||
if busy[p.pid] {
|
||||
continue
|
||||
}
|
||||
covered := p.custodied.Intersection(needed)
|
||||
if len(covered) == 0 {
|
||||
continue
|
||||
}
|
||||
// update best if any of the following:
|
||||
// - current score better than previous best
|
||||
// - scores are tied, and current coverage is better than best
|
||||
// - scores are tied, coverage equal, pick the least-recently used peer
|
||||
score := m.ranker.score(covered)
|
||||
if score < bestScore {
|
||||
continue
|
||||
}
|
||||
if score == bestScore && best != nil {
|
||||
if len(covered) < len(bestCoverage) {
|
||||
continue
|
||||
}
|
||||
if len(covered) == len(bestCoverage) && best.lastAssigned.Before(p.lastAssigned) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
best, bestScore, bestCoverage = p, score, covered.ToSlice()
|
||||
}
|
||||
if best != nil {
|
||||
best.lastAssigned = time.Now()
|
||||
return best.pid, bestCoverage, nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", nil, errors.New("no peers able to cover needed columns")
|
||||
}
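
A minimal usage sketch of the picker API added above. Only DASPeerCache, NewPicker, and ForColumns come from this diff; the variables pids, toCustody, needed, and busy are caller-supplied placeholders, and the request plumbing is not shown:

	cache := NewDASPeerCache(p2pSvc)                                       // p2pSvc: the node's p2p.P2P service
	picker, err := cache.NewPicker(pids, toCustody, 500*time.Millisecond)  // allow up to 2 requests/second per peer
	if err != nil {
		return err
	}
	pid, cols, err := picker.ForColumns(needed, busy) // needed: columns still missing; busy: peers with in-flight requests
	if err != nil {
		// No idle, non-rate-limited peer currently covers any needed column; back off and retry later.
		return err
	}
	// Issue a by-range or by-root request for 'cols' to 'pid' here.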

// ForBlocks returns the lowest scoring peer in the set. This can be used to pick a peer
// for block requests, preserving the peers that have the highest coverage scores
// for column requests.
func (m *PeerPicker) ForBlocks(busy map[peer.ID]bool) (peer.ID, error) {
	slices.SortFunc(m.scores, func(a, b *dasPeerScore) int {
		// MaxFloat64 is used as a sentinel value for an uninitialized score;
		// check and set scores while sorting for uber-lazy initialization.
		if a.score == math.MaxFloat64 {
			a.score = m.ranker.score(a.peer.custodied.Intersection(m.toCustody))
		}
		if b.score == math.MaxFloat64 {
			b.score = m.ranker.score(b.peer.custodied.Intersection(m.toCustody))
		}
		return cmp.Compare(a.score, b.score)
	})
	for _, ds := range m.scores {
		if !busy[ds.peer.pid] {
			return ds.peer.pid, nil
		}
	}
	return "", errors.New("no peers available")
}

// rarityRanker is initialized with the set of columns this node needs to custody, and the set of
// all peer custody columns. With that information it is able to compute a numeric representation of
// column rarity, and use that number to give each peer a score that represents how fungible their
// bandwidth likely is relative to other peers given a more specific set of needed columns.
type rarityRanker struct {
	// rarity maps column indices to their rarity scores.
	// The rarity score is defined as the inverse of the number of custodians: 1/custodians.
	// So the rarity of the columns a peer custodies can be simply added together for a score
	// representing how unique their custody groups are; rarer columns contribute larger values to scores.
	rarity map[uint64]float64
	asc    []uint64 // column indices ordered by ascending rarity
}

// newRarityRanker precomputes data used for scoring and ranking. It should be reinitialized every time
// we refresh the set of peers or the view of the peers' column custody.
func newRarityRanker(toCustody peerdas.ColumnIndices, custodians map[uint64][]*dasPeer) *rarityRanker {
	rarity := make(map[uint64]float64, len(toCustody))
	asc := make([]uint64, 0, len(toCustody))
	for col := range toCustody {
		rarity[col] = 1 / max(1, float64(len(custodians[col])))
		asc = append(asc, col)
	}
	slices.SortFunc(asc, func(a, b uint64) int {
		return cmp.Compare(rarity[a], rarity[b])
	})
	return &rarityRanker{rarity: rarity, asc: asc}
}

// ascendingRarity returns the requested columns sorted by ascending rarity.
func (rr *rarityRanker) ascendingRarity(cols peerdas.ColumnIndices) []uint64 {
	ranked := make([]uint64, 0, len(cols))
	for _, col := range rr.asc {
		if cols.Has(col) {
			ranked = append(ranked, col)
		}
	}
	return ranked
}

// score gives a score representing the sum of the rarity scores of the given columns. It can be used to
// score peers based on the set intersection of their custodied indices and the indices we need to request.
func (rr *rarityRanker) score(coverage peerdas.ColumnIndices) float64 {
	score := 0.0
	for col := range coverage {
		score += rr.rarity[col]
	}
	return score
}
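
To make the scoring concrete, a small worked example with made-up numbers: if column 7 has 4 custodians and column 9 has a single custodian, their rarity values are 1/4 = 0.25 and 1/1 = 1.0. A peer that covers both columns scores 1.25, while a peer that only covers the common column 7 scores 0.25, so a request that needs both columns is steered toward the peer whose bandwidth is harder to replace.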

@@ -29,13 +29,14 @@ import (
// DataColumnSidecarsParams stores the common parameters needed to
// fetch data column sidecars from peers.
type DataColumnSidecarsParams struct {
Ctx context.Context // Context
Tor blockchain.TemporalOracle // Temporal oracle, useful to get the current slot
P2P prysmP2P.P2P // P2P network interface
RateLimiter *leakybucket.Collector // Rate limiter for outgoing requests
CtxMap ContextByteVersions // Context map, useful to know if a message is mapped to the correct fork
Storage filesystem.DataColumnStorageReader // Data columns storage
NewVerifier verification.NewDataColumnsVerifier // Data columns verifier to check the conformity of incoming data column sidecars
Ctx context.Context // Context
Tor blockchain.TemporalOracle // Temporal oracle, useful to get the current slot
P2P prysmP2P.P2P // P2P network interface
RateLimiter *leakybucket.Collector // Rate limiter for outgoing requests
CtxMap ContextByteVersions // Context map, useful to know if a message is mapped to the correct fork
Storage filesystem.DataColumnStorageReader // Data columns storage
NewVerifier verification.NewDataColumnsVerifier // Data columns verifier to check the conformity of incoming data column sidecars
DownscorePeerOnRPCFault bool // Downscore a peer if it commits an RPC fault. Not responding with sidecars at all is considered a fault.
}

// FetchDataColumnSidecars retrieves data column sidecars from storage and peers for the given

@@ -34,6 +34,7 @@ go_library(
"//beacon-chain/verification:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
@@ -108,7 +109,9 @@ go_test(
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
"@com_github_libp2p_go_libp2p//:go_default_library",
"@com_github_libp2p_go_libp2p//core:go_default_library",
"@com_github_libp2p_go_libp2p//core/crypto:go_default_library",
"@com_github_libp2p_go_libp2p//core/network:go_default_library",
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_paulbellamy_ratecounter//:go_default_library",
@@ -28,10 +28,10 @@ const (
)

// blockReceiverFn defines block receiving function.
type blockReceiverFn func(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityStore) error
type blockReceiverFn func(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityChecker) error

// batchBlockReceiverFn defines batch receiving function.
type batchBlockReceiverFn func(ctx context.Context, blks []blocks.ROBlock, avs das.AvailabilityStore) error
type batchBlockReceiverFn func(ctx context.Context, blks []blocks.ROBlock, avs das.AvailabilityChecker) error

// Round Robin sync looks at the latest peer statuses and syncs up to the highest known epoch.
//
@@ -175,7 +175,7 @@ func (s *Service) processFetchedDataRegSync(ctx context.Context, data *blocksQue
blocksWithDataColumns := bwb[fistDataColumnIndex:]

blobBatchVerifier := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncBlobSidecarRequirements)
lazilyPersistentStoreBlobs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, blobBatchVerifier)
avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, blobBatchVerifier)

log := log.WithField("firstSlot", data.bwb[0].Block.Block().Slot())
logBlobs, logDataColumns := log, log
@@ -185,12 +185,12 @@ func (s *Service) processFetchedDataRegSync(ctx context.Context, data *blocksQue
}

for i, b := range blocksWithBlobs {
if err := lazilyPersistentStoreBlobs.Persist(s.clock.CurrentSlot(), b.Blobs...); err != nil {
if err := avs.Persist(s.clock.CurrentSlot(), b.Blobs...); err != nil {
logBlobs.WithError(err).WithFields(syncFields(b.Block)).Warning("Batch failure due to BlobSidecar issues")
return uint64(i), err
}

if err := s.processBlock(ctx, s.genesisTime, b, s.cfg.Chain.ReceiveBlock, lazilyPersistentStoreBlobs); err != nil {
if err := s.processBlock(ctx, s.genesisTime, b, s.cfg.Chain.ReceiveBlock, avs); err != nil {
if errors.Is(err, errParentDoesNotExist) {
logBlobs.WithField("missingParent", fmt.Sprintf("%#x", b.Block.Block().ParentRoot())).
WithFields(syncFields(b.Block)).Debug("Could not process batch blocks due to missing parent")
@@ -313,7 +313,7 @@ func (s *Service) processBlock(
genesis time.Time,
bwb blocks.BlockWithROSidecars,
blockReceiver blockReceiverFn,
avs das.AvailabilityStore,
avs das.AvailabilityChecker,
) error {
blk := bwb.Block
blkRoot := blk.Root()

@@ -376,7 +376,7 @@ func TestService_processBlock(t *testing.T) {
rowsb, err := blocks.NewROBlock(wsb)
require.NoError(t, err)
err = s.processBlock(ctx, genesis, blocks.BlockWithROSidecars{Block: rowsb}, func(
ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, _ das.AvailabilityStore) error {
ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, _ das.AvailabilityChecker) error {
assert.NoError(t, s.cfg.Chain.ReceiveBlock(ctx, block, blockRoot, nil))
return nil
}, nil)
@@ -388,7 +388,7 @@ func TestService_processBlock(t *testing.T) {
rowsb, err = blocks.NewROBlock(wsb)
require.NoError(t, err)
err = s.processBlock(ctx, genesis, blocks.BlockWithROSidecars{Block: rowsb}, func(
ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, _ das.AvailabilityStore) error {
ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, _ das.AvailabilityChecker) error {
return nil
}, nil)
assert.ErrorContains(t, errBlockAlreadyProcessed.Error(), err)
@@ -399,7 +399,7 @@ func TestService_processBlock(t *testing.T) {
rowsb, err = blocks.NewROBlock(wsb)
require.NoError(t, err)
err = s.processBlock(ctx, genesis, blocks.BlockWithROSidecars{Block: rowsb}, func(
ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, _ das.AvailabilityStore) error {
ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, _ das.AvailabilityChecker) error {
assert.NoError(t, s.cfg.Chain.ReceiveBlock(ctx, block, blockRoot, nil))
return nil
}, nil)
@@ -469,7 +469,7 @@ func TestService_processBlockBatch(t *testing.T) {
currBlockRoot = blk1Root
}

cbnormal := func(ctx context.Context, blks []blocks.ROBlock, avs das.AvailabilityStore) error {
cbnormal := func(ctx context.Context, blks []blocks.ROBlock, avs das.AvailabilityChecker) error {
assert.NoError(t, s.cfg.Chain.ReceiveBlockBatch(ctx, blks, avs))
return nil
}
@@ -478,7 +478,7 @@ func TestService_processBlockBatch(t *testing.T) {
assert.NoError(t, err)
require.Equal(t, uint64(len(batch)), count)

cbnil := func(ctx context.Context, blocks []blocks.ROBlock, _ das.AvailabilityStore) error {
cbnil := func(ctx context.Context, blocks []blocks.ROBlock, _ das.AvailabilityChecker) error {
return nil
}

@@ -851,7 +851,7 @@ func TestService_processBlocksWithDataColumns(t *testing.T) {
counter: ratecounter.NewRateCounter(counterSeconds * time.Second),
}

receiverFunc := func(ctx context.Context, blks []blocks.ROBlock, avs das.AvailabilityStore) error {
receiverFunc := func(ctx context.Context, blks []blocks.ROBlock, avs das.AvailabilityChecker) error {
require.Equal(t, 1, len(blks))
return nil
}
@@ -22,6 +22,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/crypto/rand"
@@ -209,6 +210,8 @@ func (s *Service) Start() {

// fetchOriginSidecars fetches origin sidecars
func (s *Service) fetchOriginSidecars(peers []peer.ID) error {
const delay = 10 * time.Second // The delay between each attempt to fetch origin data column sidecars

blockRoot, err := s.cfg.DB.OriginCheckpointBlockRoot(s.ctx)
if errors.Is(err, db.ErrNotFoundOriginBlockRoot) {
return nil
@@ -234,7 +237,7 @@ func (s *Service) fetchOriginSidecars(peers []peer.ID) error {
blockVersion := roBlock.Version()

if blockVersion >= version.Fulu {
if err := s.fetchOriginColumns(peers, roBlock); err != nil {
if err := s.fetchOriginColumns(roBlock, delay); err != nil {
return errors.Wrap(err, "fetch origin columns")
}
return nil
@@ -391,7 +394,11 @@ func (s *Service) fetchOriginBlobs(pids []peer.ID, rob blocks.ROBlock) error {
return fmt.Errorf("no connected peer able to provide blobs for checkpoint sync block %#x", r)
}

func (s *Service) fetchOriginColumns(pids []peer.ID, roBlock blocks.ROBlock) error {
func (s *Service) fetchOriginColumns(roBlock blocks.ROBlock, delay time.Duration) error {
const (
errorMessage = "Failed to fetch origin data column sidecars"
warningIteration = 10
)
samplesPerSlot := params.BeaconConfig().SamplesPerSlot

// Return early if the origin block has no blob commitments.
@@ -420,21 +427,40 @@ func (s *Service) fetchOriginColumns(pids []peer.ID, roBlock blocks.ROBlock) err
root := roBlock.Root()

params := sync.DataColumnSidecarsParams{
Ctx: s.ctx,
Tor: s.clock,
P2P: s.cfg.P2P,
CtxMap: s.ctxMap,
Storage: s.cfg.DataColumnStorage,
NewVerifier: s.newDataColumnsVerifier,
Ctx: s.ctx,
Tor: s.clock,
P2P: s.cfg.P2P,
CtxMap: s.ctxMap,
Storage: s.cfg.DataColumnStorage,
NewVerifier: s.newDataColumnsVerifier,
DownscorePeerOnRPCFault: true,
}

verfifiedRoDataColumnsByRoot, err := sync.FetchDataColumnSidecars(params, []blocks.ROBlock{roBlock}, info.CustodyColumns)
if err != nil {
return errors.Wrap(err, "fetch data column sidecars")
var verifiedRoDataColumnsByRoot map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn
for attempt := uint64(0); ; attempt++ {
verifiedRoDataColumnsByRoot, err = sync.FetchDataColumnSidecars(params, []blocks.ROBlock{roBlock}, info.CustodyColumns)
if err == nil {
break
}

log := log.WithError(err).WithFields(logrus.Fields{
"attempt": attempt,
"delay": delay,
})

if attempt%warningIteration == 0 && attempt > 0 {
log.Warning(errorMessage)
time.Sleep(delay)

continue
}

log.Debug(errorMessage)
time.Sleep(delay)
}

// Save origin data columns to disk.
verifiedRoDataColumnsSidecars, ok := verfifiedRoDataColumnsByRoot[root]
verifiedRoDataColumnsSidecars, ok := verifiedRoDataColumnsByRoot[root]
if !ok {
return fmt.Errorf("cannot extract origin data column sidecars for block root %#x - should never happen", root)
}
@@ -447,7 +473,7 @@ func (s *Service) fetchOriginColumns(pids []peer.ID, roBlock blocks.ROBlock) err
"blockRoot": fmt.Sprintf("%#x", roBlock.Root()),
"blobCount": len(commitments),
"columnCount": len(verifiedRoDataColumnsSidecars),
}).Info("Successfully downloaded data columns for checkpoint sync block")
}).Info("Successfully downloaded data column sidecars for checkpoint sync block")

return nil
}
@@ -2,6 +2,7 @@ package initialsync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -13,8 +14,12 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/kv"
|
||||
dbtest "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
testp2p "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
prysmSync "github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
@@ -22,10 +27,14 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
"github.com/libp2p/go-libp2p"
|
||||
"github.com/libp2p/go-libp2p/core/crypto"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/paulbellamy/ratecounter"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
@@ -663,3 +672,147 @@ func TestFetchOriginSidecars(t *testing.T) {
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestFetchOriginColumns(t *testing.T) {
|
||||
// Load the trusted setup.
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Setup test environment
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.FuluForkEpoch = 0
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
const (
|
||||
delay = 0
|
||||
blobCount = 1
|
||||
)
|
||||
|
||||
t.Run("block has no commitments", func(t *testing.T) {
|
||||
service := new(Service)
|
||||
|
||||
// Create a block with no blob commitments
|
||||
block := util.NewBeaconBlockFulu()
|
||||
signedBlock, err := blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
roBlock, err := blocks.NewROBlock(signedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = service.fetchOriginColumns(roBlock, delay)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("FetchDataColumnSidecars succeeds immediately", func(t *testing.T) {
|
||||
storage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
p2p := p2ptest.NewTestP2P(t)
|
||||
|
||||
service := &Service{
|
||||
cfg: &Config{
|
||||
P2P: p2p,
|
||||
DataColumnStorage: storage,
|
||||
},
|
||||
}
|
||||
|
||||
// Create a block with blob commitments and sidecars
|
||||
roBlock, _, verifiedSidecars := util.GenerateTestFuluBlockWithSidecars(t, blobCount)
|
||||
|
||||
// Store all sidecars in advance so FetchDataColumnSidecars succeeds immediately
|
||||
err := storage.Save(verifiedSidecars)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = service.fetchOriginColumns(roBlock, delay)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("first attempt to FetchDataColumnSidecars fails but second attempt succeeds", func(t *testing.T) {
|
||||
numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups
|
||||
storage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
|
||||
// Custody columns with this private key and 4-cgc: 31, 81, 97, 105
|
||||
privateKeyBytes := [32]byte{1}
|
||||
privateKey, err := crypto.UnmarshalSecp256k1PrivateKey(privateKeyBytes[:])
|
||||
require.NoError(t, err)
|
||||
|
||||
protocol := fmt.Sprintf("%s/ssz_snappy", p2p.RPCDataColumnSidecarsByRangeTopicV1)
|
||||
|
||||
p2p, other := testp2p.NewTestP2P(t), testp2p.NewTestP2P(t, libp2p.Identity(privateKey))
|
||||
p2p.Peers().SetConnectionState(other.PeerID(), peers.Connected)
|
||||
p2p.Connect(other)
|
||||
|
||||
p2p.Peers().SetChainState(other.PeerID(), ðpb.StatusV2{
|
||||
HeadSlot: 5,
|
||||
})
|
||||
|
||||
other.ENR().Set(peerdas.Cgc(numberOfCustodyGroups))
|
||||
p2p.Peers().UpdateENR(other.ENR(), other.PeerID())
|
||||
|
||||
expectedRequest := ðpb.DataColumnSidecarsByRangeRequest{
|
||||
StartSlot: 0,
|
||||
Count: 1,
|
||||
Columns: []uint64{1, 17, 19, 42, 75, 87, 102, 117},
|
||||
}
|
||||
|
||||
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
|
||||
|
||||
gs := startup.NewClockSynchronizer()
|
||||
err = gs.SetClock(startup.NewClock(time.Unix(4113849600, 0), [fieldparams.RootLength]byte{}))
|
||||
require.NoError(t, err)
|
||||
|
||||
waiter := verification.NewInitializerWaiter(gs, nil, nil)
|
||||
initializer, err := waiter.WaitForInitializer(t.Context())
|
||||
require.NoError(t, err)
|
||||
|
||||
newDataColumnsVerifier := newDataColumnsVerifierFromInitializer(initializer)
|
||||
|
||||
// Create a block with blob commitments and sidecars
|
||||
roBlock, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, blobCount)
|
||||
|
||||
ctxMap, err := prysmSync.ContextByteVersionsForValRoot(params.BeaconConfig().GenesisValidatorsRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
service := &Service{
|
||||
ctx: t.Context(),
|
||||
clock: clock,
|
||||
newDataColumnsVerifier: newDataColumnsVerifier,
|
||||
cfg: &Config{
|
||||
P2P: p2p,
|
||||
DataColumnStorage: storage,
|
||||
},
|
||||
ctxMap: ctxMap,
|
||||
}
|
||||
|
||||
// Do not respond with any sidecar on the first attempt, and respond with everything requested on the second one.
|
||||
firstAttempt := true
|
||||
other.SetStreamHandler(protocol, func(stream network.Stream) {
|
||||
actualRequest := new(ethpb.DataColumnSidecarsByRangeRequest)
|
||||
err := other.Encoding().DecodeWithMaxLength(stream, actualRequest)
|
||||
assert.NoError(t, err)
|
||||
assert.DeepEqual(t, expectedRequest, actualRequest)
|
||||
|
||||
if firstAttempt {
|
||||
firstAttempt = false
|
||||
err = stream.CloseWrite()
|
||||
assert.NoError(t, err)
|
||||
return
|
||||
}
|
||||
|
||||
for _, column := range actualRequest.Columns {
|
||||
err = prysmSync.WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), verifiedRoSidecars[column].DataColumnSidecar)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
err = stream.CloseWrite()
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
err = service.fetchOriginColumns(roBlock, delay)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check all corresponding sidecars are saved in the store.
|
||||
summary := storage.Summary(roBlock.Root())
|
||||
for _, index := range expectedRequest.Columns {
|
||||
require.Equal(t, true, summary.HasIndex(index))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
"fmt"
"io"
"slices"
"sort"

"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
@@ -408,6 +409,7 @@ func SendDataColumnSidecarsByRangeRequest(
p DataColumnSidecarsParams,
pid peer.ID,
request *ethpb.DataColumnSidecarsByRangeRequest,
vfs ...DataColumnResponseValidation,
) ([]blocks.RODataColumn, error) {
// Return early if nothing to request.
if request == nil || request.Count == 0 || len(request.Columns) == 0 {
@@ -453,10 +455,24 @@ func SendDataColumnSidecarsByRangeRequest(
// Send the request.
stream, err := p.P2P.Send(p.Ctx, request, topic, pid)
if err != nil {
if p.DownscorePeerOnRPCFault {
downscorePeer(p.P2P, pid, "cannotSendDataColumnSidecarsByRangeRequest")
}

return nil, errors.Wrap(err, "p2p send")
}
defer closeStream(stream, log)

requestedSlot, err := isSidecarSlotRequested(request)
if err != nil {
return nil, errors.Wrap(err, "is sidecar slot within bounds")
}
vfs = append([]DataColumnResponseValidation{
isInSlotOrder(),
isSidecarIndexRequested(request),
requestedSlot,
}, vfs...)

// Read the data column sidecars from the stream.
roDataColumns := make([]blocks.RODataColumn, 0, totalCount)
for range totalCount {
@@ -465,20 +481,19 @@ func SendDataColumnSidecarsByRangeRequest(
return nil, err
}

validatorSlotWithinBounds, err := isSidecarSlotWithinBounds(request)
if err != nil {
return nil, errors.Wrap(err, "is sidecar slot within bounds")
}

roDataColumn, err := readChunkedDataColumnSidecar(
stream, p.P2P, p.CtxMap,
validatorSlotWithinBounds,
isSidecarIndexRequested(request),
)
roDataColumn, err := readChunkedDataColumnSidecar(stream, p.P2P, p.CtxMap, vfs...)
if errors.Is(err, io.EOF) {
if p.DownscorePeerOnRPCFault && len(roDataColumns) == 0 {
downscorePeer(p.P2P, pid, "noReturnedSidecar")
}

return roDataColumns, nil
}
if err != nil {
if p.DownscorePeerOnRPCFault {
downscorePeer(p.P2P, pid, "readChunkedDataColumnSidecarError")
}

return nil, errors.Wrap(err, "read chunked data column sidecar")
}

@@ -491,14 +506,36 @@ func SendDataColumnSidecarsByRangeRequest(

// All requested sidecars were delivered by the peer. Expecting EOF.
if _, err := readChunkedDataColumnSidecar(stream, p.P2P, p.CtxMap); !errors.Is(err, io.EOF) {
if p.DownscorePeerOnRPCFault {
downscorePeer(p.P2P, pid, "tooManyResponseDataColumnSidecars")
}

return nil, errors.Wrapf(errMaxResponseDataColumnSidecarsExceeded, "requestedCount=%d", totalCount)
}

return roDataColumns, nil
}

// isSidecarSlotWithinBounds verifies that the slot of the data column sidecar is within the bounds of the request.
func isSidecarSlotWithinBounds(request *ethpb.DataColumnSidecarsByRangeRequest) (DataColumnResponseValidation, error) {
var (
errSidecarRPCValidation = errors.Wrap(ErrInvalidFetchedData, "DataColumnSidecar")
errSidecarSlotsUnordered = errors.Wrap(errSidecarRPCValidation, "slots not in ascending order")
errSidecarSlotNotRequested = errors.Wrap(errSidecarRPCValidation, "sidecar slot not in range")
errSidecarIndexNotRequested = errors.Wrap(errSidecarRPCValidation, "sidecar index not requested")
)

func isInSlotOrder() DataColumnResponseValidation {
var prev primitives.Slot

return func(sidecar blocks.RODataColumn) error {
if prev > sidecar.Slot() {
return errors.Wrapf(errSidecarSlotsUnordered, "got=%d, want>=%d", sidecar.Slot(), prev)
}
return nil
}
}
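
The new vfs variadic parameter lets callers append their own response checks to the defaults installed above. A minimal sketch of a caller-supplied validator; the rejectIndexZero name and the calling context are hypothetical, only DataColumnResponseValidation and SendDataColumnSidecarsByRangeRequest come from this diff:

	// rejectIndexZero is a hypothetical extra validation passed through vfs.
	rejectIndexZero := func(sidecar blocks.RODataColumn) error {
		if sidecar.Index == 0 {
			return errors.New("column index 0 not wanted by this caller")
		}
		return nil
	}
	sidecars, err := SendDataColumnSidecarsByRangeRequest(p, pid, request, rejectIndexZero)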

// isSidecarSlotRequested verifies that the slot of the data column sidecar is within the bounds of the request.
func isSidecarSlotRequested(request *ethpb.DataColumnSidecarsByRangeRequest) (DataColumnResponseValidation, error) {
// endSlot is exclusive (while request.StartSlot is inclusive).
endSlot, err := request.StartSlot.SafeAdd(request.Count)
if err != nil {
@@ -509,7 +546,7 @@ func isSidecarSlotWithinBounds(request *ethpb.DataColumnSidecarsByRangeRequest)
slot := sidecar.Slot()

if !(request.StartSlot <= slot && slot < endSlot) {
return errors.Errorf("data column sidecar slot %d out of range [%d, %d[", slot, request.StartSlot, endSlot)
return errors.Wrapf(errSidecarSlotNotRequested, "got=%d, want=[%d, %d)", slot, request.StartSlot, endSlot)
}

return nil
@@ -528,7 +565,8 @@ func isSidecarIndexRequested(request *ethpb.DataColumnSidecarsByRangeRequest) Da
return func(sidecar blocks.RODataColumn) error {
columnIndex := sidecar.Index
if !requestedIndices[columnIndex] {
return errors.Errorf("data column sidecar index %d not found in requested indices", columnIndex)
requested := sortedSliceFromMap(requestedIndices)
return errors.Wrapf(errSidecarIndexNotRequested, "%d not in %v", columnIndex, requested)
}

return nil
@@ -566,6 +604,10 @@ func SendDataColumnSidecarsByRootRequest(p DataColumnSidecarsParams, peer goPeer
// Send the request to the peer.
stream, err := p.P2P.Send(p.Ctx, identifiers, topic, peer)
if err != nil {
if p.DownscorePeerOnRPCFault {
downscorePeer(p.P2P, peer, "cannotSendDataColumnSidecarsByRootRequest")
}

return nil, errors.Wrap(err, "p2p api send")
}
defer closeStream(stream, log)
@@ -577,9 +619,17 @@ func SendDataColumnSidecarsByRootRequest(p DataColumnSidecarsParams, peer goPeer
for range count {
roDataColumn, err := readChunkedDataColumnSidecar(stream, p.P2P, p.CtxMap, isSidecarIndexRootRequested(identifiers))
if errors.Is(err, io.EOF) {
if p.DownscorePeerOnRPCFault && len(roDataColumns) == 0 {
downscorePeer(p.P2P, peer, "noReturnedSidecar")
}

return roDataColumns, nil
}
if err != nil {
if p.DownscorePeerOnRPCFault {
downscorePeer(p.P2P, peer, "readChunkedDataColumnSidecarError")
}

return nil, errors.Wrap(err, "read chunked data column sidecar")
}
@@ -592,6 +642,10 @@ func SendDataColumnSidecarsByRootRequest(p DataColumnSidecarsParams, peer goPeer

// All requested sidecars were delivered by the peer. Expecting EOF.
if _, err := readChunkedDataColumnSidecar(stream, p.P2P, p.CtxMap); !errors.Is(err, io.EOF) {
if p.DownscorePeerOnRPCFault {
downscorePeer(p.P2P, peer, "tooManyResponseDataColumnSidecars")
}

return nil, errors.Wrapf(errMaxResponseDataColumnSidecarsExceeded, "requestedCount=%d", count)
}

@@ -689,3 +743,24 @@ func readChunkedDataColumnSidecar(

return &roDataColumn, nil
}

func downscorePeer(p2p p2p.P2P, peerID peer.ID, reason string, fields ...logrus.Fields) {
log := log
for _, field := range fields {
log = log.WithFields(field)
}

newScore := p2p.Peers().Scorers().BadResponsesScorer().Increment(peerID)
log.WithFields(logrus.Fields{"peerID": peerID, "reason": reason, "newScore": newScore}).Debug("Downscore peer")
}

func DataColumnSidecarsByRangeRequest(columns []uint64, start, end primitives.Slot) *ethpb.DataColumnSidecarsByRangeRequest {
sort.Slice(columns, func(i, j int) bool {
return columns[i] < columns[j]
})
return &ethpb.DataColumnSidecarsByRangeRequest{
StartSlot: start,
Count: uint64(end-start) + 1,
Columns: columns,
}
}
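
A brief usage note for the helper above: both start and end are treated as inclusive, so the resulting Count is end - start + 1, and the column indices are sorted before being placed in the request. A small sketch with arbitrary slot values:

	// Requests columns 3, 7 and 11 for slots 100 through 131 inclusive (Count = 32).
	req := DataColumnSidecarsByRangeRequest([]uint64{7, 3, 11}, 100, 131)
	// req.Columns is now [3 7 11].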

@@ -1071,7 +1071,7 @@ func TestIsSidecarSlotWithinBounds(t *testing.T) {
Count: 10,
}

validator, err := isSidecarSlotWithinBounds(request)
validator, err := isSidecarSlotRequested(request)
require.NoError(t, err)

testCases := []struct {

@@ -60,7 +60,7 @@ var (
SpectestDataColumnSidecarRequirements = requirementList(GossipDataColumnSidecarRequirements).excluding(
RequireSidecarParentSeen, RequireSidecarParentValid)

errColumnsInvalid = errors.New("data columns failed verification")
ErrColumnsInvalid = errors.New("data columns failed verification")
errBadTopicLength = errors.New("topic length is invalid")
errBadTopic = errors.New("topic is not of the one expected")
)
@@ -84,7 +84,7 @@ var _ DataColumnsVerifier = &RODataColumnsVerifier{}
// were not run, an error will be returned.
func (dv *RODataColumnsVerifier) VerifiedRODataColumns() ([]blocks.VerifiedRODataColumn, error) {
if !dv.results.allSatisfied() {
return nil, dv.results.errors(errColumnsInvalid)
return nil, dv.results.errors(ErrColumnsInvalid)
}

verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(dv.dataColumns))
@@ -521,7 +521,7 @@ func columnToSignatureData(d blocks.RODataColumn) signatureData {
}

func columnErrBuilder(baseErr error) error {
return errors.Wrap(baseErr, errColumnsInvalid.Error())
return errors.Wrap(baseErr, ErrColumnsInvalid.Error())
}

func inclusionProofKey(c blocks.RODataColumn) ([160]byte, error) {

@@ -947,7 +947,7 @@ func TestColumnRequirementSatisfaction(t *testing.T) {

// We haven't performed any verification, VerifiedRODataColumns should error.
_, err := verifier.VerifiedRODataColumns()
require.ErrorIs(t, err, errColumnsInvalid)
require.ErrorIs(t, err, ErrColumnsInvalid)

var me VerificationMultiError
ok := errors.As(err, &me)
@@ -966,7 +966,7 @@ func TestColumnRequirementSatisfaction(t *testing.T) {

// One requirement is missing, VerifiedRODataColumns should still error.
_, err = verifier.VerifiedRODataColumns()
require.ErrorIs(t, err, errColumnsInvalid)
require.ErrorIs(t, err, ErrColumnsInvalid)

// Now, satisfy the first requirement.
verifier.results.record(GossipDataColumnSidecarRequirements[0], nil)

3
changelog/james-prysm_block-proposal-fulu.md
Normal file
@@ -0,0 +1,3 @@
### Added

- Fulu block proposal changes for beacon api and gRPC.

2
changelog/kasey_backfill-data-columns.md
Normal file
@@ -0,0 +1,2 @@
### Added
- Data column backfill.

2
changelog/manu-retry-fetch-origin-columns.md
Normal file
@@ -0,0 +1,2 @@
### Added
- Retry to fetch origin data column sidecars when starting from a checkpoint.

2
changelog/pop_fix-bug.md
Normal file
@@ -0,0 +1,2 @@
### Fixed
- Fix bug where arguments of fillInForkChoiceMissingBlocks were incorrectly placed

2
changelog/potuz_change_error_message.md
Normal file
@@ -0,0 +1,2 @@
### Ignored
- Fix error message.

3
changelog/potuz_next_epoch_proposer_duties.md
Normal file
@@ -0,0 +1,3 @@
### Fixed

- Fix next epoch proposer duties in Fulu by advancing the state to the beginning of the current epoch.

2
changelog/potuz_start_from_justified.md
Normal file
@@ -0,0 +1,2 @@
### Changed
- Start from justified checkpoint by default.

3
changelog/sahil-4555-refactor-to-use-atomic-types.md
Normal file
@@ -0,0 +1,3 @@
### Changed

- Changed old atomic functions to new atomic.Int for safer and clearer code.
@@ -24,10 +24,10 @@ func ErrNotSupported(funcName string, ver int) error {

// ThreadSafeEnumerator is a thread-safe counter of all objects created since the node's start.
type ThreadSafeEnumerator struct {
counter uint64
counter atomic.Uint64
}

// Inc increments the enumerator and returns the new object count.
func (c *ThreadSafeEnumerator) Inc() uint64 {
return atomic.AddUint64(&c.counter, 1)
return c.counter.Add(1)
}
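
The same migration pattern repeats throughout this change: a raw integer field updated through atomic.AddUint64/LoadUint64 is replaced by the sync/atomic.Uint64 type, so the type itself only exposes atomic operations. A minimal standalone sketch (the counter name here is illustrative, not from the diff):

	// Before: easy to forget the atomic helpers and race on the field.
	//   var counter uint64
	//   atomic.AddUint64(&counter, 1)

	// After: atomic access is enforced by the type.
	var counter atomic.Uint64
	counter.Add(1)
	n := counter.Load() // n == 1
	_ = n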

@@ -10,18 +10,18 @@ import (

// Arbitrary start time.
var start = time.Date(1990, 1, 2, 0, 0, 0, 0, time.UTC).Round(0)
var elapsed int64
var elapsed atomic.Int64

// We provide atomic access to elapsed to avoid data races between multiple
// concurrent goroutines during the tests.
func getElapsed() time.Duration {
return time.Duration(atomic.LoadInt64(&elapsed))
return time.Duration(elapsed.Load())
}
func setElapsed(v time.Duration) {
atomic.StoreInt64(&elapsed, int64(v))
elapsed.Store(int64(v))
}
func addToElapsed(v time.Duration) {
atomic.AddInt64(&elapsed, int64(v))
elapsed.Add(int64(v))
}

func reset(t *testing.T, c *Collector) {

@@ -8,12 +8,11 @@ import (
"strings"
"testing"

"github.com/OffchainLabs/prysm/v6/config/params"

"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/require"
@@ -40,6 +39,7 @@ func RunBlockProcessingTest(t *testing.T, config, folderPath string) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.BlobSchedule = []params.BlobScheduleEntry{{MaxBlobsPerBlock: 9}}
cfg.FuluForkEpoch = 0 // assume epochs on tests for state are post fulu
params.OverrideBeaconConfig(cfg)

helpers.ClearCache()
@@ -286,6 +286,19 @@ func MaxSafeEpoch() primitives.Epoch {
return primitives.Epoch(math.MaxUint64 / uint64(params.BeaconConfig().SlotsPerEpoch))
}

// SafeEpochStartOrMax returns the start slot of the given epoch if it will not overflow,
// otherwise it returns the start slot of the greatest epoch whose start slot does not overflow.
func SafeEpochStartOrMax(e primitives.Epoch) primitives.Slot {
// The max value converted to a slot can't be the start of a conceptual epoch,
// because the first slot of that epoch would overflow,
// so use the start slot of the epoch right before that value.
me := MaxSafeEpoch() - 1
if e > me {
return UnsafeEpochStart(me)
}
return UnsafeEpochStart(e)
}
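
A small illustration of the clamping behavior, assuming 32 slots per epoch just to make the numbers concrete:

	normal := SafeEpochStartOrMax(10)                               // 10 * 32 = 320
	clamped := SafeEpochStartOrMax(primitives.Epoch(math.MaxUint64)) // UnsafeEpochStart(MaxSafeEpoch() - 1), no overflow
	_ = normal
	_ = clamped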

// SecondsUntilNextEpochStart returns how many seconds until the next Epoch start from the current time and slot
func SecondsUntilNextEpochStart(genesis time.Time) (uint64, error) {
currentSlot := CurrentSlot(genesis)

@@ -3,7 +3,6 @@ package client
import (
"fmt"
"strconv"
"sync/atomic"

"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
@@ -138,9 +137,11 @@ func (v *validator) LogSubmittedAtts(slot primitives.Slot) {

// LogSubmittedSyncCommitteeMessages logs info about submitted sync committee messages.
func (v *validator) LogSubmittedSyncCommitteeMessages() {
if v.syncCommitteeStats.totalMessagesSubmitted > 0 {
log.WithField("messages", v.syncCommitteeStats.totalMessagesSubmitted).Debug("Submitted sync committee messages successfully to beacon node")
if count := v.syncCommitteeStats.totalMessagesSubmitted.Load(); count > 0 {
log.WithField("messages", count).
Debug("Submitted sync committee messages successfully to beacon node")

// Reset the amount.
atomic.StoreUint64(&v.syncCommitteeStats.totalMessagesSubmitted, 0)
v.syncCommitteeStats.totalMessagesSubmitted.Store(0)
}
}

@@ -3,7 +3,6 @@ package client
import (
"context"
"fmt"
"sync/atomic"
"time"

"github.com/OffchainLabs/prysm/v6/beacon-chain/core/altair"
@@ -94,7 +93,7 @@ func (v *validator) SubmitSyncCommitteeMessage(ctx context.Context, slot primiti
"blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(msg.BlockRoot)),
"validatorIndex": msg.ValidatorIndex,
}).Info("Submitted new sync message")
atomic.AddUint64(&v.syncCommitteeStats.totalMessagesSubmitted, 1)
v.syncCommitteeStats.totalMessagesSubmitted.Add(1)
}

// SubmitSignedContributionAndProof submits the signed sync committee contribution and proof to the beacon chain.

@@ -13,6 +13,7 @@ import (
"strconv"
"strings"
"sync"
"sync/atomic"
"time"

"github.com/OffchainLabs/prysm/v6/api/client"
@@ -1571,5 +1572,5 @@ type voteStats struct {

// This tracks all validators' submissions for sync committees.
type syncCommitteeStats struct {
totalMessagesSubmitted uint64
totalMessagesSubmitted atomic.Uint64
}