Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-11 06:18:05 -05:00)

Compare commits: 10 commits, peerDAS...flag-sync-
| Author | SHA1 | Date |
|---|---|---|
| | 18cced70ec | |
| | 196e457450 | |
| | 00f441e7e2 | |
| | 6f7e7f5885 | |
| | bb666833c5 | |
| | 334eb40576 | |
| | 097605b45d | |
| | 8f68e224d9 | |
| | 9b2ee0f720 | |
| | dcf9379dd2 | |
.github/ISSUE_TEMPLATE/bug_report.yml (vendored, 2 changes)

```diff
@@ -1,6 +1,6 @@
 name: 🐞 Bug report
 description: Report a bug or problem with running Prysm
-type: "Bug"
+labels: ["Bug"]
 body:
   - type: markdown
     attributes:
```
WORKSPACE (10 changes)

```diff
@@ -253,16 +253,16 @@ filegroup(
     url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
 )
 
-consensus_spec_version = "v1.6.0-alpha.5"
+consensus_spec_version = "v1.6.0-alpha.4"
 
 load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests")
 
 consensus_spec_tests(
     name = "consensus_spec_tests",
     flavors = {
-        "general": "sha256-BXuEb1XbeSft0qzVFnoB8KC0YR1qM3ybT5lKUDbUWn8=",
-        "minimal": "sha256-EjwSHgBbWSoy5hm9V+A/bVMabyojaKsBNPrRtuPVq4k=",
-        "mainnet": "sha256-OGWMzarzaV1B9mVpy48/DCUbhjfX+b64pAxWwPLWhAs=",
+        "general": "sha256-MaN4zu3o0vWZypUHS5r4D8WzJF4wANoadM8qm6iyDs4=",
+        "minimal": "sha256-aZGNPp/bBvJgq3Wf6vyR0H6G3DOkbSuggEmOL4jEmtg=",
+        "mainnet": "sha256-C7jjosvpzUgw3GPajlsWBV02ZbkZ5Uv4ikmOqfDGajI=",
     },
     version = consensus_spec_version,
 )
@@ -278,7 +278,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-FQWR5EZuVcQGR0ol9vpd7eunnfGexJ/7J3xycrFEJbU=",
+    integrity = "sha256-qreawRS77l8CebiNww8z727qUItw7KlHY1Xqj7IrPdk=",
     strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
     url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
 )
```
```diff
@@ -102,6 +102,7 @@ func VerifyCellKZGProofBatch(commitmentsBytes []Bytes48, cellIndices []uint64, c
 	for i := range cells {
 		ckzgCells[i] = ckzg4844.Cell(cells[i])
 	}
 
 	return ckzg4844.VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, ckzgCells, proofsBytes)
 }
```
```diff
@@ -1,30 +1,10 @@
 package kzg
 
 import (
-	"fmt"
-
 	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
 	GoKZG "github.com/crate-crypto/go-kzg-4844"
-	ckzg4844 "github.com/ethereum/c-kzg-4844/v2/bindings/go"
-	"github.com/pkg/errors"
 )
 
-func bytesToBlob(blob []byte) *GoKZG.Blob {
-	var ret GoKZG.Blob
-	copy(ret[:], blob)
-	return &ret
-}
-
-func bytesToCommitment(commitment []byte) (ret GoKZG.KZGCommitment) {
-	copy(ret[:], commitment)
-	return
-}
-
-func bytesToKZGProof(proof []byte) (ret GoKZG.KZGProof) {
-	copy(ret[:], proof)
-	return
-}
-
 // Verify performs single or batch verification of commitments depending on the number of given BlobSidecars.
 func Verify(blobSidecars ...blocks.ROBlob) error {
 	if len(blobSidecars) == 0 {
@@ -47,121 +27,18 @@ func Verify(blobSidecars ...blocks.ROBlob) error {
 	return kzgContext.VerifyBlobKZGProofBatch(blobs, cmts, proofs)
 }
 
-// VerifyBlobKZGProofBatch verifies KZG proofs for multiple blobs using batch verification.
-// This is more efficient than verifying each blob individually when len(blobs) > 1.
-// For single blob verification, it uses the optimized single verification path.
-func VerifyBlobKZGProofBatch(blobs [][]byte, commitments [][]byte, proofs [][]byte) error {
-	if len(blobs) != len(commitments) || len(blobs) != len(proofs) {
-		return errors.Errorf("number of blobs (%d), commitments (%d), and proofs (%d) must match", len(blobs), len(commitments), len(proofs))
-	}
-
-	if len(blobs) == 0 {
-		return nil
-	}
-
-	// Optimize for single blob case - use single verification to avoid batch overhead
-	if len(blobs) == 1 {
-		return kzgContext.VerifyBlobKZGProof(
-			bytesToBlob(blobs[0]),
-			bytesToCommitment(commitments[0]),
-			bytesToKZGProof(proofs[0]))
-	}
-
-	// Use batch verification for multiple blobs
-	ckzgBlobs := make([]ckzg4844.Blob, len(blobs))
-	ckzgCommitments := make([]ckzg4844.Bytes48, len(commitments))
-	ckzgProofs := make([]ckzg4844.Bytes48, len(proofs))
-
-	for i := range blobs {
-		if len(blobs[i]) != len(ckzg4844.Blob{}) {
-			return fmt.Errorf("blobs len (%d) differs from expected (%d)", len(blobs[i]), len(ckzg4844.Blob{}))
-		}
-		if len(commitments[i]) != len(ckzg4844.Bytes48{}) {
-			return fmt.Errorf("commitments len (%d) differs from expected (%d)", len(commitments[i]), len(ckzg4844.Bytes48{}))
-		}
-		if len(proofs[i]) != len(ckzg4844.Bytes48{}) {
-			return fmt.Errorf("proofs len (%d) differs from expected (%d)", len(proofs[i]), len(ckzg4844.Bytes48{}))
-		}
-		ckzgBlobs[i] = ckzg4844.Blob(blobs[i])
-		ckzgCommitments[i] = ckzg4844.Bytes48(commitments[i])
-		ckzgProofs[i] = ckzg4844.Bytes48(proofs[i])
-	}
-
-	valid, err := ckzg4844.VerifyBlobKZGProofBatch(ckzgBlobs, ckzgCommitments, ckzgProofs)
-	if err != nil {
-		return errors.Wrap(err, "batch verification")
-	}
-	if !valid {
-		return errors.New("batch KZG proof verification failed")
-	}
-
-	return nil
-}
-
+func bytesToBlob(blob []byte) *GoKZG.Blob {
+	var ret GoKZG.Blob
+	copy(ret[:], blob)
+	return &ret
+}
+
-// VerifyCellKZGProofBatchFromBlobData verifies cell KZG proofs in batch format directly from blob data.
-// This is more efficient than reconstructing data column sidecars when you have the raw blob data and cell proofs.
-// For PeerDAS/Fulu, the execution client provides cell proofs in flattened format via BlobsBundleV2.
-// For single blob verification, it optimizes by computing cells once and verifying efficiently.
-func VerifyCellKZGProofBatchFromBlobData(blobs [][]byte, commitments [][]byte, cellProofs [][]byte, numberOfColumns uint64) error {
-	blobCount := uint64(len(blobs))
-	expectedCellProofs := blobCount * numberOfColumns
-
-	if uint64(len(cellProofs)) != expectedCellProofs {
-		return errors.Errorf("expected %d cell proofs, got %d", expectedCellProofs, len(cellProofs))
-	}
-
-	if len(commitments) != len(blobs) {
-		return errors.Errorf("number of commitments (%d) must match number of blobs (%d)", len(commitments), len(blobs))
-	}
-
-	if blobCount == 0 {
-		return nil
-	}
-
-	// Handle multiple blobs - compute cells for all blobs
-	allCells := make([]Cell, 0, expectedCellProofs)
-	allCommitments := make([]Bytes48, 0, expectedCellProofs)
-	allIndices := make([]uint64, 0, expectedCellProofs)
-	allProofs := make([]Bytes48, 0, expectedCellProofs)
-
-	for blobIndex := range blobs {
-		if len(blobs[blobIndex]) != len(Blob{}) {
-			return fmt.Errorf("blobs len (%d) differs from expected (%d)", len(blobs[blobIndex]), len(Blob{}))
-		}
-		// Convert blob to kzg.Blob type
-		blob := Blob(blobs[blobIndex])
-
-		// Compute cells for this blob
-		cells, err := ComputeCells(&blob)
-		if err != nil {
-			return errors.Wrapf(err, "failed to compute cells for blob %d", blobIndex)
-		}
-
-		// Add cells and corresponding data for each column
-		for columnIndex := range numberOfColumns {
-			cellProofIndex := uint64(blobIndex)*numberOfColumns + columnIndex
-			if len(commitments[blobIndex]) != len(Bytes48{}) {
-				return fmt.Errorf("commitments len (%d) differs from expected (%d)", len(commitments[blobIndex]), len(Bytes48{}))
-			}
-			if len(cellProofs[cellProofIndex]) != len(Bytes48{}) {
-				return fmt.Errorf("proofs len (%d) differs from expected (%d)", len(cellProofs[cellProofIndex]), len(Bytes48{}))
-			}
-			allCells = append(allCells, cells[columnIndex])
-			allCommitments = append(allCommitments, Bytes48(commitments[blobIndex]))
-			allIndices = append(allIndices, columnIndex)
-			allProofs = append(allProofs, Bytes48(cellProofs[cellProofIndex]))
-		}
-	}
-
-	// Batch verify all cells
-	valid, err := VerifyCellKZGProofBatch(allCommitments, allIndices, allCells, allProofs)
-	if err != nil {
-		return errors.Wrap(err, "cell batch verification")
-	}
-	if !valid {
-		return errors.New("cell KZG proof batch verification failed")
-	}
-
-	return nil
-}
+
+func bytesToCommitment(commitment []byte) (ret GoKZG.KZGCommitment) {
+	copy(ret[:], commitment)
+	return
+}
+
+func bytesToKZGProof(proof []byte) (ret GoKZG.KZGProof) {
+	copy(ret[:], proof)
+	return
+}
```
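The flattened layout consumed by `VerifyCellKZGProofBatchFromBlobData` above is easy to get wrong: the execution client hands back one proof per (blob, column) pair, concatenated blob by blob. A minimal sketch of just the index arithmetic, independent of Prysm's types (the helper name and the 128-column constant are illustrative assumptions):

```go
package main

import "fmt"

// cellProofIndex locates the proof for a given blob and column in a
// flattened proofs slice of the BlobsBundleV2 style: proofs are grouped
// blob by blob, then ordered by column index within each blob.
func cellProofIndex(blobIndex, columnIndex, numberOfColumns uint64) uint64 {
	return blobIndex*numberOfColumns + columnIndex
}

func main() {
	const numberOfColumns = 128 // value used by the tests below
	// The proof for blob 2, column 5 lives at 2*128 + 5 = 261.
	fmt.Println(cellProofIndex(2, 5, numberOfColumns))
}
```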
```diff
@@ -37,7 +37,6 @@ func TestBytesToAny(t *testing.T) {
 }
 
 func TestGenerateCommitmentAndProof(t *testing.T) {
-	require.NoError(t, Start())
 	blob := random.GetRandBlob(123)
 	commitment, proof, err := GenerateCommitmentAndProof(blob)
 	require.NoError(t, err)
@@ -46,432 +45,3 @@ func TestGenerateCommitmentAndProof(t *testing.T) {
 	require.Equal(t, expectedCommitment, commitment)
 	require.Equal(t, expectedProof, proof)
 }
-
-func TestVerifyBlobKZGProofBatch(t *testing.T) {
-	// Initialize KZG for testing
-	require.NoError(t, Start())
-
-	t.Run("valid single blob batch", func(t *testing.T) {
-		blob := random.GetRandBlob(123)
-		commitment, proof, err := GenerateCommitmentAndProof(blob)
-		require.NoError(t, err)
-
-		blobs := [][]byte{blob[:]}
-		commitments := [][]byte{commitment[:]}
-		proofs := [][]byte{proof[:]}
-
-		err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
-		require.NoError(t, err)
-	})
-
-	t.Run("valid multiple blob batch", func(t *testing.T) {
-		blobCount := 3
-		blobs := make([][]byte, blobCount)
-		commitments := make([][]byte, blobCount)
-		proofs := make([][]byte, blobCount)
-
-		for i := 0; i < blobCount; i++ {
-			blob := random.GetRandBlob(int64(i))
-			commitment, proof, err := GenerateCommitmentAndProof(blob)
-			require.NoError(t, err)
-
-			blobs[i] = blob[:]
-			commitments[i] = commitment[:]
-			proofs[i] = proof[:]
-		}
-
-		err := VerifyBlobKZGProofBatch(blobs, commitments, proofs)
-		require.NoError(t, err)
-	})
-
-	t.Run("empty inputs should pass", func(t *testing.T) {
-		err := VerifyBlobKZGProofBatch([][]byte{}, [][]byte{}, [][]byte{})
-		require.NoError(t, err)
-	})
-
-	t.Run("mismatched input lengths", func(t *testing.T) {
-		blob := random.GetRandBlob(123)
-		commitment, proof, err := GenerateCommitmentAndProof(blob)
-		require.NoError(t, err)
-
-		// Test different mismatch scenarios
-		err = VerifyBlobKZGProofBatch(
-			[][]byte{blob[:]},
-			[][]byte{},
-			[][]byte{proof[:]},
-		)
-		require.ErrorContains(t, "number of blobs (1), commitments (0), and proofs (1) must match", err)
-
-		err = VerifyBlobKZGProofBatch(
-			[][]byte{blob[:], blob[:]},
-			[][]byte{commitment[:]},
-			[][]byte{proof[:], proof[:]},
-		)
-		require.ErrorContains(t, "number of blobs (2), commitments (1), and proofs (2) must match", err)
-	})
-
-	t.Run("invalid commitment should fail", func(t *testing.T) {
-		blob := random.GetRandBlob(123)
-		_, proof, err := GenerateCommitmentAndProof(blob)
-		require.NoError(t, err)
-
-		// Use a different blob's commitment (mismatch)
-		differentBlob := random.GetRandBlob(456)
-		wrongCommitment, _, err := GenerateCommitmentAndProof(differentBlob)
-		require.NoError(t, err)
-
-		blobs := [][]byte{blob[:]}
-		commitments := [][]byte{wrongCommitment[:]}
-		proofs := [][]byte{proof[:]}
-
-		err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
-		// Single blob optimization uses different error message
-		require.ErrorContains(t, "can't verify opening proof", err)
-	})
-
-	t.Run("invalid proof should fail", func(t *testing.T) {
-		blob := random.GetRandBlob(123)
-		commitment, _, err := GenerateCommitmentAndProof(blob)
-		require.NoError(t, err)
-
-		// Use wrong proof
-		invalidProof := make([]byte, 48) // All zeros
-
-		blobs := [][]byte{blob[:]}
-		commitments := [][]byte{commitment[:]}
-		proofs := [][]byte{invalidProof}
-
-		err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
-		require.ErrorContains(t, "short buffer", err)
-	})
-
-	t.Run("mixed valid and invalid proofs should fail", func(t *testing.T) {
-		// First blob - valid
-		blob1 := random.GetRandBlob(123)
-		commitment1, proof1, err := GenerateCommitmentAndProof(blob1)
-		require.NoError(t, err)
-
-		// Second blob - invalid proof
-		blob2 := random.GetRandBlob(456)
-		commitment2, _, err := GenerateCommitmentAndProof(blob2)
-		require.NoError(t, err)
-		invalidProof := make([]byte, 48) // All zeros
-
-		blobs := [][]byte{blob1[:], blob2[:]}
-		commitments := [][]byte{commitment1[:], commitment2[:]}
-		proofs := [][]byte{proof1[:], invalidProof}
-
-		err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
-		require.ErrorContains(t, "batch verification", err)
-	})
-
-	t.Run("batch KZG proof verification failed", func(t *testing.T) {
-		// Create multiple blobs with mismatched commitments and proofs to trigger batch verification failure
-		blob1 := random.GetRandBlob(123)
-		blob2 := random.GetRandBlob(456)
-
-		// Generate valid proof for blob1
-		commitment1, proof1, err := GenerateCommitmentAndProof(blob1)
-		require.NoError(t, err)
-
-		// Generate valid proof for blob2 but use wrong commitment (from blob1)
-		_, proof2, err := GenerateCommitmentAndProof(blob2)
-		require.NoError(t, err)
-
-		// Use blob2 data with blob1's commitment and blob2's proof - this should cause batch verification to fail
-		blobs := [][]byte{blob1[:], blob2[:]}
-		commitments := [][]byte{commitment1[:], commitment1[:]} // Wrong commitment for blob2
-		proofs := [][]byte{proof1[:], proof2[:]}
-
-		err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
-		require.ErrorContains(t, "batch KZG proof verification failed", err)
-	})
-}
-
-func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) {
-	// Initialize KZG for testing
-	require.NoError(t, Start())
-
-	t.Run("valid single blob cell verification", func(t *testing.T) {
-		numberOfColumns := uint64(128)
-
-		// Generate blob and commitment
-		randBlob := random.GetRandBlob(123)
-		var blob Blob
-		copy(blob[:], randBlob[:])
-		commitment, err := BlobToKZGCommitment(&blob)
-		require.NoError(t, err)
-
-		// Compute cells and proofs
-		cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob)
-		require.NoError(t, err)
-
-		// Create flattened cell proofs (like execution client format)
-		cellProofs := make([][]byte, numberOfColumns)
-		for i := range numberOfColumns {
-			cellProofs[i] = cellsAndProofs.Proofs[i][:]
-		}
-
-		blobs := [][]byte{blob[:]}
-		commitments := [][]byte{commitment[:]}
-
-		err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
-		require.NoError(t, err)
-	})
-
-	t.Run("valid multiple blob cell verification", func(t *testing.T) {
-		numberOfColumns := uint64(128)
-		blobCount := 2
-
-		blobs := make([][]byte, blobCount)
-		commitments := make([][]byte, blobCount)
-		var allCellProofs [][]byte
-
-		for i := range blobCount {
-			// Generate blob and commitment
-			randBlob := random.GetRandBlob(int64(i))
-			var blob Blob
-			copy(blob[:], randBlob[:])
-			commitment, err := BlobToKZGCommitment(&blob)
-			require.NoError(t, err)
-
-			// Compute cells and proofs
-			cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob)
-			require.NoError(t, err)
-
-			blobs[i] = blob[:]
-			commitments[i] = commitment[:]
-
-			// Add cell proofs for this blob
-			for j := range numberOfColumns {
-				allCellProofs = append(allCellProofs, cellsAndProofs.Proofs[j][:])
-			}
-		}
-
-		err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, allCellProofs, numberOfColumns)
-		require.NoError(t, err)
-	})
-
-	t.Run("empty inputs should pass", func(t *testing.T) {
-		err := VerifyCellKZGProofBatchFromBlobData([][]byte{}, [][]byte{}, [][]byte{}, 128)
-		require.NoError(t, err)
-	})
-
-	t.Run("mismatched blob and commitment count", func(t *testing.T) {
-		randBlob := random.GetRandBlob(123)
-		var blob Blob
-		copy(blob[:], randBlob[:])
-
-		err := VerifyCellKZGProofBatchFromBlobData(
-			[][]byte{blob[:]},
-			[][]byte{}, // Empty commitments
-			[][]byte{},
-			128,
-		)
-		require.ErrorContains(t, "expected 128 cell proofs", err)
-	})
-
-	t.Run("wrong cell proof count", func(t *testing.T) {
-		numberOfColumns := uint64(128)
-
-		randBlob := random.GetRandBlob(123)
-		var blob Blob
-		copy(blob[:], randBlob[:])
-		commitment, err := BlobToKZGCommitment(&blob)
-		require.NoError(t, err)
-
-		blobs := [][]byte{blob[:]}
-		commitments := [][]byte{commitment[:]}
-
-		// Wrong number of cell proofs - should be 128 for 1 blob, but provide 10
-		wrongCellProofs := make([][]byte, 10)
-
-		err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, wrongCellProofs, numberOfColumns)
-		require.ErrorContains(t, "expected 128 cell proofs, got 10", err)
-	})
-
-	t.Run("invalid cell proofs should fail", func(t *testing.T) {
-		numberOfColumns := uint64(128)
-
-		randBlob := random.GetRandBlob(123)
-		var blob Blob
-		copy(blob[:], randBlob[:])
-		commitment, err := BlobToKZGCommitment(&blob)
-		require.NoError(t, err)
-
-		blobs := [][]byte{blob[:]}
-		commitments := [][]byte{commitment[:]}
-
-		// Create invalid cell proofs (all zeros)
-		invalidCellProofs := make([][]byte, numberOfColumns)
-		for i := range numberOfColumns {
-			invalidCellProofs[i] = make([]byte, 48) // All zeros
-		}
-
-		err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, invalidCellProofs, numberOfColumns)
-		require.ErrorContains(t, "cell batch verification", err)
-	})
-
-	t.Run("mismatched commitment should fail", func(t *testing.T) {
-		numberOfColumns := uint64(128)
-
-		// Generate blob and correct cell proofs
-		randBlob := random.GetRandBlob(123)
-		var blob Blob
-		copy(blob[:], randBlob[:])
-		cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob)
-		require.NoError(t, err)
-
-		// Generate wrong commitment from different blob
-		randBlob2 := random.GetRandBlob(456)
-		var differentBlob Blob
-		copy(differentBlob[:], randBlob2[:])
-		wrongCommitment, err := BlobToKZGCommitment(&differentBlob)
-		require.NoError(t, err)
-
-		cellProofs := make([][]byte, numberOfColumns)
-		for i := range numberOfColumns {
-			cellProofs[i] = cellsAndProofs.Proofs[i][:]
-		}
-
-		blobs := [][]byte{blob[:]}
-		commitments := [][]byte{wrongCommitment[:]}
-
-		err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
-		require.ErrorContains(t, "cell KZG proof batch verification failed", err)
-	})
-
-	t.Run("invalid blob data that should cause ComputeCells to fail", func(t *testing.T) {
-		numberOfColumns := uint64(128)
-
-		// Create invalid blob (not properly formatted)
-		invalidBlobData := make([]byte, 10) // Too short
-		commitment := make([]byte, 48)      // Dummy commitment
-		cellProofs := make([][]byte, numberOfColumns)
-		for i := range numberOfColumns {
-			cellProofs[i] = make([]byte, 48)
-		}
-
-		blobs := [][]byte{invalidBlobData}
-		commitments := [][]byte{commitment}
-
-		err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
-		require.NotNil(t, err)
-		require.ErrorContains(t, "blobs len (10) differs from expected (131072)", err)
-	})
-
-	t.Run("invalid commitment size should fail", func(t *testing.T) {
-		numberOfColumns := uint64(128)
-
-		randBlob := random.GetRandBlob(123)
-		var blob Blob
-		copy(blob[:], randBlob[:])
-
-		// Create invalid commitment (wrong size)
-		invalidCommitment := make([]byte, 32) // Should be 48 bytes
-		cellProofs := make([][]byte, numberOfColumns)
-		for i := range numberOfColumns {
-			cellProofs[i] = make([]byte, 48)
-		}
-
-		blobs := [][]byte{blob[:]}
-		commitments := [][]byte{invalidCommitment}
-
-		err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
-		require.ErrorContains(t, "commitments len (32) differs from expected (48)", err)
-	})
-
-	t.Run("invalid cell proof size should fail", func(t *testing.T) {
-		numberOfColumns := uint64(128)
-
-		randBlob := random.GetRandBlob(123)
-		var blob Blob
-		copy(blob[:], randBlob[:])
-		commitment, err := BlobToKZGCommitment(&blob)
-		require.NoError(t, err)
-
-		// Create invalid cell proofs (wrong size)
-		invalidCellProofs := make([][]byte, numberOfColumns)
-		for i := range numberOfColumns {
-			if i == 0 {
-				invalidCellProofs[i] = make([]byte, 32) // Wrong size - should be 48
-			} else {
-				invalidCellProofs[i] = make([]byte, 48)
-			}
-		}
-
-		blobs := [][]byte{blob[:]}
-		commitments := [][]byte{commitment[:]}
-
-		err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, invalidCellProofs, numberOfColumns)
-		require.ErrorContains(t, "proofs len (32) differs from expected (48)", err)
-	})
-
-	t.Run("multiple blobs with mixed invalid commitments", func(t *testing.T) {
-		numberOfColumns := uint64(128)
-		blobCount := 2
-
-		blobs := make([][]byte, blobCount)
-		commitments := make([][]byte, blobCount)
-		var allCellProofs [][]byte
-
-		// First blob - valid
-		randBlob1 := random.GetRandBlob(123)
-		var blob1 Blob
-		copy(blob1[:], randBlob1[:])
-		commitment1, err := BlobToKZGCommitment(&blob1)
-		require.NoError(t, err)
-		blobs[0] = blob1[:]
-		commitments[0] = commitment1[:]
-
-		// Second blob - use invalid commitment size
-		randBlob2 := random.GetRandBlob(456)
-		var blob2 Blob
-		copy(blob2[:], randBlob2[:])
-		blobs[1] = blob2[:]
-		commitments[1] = make([]byte, 32) // Wrong size
-
-		// Add cell proofs for both blobs
-		for i := 0; i < blobCount; i++ {
-			for j := uint64(0); j < numberOfColumns; j++ {
-				allCellProofs = append(allCellProofs, make([]byte, 48))
-			}
-		}
-
-		err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, allCellProofs, numberOfColumns)
-		require.ErrorContains(t, "commitments len (32) differs from expected (48)", err)
-	})
-
-	t.Run("multiple blobs with mixed invalid cell proof sizes", func(t *testing.T) {
-		numberOfColumns := uint64(128)
-		blobCount := 2
-
-		blobs := make([][]byte, blobCount)
-		commitments := make([][]byte, blobCount)
-		var allCellProofs [][]byte
-
-		for i := 0; i < blobCount; i++ {
-			randBlob := random.GetRandBlob(int64(i))
-			var blob Blob
-			copy(blob[:], randBlob[:])
-			commitment, err := BlobToKZGCommitment(&blob)
-			require.NoError(t, err)
-
-			blobs[i] = blob[:]
-			commitments[i] = commitment[:]
-
-			// Add cell proofs - make some invalid in the second blob
-			for j := uint64(0); j < numberOfColumns; j++ {
-				if i == 1 && j == 64 {
-					// Invalid proof size in middle of second blob's proofs
-					allCellProofs = append(allCellProofs, make([]byte, 20))
-				} else {
-					allCellProofs = append(allCellProofs, make([]byte, 48))
-				}
-			}
-		}
-
-		err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, allCellProofs, numberOfColumns)
-		require.ErrorContains(t, "proofs len (20) differs from expected (48)", err)
-	})
-}
```
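A recurring theme in the removed tests is that batch verification is all-or-nothing: one bad proof fails the whole call, and the error does not say which index was at fault. A minimal model of that contract, independent of the KZG library (this is a sketch of the behavior, not the library call itself):

```go
package main

import (
	"errors"
	"fmt"
)

// verifyBatch models the contract of VerifyBlobKZGProofBatch: it reports
// only whether the entire batch is valid, never which element failed.
func verifyBatch(valid []bool) error {
	for _, ok := range valid {
		if !ok {
			return errors.New("batch KZG proof verification failed")
		}
	}
	return nil
}

func main() {
	fmt.Println(verifyBatch([]bool{true, true}))  // <nil>
	fmt.Println(verifyBatch([]bool{true, false})) // whole batch rejected
}
```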
```diff
@@ -159,7 +159,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
 	}
 
 	// Fill in missing blocks
-	if err := s.fillInForkChoiceMissingBlocks(ctx, blks[0], preState.FinalizedCheckpoint(), preState.CurrentJustifiedCheckpoint()); err != nil {
+	if err := s.fillInForkChoiceMissingBlocks(ctx, blks[0], preState.CurrentJustifiedCheckpoint(), preState.FinalizedCheckpoint()); err != nil {
 		return errors.Wrap(err, "could not fill in missing blocks to forkchoice")
 	}
```
```diff
@@ -30,10 +30,6 @@ import (
 	"github.com/sirupsen/logrus"
 )
 
-// ErrInvalidCheckpointArgs may be returned when the finalized checkpoint has an epoch greater than the justified checkpoint epoch.
-// If you are seeing this error, make sure you haven't mixed up the order of the arguments in the method you are calling.
-var ErrInvalidCheckpointArgs = errors.New("finalized checkpoint cannot be greater than justified checkpoint")
-
 // CurrentSlot returns the current slot based on time.
 func (s *Service) CurrentSlot() primitives.Slot {
 	return slots.CurrentSlot(s.genesisTime)
@@ -458,9 +454,6 @@ func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot primitives.
 // This is useful for block tree visualizer and additional vote accounting.
 func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock,
 	fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
-	if fCheckpoint.Epoch > jCheckpoint.Epoch {
-		return ErrInvalidCheckpointArgs
-	}
 	pendingNodes := make([]*forkchoicetypes.BlockAndCheckpoints, 0)
 
 	// Fork choice only matters from last finalized slot.
```
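Taken together with the call-site swap in the previous hunk, this removal means nothing catches a transposed (justified, finalized) argument pair at runtime anymore. A standalone sketch of the guard pattern being deleted, with the checkpoint types reduced to bare epochs for illustration:

```go
package main

import (
	"errors"
	"fmt"
)

var errInvalidCheckpointArgs = errors.New("finalized checkpoint cannot be greater than justified checkpoint")

// fillIn mimics the deleted guard: a finalized epoch ahead of the justified
// epoch is impossible on a healthy chain, so it almost always means a caller
// passed the two checkpoints in the wrong order.
func fillIn(finalizedEpoch, justifiedEpoch uint64) error {
	if finalizedEpoch > justifiedEpoch {
		return errInvalidCheckpointArgs
	}
	return nil
}

func main() {
	fmt.Println(fillIn(3, 5)) // correct order: <nil>
	fmt.Println(fillIn(5, 3)) // swapped arguments: caught
}
```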
```diff
@@ -375,81 +375,6 @@ func TestFillForkChoiceMissingBlocks_FinalizedSibling(t *testing.T) {
 	require.Equal(t, ErrNotDescendantOfFinalized.Error(), err.Error())
 }
 
-func TestFillForkChoiceMissingBlocks_ErrorCases(t *testing.T) {
-	tests := []struct {
-		name           string
-		finalizedEpoch primitives.Epoch
-		justifiedEpoch primitives.Epoch
-		expectedError  error
-	}{
-		{
-			name:           "finalized epoch greater than justified epoch",
-			finalizedEpoch: 5,
-			justifiedEpoch: 3,
-			expectedError:  ErrInvalidCheckpointArgs,
-		},
-		{
-			name:           "valid case - finalized equal to justified",
-			finalizedEpoch: 3,
-			justifiedEpoch: 3,
-			expectedError:  nil,
-		},
-		{
-			name:           "valid case - finalized less than justified",
-			finalizedEpoch: 2,
-			justifiedEpoch: 3,
-			expectedError:  nil,
-		},
-	}
-
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			service, tr := minimalTestService(t)
-			ctx, beaconDB := tr.ctx, tr.db
-
-			st, _ := util.DeterministicGenesisState(t, 64)
-			require.NoError(t, service.saveGenesisData(ctx, st))
-
-			// Create a simple block for testing
-			blk := util.NewBeaconBlock()
-			blk.Block.Slot = 10
-			blk.Block.ParentRoot = service.originBlockRoot[:]
-			wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
-			require.NoError(t, err)
-			util.SaveBlock(t, ctx, beaconDB, blk)
-
-			// Create checkpoints with test case epochs
-			finalizedCheckpoint := &ethpb.Checkpoint{
-				Epoch: tt.finalizedEpoch,
-				Root:  service.originBlockRoot[:],
-			}
-			justifiedCheckpoint := &ethpb.Checkpoint{
-				Epoch: tt.justifiedEpoch,
-				Root:  service.originBlockRoot[:],
-			}
-
-			// Set up forkchoice store to avoid other errors
-			fcp := &ethpb.Checkpoint{Epoch: 0, Root: service.originBlockRoot[:]}
-			state, blkRoot, err := prepareForkchoiceState(ctx, 0, service.originBlockRoot, service.originBlockRoot, [32]byte{}, fcp, fcp)
-			require.NoError(t, err)
-			require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
-
-			err = service.fillInForkChoiceMissingBlocks(
-				t.Context(), wsb, finalizedCheckpoint, justifiedCheckpoint)
-
-			if tt.expectedError != nil {
-				require.ErrorIs(t, err, tt.expectedError)
-			} else {
-				// For valid cases, we might get other errors (like block not being descendant of finalized)
-				// but we shouldn't get the checkpoint validation error
-				if err != nil && errors.Is(err, tt.expectedError) {
-					t.Errorf("Unexpected checkpoint validation error: %v", err)
-				}
-			}
-		})
-	}
-}
-
 // blockTree1 constructs the following tree:
 //
 //	/- B1
```
```diff
@@ -2207,13 +2132,13 @@ func TestNoViableHead_Reboot(t *testing.T) {
 
 	// Forkchoice has the genesisRoot loaded at startup
 	require.Equal(t, genesisRoot, service.ensureRootNotZeros(service.cfg.ForkChoiceStore.CachedHeadRoot()))
-	// Service's store has the justified checkpoint root as headRoot (verified below through justified checkpoint comparison)
+	// Service's store has the finalized state as headRoot
 	headRoot, err := service.HeadRoot(ctx)
 	require.NoError(t, err)
-	require.NotEqual(t, bytesutil.ToBytes32(params.BeaconConfig().ZeroHash[:]), bytesutil.ToBytes32(headRoot)) // Ensure head is not zero
+	require.Equal(t, genesisRoot, bytesutil.ToBytes32(headRoot))
 	optimistic, err := service.IsOptimistic(ctx)
 	require.NoError(t, err)
-	require.Equal(t, true, optimistic) // Head is now optimistic when starting from justified checkpoint
+	require.Equal(t, false, optimistic)
 
 	// Check that the node's justified checkpoint does not agree with the
 	// last valid state's justified checkpoint
```
```diff
@@ -20,7 +20,7 @@ func (s *Service) setupForkchoice(st state.BeaconState) error {
 		return errors.Wrap(err, "could not set up forkchoice checkpoints")
 	}
 	if err := s.setupForkchoiceTree(st); err != nil {
-		return errors.Wrap(err, "could not set up forkchoice tree")
+		return errors.Wrap(err, "could not set up forkchoice root")
 	}
 	if err := s.initializeHead(s.ctx, st); err != nil {
 		return errors.Wrap(err, "could not initialize head from db")
@@ -30,24 +30,24 @@ func (s *Service) setupForkchoice(st state.BeaconState) error {
 
 func (s *Service) startupHeadRoot() [32]byte {
 	headStr := features.Get().ForceHead
-	jp := s.CurrentJustifiedCheckpt()
-	jRoot := s.ensureRootNotZeros([32]byte(jp.Root))
+	cp := s.FinalizedCheckpt()
+	fRoot := s.ensureRootNotZeros([32]byte(cp.Root))
 	if headStr == "" {
-		return jRoot
+		return fRoot
 	}
 	if headStr == "head" {
 		root, err := s.cfg.BeaconDB.HeadBlockRoot()
 		if err != nil {
-			log.WithError(err).Error("Could not get head block root, starting with justified block as head")
-			return jRoot
+			log.WithError(err).Error("Could not get head block root, starting with finalized block as head")
+			return fRoot
 		}
 		log.Infof("Using Head root of %#x", root)
 		return root
 	}
 	root, err := bytesutil.DecodeHexWithLength(headStr, 32)
 	if err != nil {
-		log.WithError(err).Error("Could not parse head root, starting with justified block as head")
-		return jRoot
+		log.WithError(err).Error("Could not parse head root, starting with finalized block as head")
+		return fRoot
 	}
 	return [32]byte(root)
 }
```
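Both sides of this hunk implement the same three-way fallback keyed on the force-head feature flag; only the checkpoint used for fallback changes. A reduced sketch of that decision ladder, with Prysm's service dependencies replaced by plain function parameters (all names here are illustrative):

```go
package main

import "fmt"

// resolveStartupHead mirrors the ladder above: empty flag -> checkpoint root;
// "head" -> last head root stored in the DB (checkpoint root on failure);
// anything else -> parsed as a 32-byte hex root (checkpoint root on failure).
func resolveStartupHead(
	forceHead string,
	checkpointRoot [32]byte,
	dbHeadRoot func() ([32]byte, error),
	parseHex func(string) ([32]byte, error),
) [32]byte {
	if forceHead == "" {
		return checkpointRoot
	}
	if forceHead == "head" {
		if root, err := dbHeadRoot(); err == nil {
			return root
		}
		return checkpointRoot // the logged fallback branch in the real code
	}
	if root, err := parseHex(forceHead); err == nil {
		return root
	}
	return checkpointRoot
}

func main() {
	checkpoint := [32]byte{0xaa}
	dbHead := func() ([32]byte, error) { return [32]byte{0xbb}, nil }
	badHex := func(string) ([32]byte, error) { return [32]byte{}, fmt.Errorf("bad hex") }

	fmt.Println(resolveStartupHead("", checkpoint, dbHead, badHex)[0])     // 170: checkpoint root
	fmt.Println(resolveStartupHead("head", checkpoint, dbHead, badHex)[0]) // 187: DB head
	fmt.Println(resolveStartupHead("zz", checkpoint, dbHead, badHex)[0])   // 170: parse failure fallback
}
```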
```diff
@@ -32,7 +32,7 @@ func Test_startupHeadRoot(t *testing.T) {
 		})
 		defer resetCfg()
 		require.Equal(t, service.startupHeadRoot(), gr)
-		require.LogsContain(t, hook, "Could not get head block root, starting with justified block as head")
+		require.LogsContain(t, hook, "Could not get head block root, starting with finalized block as head")
 	})
 
 	st, _ := util.DeterministicGenesisState(t, 64)
```
```diff
@@ -86,7 +86,9 @@ go_test(
         "//beacon-chain/blockchain:go_default_library",
         "//beacon-chain/builder:go_default_library",
         "//beacon-chain/core/feed/state:go_default_library",
+        "//beacon-chain/db:go_default_library",
         "//beacon-chain/db/filesystem:go_default_library",
+        "//beacon-chain/db/testing:go_default_library",
         "//beacon-chain/execution:go_default_library",
         "//beacon-chain/execution/testing:go_default_library",
         "//beacon-chain/monitor:go_default_library",
@@ -99,6 +101,7 @@ go_test(
         "//testing/assert:go_default_library",
         "//testing/require:go_default_library",
         "@com_github_ethereum_go_ethereum//common:go_default_library",
+        "@com_github_prometheus_client_golang//prometheus:go_default_library",
        "@com_github_sirupsen_logrus//hooks/test:go_default_library",
         "@com_github_urfave_cli_v2//:go_default_library",
     ],
```
```diff
@@ -318,7 +318,6 @@ func startBaseServices(cliCtx *cli.Context, beacon *BeaconNode, depositAddress s
 	}
 
 	beacon.BlobStorage.WarmCache()
-	beacon.DataColumnStorage.WarmCache()
 
 	log.Debugln("Starting Slashing DB")
 	if err := beacon.startSlasherDB(cliCtx, clearer); err != nil {
@@ -551,6 +550,11 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
 		return errors.Wrap(err, "could not ensure embedded genesis")
 	}
 
+	// Validate sync options when starting with an empty database
+	if err := b.validateSyncFlags(); err != nil {
+		return err
+	}
+
 	if b.CheckpointInitializer != nil {
 		log.Info("Checkpoint sync - Downloading origin state and block")
 		if err := b.CheckpointInitializer.Initialize(b.ctx, b.db); err != nil {
@@ -565,6 +569,52 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
 	log.WithField("address", depositAddress).Info("Deposit contract")
 	return nil
 }
 
+// validateSyncFlags ensures that when starting with an empty database,
+// the user has explicitly chosen either genesis sync or checkpoint sync.
+func (b *BeaconNode) validateSyncFlags() error {
+	// Check if database has an origin checkpoint (indicating it's not empty)
+	_, err := b.db.OriginCheckpointBlockRoot(b.ctx)
+	if err == nil {
+		// Database is not empty, validation is not needed
+		return nil
+	}
+	if !errors.Is(err, db.ErrNotFoundOriginBlockRoot) {
+		// Some other error occurred
+		return errors.Wrap(err, "could not check origin checkpoint block root")
+	}
+
+	// if genesis exists, also consider DB non-empty.
+	if gb, err := b.db.GenesisBlock(b.ctx); err == nil && gb != nil && !gb.IsNil() {
+		return nil
+	}
+
+	// Database is empty, check if user has provided required flags
+	syncFromGenesis := b.cliCtx.Bool(flags.SyncFromGenesis.Name)
+	hasCheckpointSync := b.CheckpointInitializer != nil
+
+	if !syncFromGenesis && !hasCheckpointSync {
+		return errors.New("when starting with an empty database, you must specify either:\n" +
+			"  --sync-from-genesis (to sync from genesis)\n" +
+			"  --checkpoint-sync-url <url> (to sync from a remote beacon node)\n" +
+			"  --checkpoint-state <path> and --checkpoint-block <path> (to sync from local files)\n\n" +
+			"Checkpoint sync is recommended for faster syncing.")
+	}
+
+	// Check for conflicting sync options
+	if syncFromGenesis && hasCheckpointSync {
+		return errors.New("conflicting sync options: cannot use both --sync-from-genesis and checkpoint sync flags. " +
+			"Please choose either genesis sync or checkpoint sync, not both.")
+	}
+
+	if syncFromGenesis {
+		log.Warn("Syncing from genesis is enabled. This will take a very long time and is not recommended. " +
+			"Consider using checkpoint sync instead with --checkpoint-sync-url.")
+	}
+
+	return nil
+}
+
 func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context, clearer *dbClearer) error {
 	if !b.slasherEnabled {
 		return nil
@@ -941,7 +991,6 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
 		FinalizationFetcher: chainService,
 		BlockReceiver:       chainService,
 		BlobReceiver:        chainService,
-		DataColumnReceiver:  chainService,
 		AttestationReceiver: chainService,
 		GenesisTimeFetcher:  chainService,
 		GenesisFetcher:      chainService,
```
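The added validation reduces to a small truth table over three inputs: whether the database is empty, whether --sync-from-genesis was set, and whether checkpoint sync was configured. A self-contained sketch of that decision logic (error strings shortened; the real messages are in the diff above):

```go
package main

import (
	"errors"
	"fmt"
)

// decideSync condenses validateSyncFlags: an empty database must be paired
// with exactly one of genesis sync or checkpoint sync.
func decideSync(dbEmpty, syncFromGenesis, checkpointSync bool) error {
	if !dbEmpty {
		return nil // existing data: nothing to validate
	}
	switch {
	case syncFromGenesis && checkpointSync:
		return errors.New("conflicting sync options")
	case !syncFromGenesis && !checkpointSync:
		return errors.New("must choose genesis sync or checkpoint sync")
	default:
		return nil
	}
}

func main() {
	fmt.Println(decideSync(true, false, false))  // empty DB, no choice: error
	fmt.Println(decideSync(true, true, true))    // both chosen: error
	fmt.Println(decideSync(true, false, true))   // checkpoint sync: <nil>
	fmt.Println(decideSync(false, false, false)) // non-empty DB: <nil>
}
```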
```diff
@@ -7,6 +7,7 @@ import (
 	"net/http"
 	"net/http/httptest"
 	"path/filepath"
+	"strings"
 	"testing"
 	"time"
 
@@ -14,15 +15,19 @@ import (
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/builder"
 	statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
+	"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
+	testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
 	mockExecution "github.com/OffchainLabs/prysm/v6/beacon-chain/execution/testing"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/monitor"
 	"github.com/OffchainLabs/prysm/v6/cmd"
+	"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
 	"github.com/OffchainLabs/prysm/v6/config/features"
 	"github.com/OffchainLabs/prysm/v6/runtime"
 	"github.com/OffchainLabs/prysm/v6/testing/assert"
 	"github.com/OffchainLabs/prysm/v6/testing/require"
+	"github.com/prometheus/client_golang/prometheus"
 	logTest "github.com/sirupsen/logrus/hooks/test"
 	"github.com/urfave/cli/v2"
 )
@@ -49,6 +54,7 @@ func TestNodeClose_OK(t *testing.T) {
 	set.Bool("demo-config", true, "demo configuration")
 	set.String("deposit-contract", "0x0000000000000000000000000000000000000000", "deposit contract address")
 	set.String("suggested-fee-recipient", "0x6e35733c5af9B61374A128e6F85f553aF09ff89A", "fee recipient")
+	set.Bool("sync-from-genesis", true, "sync from genesis")
 	require.NoError(t, set.Set("suggested-fee-recipient", "0x6e35733c5af9B61374A128e6F85f553aF09ff89A"))
 	cmd.ValidatorMonitorIndicesFlag.Value = &cli.IntSlice{}
 	cmd.ValidatorMonitorIndicesFlag.Value.SetInt(1)
@@ -74,6 +80,7 @@ func TestNodeStart_Ok(t *testing.T) {
 	set := flag.NewFlagSet("test", 0)
 	set.String("datadir", tmp, "node data directory")
 	set.String("suggested-fee-recipient", "0x6e35733c5af9B61374A128e6F85f553aF09ff89A", "fee recipient")
+	set.Bool("sync-from-genesis", true, "sync from genesis")
 	require.NoError(t, set.Set("suggested-fee-recipient", "0x6e35733c5af9B61374A128e6F85f553aF09ff89A"))
 
 	ctx, cancel := newCliContextWithCancel(&app, set)
@@ -104,6 +111,7 @@ func TestNodeStart_SyncChecker(t *testing.T) {
 	set := flag.NewFlagSet("test", 0)
 	set.String("datadir", tmp, "node data directory")
 	set.String("suggested-fee-recipient", "0x6e35733c5af9B61374A128e6F85f553aF09ff89A", "fee recipient")
+	set.Bool("sync-from-genesis", true, "sync from genesis")
 	require.NoError(t, set.Set("suggested-fee-recipient", "0x6e35733c5af9B61374A128e6F85f553aF09ff89A"))
 
 	ctx, cancel := newCliContextWithCancel(&app, set)
@@ -143,6 +151,7 @@ func TestClearDB(t *testing.T) {
 	set.String("datadir", tmp, "node data directory")
 	set.Bool(cmd.ForceClearDB.Name, true, "force clear db")
 	set.String("suggested-fee-recipient", "0x6e35733c5af9B61374A128e6F85f553aF09ff89A", "fee recipient")
+	set.Bool("sync-from-genesis", true, "sync from genesis")
 	require.NoError(t, set.Set("suggested-fee-recipient", "0x6e35733c5af9B61374A128e6F85f553aF09ff89A"))
 	context, cancel := newCliContextWithCancel(&app, set)
@@ -262,3 +271,128 @@ func TestCORS(t *testing.T) {
 		})
 	}
 }
+
+// TestValidateSyncFlags tests the validateSyncFlags function with real database instances
+func TestValidateSyncFlags(t *testing.T) {
+	tests := []struct {
+		expectWarning            bool
+		expectError              bool
+		hasCheckpointInitializer bool
+		syncFromGenesis          bool
+		dbHasOriginCheckpoint    bool
+		expectedErrorContains    string
+		name                     string
+	}{
+		{
+			name:                  "Database not empty - validation skipped",
+			dbHasOriginCheckpoint: true,
+			syncFromGenesis:       false,
+			expectError:           false,
+		},
+		{
+			name:                  "Empty DB, no sync flags - should fail",
+			dbHasOriginCheckpoint: false,
+			syncFromGenesis:       false,
+			expectError:           true,
+			expectedErrorContains: "when starting with an empty database, you must specify either",
+		},
+		{
+			name:                  "Empty DB, sync from genesis - should succeed with warning",
+			dbHasOriginCheckpoint: false,
+			syncFromGenesis:       true,
+			expectError:           false,
+			expectWarning:         true,
+		},
+		{
+			name:                     "Empty DB, checkpoint sync - should succeed",
+			dbHasOriginCheckpoint:    false,
+			hasCheckpointInitializer: true,
+			expectError:              false,
+		},
+		{
+			name:                     "Empty DB, conflicting sync options - should fail",
+			dbHasOriginCheckpoint:    false,
+			syncFromGenesis:          true,
+			hasCheckpointInitializer: true,
+			expectError:              true,
+			expectedErrorContains:    "conflicting sync options",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			// Isolate Prometheus metrics per subtest to avoid duplicate registration across DB setups.
+			reg := prometheus.NewRegistry()
+			prometheus.DefaultRegisterer = reg
+			prometheus.DefaultGatherer = reg
+
+			ctx := context.Background()
+
+			// Set up real database for testing (empty to start).
+			beaconDB := testDB.SetupDB(t)
+
+			// Populate database if needed (simulate "non-empty" via origin checkpoint).
+			if tt.dbHasOriginCheckpoint {
+				err := beaconDB.SaveOriginCheckpointBlockRoot(ctx, [32]byte{0x01})
+				require.NoError(t, err)
+			}
+
+			// Set up CLI flags
+			flagSet := flag.NewFlagSet("test", flag.ContinueOnError)
+			flagSet.Bool(flags.SyncFromGenesis.Name, tt.syncFromGenesis, "")
+
+			app := cli.App{}
+			cliCtx := cli.NewContext(&app, flagSet, nil)
+
+			// Create BeaconNode with test setup
+			beaconNode := &BeaconNode{
+				ctx:    ctx,
+				db:     beaconDB,
+				cliCtx: cliCtx,
+			}
+
+			// Set CheckpointInitializer if needed
+			if tt.hasCheckpointInitializer {
+				beaconNode.CheckpointInitializer = &mockCheckpointInitializer{}
+			}
+
+			// Capture log output for warning detection
+			hook := logTest.NewGlobal()
+			defer hook.Reset()
+
+			// Call the function under test
+			err := beaconNode.validateSyncFlags()
+
+			// Validate results
+			if tt.expectError {
+				require.NotNil(t, err)
+				if tt.expectedErrorContains != "" {
+					require.ErrorContains(t, tt.expectedErrorContains, err)
+				}
+			} else {
+				require.NoError(t, err)
+			}
+
+			// Check for warning log if expected
+			if tt.expectWarning {
+				found := false
+				for _, entry := range hook.Entries {
+					if entry.Level.String() == "warning" &&
+						strings.Contains(entry.Message, "Syncing from genesis is enabled") {
+						found = true
+						break
+					}
+				}
+				require.Equal(t, true, found, "Expected warning log about genesis sync")
+			}
+		})
+	}
+}
+
+// mockCheckpointInitializer is a simple mock for testing
+type mockCheckpointInitializer struct{}
+
+func (m *mockCheckpointInitializer) Initialize(ctx context.Context, db db.Database) error {
+	return nil
+}
```
```diff
@@ -155,7 +155,6 @@ func (s *Service) custodyGroupCountFromPeerENR(pid peer.ID) uint64 {
 	log := log.WithFields(logrus.Fields{
 		"peerID":       pid,
 		"defaultValue": custodyRequirement,
-		"agent":        agentString(pid, s.Host()),
 	})
 
 	// Retrieve the ENR of the peer.
```
```diff
@@ -8,7 +8,6 @@ import (
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
 	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
-	testp2p "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
 	"github.com/OffchainLabs/prysm/v6/config/params"
 	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
 	"github.com/OffchainLabs/prysm/v6/consensus-types/wrapper"
@@ -270,7 +269,6 @@ func TestCustodyGroupCountFromPeer(t *testing.T) {
 			service := &Service{
 				peers:    peers,
 				metaData: tc.metadata,
-				host:     testp2p.NewTestP2P(t).Host(),
 			}
 
 			// Retrieve the custody count from the remote peer.
@@ -331,7 +329,6 @@ func TestCustodyGroupCountFromPeerENR(t *testing.T) {
 
 			service := &Service{
 				peers: peers,
-				host:  testp2p.NewTestP2P(t).Host(),
 			}
 
 			actual := service.custodyGroupCountFromPeerENR(pid)
```
```diff
@@ -684,7 +684,7 @@ func (s *Service) filterPeer(node *enode.Node) bool {
 
 	peerData, multiAddrs, err := convertToAddrInfo(node)
 	if err != nil {
-		log.WithError(err).WithField("node", node.String()).Debug("Could not convert to peer data")
+		log.WithError(err).Debug("Could not convert to peer data")
 		return false
 	}
 
@@ -851,7 +851,7 @@ func convertToMultiAddr(nodes []*enode.Node) []ma.Multiaddr {
 func convertToAddrInfo(node *enode.Node) (*peer.AddrInfo, []ma.Multiaddr, error) {
 	multiAddrs, err := retrieveMultiAddrsFromNode(node)
 	if err != nil {
-		return nil, nil, errors.Wrap(err, "retrieve multiaddrs from node")
+		return nil, nil, err
 	}
 
 	if len(multiAddrs) == 0 {
```
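The second hunk trades a wrapped error for the bare one, which drops call-site context from whatever eventually logs it. A minimal illustration of what the wrapping buys, using github.com/pkg/errors as the file above does (the inner error string here is invented for the example):

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// retrieve stands in for retrieveMultiAddrsFromNode failing deep in the stack.
func retrieve() error { return errors.New("no TCP port in ENR") }

func main() {
	// Bare return: the reader has to guess which operation failed.
	fmt.Println(retrieve())
	// Wrapped return: the failing operation is named in the message.
	fmt.Println(errors.Wrap(retrieve(), "retrieve multiaddrs from node"))
	// Output:
	// no TCP port in ENR
	// retrieve multiaddrs from node: no TCP port in ENR
}
```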
```diff
@@ -969,7 +969,7 @@ func TestFindPeers_NodeDeduplication(t *testing.T) {
 	cache.SubnetIDs.EmptyAllCaches()
 	defer cache.SubnetIDs.EmptyAllCaches()
 
-	ctx := t.Context()
+	ctx := context.Background()
 
 	// Create LocalNodes and manipulate sequence numbers
 	localNode1 := createTestNodeWithID(t, "node1")
@@ -1193,6 +1193,8 @@ func TestFindPeers_received_bad_existing_node(t *testing.T) {
 	cache.SubnetIDs.EmptyAllCaches()
 	defer cache.SubnetIDs.EmptyAllCaches()
 
+	ctx := context.Background()
+
 	// Create LocalNode with same ID but different sequences
 	localNode1 := createTestNodeWithID(t, "testnode")
 	node1_seq1 := localNode1.Node() // Get current node
@@ -1211,7 +1213,7 @@ func TestFindPeers_received_bad_existing_node(t *testing.T) {
 			MaxPeers: 30,
 		},
 		genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
-		peers: peers.NewStatus(t.Context(), &peers.StatusConfig{
+		peers: peers.NewStatus(ctx, &peers.StatusConfig{
 			PeerLimit:    30,
 			ScorerParams: &scorers.Config{},
 		}),
@@ -1241,7 +1243,7 @@ func TestFindPeers_received_bad_existing_node(t *testing.T) {
 	service.dv5Listener = testp2p.NewMockListener(localNode, iter)
 
 	// Run findPeers - node1_seq1 gets processed first, then callback marks peer bad, then node1_seq2 fails
-	ctxWithTimeout, cancel := context.WithTimeout(t.Context(), 1*time.Second)
+	ctxWithTimeout, cancel := context.WithTimeout(ctx, 1*time.Second)
 	defer cancel()
 
 	result, err := service.findPeers(ctxWithTimeout, 3)
```
```diff
@@ -63,7 +63,6 @@ type TestP2P struct {
 	custodyInfoMut        sync.RWMutex // protects custodyGroupCount and earliestAvailableSlot
 	earliestAvailableSlot primitives.Slot
 	custodyGroupCount     uint64
-	enr                   *enr.Record
 }
 
 // NewTestP2P initializes a new p2p test service.
@@ -104,7 +103,6 @@ func NewTestP2P(t *testing.T, userOptions ...config.Option) *TestP2P {
 		pubsub:       ps,
 		joinedTopics: map[string]*pubsub.Topic{},
 		peers:        peerStatuses,
-		enr:          new(enr.Record),
 	}
 }
 
@@ -312,8 +310,8 @@ func (p *TestP2P) Host() host.Host {
 }
 
 // ENR returns the enr of the local peer.
-func (p *TestP2P) ENR() *enr.Record {
-	return p.enr
+func (*TestP2P) ENR() *enr.Record {
+	return new(enr.Record)
 }
 
 // NodeID returns the node id of the local peer.
```
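With the stored field gone, every ENR() call now hands back a fresh zero-valued record, so two callers can no longer observe each other's mutations through the shared pointer. A toy illustration of that aliasing difference (a plain struct stands in for enr.Record):

```go
package main

import "fmt"

type record struct{ seq uint64 }

// sharedENR mimics the old behavior: one record, aliased by every caller.
func sharedENR(r *record) func() *record { return func() *record { return r } }

// freshENR mimics the new behavior: a new zero record on each call.
func freshENR() *record { return &record{} }

func main() {
	shared := sharedENR(&record{})
	shared().seq = 42
	fmt.Println(shared().seq) // 42: the mutation is visible to every caller

	freshENR().seq = 42
	fmt.Println(freshENR().seq) // 0: each call starts from a zero record
}
```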
```diff
@@ -18,7 +18,6 @@ go_library(
         "//api/server:go_default_library",
         "//api/server/structs:go_default_library",
         "//beacon-chain/blockchain:go_default_library",
-        "//beacon-chain/blockchain/kzg:go_default_library",
         "//beacon-chain/cache:go_default_library",
         "//beacon-chain/cache/depositsnapshot:go_default_library",
         "//beacon-chain/core/altair:go_default_library",
@@ -61,6 +60,7 @@ go_library(
         "//runtime/version:go_default_library",
         "//time/slots:go_default_library",
         "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
+        "@com_github_ethereum_go_ethereum//crypto/kzg4844:go_default_library",
         "@com_github_pkg_errors//:go_default_library",
         "@com_github_prometheus_client_golang//prometheus:go_default_library",
         "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
@@ -84,7 +84,6 @@ go_test(
         "//api:go_default_library",
         "//api/server:go_default_library",
         "//api/server/structs:go_default_library",
-        "//beacon-chain/blockchain/kzg:go_default_library",
         "//beacon-chain/blockchain/testing:go_default_library",
         "//beacon-chain/cache/depositsnapshot:go_default_library",
         "//beacon-chain/core/signing:go_default_library",
@@ -125,6 +124,7 @@ go_test(
         "//testing/require:go_default_library",
         "//testing/util:go_default_library",
         "//time/slots:go_default_library",
+        "@com_github_crate_crypto_go_kzg_4844//:go_default_library",
         "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
         "@com_github_pkg_errors//:go_default_library",
         "@com_github_prysmaticlabs_fastssz//:go_default_library",
```
@@ -13,7 +13,6 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/api"
|
||||
"github.com/OffchainLabs/prysm/v6/api/server/structs"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache/depositsnapshot"
|
||||
corehelpers "github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
|
||||
@@ -33,6 +32,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/crypto/kzg4844"
|
||||
"github.com/pkg/errors"
|
||||
ssz "github.com/prysmaticlabs/fastssz"
|
||||
"github.com/sirupsen/logrus"
|
||||
@@ -942,13 +942,14 @@ func decodePhase0JSON(body []byte) (*eth.GenericSignedBeaconBlock, error) {
|
||||
// broadcastSidecarsIfSupported broadcasts blob sidecars when an equivocated block occurs.
|
||||
func broadcastSidecarsIfSupported(ctx context.Context, s *Server, b interfaces.SignedBeaconBlock, gb *eth.GenericSignedBeaconBlock, versionHeader string) error {
|
||||
switch versionHeader {
|
||||
case version.String(version.Fulu):
|
||||
return s.broadcastSeenBlockSidecars(ctx, b, gb.GetFulu().Blobs, gb.GetFulu().KzgProofs)
|
||||
case version.String(version.Electra):
|
||||
return s.broadcastSeenBlockSidecars(ctx, b, gb.GetElectra().Blobs, gb.GetElectra().KzgProofs)
|
||||
case version.String(version.Deneb):
|
||||
return s.broadcastSeenBlockSidecars(ctx, b, gb.GetDeneb().Blobs, gb.GetDeneb().KzgProofs)
|
||||
default:
|
||||
// other forks before Deneb do not support blob sidecars
|
||||
// forks after fulu do not support blob sidecars, instead support data columns, no need to rebroadcast
|
||||
return nil
|
||||
}
|
||||
}
|
||||
@@ -1052,7 +1053,7 @@ func (s *Server) validateConsensus(ctx context.Context, b *eth.GenericSignedBeac
		return nil
	}

	if err := s.validateBlobs(blk, blobs, proofs); err != nil {
	if err := s.validateBlobSidecars(blk, blobs, proofs); err != nil {
		return err
	}

@@ -1066,41 +1067,23 @@ func (s *Server) validateEquivocation(blk interfaces.ReadOnlyBeaconBlock) error
	return nil
}

func (s *Server) validateBlobs(blk interfaces.SignedBeaconBlock, blobs [][]byte, proofs [][]byte) error {
func (s *Server) validateBlobSidecars(blk interfaces.SignedBeaconBlock, blobs [][]byte, proofs [][]byte) error {
	if blk.Version() < version.Deneb {
		return nil
	}
	numberOfColumns := params.BeaconConfig().NumberOfColumns
	commitments, err := blk.Block().Body().BlobKzgCommitments()
	kzgs, err := blk.Block().Body().BlobKzgCommitments()
	if err != nil {
		return errors.Wrap(err, "could not get blob kzg commitments")
	}
	maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(blk.Block().Slot())
	if len(blobs) > maxBlobsPerBlock {
		return fmt.Errorf("number of blobs over max, %d > %d", len(blobs), maxBlobsPerBlock)
	if len(blobs) != len(proofs) || len(blobs) != len(kzgs) {
		return errors.New("number of blobs, proofs, and commitments do not match")
	}
	if blk.Version() >= version.Fulu {
		// For Fulu blocks, proofs are cell proofs (blobs * numberOfColumns)
		expectedProofsCount := uint64(len(blobs)) * numberOfColumns
		if uint64(len(proofs)) != expectedProofsCount || len(blobs) != len(commitments) {
			return fmt.Errorf("number of blobs (%d), cell proofs (%d), and commitments (%d) do not match (expected %d cell proofs)", len(blobs), len(proofs), len(commitments), expectedProofsCount)
		}
		// For Fulu blocks, proofs are cell proofs from execution client's BlobsBundleV2
		// Verify cell proofs directly without reconstructing data column sidecars
		if err := kzg.VerifyCellKZGProofBatchFromBlobData(blobs, commitments, proofs, numberOfColumns); err != nil {
			return errors.Wrap(err, "could not verify cell proofs")
		}
	} else {
		// For pre-Fulu blocks, proofs are blob proofs (1:1 with blobs)
		if len(blobs) != len(proofs) || len(blobs) != len(commitments) {
			return errors.Errorf("number of blobs (%d), proofs (%d), and commitments (%d) do not match", len(blobs), len(proofs), len(commitments))
		}
		// Use batch verification for better performance
		if err := kzg.VerifyBlobKZGProofBatch(blobs, commitments, proofs); err != nil {
			return errors.Wrap(err, "could not verify blob proofs")
	for i, blob := range blobs {
		b := kzg4844.Blob(blob)
		if err := kzg4844.VerifyBlobProof(&b, kzg4844.Commitment(kzgs[i]), kzg4844.Proof(proofs[i])); err != nil {
			return errors.Wrap(err, "could not verify blob proof")
		}
	}

	return nil
}

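For reference, the two invariants validateBlobSidecars enforces on Fulu cell proofs can be written out directly: the flattened proof list carries one proof per (blob, column) pair, laid out blob-major. A self-contained sketch (the function names are illustrative, not identifiers from this diff):

// expectedCellProofCount is how many flattened cell proofs a Fulu block must
// carry: one proof per (blob, column) pair.
func expectedCellProofCount(blobCount, numberOfColumns uint64) uint64 {
	return blobCount * numberOfColumns
}

// cellProofIndex locates the proof for column colIdx of blob blobIdx inside
// the flattened list produced by the execution client's BlobsBundleV2.
func cellProofIndex(blobIdx, colIdx, numberOfColumns uint64) uint64 {
	return blobIdx*numberOfColumns + colIdx
}

With 2 blobs and the standard 128 columns, 256 proofs are expected, and the proof for blob 1, column 3 sits at index 131.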
@@ -1644,8 +1627,6 @@ func (s *Server) broadcastSeenBlockSidecars(
	if err != nil {
		return err
	}

	// Broadcast blob sidecars with forkchoice checking
	for _, sc := range scs {
		r, err := sc.SignedBlockHeader.Header.HashTreeRoot()
		if err != nil {
@@ -14,7 +14,6 @@ import (

	"github.com/OffchainLabs/prysm/v6/api"
	"github.com/OffchainLabs/prysm/v6/api/server/structs"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
	chainMock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/cache/depositsnapshot"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
@@ -41,6 +40,7 @@ import (
	"github.com/OffchainLabs/prysm/v6/testing/require"
	"github.com/OffchainLabs/prysm/v6/testing/util"
	"github.com/OffchainLabs/prysm/v6/time/slots"
	GoKZG "github.com/crate-crypto/go-kzg-4844"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/pkg/errors"
	ssz "github.com/prysmaticlabs/fastssz"
@@ -4781,329 +4781,25 @@ func TestServer_broadcastBlobSidecars(t *testing.T) {
	require.LogsContain(t, hook, "Broadcasted blob sidecar for already seen block")
}

func Test_validateBlobs(t *testing.T) {
	require.NoError(t, kzg.Start())

func Test_validateBlobSidecars(t *testing.T) {
	blob := util.GetRandBlob(123)
	// Generate proper commitment and proof for the blob
	var kzgBlob kzg.Blob
	copy(kzgBlob[:], blob[:])
	commitment, err := kzg.BlobToKZGCommitment(&kzgBlob)
	require.NoError(t, err)
	proof, err := kzg.ComputeBlobKZGProof(&kzgBlob, commitment)
	require.NoError(t, err)
	commitment := GoKZG.KZGCommitment{180, 218, 156, 194, 59, 20, 10, 189, 186, 254, 132, 93, 7, 127, 104, 172, 238, 240, 237, 70, 83, 89, 1, 152, 99, 0, 165, 65, 143, 62, 20, 215, 230, 14, 205, 95, 28, 245, 54, 25, 160, 16, 178, 31, 232, 207, 38, 85}
	proof := GoKZG.KZGProof{128, 110, 116, 170, 56, 111, 126, 87, 229, 234, 211, 42, 110, 150, 129, 206, 73, 142, 167, 243, 90, 149, 240, 240, 236, 204, 143, 182, 229, 249, 81, 27, 153, 171, 83, 70, 144, 250, 42, 1, 188, 215, 71, 235, 30, 7, 175, 86}
	blk := util.NewBeaconBlockDeneb()
	blk.Block.Body.BlobKzgCommitments = [][]byte{commitment[:]}
	b, err := blocks.NewSignedBeaconBlock(blk)
	require.NoError(t, err)
	s := &Server{}
	require.NoError(t, s.validateBlobs(b, [][]byte{blob[:]}, [][]byte{proof[:]}))
	require.NoError(t, s.validateBlobSidecars(b, [][]byte{blob[:]}, [][]byte{proof[:]}))

	require.ErrorContains(t, "number of blobs (1), proofs (0), and commitments (1) do not match", s.validateBlobs(b, [][]byte{blob[:]}, [][]byte{}))
	require.ErrorContains(t, "number of blobs, proofs, and commitments do not match", s.validateBlobSidecars(b, [][]byte{blob[:]}, [][]byte{}))

	sk, err := bls.RandKey()
	require.NoError(t, err)
	blk.Block.Body.BlobKzgCommitments = [][]byte{sk.PublicKey().Marshal()}
	b, err = blocks.NewSignedBeaconBlock(blk)
	require.NoError(t, err)
	require.ErrorContains(t, "could not verify blob proofs", s.validateBlobs(b, [][]byte{blob[:]}, [][]byte{proof[:]}))

	blobs := [][]byte{}
	commitments := [][]byte{}
	proofs := [][]byte{}
	for i := 0; i < 10; i++ {
		blobs = append(blobs, blob[:])
		commitments = append(commitments, commitment[:])
		proofs = append(proofs, proof[:])
	}
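Stripped of the harness, the rewritten setup above is a commit-and-prove round trip. A hedged sketch of the same flow using the Prysm kzg helpers visible in this diff (the exact signatures are assumptions drawn from the surrounding test code):

// commitAndProve derives a commitment and proof for one blob, then checks the
// resulting triple with the same batch verifier the server uses.
func commitAndProve(blob []byte) error {
	var kzgBlob kzg.Blob
	copy(kzgBlob[:], blob)
	commitment, err := kzg.BlobToKZGCommitment(&kzgBlob)
	if err != nil {
		return err
	}
	proof, err := kzg.ComputeBlobKZGProof(&kzgBlob, commitment)
	if err != nil {
		return err
	}
	// A valid (blob, commitment, proof) triple must pass batch verification.
	return kzg.VerifyBlobKZGProofBatch([][]byte{blob}, [][]byte{commitment[:]}, [][]byte{proof[:]})
}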
t.Run("pre-Deneb block should return early", func(t *testing.T) {
|
||||
// Create a pre-Deneb block (e.g., Capella)
|
||||
blk := util.NewBeaconBlockCapella()
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
// Should return nil for pre-Deneb blocks regardless of blobs
|
||||
require.NoError(t, s.validateBlobs(b, [][]byte{}, [][]byte{}))
|
||||
require.NoError(t, s.validateBlobs(b, blobs[:1], proofs[:1]))
|
||||
})
|
||||
|
||||
t.Run("Deneb block with valid single blob", func(t *testing.T) {
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Body.BlobKzgCommitments = [][]byte{commitment[:]}
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
require.NoError(t, s.validateBlobs(b, [][]byte{blob[:]}, [][]byte{proof[:]}))
|
||||
})
|
||||
|
||||
t.Run("Deneb block with max blobs (6)", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 100
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Slot = 10 // Deneb slot
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:6]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
// Should pass with exactly 6 blobs
|
||||
require.NoError(t, s.validateBlobs(b, blobs[:6], proofs[:6]))
|
||||
})
|
||||
|
||||
t.Run("Deneb block exceeding max blobs", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 100
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Slot = 10 // Deneb slot
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:7]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
// Should fail with 7 blobs when max is 6
|
||||
err = s.validateBlobs(b, blobs[:7], proofs[:7])
|
||||
require.ErrorContains(t, "number of blobs over max, 7 > 6", err)
|
||||
})
|
||||
|
||||
t.Run("Electra block with valid blobs", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Set up Electra config with max 9 blobs
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 5
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
blk := util.NewBeaconBlockElectra()
|
||||
blk.Block.Slot = 160 // Electra slot (epoch 5+)
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:9]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
// Should pass with 9 blobs in Electra
|
||||
require.NoError(t, s.validateBlobs(b, blobs[:9], proofs[:9]))
|
||||
})
|
||||
|
||||
t.Run("Electra block exceeding max blobs", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Set up Electra config with max 9 blobs
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 5
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
blk := util.NewBeaconBlockElectra()
|
||||
blk.Block.Slot = 160 // Electra slot
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:10]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
// Should fail with 10 blobs when max is 9
|
||||
err = s.validateBlobs(b, blobs[:10], proofs[:10])
|
||||
require.ErrorContains(t, "number of blobs over max, 10 > 9", err)
|
||||
})
|
||||
|
||||
t.Run("Fulu block with valid cell proofs", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 5
|
||||
testCfg.FuluForkEpoch = 10
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
|
||||
testCfg.NumberOfColumns = 128 // Standard PeerDAS configuration
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
// Create Fulu block with proper cell proofs
|
||||
blk := util.NewBeaconBlockFulu()
|
||||
blk.Block.Slot = 320 // Epoch 10 (Fulu fork)
|
||||
|
||||
// Generate valid commitments and cell proofs for testing
|
||||
blobCount := 2
|
||||
commitments := make([][]byte, blobCount)
|
||||
fuluBlobs := make([][]byte, blobCount)
|
||||
var kzgBlobs []kzg.Blob
|
||||
|
||||
for i := 0; i < blobCount; i++ {
|
||||
blob := util.GetRandBlob(int64(i))
|
||||
fuluBlobs[i] = blob[:]
|
||||
var kzgBlob kzg.Blob
|
||||
copy(kzgBlob[:], blob[:])
|
||||
kzgBlobs = append(kzgBlobs, kzgBlob)
|
||||
|
||||
// Generate commitment
|
||||
commitment, err := kzg.BlobToKZGCommitment(&kzgBlob)
|
||||
require.NoError(t, err)
|
||||
commitments[i] = commitment[:]
|
||||
}
|
||||
|
||||
blk.Block.Body.BlobKzgCommitments = commitments
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Generate cell proofs for the blobs (flattened format like execution client)
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
cellProofs := make([][]byte, uint64(blobCount)*numberOfColumns)
|
||||
for blobIdx := 0; blobIdx < blobCount; blobIdx++ {
|
||||
cellsAndProofs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlobs[blobIdx])
|
||||
require.NoError(t, err)
|
||||
|
||||
for colIdx := uint64(0); colIdx < numberOfColumns; colIdx++ {
|
||||
cellProofIdx := uint64(blobIdx)*numberOfColumns + colIdx
|
||||
cellProofs[cellProofIdx] = cellsAndProofs.Proofs[colIdx][:]
|
||||
}
|
||||
}
|
||||
|
||||
s := &Server{}
|
||||
// Should use cell batch verification for Fulu blocks
|
||||
require.NoError(t, s.validateBlobs(b, fuluBlobs, cellProofs))
|
||||
})
|
||||
|
||||
t.Run("Fulu block with invalid cell proof count", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 5
|
||||
testCfg.FuluForkEpoch = 10
|
||||
testCfg.NumberOfColumns = 128
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
blk := util.NewBeaconBlockFulu()
|
||||
blk.Block.Slot = 320 // Epoch 10 (Fulu fork)
|
||||
|
||||
// Create valid commitments but wrong number of cell proofs
|
||||
blobCount := 2
|
||||
commitments := make([][]byte, blobCount)
|
||||
fuluBlobs := make([][]byte, blobCount)
|
||||
for i := 0; i < blobCount; i++ {
|
||||
blob := util.GetRandBlob(int64(i))
|
||||
fuluBlobs[i] = blob[:]
|
||||
|
||||
var kzgBlob kzg.Blob
|
||||
copy(kzgBlob[:], blob[:])
|
||||
commitment, err := kzg.BlobToKZGCommitment(&kzgBlob)
|
||||
require.NoError(t, err)
|
||||
commitments[i] = commitment[:]
|
||||
}
|
||||
|
||||
blk.Block.Body.BlobKzgCommitments = commitments
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Wrong number of cell proofs (should be blobCount * numberOfColumns)
|
||||
wrongCellProofs := make([][]byte, 10) // Too few proofs
|
||||
|
||||
s := &Server{}
|
||||
err = s.validateBlobs(b, fuluBlobs, wrongCellProofs)
|
||||
require.ErrorContains(t, "do not match", err)
|
||||
})
|
||||
|
||||
t.Run("Deneb block with invalid blob proof", func(t *testing.T) {
|
||||
blob := util.GetRandBlob(123)
|
||||
invalidProof := make([]byte, 48) // All zeros - invalid proof
|
||||
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Body.BlobKzgCommitments = [][]byte{sk.PublicKey().Marshal()}
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := &Server{}
|
||||
err = s.validateBlobs(b, [][]byte{blob[:]}, [][]byte{invalidProof})
|
||||
require.ErrorContains(t, "could not verify blob proofs", err)
|
||||
})
|
||||
|
||||
t.Run("empty blobs and proofs should pass", func(t *testing.T) {
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Body.BlobKzgCommitments = [][]byte{}
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := &Server{}
|
||||
require.NoError(t, s.validateBlobs(b, [][]byte{}, [][]byte{}))
|
||||
})
|
||||
|
||||
t.Run("BlobSchedule with progressive increases (BPO)", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Set up config with BlobSchedule (BPO - Blob Production Optimization)
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 100
|
||||
testCfg.FuluForkEpoch = 200
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
|
||||
// Define blob schedule with progressive increases
|
||||
testCfg.BlobSchedule = []params.BlobScheduleEntry{
|
||||
{Epoch: 0, MaxBlobsPerBlock: 3}, // Start with 3 blobs
|
||||
{Epoch: 10, MaxBlobsPerBlock: 5}, // Increase to 5 at epoch 10
|
||||
{Epoch: 20, MaxBlobsPerBlock: 7}, // Increase to 7 at epoch 20
|
||||
{Epoch: 30, MaxBlobsPerBlock: 9}, // Increase to 9 at epoch 30
|
||||
}
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
s := &Server{}
|
||||
|
||||
// Test epoch 0-9: max 3 blobs
|
||||
t.Run("epoch 0-9: max 3 blobs", func(t *testing.T) {
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Slot = 5 // Epoch 0
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:3]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.validateBlobs(b, blobs[:3], proofs[:3]))
|
||||
|
||||
// Should fail with 4 blobs
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:4]
|
||||
b, err = blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
err = s.validateBlobs(b, blobs[:4], proofs[:4])
|
||||
require.ErrorContains(t, "number of blobs over max, 4 > 3", err)
|
||||
})
|
||||
|
||||
// Test epoch 30+: max 9 blobs
|
||||
t.Run("epoch 30+: max 9 blobs", func(t *testing.T) {
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Slot = 960 // Epoch 30
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:9]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.validateBlobs(b, blobs[:9], proofs[:9]))
|
||||
|
||||
// Should fail with 10 blobs
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:10]
|
||||
b, err = blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
err = s.validateBlobs(b, blobs[:10], proofs[:10])
|
||||
require.ErrorContains(t, "number of blobs over max, 10 > 9", err)
|
||||
})
|
||||
})
|
||||
require.ErrorContains(t, "could not verify blob proof: can't verify opening proof", s.validateBlobSidecars(b, [][]byte{blob[:]}, [][]byte{proof[:]}))
|
||||
}
|
||||
|
||||
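The BPO subtests above encode a "latest entry wins" rule: the limit at an epoch comes from the last schedule entry whose epoch has been reached. A self-contained sketch of that lookup, assuming entries are sorted by ascending epoch (blobScheduleEntry and maxBlobsAt are illustrative names, not identifiers from this diff):

type blobScheduleEntry struct {
	Epoch            uint64
	MaxBlobsPerBlock int
}

// maxBlobsAt returns the limit from the last schedule entry whose epoch is
// not after the given epoch, falling back to def when no entry applies yet.
func maxBlobsAt(schedule []blobScheduleEntry, epoch uint64, def int) int {
	limit := def
	for _, e := range schedule {
		if e.Epoch <= epoch {
			limit = e.MaxBlobsPerBlock // sorted input: the last match wins
		}
	}
	return limit
}

With the schedule from the test, epoch 5 resolves to 3 blobs and epoch 30 to 9, matching the two subtests.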
func TestGetPendingConsolidations(t *testing.T) {

@@ -1,5 +1,3 @@

# gazelle:ignore

load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
@@ -39,7 +37,6 @@ go_library(
        "//api/client/builder:go_default_library",
        "//async/event:go_default_library",
        "//beacon-chain/blockchain:go_default_library",
        "//beacon-chain/blockchain/kzg:go_default_library",
        "//beacon-chain/builder:go_default_library",
        "//beacon-chain/cache:go_default_library",
        "//beacon-chain/cache/depositsnapshot:go_default_library",
@@ -50,7 +47,6 @@ go_library(
        "//beacon-chain/core/feed/operation:go_default_library",
        "//beacon-chain/core/feed/state:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/peerdas:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/core/time:go_default_library",
        "//beacon-chain/core/transition:go_default_library",
@@ -67,8 +63,8 @@ go_library(
        "//beacon-chain/rpc/core:go_default_library",
        "//beacon-chain/startup:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/state-native:go_default_library",
        "//beacon-chain/state/stategen:go_default_library",
        "//beacon-chain/state/state-native:go_default_library",
        "//beacon-chain/sync:go_default_library",
        "//config/features:go_default_library",
        "//config/fieldparams:go_default_library",
@@ -85,7 +81,7 @@ go_library(
        "//crypto/rand:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//encoding/ssz:go_default_library",
        "//genesis:go_default_library",
        "//genesis:go_default_library",
        "//math:go_default_library",
        "//monitoring/tracing:go_default_library",
        "//monitoring/tracing/trace:go_default_library",
@@ -185,6 +181,7 @@ common_deps = [
    "@org_golang_google_protobuf//types/known/emptypb:go_default_library",
]

# gazelle:ignore
go_test(
    name = "go_default_test",
    timeout = "moderate",
@@ -29,19 +29,12 @@ func TestConstructGenericBeaconBlock(t *testing.T) {
		require.NoError(t, err)
		r1, err := eb.Block.HashTreeRoot()
		require.NoError(t, err)
		bundle := &enginev1.BlobsBundleV2{
			KzgCommitments: [][]byte{{1, 2, 3}},
			Proofs:         [][]byte{{4, 5, 6}},
			Blobs:          [][]byte{{7, 8, 9}},
		}
		result, err := vs.constructGenericBeaconBlock(b, bundle, primitives.ZeroWei())
		result, err := vs.constructGenericBeaconBlock(b, nil, primitives.ZeroWei())
		require.NoError(t, err)
		r2, err := result.GetFulu().Block.HashTreeRoot()
		require.NoError(t, err)
		require.Equal(t, r1, r2)
		require.Equal(t, result.IsBlinded, false)
		require.DeepEqual(t, bundle.Blobs, result.GetFulu().GetBlobs())
		require.DeepEqual(t, bundle.Proofs, result.GetFulu().GetKzgProofs())
	})

	// Test for Electra version

@@ -15,12 +15,9 @@ import (
	blockfeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/block"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/operation"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/kv"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v6/config/features"
	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
@@ -61,31 +58,28 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
	if err != nil {
		log.WithError(err).Error("Could not convert slot to time")
	}

	log := log.WithField("slot", req.Slot)
	log.WithField("sinceSlotStartTime", time.Since(t)).Info("Begin building block")
	log.WithFields(logrus.Fields{
		"slot":               req.Slot,
		"sinceSlotStartTime": time.Since(t),
	}).Info("Begin building block")

	// A syncing validator should not produce a block.
	if vs.SyncChecker.Syncing() {
		log.Error("Fail to build block: node is syncing")
		return nil, status.Error(codes.Unavailable, "Syncing to latest head, not ready to respond")
	}
	// An optimistic validator MUST NOT produce a block (i.e., sign across the DOMAIN_BEACON_PROPOSER domain).
	if slots.ToEpoch(req.Slot) >= params.BeaconConfig().BellatrixForkEpoch {
		if err := vs.optimisticStatus(ctx); err != nil {
			log.WithError(err).Error("Fail to build block: node is optimistic")
			return nil, status.Errorf(codes.Unavailable, "Validator is not ready to propose: %v", err)
		}
	}

	head, parentRoot, err := vs.getParentState(ctx, req.Slot)
	if err != nil {
		log.WithError(err).Error("Fail to build block: could not get parent state")
		return nil, err
	}
	sBlk, err := getEmptyBlock(req.Slot)
	if err != nil {
		log.WithError(err).Error("Fail to build block: could not get empty block")
		return nil, status.Errorf(codes.Internal, "Could not prepare block: %v", err)
	}
	// Set slot, graffiti, randao reveal, and parent root.
@@ -107,7 +101,8 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
	}

	resp, err := vs.BuildBlockParallel(ctx, sBlk, head, req.SkipMevBoost, builderBoostFactor)
	log = log.WithFields(logrus.Fields{
	log := log.WithFields(logrus.Fields{
		"slot":               req.Slot,
		"sinceSlotStartTime": time.Since(t),
		"validator":          sBlk.Block().ProposerIndex(),
	})
@@ -280,11 +275,6 @@ func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.Signed
//
// ProposeBeaconBlock handles the proposal of beacon blocks.
func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSignedBeaconBlock) (*ethpb.ProposeResponse, error) {
	var (
		blobSidecars       []*ethpb.BlobSidecar
		dataColumnSidecars []*ethpb.DataColumnSidecar
	)

	ctx, span := trace.StartSpan(ctx, "ProposerServer.ProposeBeaconBlock")
	defer span.End()

@@ -296,7 +286,6 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
	if err != nil {
		return nil, status.Errorf(codes.InvalidArgument, "%s: %v", "decode block failed", err)
	}

	root, err := block.Block().HashTreeRoot()
	if err != nil {
		return nil, status.Errorf(codes.Internal, "Could not hash tree root: %v", err)
@@ -311,10 +300,11 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
		return &ethpb.ProposeResponse{BlockRoot: root[:]}, nil
	}

	var sidecars []*ethpb.BlobSidecar
	if block.IsBlinded() {
		block, blobSidecars, err = vs.handleBlindedBlock(ctx, block)
		block, sidecars, err = vs.handleBlindedBlock(ctx, block)
	} else if block.Version() >= version.Deneb {
		blobSidecars, dataColumnSidecars, err = vs.handleUnblindedBlock(block, req)
		sidecars, err = vs.blobSidecarsFromUnblindedBlock(block, req)
	}
	if err != nil {
		return nil, status.Errorf(codes.Internal, "%s: %v", "handle block failed", err)
@@ -322,6 +312,7 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign

	var wg sync.WaitGroup
	errChan := make(chan error, 1)

	wg.Add(1)
	go func() {
		defer wg.Done()
@@ -332,9 +323,10 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
		errChan <- nil
	}()

	if err := vs.broadcastAndReceiveSidecars(ctx, block, root, blobSidecars, dataColumnSidecars); err != nil {
		return nil, status.Errorf(codes.Internal, "Could not broadcast/receive sidecars: %v", err)
	if err := vs.broadcastAndReceiveBlobs(ctx, sidecars, root); err != nil {
		return nil, status.Errorf(codes.Internal, "Could not broadcast/receive blobs: %v", err)
	}

	wg.Wait()
	if err := <-errChan; err != nil {
		return nil, status.Errorf(codes.Internal, "Could not broadcast/receive block: %v", err)
@@ -343,35 +335,12 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
	return &ethpb.ProposeResponse{BlockRoot: root[:]}, nil
}

// broadcastAndReceiveSidecars broadcasts and receives sidecars.
func (vs *Server) broadcastAndReceiveSidecars(
	ctx context.Context,
	block interfaces.SignedBeaconBlock,
	root [fieldparams.RootLength]byte,
	blobSidecars []*ethpb.BlobSidecar,
	dataColumnSideCars []*ethpb.DataColumnSidecar,
) error {
	if block.Version() >= version.Fulu {
		if err := vs.broadcastAndReceiveDataColumns(ctx, dataColumnSideCars, root, block.Block().Slot()); err != nil {
			return errors.Wrap(err, "broadcast and receive data columns")
		}
		return nil
	}

	if err := vs.broadcastAndReceiveBlobs(ctx, blobSidecars, root); err != nil {
		return errors.Wrap(err, "broadcast and receive blobs")
	}

	return nil
}

// handleBlindedBlock processes blinded beacon blocks (pre-Fulu only).
// Post-Fulu blinded blocks are handled directly in ProposeBeaconBlock.
func (vs *Server) handleBlindedBlock(ctx context.Context, block interfaces.SignedBeaconBlock) (interfaces.SignedBeaconBlock, []*ethpb.BlobSidecar, error) {
	if block.Version() < version.Bellatrix {
		return nil, nil, errors.New("pre-Bellatrix blinded block")
	}

	if vs.BlockBuilder == nil || !vs.BlockBuilder.Configured() {
		return nil, nil, errors.New("unconfigured block builder")
	}
@@ -398,34 +367,16 @@ func (vs *Server) handleBlindedBlock(ctx context.Context, block interfaces.Signe
	return copiedBlock, sidecars, nil
}

func (vs *Server) handleUnblindedBlock(
	block interfaces.SignedBeaconBlock,
	req *ethpb.GenericSignedBeaconBlock,
) ([]*ethpb.BlobSidecar, []*ethpb.DataColumnSidecar, error) {
func (vs *Server) blobSidecarsFromUnblindedBlock(block interfaces.SignedBeaconBlock, req *ethpb.GenericSignedBeaconBlock) ([]*ethpb.BlobSidecar, error) {
	rawBlobs, proofs, err := blobsAndProofs(req)
	if err != nil {
		return nil, nil, err
		return nil, err
	}

	if block.Version() >= version.Fulu {
		dataColumnSideCars, err := peerdas.ConstructDataColumnSidecars(block, rawBlobs, proofs)
		if err != nil {
			return nil, nil, errors.Wrap(err, "construct data column sidecars")
		}

		return nil, dataColumnSideCars, nil
	}

	blobSidecars, err := BuildBlobSidecars(block, rawBlobs, proofs)
	if err != nil {
		return nil, nil, errors.Wrap(err, "build blob sidecars")
	}

	return blobSidecars, nil, nil
	return BuildBlobSidecars(block, rawBlobs, proofs)
}

// broadcastReceiveBlock broadcasts a block and handles its reception.
func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, root [fieldparams.RootLength]byte) error {
func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, root [32]byte) error {
	protoBlock, err := block.Proto()
	if err != nil {
		return errors.Wrap(err, "protobuf conversion failed")
@@ -441,14 +392,18 @@ func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.Si
}

// broadcastAndReceiveBlobs handles the broadcasting and reception of blob sidecars.
func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethpb.BlobSidecar, root [fieldparams.RootLength]byte) error {
func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethpb.BlobSidecar, root [32]byte) error {
	eg, eCtx := errgroup.WithContext(ctx)
	for subIdx, sc := range sidecars {
	for i, sc := range sidecars {
		// Copy the iteration instance to a local variable to give each go-routine its own copy to play with.
		// See https://golang.org/doc/faq#closures_and_goroutines for more details.
		subIdx := i
		sCar := sc
		eg.Go(func() error {
			if err := vs.P2P.BroadcastBlob(eCtx, uint64(subIdx), sc); err != nil {
			if err := vs.P2P.BroadcastBlob(eCtx, uint64(subIdx), sCar); err != nil {
				return errors.Wrap(err, "broadcast blob failed")
			}
			readOnlySc, err := blocks.NewROBlobWithRoot(sc, root)
			readOnlySc, err := blocks.NewROBlobWithRoot(sCar, root)
			if err != nil {
				return errors.Wrap(err, "ROBlob creation failed")
			}
@@ -466,69 +421,6 @@ func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethp
	return eg.Wait()
}

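The subIdx := i and sCar := sc copies above guard against the classic pre-Go 1.22 pitfall in which every closure launched from a loop shares one iteration variable. A standalone illustration of the pattern with golang.org/x/sync/errgroup:

package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	sidecars := []string{"a", "b", "c"}
	var eg errgroup.Group
	for i, sc := range sidecars {
		i, sc := i, sc // copy: each goroutine gets its own pair
		eg.Go(func() error {
			fmt.Println(i, sc) // safe: prints each pair exactly once
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		fmt.Println("broadcast failed:", err)
	}
}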
// broadcastAndReceiveDataColumns handles the broadcasting and reception of data columns sidecars.
func (vs *Server) broadcastAndReceiveDataColumns(
	ctx context.Context,
	sidecars []*ethpb.DataColumnSidecar,
	root [fieldparams.RootLength]byte,
	slot primitives.Slot,
) error {
	dataColumnsWithholdCount := features.Get().DataColumnsWithholdCount
	verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(sidecars))

	eg, _ := errgroup.WithContext(ctx)
	for _, sd := range sidecars {
		roDataColumn, err := blocks.NewRODataColumnWithRoot(sd, root)
		if err != nil {
			return errors.Wrap(err, "new read-only data column with root")
		}

		// We build this block ourselves, so we can upgrade the read only data column sidecar into a verified one.
		verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
		verifiedRODataColumns = append(verifiedRODataColumns, verifiedRODataColumn)

		// Copy the iteration instance to a local variable to give each go-routine its own copy to play with.
		// See https://golang.org/doc/faq#closures_and_goroutines for more details.
		sidecar := sd
		eg.Go(func() error {
			if sidecar.Index < dataColumnsWithholdCount {
				log.WithFields(logrus.Fields{
					"root":  fmt.Sprintf("%#x", root),
					"slot":  slot,
					"index": sidecar.Index,
				}).Warning("Withholding data column")

				return nil
			}

			// Compute the subnet index based on the column index.
			subnet := peerdas.ComputeSubnetForDataColumnSidecar(sidecar.Index)

			if err := vs.P2P.BroadcastDataColumnSidecar(root, subnet, sidecar); err != nil {
				return errors.Wrap(err, "broadcast data column")
			}

			return nil
		})
	}

	if err := eg.Wait(); err != nil {
		return errors.Wrap(err, "wait for data columns to be broadcasted")
	}

	if err := vs.DataColumnReceiver.ReceiveDataColumns(verifiedRODataColumns); err != nil {
		return errors.Wrap(err, "receive data column")
	}

	for _, verifiedRODataColumn := range verifiedRODataColumns {
		vs.OperationNotifier.OperationFeed().Send(&feed.Event{
			Type: operation.DataColumnSidecarReceived,
			Data: &operation.DataColumnSidecarReceivedData{DataColumn: &verifiedRODataColumn}, // #nosec G601
		})
	}
	return nil
}

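peerdas.ComputeSubnetForDataColumnSidecar maps a column index to a gossip subnet. In the consensus-spec PeerDAS drafts this is a plain modulo over the subnet count; a sketch of that rule, with the subnet count stated as an assumption rather than read from this diff:

// Sketch of compute_subnet_for_data_column_sidecar: sidecars for column i
// are gossiped on subnet i mod DATA_COLUMN_SIDECAR_SUBNET_COUNT.
const dataColumnSidecarSubnetCount = 128 // assumption, taken from spec presets

func computeSubnetForDataColumnSidecar(columnIndex uint64) uint64 {
	return columnIndex % dataColumnSidecarSubnetCount
}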
// Deprecated: The gRPC API will remain the default and fully supported through v8 (expected in 2026) but will be eventually removed in favor of REST API.
//
// PrepareBeaconProposer caches and updates the fee recipient for the given proposer.

@@ -6,7 +6,6 @@ import (
	"testing"
	"time"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
	mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/builder"
	builderTest "github.com/OffchainLabs/prysm/v6/beacon-chain/builder/testing"
@@ -895,9 +894,6 @@ func injectSlashings(t *testing.T, st state.BeaconState, keys []bls.SecretKey, s
}

func TestProposer_ProposeBlock_OK(t *testing.T) {
	// Initialize KZG for Fulu blocks
	require.NoError(t, kzg.Start())

	tests := []struct {
		name  string
		block func([32]byte) *ethpb.GenericSignedBeaconBlock
@@ -1102,131 +1098,6 @@ func TestProposer_ProposeBlock_OK(t *testing.T) {
			},
			err: "blob KZG commitments don't match number of blobs or KZG proofs",
		},
		{
			name: "fulu block no blob",
			block: func(parent [32]byte) *ethpb.GenericSignedBeaconBlock {
				sb := &ethpb.SignedBeaconBlockContentsFulu{
					Block: &ethpb.SignedBeaconBlockFulu{
						Block: &ethpb.BeaconBlockElectra{Slot: 5, ParentRoot: parent[:], Body: util.HydrateBeaconBlockBodyElectra(&ethpb.BeaconBlockBodyElectra{})},
					},
				}
				blk := &ethpb.GenericSignedBeaconBlock_Fulu{Fulu: sb}
				return &ethpb.GenericSignedBeaconBlock{Block: blk, IsBlinded: false}
			},
		},
		{
			name: "fulu block with single blob and cell proofs",
			block: func(parent [32]byte) *ethpb.GenericSignedBeaconBlock {
				numberOfColumns := uint64(128)
				// For Fulu, we have cell proofs (blobs * numberOfColumns)
				cellProofs := make([][]byte, numberOfColumns)
				for i := uint64(0); i < numberOfColumns; i++ {
					cellProofs[i] = bytesutil.PadTo([]byte{byte(i)}, 48)
				}
				// Blob must be exactly 131072 bytes
				blob := make([]byte, 131072)
				blob[0] = 0x01
				sb := &ethpb.SignedBeaconBlockContentsFulu{
					Block: &ethpb.SignedBeaconBlockFulu{
						Block: &ethpb.BeaconBlockElectra{
							Slot: 5, ParentRoot: parent[:],
							Body: util.HydrateBeaconBlockBodyElectra(&ethpb.BeaconBlockBodyElectra{
								BlobKzgCommitments: [][]byte{bytesutil.PadTo([]byte("kc"), 48)},
							}),
						},
					},
					KzgProofs: cellProofs,
					Blobs: [][]byte{blob},
				}
				blk := &ethpb.GenericSignedBeaconBlock_Fulu{Fulu: sb}
				return &ethpb.GenericSignedBeaconBlock{Block: blk, IsBlinded: false}
			},
		},
		{
			name: "fulu block with multiple blobs and cell proofs",
			block: func(parent [32]byte) *ethpb.GenericSignedBeaconBlock {
				numberOfColumns := uint64(128)
				blobCount := 3
				// For Fulu, we have cell proofs (blobs * numberOfColumns)
				cellProofs := make([][]byte, uint64(blobCount)*numberOfColumns)
				for i := range cellProofs {
					cellProofs[i] = bytesutil.PadTo([]byte{byte(i % 256)}, 48)
				}
				// Create properly sized blobs (131072 bytes each)
				blobs := make([][]byte, blobCount)
				for i := 0; i < blobCount; i++ {
					blob := make([]byte, 131072)
					blob[0] = byte(i + 1)
					blobs[i] = blob
				}
				sb := &ethpb.SignedBeaconBlockContentsFulu{
					Block: &ethpb.SignedBeaconBlockFulu{
						Block: &ethpb.BeaconBlockElectra{
							Slot: 5, ParentRoot: parent[:],
							Body: util.HydrateBeaconBlockBodyElectra(&ethpb.BeaconBlockBodyElectra{
								BlobKzgCommitments: [][]byte{
									bytesutil.PadTo([]byte("kc"), 48),
									bytesutil.PadTo([]byte("kc1"), 48),
									bytesutil.PadTo([]byte("kc2"), 48),
								},
							}),
						},
					},
					KzgProofs: cellProofs,
					Blobs: blobs,
				}
				blk := &ethpb.GenericSignedBeaconBlock_Fulu{Fulu: sb}
				return &ethpb.GenericSignedBeaconBlock{Block: blk, IsBlinded: false}
			},
		},
		{
			name: "fulu block wrong cell proof count (should be blobs * 128)",
			block: func(parent [32]byte) *ethpb.GenericSignedBeaconBlock {
				// Wrong number of cell proofs - should be 2 * 128 = 256, but providing only 2
				// Create properly sized blobs
				blob1 := make([]byte, 131072)
				blob1[0] = 0x01
				blob2 := make([]byte, 131072)
				blob2[0] = 0x02
				sb := &ethpb.SignedBeaconBlockContentsFulu{
					Block: &ethpb.SignedBeaconBlockFulu{
						Block: &ethpb.BeaconBlockElectra{
							Slot: 5, ParentRoot: parent[:],
							Body: util.HydrateBeaconBlockBodyElectra(&ethpb.BeaconBlockBodyElectra{
								BlobKzgCommitments: [][]byte{
									bytesutil.PadTo([]byte("kc"), 48),
									bytesutil.PadTo([]byte("kc1"), 48),
								},
							}),
						},
					},
					KzgProofs: [][]byte{{0x01}, {0x02}}, // Wrong: should be 256 cell proofs
					Blobs: [][]byte{blob1, blob2},
				}
				blk := &ethpb.GenericSignedBeaconBlock_Fulu{Fulu: sb}
				return &ethpb.GenericSignedBeaconBlock{Block: blk, IsBlinded: false}
			},
			err: "blobs and cells proofs mismatch",
		},
		{
			name: "blind fulu block with blob commitments",
			block: func(parent [32]byte) *ethpb.GenericSignedBeaconBlock {
				blockToPropose := util.NewBlindedBeaconBlockFulu()
				blockToPropose.Message.Slot = 5
				blockToPropose.Message.ParentRoot = parent[:]
				txRoot, err := ssz.TransactionsRoot([][]byte{})
				require.NoError(t, err)
				withdrawalsRoot, err := ssz.WithdrawalSliceRoot([]*enginev1.Withdrawal{}, fieldparams.MaxWithdrawalsPerPayload)
				require.NoError(t, err)
				blockToPropose.Message.Body.ExecutionPayloadHeader.TransactionsRoot = txRoot[:]
				blockToPropose.Message.Body.ExecutionPayloadHeader.WithdrawalsRoot = withdrawalsRoot[:]
				blockToPropose.Message.Body.BlobKzgCommitments = [][]byte{bytesutil.PadTo([]byte{0x01}, 48)}
				blk := &ethpb.GenericSignedBeaconBlock_BlindedFulu{BlindedFulu: blockToPropose}
				return &ethpb.GenericSignedBeaconBlock{Block: blk}
			},
			useBuilder: true,
			err: "commitment value doesn't match block", // Known issue with mock builder cell proof mismatch
		},
	}

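The 131072 blob length used throughout these cases is not arbitrary: a blob is 4096 field elements of 32 bytes each. A one-line sanity check:

const (
	fieldElementsPerBlob = 4096
	bytesPerFieldElement = 32
	blobLength           = fieldElementsPerBlob * bytesPerFieldElement // 131072
)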
	for _, tt := range tests {
@@ -1240,29 +1111,15 @@ func TestProposer_ProposeBlock_OK(t *testing.T) {

			c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
			db := dbutil.SetupDB(t)
			// Create cell proofs for Fulu blocks (128 proofs per blob)
			numberOfColumns := uint64(128)
			cellProofs := make([][]byte, numberOfColumns)
			for i := uint64(0); i < numberOfColumns; i++ {
				cellProofs[i] = bytesutil.PadTo([]byte{byte(i)}, 48)
			}
			// Create properly sized blob for mock builder
			mockBlob := make([]byte, 131072)
			mockBlob[0] = 0x03
			// Use the same commitment as in the blind block test
			mockCommitment := bytesutil.PadTo([]byte{0x01}, 48)

			proposerServer := &Server{
				BlockReceiver: c,
				BlockNotifier: c.BlockNotifier(),
				P2P:           mockp2p.NewTestP2P(t),
				BlockBuilder: &builderTest.MockBuilderService{HasConfigured: tt.useBuilder, PayloadCapella: emptyPayloadCapella(), PayloadDeneb: emptyPayloadDeneb(),
					BlobBundle: &enginev1.BlobsBundle{KzgCommitments: [][]byte{mockCommitment}, Proofs: [][]byte{{0x02}}, Blobs: [][]byte{{0x03}}},
					BlobBundleV2: &enginev1.BlobsBundleV2{KzgCommitments: [][]byte{mockCommitment}, Proofs: cellProofs, Blobs: [][]byte{mockBlob}}},
				BeaconDB:           db,
				BlobReceiver:       c,
				DataColumnReceiver: c, // Add DataColumnReceiver for Fulu blocks
				OperationNotifier:  c.OperationNotifier(),
					BlobBundle: &enginev1.BlobsBundle{KzgCommitments: [][]byte{bytesutil.PadTo([]byte{0x01}, 48)}, Proofs: [][]byte{{0x02}}, Blobs: [][]byte{{0x03}}}},
				BeaconDB:          db,
				BlobReceiver:      c,
				OperationNotifier: c.OperationNotifier(),
			}
			blockToPropose := tt.block(bsRoot)
			res, err := proposerServer.ProposeBeaconBlock(t.Context(), blockToPropose)
@@ -69,7 +69,6 @@ type Server struct {
	SyncCommitteePool      synccommittee.Pool
	BlockReceiver          blockchain.BlockReceiver
	BlobReceiver           blockchain.BlobReceiver
	DataColumnReceiver     blockchain.DataColumnReceiver
	MockEth1Votes          bool
	Eth1BlockFetcher       execution.POWBlockFetcher
	PendingDepositsFetcher depositsnapshot.PendingDepositsFetcher

@@ -89,7 +89,6 @@ type Config struct {
	AttestationReceiver       blockchain.AttestationReceiver
	BlockReceiver             blockchain.BlockReceiver
	BlobReceiver              blockchain.BlobReceiver
	DataColumnReceiver        blockchain.DataColumnReceiver
	ExecutionChainService     execution.Chain
	ChainStartFetcher         execution.ChainStartFetcher
	ExecutionChainInfoFetcher execution.ChainInfoFetcher
@@ -239,7 +238,6 @@ func NewService(ctx context.Context, cfg *Config) *Service {
		P2P:                    s.cfg.Broadcaster,
		BlockReceiver:          s.cfg.BlockReceiver,
		BlobReceiver:           s.cfg.BlobReceiver,
		DataColumnReceiver:     s.cfg.DataColumnReceiver,
		MockEth1Votes:          s.cfg.MockEth1Votes,
		Eth1BlockFetcher:       s.cfg.ExecutionChainService,
		PendingDepositsFetcher: s.cfg.PendingDepositFetcher,

@@ -8,7 +8,6 @@ go_library(
        "mock_blocker.go",
        "mock_exec_chain_info_fetcher.go",
        "mock_genesis_timefetcher.go",
        "mock_sidecars.go",
        "mock_stater.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/testutil",
@@ -1,44 +0,0 @@
package testutil

import ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"

// CreateDataColumnSidecar generates a filled dummy data column sidecar
func CreateDataColumnSidecar(index uint64, data []byte) *ethpb.DataColumnSidecar {
	return &ethpb.DataColumnSidecar{
		Index:  index,
		Column: [][]byte{data},
		SignedBlockHeader: &ethpb.SignedBeaconBlockHeader{
			Header: &ethpb.BeaconBlockHeader{
				Slot:          1,
				ProposerIndex: 1,
				ParentRoot:    make([]byte, 32),
				StateRoot:     make([]byte, 32),
				BodyRoot:      make([]byte, 32),
			},
			Signature: make([]byte, 96),
		},
		KzgCommitments:               [][]byte{make([]byte, 48)},
		KzgProofs:                    [][]byte{make([]byte, 48)},
		KzgCommitmentsInclusionProof: [][]byte{make([]byte, 32)},
	}
}

// CreateBlobSidecar generates a filled dummy blob sidecar
func CreateBlobSidecar(index uint64, blob []byte) *ethpb.BlobSidecar {
	return &ethpb.BlobSidecar{
		Index: index,
		Blob:  blob,
		SignedBlockHeader: &ethpb.SignedBeaconBlockHeader{
			Header: &ethpb.BeaconBlockHeader{
				Slot:          1,
				ProposerIndex: 1,
				ParentRoot:    make([]byte, 32),
				StateRoot:     make([]byte, 32),
				BodyRoot:      make([]byte, 32),
			},
			Signature: make([]byte, 96),
		},
		KzgCommitment: make([]byte, 48),
		KzgProof:      make([]byte, 48),
	}
}
@@ -3,6 +3,7 @@ package sync
import (
	"bytes"
	"context"
	"math"
	"slices"
	"sync"
	"time"
@@ -29,14 +30,13 @@ import (
// DataColumnSidecarsParams stores the common parameters needed to
// fetch data column sidecars from peers.
type DataColumnSidecarsParams struct {
	Ctx                     context.Context                     // Context
	Tor                     blockchain.TemporalOracle           // Temporal oracle, useful to get the current slot
	P2P                     prysmP2P.P2P                        // P2P network interface
	RateLimiter             *leakybucket.Collector              // Rate limiter for outgoing requests
	CtxMap                  ContextByteVersions                 // Context map, useful to know if a message is mapped to the correct fork
	Storage                 filesystem.DataColumnStorageReader  // Data columns storage
	NewVerifier             verification.NewDataColumnsVerifier // Data columns verifier to check the conformity of incoming data column sidecars
	DownscorePeerOnRPCFault bool                                // Downscore a peer if it commits an RPC fault. Not responding with sidecars at all is considered a fault.
	Ctx         context.Context                     // Context
	Tor         blockchain.TemporalOracle           // Temporal oracle, useful to get the current slot
	P2P         prysmP2P.P2P                        // P2P network interface
	RateLimiter *leakybucket.Collector              // Rate limiter for outgoing requests
	CtxMap      ContextByteVersions                 // Context map, useful to know if a message is mapped to the correct fork
	Storage     filesystem.DataColumnStorageReader  // Data columns storage
	NewVerifier verification.NewDataColumnsVerifier // Data columns verifier to check the conformity of incoming data column sidecars
}

// FetchDataColumnSidecars retrieves data column sidecars from storage and peers for the given
@@ -64,7 +64,7 @@ func FetchDataColumnSidecars(

	indices := sortedSliceFromMap(indicesMap)
	slotsWithCommitments := make(map[primitives.Slot]bool)
	missingIndicesByRoot := make(map[[fieldparams.RootLength]byte]map[uint64]bool)
	indicesByRootToQuery := make(map[[fieldparams.RootLength]byte]map[uint64]bool)
	indicesByRootStored := make(map[[fieldparams.RootLength]byte]map[uint64]bool)
	result := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn)

@@ -83,7 +83,7 @@ func FetchDataColumnSidecars(
		root := roBlock.Root()

		// Step 1: Get the requested sidecars for this root if available in storage
		requestedColumns, err := tryGetStoredColumns(params.Storage, root, indices)
		requestedColumns, err := tryGetDirectColumns(params.Storage, root, indices)
		if err != nil {
			return nil, errors.Wrapf(err, "try get direct columns for root %#x", root)
		}
@@ -107,7 +107,7 @@ func FetchDataColumnSidecars(
		indicesToQueryMap, indicesStoredMap := categorizeIndices(params.Storage, root, indices)

		if len(indicesToQueryMap) > 0 {
			missingIndicesByRoot[root] = indicesToQueryMap
			indicesByRootToQuery[root] = indicesToQueryMap
		}
		if len(indicesStoredMap) > 0 {
			indicesByRootStored[root] = indicesStoredMap
@@ -115,57 +115,40 @@ func FetchDataColumnSidecars(
	}

	// Early return if no sidecars need to be queried from peers.
	if len(missingIndicesByRoot) == 0 {
	if len(indicesByRootToQuery) == 0 {
		return result, nil
	}

	// Step 3b: Request missing sidecars from peers.
	start, count := time.Now(), computeTotalCount(missingIndicesByRoot)
	fromPeersResult, err := tryRequestingColumnsFromPeers(params, roBlocks, slotsWithCommitments, missingIndicesByRoot)
	start, count := time.Now(), computeTotalCount(indicesByRootToQuery)
	fromPeersResult, err := tryRequestingColumnsFromPeers(params, roBlocks, slotsWithCommitments, indicesByRootToQuery)
	if err != nil {
		return nil, errors.Wrap(err, "request from peers")
	}

	log.WithFields(logrus.Fields{"duration": time.Since(start), "count": count}).Debug("Requested data column sidecars from peers")

	// Step 3c: If needed, try to reconstruct missing sidecars from storage and fetched data.
	fromReconstructionResult, err := tryReconstructFromStorageAndPeers(params.Storage, fromPeersResult, indicesMap, missingIndicesByRoot)
	if err != nil {
		return nil, errors.Wrap(err, "reconstruct from storage and peers")
	for root, verifiedSidecars := range fromPeersResult {
		result[root] = append(result[root], verifiedSidecars...)
	}

	for root, verifiedSidecars := range fromReconstructionResult {
		result[root] = verifiedSidecars
	}

	for root := range fromPeersResult {
		if _, ok := fromReconstructionResult[root]; ok {
			// We already have what we need from peers + reconstruction
			continue
		}

		result[root] = append(result[root], fromPeersResult[root]...)

		storedIndices := indicesByRootStored[root]
		if len(storedIndices) == 0 {
			continue
		}

		storedColumns, err := tryGetStoredColumns(params.Storage, root, sortedSliceFromMap(storedIndices))
	// Step 3c: Load the stored sidecars.
	for root, indicesStored := range indicesByRootStored {
		requestedColumns, err := tryGetDirectColumns(params.Storage, root, sortedSliceFromMap(indicesStored))
		if err != nil {
			return nil, errors.Wrapf(err, "try get direct columns for root %#x", root)
		}

		result[root] = append(result[root], storedColumns...)
		result[root] = append(result[root], requestedColumns...)
	}

	return result, nil
}

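The categorizeIndices call above splits each root's requested columns into those already on disk and those that must be queried from peers. A self-contained sketch of that partition (an illustrative stand-in, with stored playing the role of the storage summary):

// categorize partitions requested column indices by whether the storage
// summary already holds them.
func categorize(requested []uint64, stored map[uint64]bool) (toQuery, inStore map[uint64]bool) {
	toQuery = make(map[uint64]bool)
	inStore = make(map[uint64]bool)
	for _, idx := range requested {
		if stored[idx] {
			inStore[idx] = true
		} else {
			toQuery[idx] = true
		}
	}
	return toQuery, inStore
}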
// tryGetStoredColumns attempts to retrieve all requested data column sidecars directly from storage
// if they are all available. Returns the sidecars if successful, and nil if at least one
// tryGetDirectColumns attempts to retrieve all requested columns directly from storage
// if they are all available. Returns the columns if successful, and nil if at least one
// requested sidecar is not available in the storage.
func tryGetStoredColumns(storage filesystem.DataColumnStorageReader, blockRoot [fieldparams.RootLength]byte, indices []uint64) ([]blocks.VerifiedRODataColumn, error) {
func tryGetDirectColumns(storage filesystem.DataColumnStorageReader, blockRoot [fieldparams.RootLength]byte, indices []uint64) ([]blocks.VerifiedRODataColumn, error) {
	// Check if all requested indices are present in cache
	storedIndices := storage.Summary(blockRoot).Stored()
	allRequestedPresent := true
@@ -251,9 +234,9 @@ func categorizeIndices(storage filesystem.DataColumnStorageReader, blockRoot [fi
// It explores the connected peers to find those that are expected to custody the requested columns
// and returns only when every requested column has either been retrieved or been attempted
// from all possible peers.
// WARNING: This function alters `missingIndicesByRoot` by removing successfully retrieved columns.
// After running this function, the user can check the content of the (modified) `missingIndicesByRoot` map
// to check if some sidecars are still missing.
// Returns a map of block roots to their verified read-only data column sidecars.
// Returns an error if at least one requested column could not be retrieved.
// WARNING: This function alters `missingIndicesByRoot`. The caller should NOT use it after running this function.
func tryRequestingColumnsFromPeers(
	p DataColumnSidecarsParams,
	roBlocks []blocks.ROBlock,
@@ -301,7 +284,8 @@ func tryRequestingColumnsFromPeers(
		}

		// Remove the verified sidecars from the missing indices map and compute the new verified columns by root.
		localVerifiedColumnsByRoot := updateResults(verifiedRoDataColumnSidecars, missingIndicesByRoot)
		newMissingIndicesByRoot, localVerifiedColumnsByRoot := updateResults(verifiedRoDataColumnSidecars, missingIndicesByRoot)
		missingIndicesByRoot = newMissingIndicesByRoot
		for root, verifiedRoDataColumns := range localVerifiedColumnsByRoot {
			verifiedColumnsByRoot[root] = append(verifiedColumnsByRoot[root], verifiedRoDataColumns...)
		}
@@ -313,69 +297,13 @@ func tryRequestingColumnsFromPeers(
		}
	}

	if len(missingIndicesByRoot) > 0 {
		return nil, errors.New("not all requested data column sidecars were retrieved from peers")
	}

	return verifiedColumnsByRoot, nil
}

// tryReconstructFromStorageAndPeers attempts to reconstruct missing data column sidecars
// using the data available in the storage and the data fetched from peers.
// If, for at least one root, the reconstruction is not possible, an error is returned.
func tryReconstructFromStorageAndPeers(
	storage filesystem.DataColumnStorageReader,
	fromPeersByRoot map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn,
	indices map[uint64]bool,
	missingIndicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
) (map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, error) {
	if len(missingIndicesByRoot) == 0 {
		// Nothing to do, return early.
		return nil, nil
	}

	minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()

	start := time.Now()
	result := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, len(missingIndicesByRoot))
	for root := range missingIndicesByRoot {
		// Check if a reconstruction is possible based on what we have from the store and fetched from peers.
		summary := storage.Summary(root)
		storedCount := summary.Count()
		fetchedCount := uint64(len(fromPeersByRoot[root]))

		if storedCount+fetchedCount < minimumColumnsCountToReconstruct {
			return nil, errors.Errorf("cannot reconstruct all needed columns for root %#x. stored: %d, fetched: %d, minimum: %d", root, storedCount, fetchedCount, minimumColumnsCountToReconstruct)
		}

		// Load all we have in the store.
		storedSidecars, err := storage.Get(root, nil)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to get stored sidecars for root %#x", root)
		}

		sidecars := make([]blocks.VerifiedRODataColumn, 0, storedCount+fetchedCount)
		sidecars = append(sidecars, storedSidecars...)
		sidecars = append(sidecars, fromPeersByRoot[root]...)

		// Attempt reconstruction.
		reconstructedSidecars, err := peerdas.ReconstructDataColumnSidecars(sidecars)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to reconstruct data columns for root %#x", root)
		}

		// Select only sidecars we need.
		for _, sidecar := range reconstructedSidecars {
			if indices[sidecar.Index] {
				result[root] = append(result[root], sidecar)
			}
		}
	}

	log.WithFields(logrus.Fields{
		"rootCount": len(missingIndicesByRoot),
		"elapsed":   time.Since(start),
	}).Debug("Reconstructed from storage and peers")

	return result, nil
}

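The feasibility check in this deleted helper leans on the PeerDAS erasure-coding property: columns are a 2x Reed-Solomon extension of the blob data, so any half of the distinct columns is enough to rebuild the full set. A sketch of the threshold test, with the column count and the half-threshold stated as assumptions:

const numberOfColumns = 128 // assumption: standard PeerDAS configuration

// canReconstruct reports whether enough distinct columns are available to
// recover the rest; with a 2x extension the threshold is half of them. This
// plays the role of the peerdas.MinimumColumnCountToReconstruct comparison.
func canReconstruct(storedCount, fetchedCount uint64) bool {
	return storedCount+fetchedCount >= numberOfColumns/2
}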
// selectPeers selects peers to query the sidecars.
// It begins by randomly selecting a peer in `origIndicesByRootByPeer` that has enough bandwidth,
// and assigns to it all its available sidecars. Then, it randomly selects another peer, until
@@ -386,7 +314,7 @@ func selectPeers(
	count int,
	origIndicesByRootByPeer map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool,
) (map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool, error) {
	const randomPeerTimeout = 2 * time.Minute
	const randomPeerTimeout = 30 * time.Second

	// Select peers to query the missing sidecars from.
	indicesByRootByPeer := copyIndicesByRootByPeer(origIndicesByRootByPeer)
@@ -443,14 +371,12 @@ func selectPeers(
}

// updateResults updates the missing indices and verified sidecars maps based on the newly verified sidecars.
// WARNING: This function alters `missingIndicesByRoot` by removing verified sidecars.
// After running this function, the user can check the content of the (modified) `missingIndicesByRoot` map
// to check if some sidecars are still missing.
func updateResults(
	verifiedSidecars []blocks.VerifiedRODataColumn,
	missingIndicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
) map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn {
	origMissingIndicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
) (map[[fieldparams.RootLength]byte]map[uint64]bool, map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn) {
	// Copy the original map to avoid modifying it directly.
	missingIndicesByRoot := copyIndicesByRoot(origMissingIndicesByRoot)
	verifiedSidecarsByRoot := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn)
	for _, verifiedSidecar := range verifiedSidecars {
		blockRoot := verifiedSidecar.BlockRoot()
@@ -467,7 +393,7 @@ func updateResults(
		}
	}

	return verifiedSidecarsByRoot
	return missingIndicesByRoot, verifiedSidecarsByRoot
}

// fetchDataColumnSidecarsFromPeers retrieves data column sidecars from peers.
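The rewritten updateResults deep-copies origMissingIndicesByRoot before deleting entries from it, so the caller's map stays intact. A standalone sketch of that copy step for a nested map-of-sets (an illustrative stand-in for copyIndicesByRoot):

// copyNested deep-copies a map of sets so later deletions leave the
// caller's original untouched.
func copyNested(orig map[string]map[uint64]bool) map[string]map[uint64]bool {
	cp := make(map[string]map[uint64]bool, len(orig))
	for key, set := range orig {
		inner := make(map[uint64]bool, len(set))
		for idx, v := range set {
			inner[idx] = v
		}
		cp[key] = inner
	}
	return cp
}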
@@ -857,13 +783,15 @@ func randomPeer(
|
||||
for ctx.Err() == nil {
|
||||
nonRateLimitedPeers := make([]goPeer.ID, 0, len(indicesByRootByPeer))
|
||||
for peer := range indicesByRootByPeer {
|
||||
if rateLimiter == nil || rateLimiter.Remaining(peer.String()) >= int64(count) {
|
||||
remaining := int64(math.MaxInt64)
|
||||
if rateLimiter != nil {
|
||||
remaining = rateLimiter.Remaining(peer.String())
|
||||
}
|
||||
if remaining >= int64(count) {
|
||||
nonRateLimitedPeers = append(nonRateLimitedPeers, peer)
|
||||
}
|
||||
}
|
||||
|
||||
slices.Sort(nonRateLimitedPeers)
|
||||
|
||||
if len(nonRateLimitedPeers) == 0 {
|
||||
log.WithFields(logrus.Fields{
|
||||
"peerCount": peerCount,

@@ -3,12 +3,10 @@ package sync
import (
    "context"
    "fmt"
    "math/rand"
    "testing"
    "time"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
@@ -21,6 +19,7 @@ import (
    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
    "github.com/OffchainLabs/prysm/v6/crypto/rand"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/testing/assert"
    "github.com/OffchainLabs/prysm/v6/testing/require"
@@ -37,7 +36,6 @@ func TestFetchDataColumnSidecars(t *testing.T) {
// Slot 2: No commitment
// Slot 3: All sidecars are saved except the needed ones
// Slot 4: Some sidecars are in the storage, others have to be retrieved from peers.
// Slot 5: Some sidecars are in the storage, others have to be retrieved from peers, but peers do not deliver all requested sidecars.

params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
@@ -95,27 +93,6 @@ func TestFetchDataColumnSidecars(t *testing.T) {
err = storage.Save(toStore4)
require.NoError(t, err)

// Block 5
minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
block5, _, verifiedSidecars5 := util.GenerateTestFuluBlockWithSidecars(t, blobCount, util.WithSlot(5))
root5 := block5.Root()

toStoreCount := minimumColumnsCountToReconstruct - 1
toStore5 := make([]blocks.VerifiedRODataColumn, 0, toStoreCount)

for i := uint64(0); uint64(len(toStore5)) < toStoreCount; i++ {
    sidecar := verifiedSidecars5[minimumColumnsCountToReconstruct+i]
    if sidecar.Index == 81 {
        continue
    }

    toStore5 = append(toStore5, sidecar)
}

err = storage.Save(toStore5)
require.NoError(t, err)

// Custody columns with this private key and 4-cgc: 31, 81, 97, 105
privateKeyBytes := [32]byte{1}
privateKey, err := crypto.UnmarshalSecp256k1PrivateKey(privateKeyBytes[:])
require.NoError(t, err)
@@ -128,12 +105,12 @@ func TestFetchDataColumnSidecars(t *testing.T) {
p2p.Connect(other)

p2p.Peers().SetChainState(other.PeerID(), &ethpb.StatusV2{
    HeadSlot: 5,
    HeadSlot: 4,
})

expectedRequest := &ethpb.DataColumnSidecarsByRangeRequest{
    StartSlot: 4,
    Count:     2,
    Count:     1,
    Columns:   []uint64{31, 81},
}

@@ -161,9 +138,6 @@ func TestFetchDataColumnSidecars(t *testing.T) {
err = WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), verifiedSidecars4[81].DataColumnSidecar)
assert.NoError(t, err)

err = WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), verifiedSidecars5[81].DataColumnSidecar)
assert.NoError(t, err)

err = stream.CloseWrite()
assert.NoError(t, err)
})
@@ -183,10 +157,9 @@ func TestFetchDataColumnSidecars(t *testing.T) {
// no root2 (no commitments in this block)
root3: {verifiedSidecars3[31], verifiedSidecars3[81], verifiedSidecars3[106]},
root4: {verifiedSidecars4[31], verifiedSidecars4[81], verifiedSidecars4[106]},
root5: {verifiedSidecars5[31], verifiedSidecars5[81], verifiedSidecars5[106]},
}

blocks := []blocks.ROBlock{block1, block2, block3, block4, block5}
blocks := []blocks.ROBlock{block1, block2, block3, block4}
actual, err := FetchDataColumnSidecars(params, blocks, indices)
require.NoError(t, err)

@@ -229,7 +202,7 @@ func TestCategorizeIndices(t *testing.T) {
func TestSelectPeers(t *testing.T) {
const (
    count = 3
    seed  = 42
    seed  = 46
)

params := DataColumnSidecarsParams{
@@ -237,7 +210,7 @@ func TestSelectPeers(t *testing.T) {
RateLimiter: leakybucket.NewCollector(1., 10, time.Second, false /* deleteEmptyBuckets */),
}

randomSource := rand.New(rand.NewSource(seed))
randomSource := rand.NewGenerator()

indicesByRootByPeer := map[peer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool{
    "peer1": {
@@ -252,7 +225,19 @@ func TestSelectPeers(t *testing.T) {
    },
}

expected := map[peer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool{
expected_1 := map[peer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool{
    "peer1": {
        {1}: {12: true, 13: true},
        {2}: {13: true, 14: true, 15: true},
        {3}: {14: true, 15: true},
    },
    "peer2": {
        {1}: {14: true},
        {3}: {16: true},
    },
}

expected_2 := map[peer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool{
    "peer1": {
        {1}: {12: true},
        {3}: {15: true},
@@ -266,6 +251,11 @@ func TestSelectPeers(t *testing.T) {

actual, err := selectPeers(params, randomSource, count, indicesByRootByPeer)

expected := expected_1
if len(actual["peer1"]) == 2 {
    expected = expected_2
}

require.NoError(t, err)
require.Equal(t, len(expected), len(actual))
for peerID := range expected {
@@ -301,8 +291,8 @@ func TestUpdateResults(t *testing.T) {
verifiedSidecars[2].BlockRoot(): {verifiedSidecars[2], verifiedSidecars[3]},
}

actualVerifiedSidecarsByRoot := updateResults(verifiedSidecars, missingIndicesByRoot)
require.DeepEqual(t, expectedMissingIndicesByRoot, missingIndicesByRoot)
actualMissingIndicesByRoot, actualVerifiedSidecarsByRoot := updateResults(verifiedSidecars, missingIndicesByRoot)
require.DeepEqual(t, expectedMissingIndicesByRoot, actualMissingIndicesByRoot)
require.DeepEqual(t, expectedVerifiedSidecarsByRoot, actualVerifiedSidecarsByRoot)
}

@@ -867,8 +857,8 @@ func TestComputeIndicesByRootByPeer(t *testing.T) {

func TestRandomPeer(t *testing.T) {
// Fixed seed.
const seed = 43
randomSource := rand.New(rand.NewSource(seed))
const seed = 42
randomSource := rand.NewGenerator()

t.Run("no peers", func(t *testing.T) {
    pid, err := randomPeer(t.Context(), randomSource, leakybucket.NewCollector(4, 8, time.Second, false /* deleteEmptyBuckets */), 1, nil)
@@ -899,11 +889,7 @@ func TestRandomPeer(t *testing.T) {

pid, err := randomPeer(t.Context(), randomSource, collector, count, indicesByRootByPeer)
require.NoError(t, err)
require.Equal(t, peer1, pid)

pid, err = randomPeer(t.Context(), randomSource, collector, count, indicesByRootByPeer)
require.NoError(t, err)
require.Equal(t, peer2, pid)
require.Equal(t, true, map[peer.ID]bool{peer1: true, peer2: true, peer3: true}[pid])
})
}

@@ -34,7 +34,6 @@ go_library(
"//beacon-chain/verification:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
@@ -109,9 +108,7 @@ go_test(
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
"@com_github_libp2p_go_libp2p//:go_default_library",
"@com_github_libp2p_go_libp2p//core:go_default_library",
"@com_github_libp2p_go_libp2p//core/crypto:go_default_library",
"@com_github_libp2p_go_libp2p//core/network:go_default_library",
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_paulbellamy_ratecounter//:go_default_library",

@@ -22,7 +22,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/crypto/rand"
@@ -210,8 +209,6 @@ func (s *Service) Start() {

// fetchOriginSidecars fetches origin sidecars
func (s *Service) fetchOriginSidecars(peers []peer.ID) error {
const delay = 10 * time.Second // The delay between each attempt to fetch origin data column sidecars

blockRoot, err := s.cfg.DB.OriginCheckpointBlockRoot(s.ctx)
if errors.Is(err, db.ErrNotFoundOriginBlockRoot) {
    return nil
@@ -237,7 +234,7 @@ func (s *Service) fetchOriginSidecars(peers []peer.ID) error {
blockVersion := roBlock.Version()

if blockVersion >= version.Fulu {
    if err := s.fetchOriginColumns(roBlock, delay); err != nil {
    if err := s.fetchOriginColumns(peers, roBlock); err != nil {
        return errors.Wrap(err, "fetch origin columns")
    }
    return nil
@@ -394,11 +391,7 @@ func (s *Service) fetchOriginBlobs(pids []peer.ID, rob blocks.ROBlock) error {
return fmt.Errorf("no connected peer able to provide blobs for checkpoint sync block %#x", r)
}

func (s *Service) fetchOriginColumns(roBlock blocks.ROBlock, delay time.Duration) error {
const (
    errorMessage     = "Failed to fetch origin data column sidecars"
    warningIteration = 10
)
func (s *Service) fetchOriginColumns(pids []peer.ID, roBlock blocks.ROBlock) error {
samplesPerSlot := params.BeaconConfig().SamplesPerSlot

// Return early if the origin block has no blob commitments.
@@ -427,40 +420,21 @@ func (s *Service) fetchOriginColumns(roBlock blocks.ROBlock, delay time.Duration
root := roBlock.Root()

params := sync.DataColumnSidecarsParams{
    Ctx:                     s.ctx,
    Tor:                     s.clock,
    P2P:                     s.cfg.P2P,
    CtxMap:                  s.ctxMap,
    Storage:                 s.cfg.DataColumnStorage,
    NewVerifier:             s.newDataColumnsVerifier,
    DownscorePeerOnRPCFault: true,
    Ctx:         s.ctx,
    Tor:         s.clock,
    P2P:         s.cfg.P2P,
    CtxMap:      s.ctxMap,
    Storage:     s.cfg.DataColumnStorage,
    NewVerifier: s.newDataColumnsVerifier,
}

var verifiedRoDataColumnsByRoot map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn
for attempt := uint64(0); ; attempt++ {
    verifiedRoDataColumnsByRoot, err = sync.FetchDataColumnSidecars(params, []blocks.ROBlock{roBlock}, info.CustodyColumns)
    if err == nil {
        break
    }

    log := log.WithError(err).WithFields(logrus.Fields{
        "attempt": attempt,
        "delay":   delay,
    })

    if attempt%warningIteration == 0 && attempt > 0 {
        log.Warning(errorMessage)
        time.Sleep(delay)

        continue
    }

    log.Debug(errorMessage)
    time.Sleep(delay)
verfifiedRoDataColumnsByRoot, err := sync.FetchDataColumnSidecars(params, []blocks.ROBlock{roBlock}, info.CustodyColumns)
if err != nil {
    return errors.Wrap(err, "fetch data column sidecars")
}

// Save origin data columns to disk.
verifiedRoDataColumnsSidecars, ok := verifiedRoDataColumnsByRoot[root]
verifiedRoDataColumnsSidecars, ok := verfifiedRoDataColumnsByRoot[root]
if !ok {
return fmt.Errorf("cannot extract origins data column sidecars for block root %#x - should never happen", root)
|
||||
}
@@ -473,7 +447,7 @@ func (s *Service) fetchOriginColumns(roBlock blocks.ROBlock, delay time.Duration
"blockRoot":   fmt.Sprintf("%#x", roBlock.Root()),
"blobCount":   len(commitments),
"columnCount": len(verifiedRoDataColumnsSidecars),
}).Info("Successfully downloaded data column sidecars for checkpoint sync block")
}).Info("Successfully downloaded data columns for checkpoint sync block")

return nil
}
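
The retry loop removed above follows a common shape: retry forever with a fixed delay, log most failures at Debug, and escalate to Warning periodically. A self-contained sketch of that pattern, with fetch and warnEvery as placeholders and a logrus-style package logger assumed:

    func retryForever(fetch func() error, delay time.Duration, warnEvery uint64) {
        for attempt := uint64(0); ; attempt++ {
            err := fetch()
            if err == nil {
                return
            }
            if attempt > 0 && attempt%warnEvery == 0 {
                log.WithError(err).WithField("attempt", attempt).Warning("Failed to fetch origin data column sidecars")
            } else {
                log.WithError(err).WithField("attempt", attempt).Debug("Failed to fetch origin data column sidecars")
            }
            time.Sleep(delay)
        }
    }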

@@ -2,7 +2,6 @@ package initialsync

import (
    "context"
    "fmt"
    "sync"
    "testing"
    "time"
@@ -14,12 +13,8 @@ import (
    "github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/db/kv"
    dbtest "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
    p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
    testp2p "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
    prysmSync "github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
    "github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
    fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
@@ -27,14 +22,10 @@ import (
    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/testing/assert"
    "github.com/OffchainLabs/prysm/v6/testing/require"
    "github.com/OffchainLabs/prysm/v6/testing/util"
    "github.com/OffchainLabs/prysm/v6/time/slots"
    "github.com/libp2p/go-libp2p"
    "github.com/libp2p/go-libp2p/core/crypto"
    "github.com/libp2p/go-libp2p/core/network"
    "github.com/libp2p/go-libp2p/core/peer"
    "github.com/paulbellamy/ratecounter"
    logTest "github.com/sirupsen/logrus/hooks/test"
@@ -672,147 +663,3 @@ func TestFetchOriginSidecars(t *testing.T) {
    }
})
}

func TestFetchOriginColumns(t *testing.T) {
// Load the trusted setup.
err := kzg.Start()
require.NoError(t, err)

// Setup test environment
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.FuluForkEpoch = 0
params.OverrideBeaconConfig(cfg)

const (
    delay     = 0
    blobCount = 1
)

t.Run("block has no commitments", func(t *testing.T) {
    service := new(Service)

    // Create a block with no blob commitments
    block := util.NewBeaconBlockFulu()
    signedBlock, err := blocks.NewSignedBeaconBlock(block)
    require.NoError(t, err)
    roBlock, err := blocks.NewROBlock(signedBlock)
    require.NoError(t, err)

    err = service.fetchOriginColumns(roBlock, delay)
    require.NoError(t, err)
})

t.Run("FetchDataColumnSidecars succeeds immediately", func(t *testing.T) {
    storage := filesystem.NewEphemeralDataColumnStorage(t)
    p2p := p2ptest.NewTestP2P(t)

    service := &Service{
        cfg: &Config{
            P2P:               p2p,
            DataColumnStorage: storage,
        },
    }

    // Create a block with blob commitments and sidecars
    roBlock, _, verifiedSidecars := util.GenerateTestFuluBlockWithSidecars(t, blobCount)

    // Store all sidecars in advance so FetchDataColumnSidecars succeeds immediately
    err := storage.Save(verifiedSidecars)
    require.NoError(t, err)

    err = service.fetchOriginColumns(roBlock, delay)
    require.NoError(t, err)
})

t.Run("first attempt to FetchDataColumnSidecars fails but second attempt succeeds", func(t *testing.T) {
    numberOfCustodyGroups := params.BeaconConfig().NumberOfCustodyGroups
    storage := filesystem.NewEphemeralDataColumnStorage(t)

    // Custody columns with this private key and 4-cgc: 31, 81, 97, 105
    privateKeyBytes := [32]byte{1}
    privateKey, err := crypto.UnmarshalSecp256k1PrivateKey(privateKeyBytes[:])
    require.NoError(t, err)

    protocol := fmt.Sprintf("%s/ssz_snappy", p2p.RPCDataColumnSidecarsByRangeTopicV1)

    p2p, other := testp2p.NewTestP2P(t), testp2p.NewTestP2P(t, libp2p.Identity(privateKey))
    p2p.Peers().SetConnectionState(other.PeerID(), peers.Connected)
    p2p.Connect(other)

    p2p.Peers().SetChainState(other.PeerID(), &ethpb.StatusV2{
        HeadSlot: 5,
    })

    other.ENR().Set(peerdas.Cgc(numberOfCustodyGroups))
    p2p.Peers().UpdateENR(other.ENR(), other.PeerID())

    expectedRequest := &ethpb.DataColumnSidecarsByRangeRequest{
        StartSlot: 0,
        Count:     1,
        Columns:   []uint64{1, 17, 19, 42, 75, 87, 102, 117},
    }

    clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})

    gs := startup.NewClockSynchronizer()
    err = gs.SetClock(startup.NewClock(time.Unix(4113849600, 0), [fieldparams.RootLength]byte{}))
    require.NoError(t, err)

    waiter := verification.NewInitializerWaiter(gs, nil, nil)
    initializer, err := waiter.WaitForInitializer(t.Context())
    require.NoError(t, err)

    newDataColumnsVerifier := newDataColumnsVerifierFromInitializer(initializer)

    // Create a block with blob commitments and sidecars
    roBlock, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, blobCount)

    ctxMap, err := prysmSync.ContextByteVersionsForValRoot(params.BeaconConfig().GenesisValidatorsRoot)
    require.NoError(t, err)

    service := &Service{
        ctx:                    t.Context(),
        clock:                  clock,
        newDataColumnsVerifier: newDataColumnsVerifier,
        cfg: &Config{
            P2P:               p2p,
            DataColumnStorage: storage,
        },
        ctxMap: ctxMap,
    }

    // Do not respond any sidecar on the first attempt, and respond everything requested on the second one.
    firstAttempt := true
    other.SetStreamHandler(protocol, func(stream network.Stream) {
        actualRequest := new(ethpb.DataColumnSidecarsByRangeRequest)
        err := other.Encoding().DecodeWithMaxLength(stream, actualRequest)
        assert.NoError(t, err)
        assert.DeepEqual(t, expectedRequest, actualRequest)

        if firstAttempt {
            firstAttempt = false
            err = stream.CloseWrite()
            assert.NoError(t, err)
            return
        }

        for _, column := range actualRequest.Columns {
            err = prysmSync.WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), verifiedRoSidecars[column].DataColumnSidecar)
            assert.NoError(t, err)
        }

        err = stream.CloseWrite()
        assert.NoError(t, err)
    })

    err = service.fetchOriginColumns(roBlock, delay)
    require.NoError(t, err)

    // Check all corresponding sidecars are saved in the store.
    summary := storage.Summary(roBlock.Root())
    for _, index := range expectedRequest.Columns {
        require.Equal(t, true, summary.HasIndex(index))
    }
})
}

@@ -453,10 +453,6 @@ func SendDataColumnSidecarsByRangeRequest(
// Send the request.
stream, err := p.P2P.Send(p.Ctx, request, topic, pid)
if err != nil {
    if p.DownscorePeerOnRPCFault {
        downscorePeer(p.P2P, pid, "cannotSendDataColumnSidecarsByRangeRequest")
    }

    return nil, errors.Wrap(err, "p2p send")
}
defer closeStream(stream, log)
@@ -471,10 +467,6 @@ func SendDataColumnSidecarsByRangeRequest(

validatorSlotWithinBounds, err := isSidecarSlotWithinBounds(request)
if err != nil {
    if p.DownscorePeerOnRPCFault {
        downscorePeer(p.P2P, pid, "servedSidecarSlotOutOfBounds")
    }

    return nil, errors.Wrap(err, "is sidecar slot within bounds")
}

@@ -484,17 +476,9 @@ func SendDataColumnSidecarsByRangeRequest(
    isSidecarIndexRequested(request),
)
if errors.Is(err, io.EOF) {
    if p.DownscorePeerOnRPCFault && len(roDataColumns) == 0 {
        downscorePeer(p.P2P, pid, "noReturnedSidecar")
    }

    return roDataColumns, nil
}
if err != nil {
    if p.DownscorePeerOnRPCFault {
        downscorePeer(p.P2P, pid, "readChunkedDataColumnSidecarError")
    }

    return nil, errors.Wrap(err, "read chunked data column sidecar")
}

@@ -507,10 +491,6 @@ func SendDataColumnSidecarsByRangeRequest(

// All requested sidecars were delivered by the peer. Expecting EOF.
if _, err := readChunkedDataColumnSidecar(stream, p.P2P, p.CtxMap); !errors.Is(err, io.EOF) {
    if p.DownscorePeerOnRPCFault {
        downscorePeer(p.P2P, pid, "tooManyResponseDataColumnSidecars")
    }

    return nil, errors.Wrapf(errMaxResponseDataColumnSidecarsExceeded, "requestedCount=%d", totalCount)
}

@@ -548,8 +528,7 @@ func isSidecarIndexRequested(request *ethpb.DataColumnSidecarsByRangeRequest) Da
return func(sidecar blocks.RODataColumn) error {
    columnIndex := sidecar.Index
    if !requestedIndices[columnIndex] {
        requested := sortedSliceFromMap(requestedIndices)
        return errors.Errorf("data column sidecar index %d returned by the peer but not found in requested indices %v", columnIndex, requested)
        return errors.Errorf("data column sidecar index %d not found in requested indices", columnIndex)
    }

    return nil
@@ -587,10 +566,6 @@ func SendDataColumnSidecarsByRootRequest(p DataColumnSidecarsParams, peer goPeer
// Send the request to the peer.
stream, err := p.P2P.Send(p.Ctx, identifiers, topic, peer)
if err != nil {
    if p.DownscorePeerOnRPCFault {
        downscorePeer(p.P2P, peer, "cannotSendDataColumnSidecarsByRootRequest")
    }

    return nil, errors.Wrap(err, "p2p api send")
}
defer closeStream(stream, log)
@@ -602,17 +577,9 @@ func SendDataColumnSidecarsByRootRequest(p DataColumnSidecarsParams, peer goPeer
for range count {
    roDataColumn, err := readChunkedDataColumnSidecar(stream, p.P2P, p.CtxMap, isSidecarIndexRootRequested(identifiers))
    if errors.Is(err, io.EOF) {
        if p.DownscorePeerOnRPCFault && len(roDataColumns) == 0 {
            downscorePeer(p.P2P, peer, "noReturnedSidecar")
        }

        return roDataColumns, nil
    }
    if err != nil {
        if p.DownscorePeerOnRPCFault {
            downscorePeer(p.P2P, peer, "readChunkedDataColumnSidecarError")
        }

        return nil, errors.Wrap(err, "read chunked data column sidecar")
    }

@@ -625,10 +592,6 @@ func SendDataColumnSidecarsByRootRequest(p DataColumnSidecarsParams, peer goPeer

// All requested sidecars were delivered by the peer. Expecting EOF.
if _, err := readChunkedDataColumnSidecar(stream, p.P2P, p.CtxMap); !errors.Is(err, io.EOF) {
    if p.DownscorePeerOnRPCFault {
        downscorePeer(p.P2P, peer, "tooManyResponseDataColumnSidecars")
    }

    return nil, errors.Wrapf(errMaxResponseDataColumnSidecarsExceeded, "requestedCount=%d", count)
}

@@ -726,13 +689,3 @@ func readChunkedDataColumnSidecar(

return &roDataColumn, nil
}

func downscorePeer(p2p p2p.P2P, peerID peer.ID, reason string, fields ...logrus.Fields) {
log := log
for _, field := range fields {
    log = log.WithFields(field)
}

newScore := p2p.Peers().Scorers().BadResponsesScorer().Increment(peerID)
log.WithFields(logrus.Fields{"peerID": peerID, "reason": reason, "newScore": newScore}).Debug("Downscore peer")
}
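
A hypothetical call site for downscorePeer above; doRequest, pid, and root are placeholders, while the reason string and the optional logrus.Fields variadic match the signature shown:

    if err := doRequest(); err != nil {
        // Increment the peer's bad-responses score once per RPC fault,
        // attaching structured context to the debug log.
        downscorePeer(p.P2P, pid, "cannotSendDataColumnSidecarsByRangeRequest",
            logrus.Fields{"blockRoot": fmt.Sprintf("%#x", root)})
    }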

@@ -230,7 +230,6 @@ func (s *Service) registerSubscribers(epoch primitives.Epoch, digest [4]byte) {
handle:           s.dataColumnSubscriber,
digest:           digest,
getSubnetsToJoin: s.dataColumnSubnetIndices,
// TODO: Should we find peers always? When validators are managed? When validators are managed AND when we are going to propose a block?
})
}
}

@@ -10,7 +10,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/operation"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v6/config/features"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
@@ -49,7 +48,7 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
return pubsub.ValidationReject, p2p.ErrInvalidTopic
}

// Decode the message.
// Decode the message, reject if it fails.
m, err := s.decodePubsubMessage(msg)
if err != nil {
    log.WithError(err).Error("Failed to decode message")
@@ -69,20 +68,6 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
return pubsub.ValidationReject, errors.Wrap(err, "roDataColumn conversion failure")
}

// Voluntary ignore messages (for debugging purposes).
dataColumnsIgnoreSlotMultiple := features.Get().DataColumnsIgnoreSlotMultiple
blockSlot := uint64(roDataColumn.SignedBlockHeader.Header.Slot)

if dataColumnsIgnoreSlotMultiple != 0 && blockSlot%dataColumnsIgnoreSlotMultiple == 0 {
    log.WithFields(logrus.Fields{
        "slot":        blockSlot,
        "columnIndex": roDataColumn.Index,
        "blockRoot":   fmt.Sprintf("%#x", roDataColumn.BlockRoot()),
    }).Warning("Voluntary ignore data column sidecar gossip")

    return pubsub.ValidationIgnore, err
}

// Compute a batch of only one data column sidecar.
roDataColumns := []blocks.RODataColumn{roDataColumn}

@@ -1,3 +0,0 @@
### Added

- Fulu block proposal changes for beacon api and gRPC.
@@ -1,4 +0,0 @@
### Added
- In FetchDataColumnSidecars, after retrieving sidecars from peers, if some sidecars are still missing for a given root and a reconstruction is possible (combining sidecars already retrieved from peers with sidecars in the storage), reconstruct the missing sidecars instead of trying to fetch them from peers.

@@ -1,3 +0,0 @@
### Added
- Warm data columns storage cache at start.
- Add `--data-column-path` flag.
@@ -1,2 +0,0 @@
### Added
- Retry to fetch origin data column sidecars when starting from a checkpoint.
@@ -1,2 +0,0 @@
### Fixed
- Fix bug where arguments of fillInForkChoiceMissingBlocks were incorrectly placed
@@ -1,2 +0,0 @@
### Ignored
- Fix error message.
@@ -1,2 +0,0 @@
### Changed
- Start from justified checkpoint by default.
@@ -1,3 +0,0 @@
### Changed

- Update gohashtree to v0.0.5-beta.
@@ -1,3 +0,0 @@
### Ignored

- Update Github bug template.
@@ -1,3 +0,0 @@
### Changed

- Changed old atomic functions to new atomic.Int for safer and clearer code.

changelog/satushh-dont-sync-from-genesis-bydefault.md (new file)
@@ -0,0 +1,3 @@
### Added
- A new flag to ensure you have to specify whether to sync from genesis or from a checkpoint when starting with an empty DB.
- Addressing this issue: https://github.com/OffchainLabs/prysm/issues/13020
@@ -1,3 +0,0 @@
### Added

- Initialize package for SSZ Query Language.
@@ -1,3 +0,0 @@
### Changed

- Updated consensus spec from v1.6.0-alpha.4 to v1.6.0-alpha.5 with adjusted minimal config parameters
@@ -216,7 +216,6 @@ var (
DataColumnBatchLimit = &cli.IntFlag{
    Name:  "data-column-batch-limit",
    Usage: "The amount of data columns the local peer is bounded to request and respond to in a batch.",
    // TODO: determine a good default value for this flag.
    Value: 4096,
}
// DataColumnBatchLimitBurstFactor specifies the factor by which data column batch size may increase.
@@ -272,6 +271,13 @@ var (
    "If such a sync is not possible, the node will treat it as a critical and irrecoverable failure",
    Value: "",
}
// SyncFromGenesis enables syncing from genesis when starting with an empty database.
SyncFromGenesis = &cli.BoolFlag{
    Name: "sync-from-genesis",
    Usage: "Explicitly enables syncing from genesis when starting with an empty database. " +
        "Alternately you can checkpoint sync instead with " +
        "--checkpoint-sync-url, --checkpoint-state, or --checkpoint-block flags.",
}
// MinPeersPerSubnet defines a flag to set the minimum number of peers that a node will attempt to peer with for a subnet.
MinPeersPerSubnet = &cli.Uint64Flag{
    Name: "minimum-peers-per-subnet",

@@ -65,11 +65,11 @@ var appFlags = []cli.Flag{
flags.SlotsPerArchivedPoint,
flags.DisableDebugRPCEndpoints,
flags.SubscribeToAllSubnets,
flags.SubscribeAllDataSubnets,
flags.HistoricalSlasherNode,
flags.ChainID,
flags.NetworkID,
flags.WeakSubjectivityCheckpoint,
flags.SyncFromGenesis,
flags.Eth1HeaderReqLimit,
flags.MinPeersPerSubnet,
flags.MaxConcurrentDials,
@@ -87,6 +87,7 @@ var appFlags = []cli.Flag{
flags.BeaconDBPruning,
flags.PrunerRetentionEpochs,
flags.EnableBuilderSSZ,
flags.SubscribeAllDataSubnets,
cmd.MinimalConfigFlag,
cmd.E2EConfigFlag,
cmd.RPCMaxPageSizeFlag,
@@ -144,7 +145,6 @@ var appFlags = []cli.Flag{
flags.SlasherFlag,
flags.JwtId,
storage.BlobStoragePathFlag,
storage.DataColumnStoragePathFlag,
storage.BlobRetentionEpochFlag,
storage.BlobStorageLayout,
bflags.EnableExperimentalBackfill,

@@ -61,12 +61,3 @@ func TestConfigureBlobRetentionEpoch(t *testing.T) {
_, err = blobRetentionEpoch(cliCtx)
require.ErrorIs(t, err, errInvalidBlobRetentionEpochs)
}
func TestDataColumnStoragePath_FlagSpecified(t *testing.T) {
app := cli.App{}
set := flag.NewFlagSet("test", 0)
set.String(DataColumnStoragePathFlag.Name, "/blah/blah", DataColumnStoragePathFlag.Usage)
cliCtx := cli.NewContext(&app, set, nil)
storagePath := dataColumnStoragePath(cliCtx)

assert.Equal(t, "/blah/blah", storagePath)
}

@@ -127,7 +127,6 @@ var appHelpFlagGroups = []flagGroup{
storage.BlobRetentionEpochFlag,
storage.BlobStorageLayout,
storage.BlobStoragePathFlag,
storage.DataColumnStoragePathFlag,
},
},
{ // Flags relevant to configuring local block production or external builders such as mev-boost.
@@ -149,6 +148,7 @@ var appHelpFlagGroups = []flagGroup{
checkpoint.BlockPath,
checkpoint.RemoteURL,
checkpoint.StatePath,
flags.SyncFromGenesis,
flags.WeakSubjectivityCheckpoint,
genesis.BeaconAPIURL,
genesis.StatePath,

@@ -85,12 +85,6 @@ type Flags struct {
// changed on disk. This feature is for advanced use cases only.
KeystoreImportDebounceInterval time.Duration

// DataColumnsWithholdCount specifies the number of data columns that should be withheld when proposing a block.
DataColumnsWithholdCount uint64

// DataColumnsIgnoreSlotMultiple specifies the multiple of slot number where data columns should be ignored.
DataColumnsIgnoreSlotMultiple uint64

// AggregateIntervals specifies the time durations at which we aggregate attestations preparing for forkchoice.
AggregateIntervals [3]time.Duration

@@ -286,16 +280,6 @@ func ConfigureBeaconChain(ctx *cli.Context) error {
cfg.BlacklistedRoots = parseBlacklistedRoots(ctx.StringSlice(blacklistRoots.Name))
}

if ctx.IsSet(DataColumnsWithholdCount.Name) {
    logEnabled(DataColumnsWithholdCount)
    cfg.DataColumnsWithholdCount = ctx.Uint64(DataColumnsWithholdCount.Name)
}

if ctx.IsSet(DataColumnsIgnoreSlotMultiple.Name) {
    logEnabled(DataColumnsIgnoreSlotMultiple)
    cfg.DataColumnsIgnoreSlotMultiple = ctx.Uint64(DataColumnsIgnoreSlotMultiple.Name)
}

cfg.AggregateIntervals = [3]time.Duration{aggregateFirstInterval.Value, aggregateSecondInterval.Value, aggregateThirdInterval.Value}
Init(cfg)
return nil

@@ -172,20 +172,6 @@ var (
Name:  "enable-experimental-attestation-pool",
Usage: "Enables an experimental attestation pool design.",
}
// DataColumnsWithholdCount is a flag for withholding data columns when proposing a block.
DataColumnsWithholdCount = &cli.Uint64Flag{
    Name:   "data-columns-withhold-count",
    Usage:  "Number of columns to withhold when proposing a block. DO NOT USE IN PRODUCTION.",
    Value:  0,
    Hidden: true,
}
// DataColumnsIgnoreSlotMultiple is a flag for ignoring data columns for slots that are a multiple of the given value.
DataColumnsIgnoreSlotMultiple = &cli.Uint64Flag{
    Name:   "data-columns-ignore-slot-multiple",
    Usage:  "Ignore all data columns for slots that are a multiple of this value. DO NOT USE IN PRODUCTION.",
    Value:  0,
    Hidden: true,
}
// forceHeadFlag is a flag to force the head of the beacon chain to a specific block.
forceHeadFlag = &cli.StringFlag{
    Name: "sync-from",
@@ -269,8 +255,6 @@ var BeaconChainFlags = combinedFlags([]cli.Flag{
DisableQUIC,
EnableDiscoveryReboot,
enableExperimentalAttestationPool,
DataColumnsWithholdCount,
DataColumnsIgnoreSlotMultiple,
forceHeadFlag,
blacklistRoots,
}, deprecatedBeaconFlags, deprecatedFlags, upcomingDeprecation)

@@ -26,13 +26,13 @@ const (
SyncCommitteeAggregationBytesLength = 1 // SyncCommitteeAggregationBytesLength defines the sync committee aggregate bytes.
SyncAggregateSyncCommitteeBytesLength = 4 // SyncAggregateSyncCommitteeBytesLength defines the length of sync committee bytes in a sync aggregate.
MaxWithdrawalsPerPayload = 4 // MaxWithdrawalsPerPayloadLength defines the maximum number of withdrawals that can be included in a payload.
MaxBlobCommitmentsPerBlock = 4096 // MaxBlobCommitmentsPerBlock defines the theoretical limit of blobs that can be included in a block.
LogMaxBlobCommitments = 12 // Log_2 of MaxBlobCommitmentsPerBlock
MaxBlobCommitmentsPerBlock = 32 // MaxBlobCommitmentsPerBlock defines the theoretical limit of blobs that can be included in a block.
LogMaxBlobCommitments = 5 // Log_2 of MaxBlobCommitmentsPerBlock
BlobLength = 131072 // BlobLength defines the byte length of a blob.
BlobSize = 131072 // defined to match blob.size in bazel ssz codegen
BlobSidecarSize = 131928 // defined to match blob sidecar size in bazel ssz codegen
KzgCommitmentSize = 48 // KzgCommitmentSize defines the byte length of a KZG commitment.
KzgCommitmentInclusionProofDepth = 17 // Merkle proof depth for blob_kzg_commitments list item
KzgCommitmentInclusionProofDepth = 10 // Merkle proof depth for blob_kzg_commitments list item
ExecutionBranchDepth = 4 // ExecutionBranchDepth defines the number of leaves in a merkle proof of the execution payload header.
SyncCommitteeBranchDepth = 5 // SyncCommitteeBranchDepth defines the number of leaves in a merkle proof of a sync committee.
SyncCommitteeBranchDepthElectra = 6 // SyncCommitteeBranchDepthElectra defines the number of leaves in a merkle proof of a sync committee.

@@ -24,14 +24,8 @@ import (
// These are variables that we don't use in Prysm (e.g., future hardforks, light client, etc.).
// IMPORTANT: Use one field per line and sort these alphabetically to reduce conflicts.
var placeholderFields = []string{
    "AGGREGRATE_DUE_BPS",
    "AGGREGRATE_DUE_BPS_GLOAS",
    "ATTESTATION_DEADLINE",
    "ATTESTATION_DUE_BPS",
    "ATTESTATION_DUE_BPS_GLOAS",
    "BLOB_SIDECAR_SUBNET_COUNT_FULU",
    "CONTRIBUTION_DUE_BPS",
    "CONTRIBUTION_DUE_BPS_GLOAS",
    "EIP6110_FORK_EPOCH",
    "EIP6110_FORK_VERSION",
    "EIP7002_FORK_EPOCH",
@@ -43,26 +37,16 @@ var placeholderFields = []string{
    "EIP7805_FORK_EPOCH",
    "EIP7805_FORK_VERSION",
    "EPOCHS_PER_SHUFFLING_PHASE",
    "GLOAS_FORK_EPOCH",
    "GLOAS_FORK_VERSION",
    "INCLUSION_LIST_SUBMISSION_DEADLINE",
    "INCLUSION_LIST_SUBMISSION_DUE_BPS",
    "MAX_BYTES_PER_INCLUSION_LIST",
    "MAX_REQUEST_BLOB_SIDECARS_FULU",
    "MAX_REQUEST_INCLUSION_LIST",
    "MAX_REQUEST_PAYLOADS", // Compile time constant on BeaconBlockBody.ExecutionRequests
    "PAYLOAD_ATTESTATION_DUE_BPS",
    "PROPOSER_INCLUSION_LIST_CUTOFF",
    "PROPOSER_INCLUSION_LIST_CUTOFF_BPS",
    "PROPOSER_REORG_CUTOFF_BPS",
    "PROPOSER_SCORE_BOOST_EIP7732",
    "PROPOSER_SELECTION_GAP",
    "SLOT_DURATION_MS",
    "SYNC_MESSAGE_DUE_BPS",
    "SYNC_MESSAGE_DUE_BPS_GLOAS",
    "TARGET_NUMBER_OF_PEERS",
    "UPDATE_TIMEOUT",
    "VIEW_FREEZE_CUTOFF_BPS",
    "VIEW_FREEZE_DEADLINE",
    "WHISK_EPOCHS_PER_SHUFFLING_PHASE",
    "WHISK_FORK_EPOCH",

@@ -104,8 +104,8 @@ func MinimalSpecConfig() *BeaconChainConfig {
minimalConfig.MinEpochsForBlockRequests = 272

// New Deneb params
minimalConfig.MaxBlobCommitmentsPerBlock = 4096
minimalConfig.KzgCommitmentInclusionProofDepth = 17
minimalConfig.MaxBlobCommitmentsPerBlock = 32
minimalConfig.KzgCommitmentInclusionProofDepth = 10

// New Electra params
minimalConfig.MinPerEpochChurnLimitElectra = 64000000000

@@ -24,10 +24,10 @@ func ErrNotSupported(funcName string, ver int) error {

// ThreadSafeEnumerator is a thread-safe counter of all objects created since the node's start.
type ThreadSafeEnumerator struct {
    counter atomic.Uint64
    counter uint64
}

// Inc increments the enumerator and returns the new object count.
func (c *ThreadSafeEnumerator) Inc() uint64 {
    return c.counter.Add(1)
    return atomic.AddUint64(&c.counter, 1)
}
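
A usage sketch for the enumerator above. Both variants in the diff are race-free; the atomic.Uint64 field additionally makes non-atomic access impossible to write by accident, which is the safety argument behind the change:

    var enum ThreadSafeEnumerator
    first := enum.Inc()  // 1
    second := enum.Inc() // 2
    _, _ = first, second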

@@ -10,18 +10,18 @@ import (

// Arbitrary start time.
var start = time.Date(1990, 1, 2, 0, 0, 0, 0, time.UTC).Round(0)
var elapsed atomic.Int64
var elapsed int64

// We provide atomic access to elapsed to avoid data races between multiple
// concurrent goroutines during the tests.
func getElapsed() time.Duration {
    return time.Duration(elapsed.Load())
    return time.Duration(atomic.LoadInt64(&elapsed))
}
func setElapsed(v time.Duration) {
    elapsed.Store(int64(v))
    atomic.StoreInt64(&elapsed, int64(v))
}
func addToElapsed(v time.Duration) {
    elapsed.Add(int64(v))
    atomic.AddInt64(&elapsed, int64(v))
}

func reset(t *testing.T, c *Collector) {

deps.bzl
@@ -2860,8 +2860,8 @@ def prysm_deps():
go_repository(
    name = "com_github_prysmaticlabs_gohashtree",
    importpath = "github.com/prysmaticlabs/gohashtree",
    sum = "h1:ct41mg7HyIZd7uoSM/ud23f+3DxQG9tlMlQG+BVX23c=",
    version = "v0.0.5-beta",
    sum = "h1:VK7thFOnhxAZ/5aolr5Os4beiubuD08WiuiHyRqgwks=",
    version = "v0.0.4-beta.0.20240624100937-73632381301b",
)
go_repository(
    name = "com_github_prysmaticlabs_prombbolt",

@@ -1,30 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "analyzer.go",
        "container.go",
        "path.go",
        "query.go",
        "ssz_info.go",
        "ssz_type.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v6/encoding/ssz/query",
    visibility = ["//visibility:public"],
)

go_test(
    name = "go_default_test",
    srcs = [
        "analyzer_test.go",
        "path_test.go",
        "query_test.go",
    ],
    deps = [
        ":go_default_library",
        "//encoding/ssz/query/testutil:go_default_library",
        "//proto/ssz_query:go_default_library",
        "//testing/require:go_default_library",
    ],
)
@@ -1,242 +0,0 @@
package query

import (
    "fmt"
    "reflect"
    "strconv"
    "strings"
)

const (
    offsetBytes = 4

    // sszMaxTag specifies the maximum capacity of a variable-sized collection, like an SSZ List.
    sszMaxTag = "ssz-max"

    // sszSizeTag specifies the length of a fixed-sized collection, like an SSZ Vector.
    // A wildcard ('?') indicates that the dimension is variable-sized (a List).
    sszSizeTag = "ssz-size"
)
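
An illustrative (hypothetical) container showing the two struct tags the analyzer reads, following the conventions described in the comments above; the json tags are included because the container analyzer below requires them:

    type taggedExample struct {
        Root   []byte   `json:"root" ssz-size:"32"`  // Vector: exactly 32 bytes
        Values []uint64 `json:"values" ssz-max:"16"` // List: at most 16 elements
    }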

// AnalyzeObject analyzes given object and returns its SSZ information.
func AnalyzeObject(obj any) (*sszInfo, error) {
    value := dereferencePointer(obj)

    info, err := analyzeType(value.Type(), nil)
    if err != nil {
        return nil, fmt.Errorf("could not analyze type %s: %w", value.Type().Name(), err)
    }

    return info, nil
}

// analyzeType is an entry point that inspects a reflect.Type and computes its SSZ layout information.
func analyzeType(typ reflect.Type, tag *reflect.StructTag) (*sszInfo, error) {
    switch typ.Kind() {
    // Basic types (e.g., uintN where N is 8, 16, 32, 64)
    // NOTE: uint128 and uint256 are represented as []byte in Go,
    // so we handle them as slices. See `analyzeHomogeneousColType`.
    case reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8, reflect.Bool:
        return analyzeBasicType(typ)

    case reflect.Slice:
        return analyzeHomogeneousColType(typ, tag)

    case reflect.Struct:
        return analyzeContainerType(typ)

    case reflect.Ptr:
        // Dereference pointer types.
        return analyzeType(typ.Elem(), tag)

    default:
        return nil, fmt.Errorf("unsupported type %v for SSZ calculation", typ.Kind())
    }
}

// analyzeBasicType analyzes SSZ basic types (uintN, bool) and returns its info.
func analyzeBasicType(typ reflect.Type) (*sszInfo, error) {
    sszInfo := &sszInfo{
        typ: typ,

        // Every basic type is fixed-size and not variable.
        isVariable: false,
    }

    switch typ.Kind() {
    case reflect.Uint64:
        sszInfo.sszType = UintN
        sszInfo.fixedSize = 8
    case reflect.Uint32:
        sszInfo.sszType = UintN
        sszInfo.fixedSize = 4
    case reflect.Uint16:
        sszInfo.sszType = UintN
        sszInfo.fixedSize = 2
    case reflect.Uint8:
        sszInfo.sszType = UintN
        sszInfo.fixedSize = 1
    case reflect.Bool:
        sszInfo.sszType = Boolean
        sszInfo.fixedSize = 1
    default:
        return nil, fmt.Errorf("unsupported basic type %v for SSZ calculation", typ.Kind())
    }

    return sszInfo, nil
}

// analyzeHomogeneousColType analyzes homogeneous collection types (e.g., List, Vector, Bitlist, Bitvector) and returns its SSZ info.
func analyzeHomogeneousColType(typ reflect.Type, tag *reflect.StructTag) (*sszInfo, error) {
    if typ.Kind() != reflect.Slice {
        return nil, fmt.Errorf("can only analyze slice types, got %v", typ.Kind())
    }

    if tag == nil {
        return nil, fmt.Errorf("tag is required for slice types")
    }

    elementInfo, err := analyzeType(typ.Elem(), nil)
    if err != nil {
        return nil, fmt.Errorf("could not analyze element type for homogeneous collection: %w", err)
    }

    // 1. Check if the type is List/Bitlist by checking `ssz-max` tag.
    sszMax := tag.Get(sszMaxTag)
    if sszMax != "" {
        dims := strings.Split(sszMax, ",")
        if len(dims) > 1 {
            return nil, fmt.Errorf("multi-dimensional lists are not supported, got %d dimensions", len(dims))
        }

        limit, err := strconv.ParseUint(dims[0], 10, 64)
        if err != nil {
            return nil, fmt.Errorf("invalid ssz-max tag (%s): %w", sszMax, err)
        }

        return analyzeListType(typ, elementInfo, limit)
    }

    // 2. Handle Vector/Bitvector type.
    sszSize := tag.Get(sszSizeTag)
    dims := strings.Split(sszSize, ",")
    if len(dims) > 1 {
        return nil, fmt.Errorf("multi-dimensional vectors are not supported, got %d dimensions", len(dims))
    }

    length, err := strconv.ParseUint(dims[0], 10, 64)
    if err != nil {
        return nil, fmt.Errorf("invalid ssz-size tag (%s): %w", sszSize, err)
    }

    return analyzeVectorType(typ, elementInfo, length)
}

// analyzeListType analyzes SSZ List type and returns its SSZ info.
func analyzeListType(typ reflect.Type, elementInfo *sszInfo, limit uint64) (*sszInfo, error) {
    if elementInfo == nil {
        return nil, fmt.Errorf("element info is required for List")
    }

    return &sszInfo{
        sszType: List,
        typ:     typ,

        fixedSize:  offsetBytes,
        isVariable: true,
    }, nil
}

// analyzeVectorType analyzes SSZ Vector type and returns its SSZ info.
func analyzeVectorType(typ reflect.Type, elementInfo *sszInfo, length uint64) (*sszInfo, error) {
    if elementInfo == nil {
        return nil, fmt.Errorf("element info is required for Vector")
    }

    return &sszInfo{
        sszType: Vector,
        typ:     typ,

        fixedSize:  length * elementInfo.Size(),
        isVariable: false,
    }, nil
}

// analyzeContainerType analyzes SSZ Container type and returns its SSZ info.
func analyzeContainerType(typ reflect.Type) (*sszInfo, error) {
    if typ.Kind() != reflect.Struct {
        return nil, fmt.Errorf("can only analyze struct types, got %v", typ.Kind())
    }

    sszInfo := &sszInfo{
        sszType: Container,
        typ:     typ,

        containerInfo: make(map[string]*fieldInfo),
    }
    var currentOffset uint64

    for i := 0; i < typ.NumField(); i++ {
        field := typ.Field(i)

        // Protobuf-generated structs contain private fields we must skip.
        // e.g., state, sizeCache, unknownFields, etc.
        if !field.IsExported() {
            continue
        }

        // The JSON tag contains the field name in the first part.
        // e.g., "attesting_indices,omitempty" -> "attesting_indices".
        jsonTag := field.Tag.Get("json")
        if jsonTag == "" {
            return nil, fmt.Errorf("field %s has no JSON tag", field.Name)
        }

        // NOTE: `fieldName` is a string with `snake_case` format (following consensus specs).
        fieldName := strings.Split(jsonTag, ",")[0]
        if fieldName == "" {
            return nil, fmt.Errorf("field %s has an empty JSON tag", field.Name)
        }

        // Analyze each field so that we can complete full SSZ information.
        info, err := analyzeType(field.Type, &field.Tag)
        if err != nil {
            return nil, fmt.Errorf("could not analyze type for field %s: %w", fieldName, err)
        }

        // If one of the fields is variable-sized,
        // the entire struct is considered variable-sized.
        if info.isVariable {
            sszInfo.isVariable = true
        }

        // Store nested struct info.
        sszInfo.containerInfo[fieldName] = &fieldInfo{
            sszInfo: info,
            offset:  currentOffset,
        }

        // Update the current offset based on the field's fixed size.
        currentOffset += info.fixedSize
    }

    sszInfo.fixedSize = currentOffset

    return sszInfo, nil
}

// dereferencePointer dereferences a pointer to get the underlying value using reflection.
func dereferencePointer(obj any) reflect.Value {
    value := reflect.ValueOf(obj)
    if value.Kind() == reflect.Ptr {
        if value.IsNil() {
            // If we encounter a nil pointer before the end of the path, we can still proceed
            // by analyzing the type, not the value.
            value = reflect.New(value.Type().Elem()).Elem()
        } else {
            value = value.Elem()
        }
    }

    return value
}
@@ -1,17 +0,0 @@
package query_test

import (
    "testing"

    "github.com/OffchainLabs/prysm/v6/encoding/ssz/query"
    sszquerypb "github.com/OffchainLabs/prysm/v6/proto/ssz_query"
    "github.com/OffchainLabs/prysm/v6/testing/require"
)

func TestAnalyzeSSZInfo(t *testing.T) {
    info, err := query.AnalyzeObject(&sszquerypb.FixedTestContainer{})
    require.NoError(t, err)

    require.NotNil(t, info, "Expected non-nil SSZ info")
    require.Equal(t, uint64(333), info.FixedSize(), "Expected fixed size to be 333")
}
@@ -1,11 +0,0 @@
package query

// containerInfo maps a field's JSON name to its sszInfo for nested Containers.
type containerInfo = map[string]*fieldInfo

type fieldInfo struct {
    // sszInfo contains the SSZ information of the field.
    sszInfo *sszInfo
    // offset is the offset of the field within the parent struct.
    offset uint64
}
@@ -1,31 +0,0 @@
package query

import (
    "errors"
    "strings"
)

// PathElement represents a single element in a path.
type PathElement struct {
    Name string
}

func ParsePath(rawPath string) ([]PathElement, error) {
    // We use dot notation, so we split the path by '.'.
    rawElements := strings.Split(rawPath, ".")
    if len(rawElements) == 0 {
        return nil, errors.New("empty path provided")
    }

    if rawElements[0] == "" {
        // Remove leading dot if present
        rawElements = rawElements[1:]
    }

    var path []PathElement
    for _, elem := range rawElements {
        path = append(path, PathElement{Name: elem})
    }

    return path, nil
}
@@ -1,53 +0,0 @@
package query_test

import (
    "testing"

    "github.com/OffchainLabs/prysm/v6/encoding/ssz/query"
    "github.com/OffchainLabs/prysm/v6/testing/require"
)

func TestParsePath(t *testing.T) {
    tests := []struct {
        name     string
        path     string
        expected []query.PathElement
        wantErr  bool
    }{
        {
            name: "simple nested path",
            path: "data.target.root",
            expected: []query.PathElement{
                {Name: "data"},
                {Name: "target"},
                {Name: "root"},
            },
            wantErr: false,
        },
        {
            name: "simple nested path with leading dot",
            path: ".data.target.root",
            expected: []query.PathElement{
                {Name: "data"},
                {Name: "target"},
                {Name: "root"},
            },
            wantErr: false,
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            parsedPath, err := query.ParsePath(tt.path)

            if tt.wantErr {
                require.NotNil(t, err, "Expected error but got none")
                return
            }

            require.NoError(t, err)
            require.Equal(t, len(tt.expected), len(parsedPath), "Expected %d path elements, got %d", len(tt.expected), len(parsedPath))
            require.DeepEqual(t, tt.expected, parsedPath, "Parsed path does not match expected path")
        })
    }
}
@@ -1,37 +0,0 @@
package query

import "fmt"

func CalculateOffsetAndLength(sszInfo *sszInfo, path []PathElement) (*sszInfo, uint64, uint64, error) {
    if sszInfo == nil {
        return nil, 0, 0, fmt.Errorf("sszInfo is nil")
    }

    if len(path) == 0 {
        return nil, 0, 0, fmt.Errorf("path is empty")
    }

    walk := sszInfo
    currentOffset := uint64(0)

    for _, elem := range path {
        fieldInfos, err := walk.ContainerInfo()
        if err != nil {
            return nil, 0, 0, fmt.Errorf("could not get field infos: %w", err)
        }

        fieldInfo, exists := fieldInfos[elem.Name]
        if !exists {
            return nil, 0, 0, fmt.Errorf("field %s not found in fieldInfos", elem.Name)
        }

        currentOffset += fieldInfo.offset
        walk = fieldInfo.sszInfo
    }

    if walk.isVariable {
        return nil, 0, 0, fmt.Errorf("cannot calculate length for variable-sized type")
    }

    return walk, currentOffset, walk.Size(), nil
}
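
A worked example of the walk above against the FixedTestContainer used in the tests below (a sketch; package qualifiers and error handling omitted). The fixed prefix of the container is field_uint32 (4) + field_uint64 (8) + field_bool (1) + field_bytes32 (32) = 45 bytes before `nested`, and value1 (8 bytes) precedes value2 inside it, so ".nested.value2" resolves to offset 45 + 8 = 53 with length 32:

    path, _ := ParsePath(".nested.value2")
    info, _ := AnalyzeObject(&sszquerypb.FixedTestContainer{})
    _, offset, length, _ := CalculateOffsetAndLength(info, path)
    // offset == 53, length == 32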
|
||||
@@ -1,200 +0,0 @@
package query_test

import (
	"math"
	"testing"

	"github.com/OffchainLabs/prysm/v6/encoding/ssz/query"
	"github.com/OffchainLabs/prysm/v6/encoding/ssz/query/testutil"
	"github.com/OffchainLabs/prysm/v6/proto/ssz_query"
	sszquerypb "github.com/OffchainLabs/prysm/v6/proto/ssz_query"
	"github.com/OffchainLabs/prysm/v6/testing/require"
)

func TestCalculateOffsetAndLength(t *testing.T) {
	tests := []struct {
		name           string
		path           string
		expectedOffset uint64
		expectedLength uint64
	}{
		// Basic integer types
		{
			name:           "field_uint32",
			path:           ".field_uint32",
			expectedOffset: 0,
			expectedLength: 4,
		},
		{
			name:           "field_uint64",
			path:           ".field_uint64",
			expectedOffset: 4,
			expectedLength: 8,
		},
		// Boolean type
		{
			name:           "field_bool",
			path:           ".field_bool",
			expectedOffset: 12,
			expectedLength: 1,
		},
		// Fixed-size bytes
		{
			name:           "field_bytes32",
			path:           ".field_bytes32",
			expectedOffset: 13,
			expectedLength: 32,
		},
		// Nested container
		{
			name:           "nested container",
			path:           ".nested",
			expectedOffset: 45,
			expectedLength: 40,
		},
		{
			name:           "nested value1",
			path:           ".nested.value1",
			expectedOffset: 45,
			expectedLength: 8,
		},
		{
			name:           "nested value2",
			path:           ".nested.value2",
			expectedOffset: 53,
			expectedLength: 32,
		},
		// Vector field
		{
			name:           "vector field",
			path:           ".vector_field",
			expectedOffset: 85,
			expectedLength: 192, // 24 * 8 bytes
		},
		// Trailing field
		{
			name:           "trailing_field",
			path:           ".trailing_field",
			expectedOffset: 277,
			expectedLength: 56,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			path, err := query.ParsePath(tt.path)
			require.NoError(t, err)

			info, err := query.AnalyzeObject(&sszquerypb.FixedTestContainer{})
			require.NoError(t, err)

			_, offset, length, err := query.CalculateOffsetAndLength(info, path)
			require.NoError(t, err)

			require.Equal(t, tt.expectedOffset, offset, "Expected offset to be %d", tt.expectedOffset)
			require.Equal(t, tt.expectedLength, length, "Expected length to be %d", tt.expectedLength)
		})
	}
}

func TestRoundTripSszInfo(t *testing.T) {
	specs := []testutil.TestSpec{
		getFixedTestContainerSpec(),
	}

	for _, spec := range specs {
		testutil.RunStructTest(t, spec)
	}
}

func createFixedTestContainer() any {
	fieldBytes32 := make([]byte, 32)
	for i := range fieldBytes32 {
		fieldBytes32[i] = byte(i + 24)
	}

	nestedValue2 := make([]byte, 32)
	for i := range nestedValue2 {
		nestedValue2[i] = byte(i + 56)
	}

	trailingField := make([]byte, 56)
	for i := range trailingField {
		trailingField[i] = byte(i + 88)
	}

	return &ssz_query.FixedTestContainer{
		// Basic types
		FieldUint32: math.MaxUint32,
		FieldUint64: math.MaxUint64,
		FieldBool:   true,

		// Fixed-size bytes
		FieldBytes32: fieldBytes32,

		// Nested container
		Nested: &sszquerypb.FixedNestedContainer{
			Value1: 123,
			Value2: nestedValue2,
		},

		// Vector field
		VectorField: []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24},

		// Trailing field
		TrailingField: trailingField,
	}
}

func getFixedTestContainerSpec() testutil.TestSpec {
	testContainer := createFixedTestContainer().(*sszquerypb.FixedTestContainer)

	return testutil.TestSpec{
		Name:     "FixedTestContainer",
		Type:     sszquerypb.FixedTestContainer{},
		Instance: testContainer,
		PathTests: []testutil.PathTest{
			// Basic types
			{
				Path:     ".field_uint32",
				Expected: testContainer.FieldUint32,
			},
			{
				Path:     ".field_uint64",
				Expected: testContainer.FieldUint64,
			},
			{
				Path:     ".field_bool",
				Expected: testContainer.FieldBool,
			},
			// Fixed-size bytes
			{
				Path:     ".field_bytes32",
				Expected: testContainer.FieldBytes32,
			},
			// Nested container
			{
				Path:     ".nested",
				Expected: testContainer.Nested,
			},
			{
				Path:     ".nested.value1",
				Expected: testContainer.Nested.Value1,
			},
			{
				Path:     ".nested.value2",
				Expected: testContainer.Nested.Value2,
			},
			// Vector field
			{
				Path:     ".vector_field",
				Expected: testContainer.VectorField,
			},
			// Trailing field
			{
				Path:     ".trailing_field",
				Expected: testContainer.TrailingField,
			},
		},
	}
}
@@ -1,110 +0,0 @@
package query

import (
	"fmt"
	"reflect"
	"sort"
	"strings"
)

// sszInfo holds all the necessary data for analyzing SSZ data types.
type sszInfo struct {
	// Type of the SSZ structure (Basic, Container, List, etc.).
	sszType SSZType
	// Type in Go. Need this for unmarshaling.
	typ reflect.Type

	// isVariable is true if the struct contains any variable-size fields.
	isVariable bool
	// fixedSize is the total size of the struct's fixed part.
	fixedSize uint64

	// For Container types.
	containerInfo containerInfo
}

func (info *sszInfo) FixedSize() uint64 {
	if info == nil {
		return 0
	}
	return info.fixedSize
}

func (info *sszInfo) Size() uint64 {
	if info == nil {
		return 0
	}

	// Easy case: if the type is not variable, we can return the fixed size.
	if !info.isVariable {
		return info.fixedSize
	}

	// NOTE: Handle variable-sized types.
	return 0
}

func (info *sszInfo) ContainerInfo() (containerInfo, error) {
	if info == nil {
		return nil, fmt.Errorf("sszInfo is nil")
	}

	if info.sszType != Container {
		return nil, fmt.Errorf("sszInfo is not a Container type, got %s", info.sszType)
	}

	if info.containerInfo == nil {
		return nil, fmt.Errorf("sszInfo.containerInfo is nil")
	}

	return info.containerInfo, nil
}

// Print returns a string representation of the sszInfo, which is useful for debugging.
func (info *sszInfo) Print() string {
	if info == nil {
		return "<nil>"
	}
	var builder strings.Builder
	printRecursive(info, &builder, "")
	return builder.String()
}

func printRecursive(info *sszInfo, builder *strings.Builder, prefix string) {
	var sizeDesc string
	if info.isVariable {
		sizeDesc = "Variable-size"
	} else {
		sizeDesc = "Fixed-size"
	}

	switch info.sszType {
	case Container:
		builder.WriteString(fmt.Sprintf("%s: %s (%s / fixed size: %d, total size: %d)\n", info.sszType, info.typ.Name(), sizeDesc, info.FixedSize(), info.Size()))
	default:
		builder.WriteString(fmt.Sprintf("%s (%s / size: %d)\n", info.sszType, sizeDesc, info.Size()))
	}

	keys := make([]string, 0, len(info.containerInfo))
	for k := range info.containerInfo {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	for i, key := range keys {
		connector := "├─"
		nextPrefix := prefix + "│ "
		if i == len(keys)-1 {
			connector = "└─"
			nextPrefix = prefix + " "
		}

		builder.WriteString(fmt.Sprintf("%s%s %s (offset: %d) ", prefix, connector, key, info.containerInfo[key].offset))

		if nestedInfo := info.containerInfo[key].sszInfo; nestedInfo != nil {
			printRecursive(nestedInfo, builder, nextPrefix)
		} else {
			builder.WriteString("\n")
		}
	}
}
@@ -1,53 +0,0 @@
package query

import "fmt"

// SSZType represents the type supported by SSZ.
// https://github.com/ethereum/consensus-specs/blob/master/ssz/simple-serialize.md#typing
type SSZType int

// SSZ type constants.
const (
	// Basic types
	UintN SSZType = iota
	Byte
	Boolean

	// Composite types
	Container
	Vector
	List
	Bitvector
	Bitlist

	// Added in EIP-7916
	ProgressiveList
	Union
)

func (t SSZType) String() string {
	switch t {
	case UintN:
		return "UintN"
	case Byte:
		return "Byte"
	case Boolean:
		return "Boolean"
	case Container:
		return "Container"
	case Vector:
		return "Vector"
	case List:
		return "List"
	case Bitvector:
		return "Bitvector"
	case Bitlist:
		return "Bitlist"
	case ProgressiveList:
		return "ProgressiveList"
	case Union:
		return "Union"
	default:
		return fmt.Sprintf("Unknown(%d)", t)
	}
}
@@ -1,18 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    testonly = True,
    srcs = [
        "runner.go",
        "type.go",
        "util.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v6/encoding/ssz/query/testutil",
    visibility = ["//visibility:public"],
    deps = [
        "//encoding/ssz/query:go_default_library",
        "//testing/require:go_default_library",
        "@com_github_prysmaticlabs_fastssz//:go_default_library",
    ],
)
@@ -1,38 +0,0 @@
package testutil

import (
	"testing"

	"github.com/OffchainLabs/prysm/v6/encoding/ssz/query"
	"github.com/OffchainLabs/prysm/v6/testing/require"
	ssz "github.com/prysmaticlabs/fastssz"
)

func RunStructTest(t *testing.T, spec TestSpec) {
	t.Run(spec.Name, func(t *testing.T) {
		info, err := query.AnalyzeObject(spec.Type)
		require.NoError(t, err)

		testInstance := spec.Instance
		marshaller, ok := testInstance.(ssz.Marshaler)
		require.Equal(t, true, ok, "Test instance must implement ssz.Marshaler, got %T", testInstance)

		marshalledData, err := marshaller.MarshalSSZ()
		require.NoError(t, err)

		for _, pathTest := range spec.PathTests {
			t.Run(pathTest.Path, func(t *testing.T) {
				path, err := query.ParsePath(pathTest.Path)
				require.NoError(t, err)

				_, offset, length, err := query.CalculateOffsetAndLength(info, path)
				require.NoError(t, err)

				expectedRawBytes := marshalledData[offset : offset+length]
				rawBytes, err := marshalAny(pathTest.Expected)
				require.NoError(t, err, "Marshalling expected value should not return an error")
				require.DeepEqual(t, expectedRawBytes, rawBytes, "Extracted value should match expected")
			})
		}
	})
}
@@ -1,13 +0,0 @@
package testutil

type PathTest struct {
	Path     string
	Expected any
}

type TestSpec struct {
	Name      string
	Type      any
	Instance  any
	PathTests []PathTest
}
@@ -1,49 +0,0 @@
package testutil

import (
	"fmt"
	"reflect"

	ssz "github.com/prysmaticlabs/fastssz"
)

// marshalAny marshals any value into SSZ format.
func marshalAny(value any) ([]byte, error) {
	// First check if it implements ssz.Marshaler (this catches custom types like primitives.Epoch)
	if marshaler, ok := value.(ssz.Marshaler); ok {
		return marshaler.MarshalSSZ()
	}

	// Handle custom type aliases by checking if they're based on primitive types
	valueType := reflect.TypeOf(value)
	if valueType.PkgPath() != "" {
		switch valueType.Kind() {
		case reflect.Uint64:
			return ssz.MarshalUint64(make([]byte, 0), reflect.ValueOf(value).Uint()), nil
		case reflect.Uint32:
			return ssz.MarshalUint32(make([]byte, 0), uint32(reflect.ValueOf(value).Uint())), nil
		case reflect.Bool:
			return ssz.MarshalBool(make([]byte, 0), reflect.ValueOf(value).Bool()), nil
		}
	}

	switch v := value.(type) {
	case []byte:
		return v, nil
	case []uint64:
		buf := make([]byte, 0, len(v)*8)
		for _, val := range v {
			buf = ssz.MarshalUint64(buf, val)
		}
		return buf, nil
	case uint64:
		return ssz.MarshalUint64(make([]byte, 0), v), nil
	case uint32:
		return ssz.MarshalUint32(make([]byte, 0), v), nil
	case bool:
		return ssz.MarshalBool(make([]byte, 0), v), nil

	default:
		return nil, fmt.Errorf("unsupported type for SSZ marshalling: %T", value)
	}
}
2
go.mod
@@ -282,7 +282,7 @@ require (
	github.com/go-ole/go-ole v1.3.0 // indirect
	github.com/go-playground/validator/v10 v10.13.0
	github.com/peterh/liner v1.2.0 // indirect
	github.com/prysmaticlabs/gohashtree v0.0.5-beta
	github.com/prysmaticlabs/gohashtree v0.0.4-beta.0.20240624100937-73632381301b
	golang.org/x/sys v0.31.0 // indirect
	k8s.io/klog/v2 v2.120.1 // indirect
	k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect

4
go.sum
@@ -900,8 +900,8 @@ github.com/prysmaticlabs/fastssz v0.0.0-20241008181541-518c4ce73516/go.mod h1:h2
github.com/prysmaticlabs/go-bitfield v0.0.0-20210108222456-8e92c3709aa0/go.mod h1:hCwmef+4qXWjv0jLDbQdWnL0Ol7cS7/lCSS26WR+u6s=
github.com/prysmaticlabs/go-bitfield v0.0.0-20240328144219-a1caa50c3a1e h1:ATgOe+abbzfx9kCPeXIW4fiWyDdxlwHw07j8UGhdTd4=
github.com/prysmaticlabs/go-bitfield v0.0.0-20240328144219-a1caa50c3a1e/go.mod h1:wmuf/mdK4VMD+jA9ThwcUKjg3a2XWM9cVfFYjDyY4j4=
github.com/prysmaticlabs/gohashtree v0.0.5-beta h1:ct41mg7HyIZd7uoSM/ud23f+3DxQG9tlMlQG+BVX23c=
github.com/prysmaticlabs/gohashtree v0.0.5-beta/go.mod h1:HRuvtXLZ4WkaB1MItToVH2e8ZwKwZPY5/Rcby+CvvLY=
github.com/prysmaticlabs/gohashtree v0.0.4-beta.0.20240624100937-73632381301b h1:VK7thFOnhxAZ/5aolr5Os4beiubuD08WiuiHyRqgwks=
github.com/prysmaticlabs/gohashtree v0.0.4-beta.0.20240624100937-73632381301b/go.mod h1:HRuvtXLZ4WkaB1MItToVH2e8ZwKwZPY5/Rcby+CvvLY=
github.com/prysmaticlabs/prombbolt v0.0.0-20210126082820-9b7adba6db7c h1:9PHRCuO/VN0s9k+RmLykho7AjDxblNYI5bYKed16NPU=
github.com/prysmaticlabs/prombbolt v0.0.0-20210126082820-9b7adba6db7c/go.mod h1:ZRws458tYHS/Zs936OQ6oCrL+Ict5O4Xpwve1UQ6C9M=
github.com/prysmaticlabs/protoc-gen-go-cast v0.0.0-20230228205207-28762a7b9294 h1:q9wE0ZZRdTUAAeyFP/w0SwBEnCqlVy2+on6X2/e+eAU=

@@ -63,9 +63,9 @@ minimal = {
    "logs_bloom.size": "256",
    "extra_data.size": "32",
    "max_blobs_per_block.size": "6",
    "max_blob_commitments.size": "4096",
    "max_blob_commitments.size": "32",
    "max_cell_proofs_length.size": "33554432",  # FIELD_ELEMENTS_PER_EXT_BLOB * MAX_BLOB_COMMITMENTS_PER_BLOCK
    "kzg_commitment_inclusion_proof_depth.size": "17",
    "kzg_commitment_inclusion_proof_depth.size": "10",
    "max_withdrawal_requests_per_payload.size": "16",
    "max_deposit_requests_per_payload.size": "8192",
    "max_attesting_indices.size": "8192",

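A note on why these two minimal-preset sizes move together: per the Deneb spec, KZG_COMMITMENT_INCLUSION_PROOF_DEPTH is derived as floorlog2(gindex of blob_kzg_commitments in BeaconBlockBody) + 1 + ceil(log2(MAX_BLOB_COMMITMENTS_PER_BLOCK)). The body-gindex term contributes 5, so a commitment limit of 4096 = 2^12 gives 5 + 12 = 17, while the limit of 32 = 2^5 used here gives 5 + 5 = 10.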
@@ -1,68 +0,0 @@
load("@rules_proto//proto:defs.bzl", "proto_library")
load("@io_bazel_rules_go//go:def.bzl", "go_library")
load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
load("//proto:ssz_proto_library.bzl", "ssz_proto_files")
load("//tools:ssz.bzl", "SSZ_DEPS", "ssz_gen_marshal")

# gazelle:ignore

proto_library(
    name = "proto",
    srcs = ["ssz_query.proto"],
    visibility = ["//visibility:public"],
    deps = [
        "//proto/eth/ext:proto",
    ],
)

go_proto_library(
    name = "go_proto",
    compilers = [
        "@com_github_prysmaticlabs_protoc_gen_go_cast//:go_cast_grpc",
    ],
    importpath = "github.com/OffchainLabs/prysm/v6/proto/ssz_query",
    proto = ":proto",
    visibility = ["//visibility:public"],
    deps = [
        "//proto/eth/ext:go_default_library",
        "@com_github_golang_protobuf//proto:go_default_library",
        "@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
        "@org_golang_google_protobuf//runtime/protoimpl:go_default_library",
    ],
)

# SSZ generation for test proto messages
ssz_gen_marshal(
    name = "ssz_generated",
    out = "ssz_query.ssz.go",
    go_proto = ":go_proto",
    objs = [
        "FixedTestContainer",
        "FixedNestedContainer",
    ],
)

go_library(
    name = "go_default_library",
    srcs = [
        ":ssz_generated",  # keep
    ],
    embed = [":go_proto"],
    importpath = "github.com/OffchainLabs/prysm/v6/proto/ssz_query",
    visibility = ["//visibility:public"],
    deps = SSZ_DEPS + [
        "//proto/eth/ext:go_default_library",
        "@com_github_golang_protobuf//proto:go_default_library",
        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
    ],
)

ssz_proto_files(
    name = "ssz_proto_files",
    srcs = ["ssz_query.proto"],
    config = select({
        "//conditions:default": "mainnet",
        "//proto:ssz_mainnet": "mainnet",
        "//proto:ssz_minimal": "minimal",
    }),
)
289
proto/ssz_query/ssz_query.pb.go
generated
@@ -1,289 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// 	protoc-gen-go v1.33.0
// 	protoc        v3.21.7
// source: proto/ssz_query/ssz_query.proto

package ssz_query

import (
	reflect "reflect"
	sync "sync"

	_ "github.com/OffchainLabs/prysm/v6/proto/eth/ext"
	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
)

const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

type FixedNestedContainer struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Value1 uint64 `protobuf:"varint,1,opt,name=value1,proto3" json:"value1,omitempty"`
	Value2 []byte `protobuf:"bytes,2,opt,name=value2,proto3" json:"value2,omitempty" ssz-size:"32"`
}

func (x *FixedNestedContainer) Reset() {
	*x = FixedNestedContainer{}
	if protoimpl.UnsafeEnabled {
		mi := &file_proto_ssz_query_ssz_query_proto_msgTypes[0]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *FixedNestedContainer) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*FixedNestedContainer) ProtoMessage() {}

func (x *FixedNestedContainer) ProtoReflect() protoreflect.Message {
	mi := &file_proto_ssz_query_ssz_query_proto_msgTypes[0]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use FixedNestedContainer.ProtoReflect.Descriptor instead.
func (*FixedNestedContainer) Descriptor() ([]byte, []int) {
	return file_proto_ssz_query_ssz_query_proto_rawDescGZIP(), []int{0}
}

func (x *FixedNestedContainer) GetValue1() uint64 {
	if x != nil {
		return x.Value1
	}
	return 0
}

func (x *FixedNestedContainer) GetValue2() []byte {
	if x != nil {
		return x.Value2
	}
	return nil
}

type FixedTestContainer struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	FieldUint32   uint32                `protobuf:"varint,3,opt,name=field_uint32,json=fieldUint32,proto3" json:"field_uint32,omitempty"`
	FieldUint64   uint64                `protobuf:"varint,4,opt,name=field_uint64,json=fieldUint64,proto3" json:"field_uint64,omitempty"`
	FieldBool     bool                  `protobuf:"varint,5,opt,name=field_bool,json=fieldBool,proto3" json:"field_bool,omitempty"`
	FieldBytes32  []byte                `protobuf:"bytes,8,opt,name=field_bytes32,json=fieldBytes32,proto3" json:"field_bytes32,omitempty" ssz-size:"32"`
	Nested        *FixedNestedContainer `protobuf:"bytes,9,opt,name=nested,proto3" json:"nested,omitempty"`
	VectorField   []uint64              `protobuf:"varint,10,rep,packed,name=vector_field,json=vectorField,proto3" json:"vector_field,omitempty" ssz-size:"24"`
	TrailingField []byte                `protobuf:"bytes,11,opt,name=trailing_field,json=trailingField,proto3" json:"trailing_field,omitempty" ssz-size:"56"`
}

func (x *FixedTestContainer) Reset() {
	*x = FixedTestContainer{}
	if protoimpl.UnsafeEnabled {
		mi := &file_proto_ssz_query_ssz_query_proto_msgTypes[1]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *FixedTestContainer) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*FixedTestContainer) ProtoMessage() {}

func (x *FixedTestContainer) ProtoReflect() protoreflect.Message {
	mi := &file_proto_ssz_query_ssz_query_proto_msgTypes[1]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use FixedTestContainer.ProtoReflect.Descriptor instead.
func (*FixedTestContainer) Descriptor() ([]byte, []int) {
	return file_proto_ssz_query_ssz_query_proto_rawDescGZIP(), []int{1}
}

func (x *FixedTestContainer) GetFieldUint32() uint32 {
	if x != nil {
		return x.FieldUint32
	}
	return 0
}

func (x *FixedTestContainer) GetFieldUint64() uint64 {
	if x != nil {
		return x.FieldUint64
	}
	return 0
}

func (x *FixedTestContainer) GetFieldBool() bool {
	if x != nil {
		return x.FieldBool
	}
	return false
}

func (x *FixedTestContainer) GetFieldBytes32() []byte {
	if x != nil {
		return x.FieldBytes32
	}
	return nil
}

func (x *FixedTestContainer) GetNested() *FixedNestedContainer {
	if x != nil {
		return x.Nested
	}
	return nil
}

func (x *FixedTestContainer) GetVectorField() []uint64 {
	if x != nil {
		return x.VectorField
	}
	return nil
}

func (x *FixedTestContainer) GetTrailingField() []byte {
	if x != nil {
		return x.TrailingField
	}
	return nil
}

var File_proto_ssz_query_ssz_query_proto protoreflect.FileDescriptor

var file_proto_ssz_query_ssz_query_proto_rawDesc = []byte{
	0x0a, 0x1f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x73, 0x7a, 0x5f, 0x71, 0x75, 0x65, 0x72,
	0x79, 0x2f, 0x73, 0x73, 0x7a, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
	0x6f, 0x12, 0x09, 0x73, 0x73, 0x7a, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x1a, 0x1b, 0x70, 0x72,
	0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x74, 0x68, 0x2f, 0x65, 0x78, 0x74, 0x2f, 0x6f, 0x70, 0x74, 0x69,
	0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x4e, 0x0a, 0x14, 0x46, 0x69, 0x78,
	0x65, 0x64, 0x4e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
	0x72, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x31, 0x18, 0x01, 0x20, 0x01, 0x28,
	0x04, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x31, 0x12, 0x1e, 0x0a, 0x06, 0x76, 0x61, 0x6c,
	0x75, 0x65, 0x32, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33,
	0x32, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x32, 0x22, 0xb9, 0x02, 0x0a, 0x12, 0x46, 0x69,
	0x78, 0x65, 0x64, 0x54, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72,
	0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x75, 0x69, 0x6e, 0x74, 0x33, 0x32,
	0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x55, 0x69, 0x6e,
	0x74, 0x33, 0x32, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x75, 0x69, 0x6e,
	0x74, 0x36, 0x34, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x66, 0x69, 0x65, 0x6c, 0x64,
	0x55, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x12, 0x1d, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f,
	0x62, 0x6f, 0x6f, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c,
	0x64, 0x42, 0x6f, 0x6f, 0x6c, 0x12, 0x2b, 0x0a, 0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62,
	0x79, 0x74, 0x65, 0x73, 0x33, 0x32, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5,
	0x18, 0x02, 0x33, 0x32, 0x52, 0x0c, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x79, 0x74, 0x65, 0x73,
	0x33, 0x32, 0x12, 0x37, 0x0a, 0x06, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x18, 0x09, 0x20, 0x01,
	0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x73, 0x73, 0x7a, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46,
	0x69, 0x78, 0x65, 0x64, 0x4e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69,
	0x6e, 0x65, 0x72, 0x52, 0x06, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x12, 0x29, 0x0a, 0x0c, 0x76,
	0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x0a, 0x20, 0x03, 0x28,
	0x04, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x32, 0x34, 0x52, 0x0b, 0x76, 0x65, 0x63, 0x74, 0x6f,
	0x72, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x2d, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69,
	0x6e, 0x67, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06,
	0x8a, 0xb5, 0x18, 0x02, 0x35, 0x36, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67,
	0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
	0x63, 0x6f, 0x6d, 0x2f, 0x4f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x4c, 0x61, 0x62, 0x73,
	0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x36, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f,
	0x73, 0x73, 0x7a, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
	0x33,
}

var (
	file_proto_ssz_query_ssz_query_proto_rawDescOnce sync.Once
	file_proto_ssz_query_ssz_query_proto_rawDescData = file_proto_ssz_query_ssz_query_proto_rawDesc
)

func file_proto_ssz_query_ssz_query_proto_rawDescGZIP() []byte {
	file_proto_ssz_query_ssz_query_proto_rawDescOnce.Do(func() {
		file_proto_ssz_query_ssz_query_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_ssz_query_ssz_query_proto_rawDescData)
	})
	return file_proto_ssz_query_ssz_query_proto_rawDescData
}

var file_proto_ssz_query_ssz_query_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_proto_ssz_query_ssz_query_proto_goTypes = []interface{}{
	(*FixedNestedContainer)(nil), // 0: ssz_query.FixedNestedContainer
	(*FixedTestContainer)(nil),   // 1: ssz_query.FixedTestContainer
}
var file_proto_ssz_query_ssz_query_proto_depIdxs = []int32{
	0, // 0: ssz_query.FixedTestContainer.nested:type_name -> ssz_query.FixedNestedContainer
	1, // [1:1] is the sub-list for method output_type
	1, // [1:1] is the sub-list for method input_type
	1, // [1:1] is the sub-list for extension type_name
	1, // [1:1] is the sub-list for extension extendee
	0, // [0:1] is the sub-list for field type_name
}

func init() { file_proto_ssz_query_ssz_query_proto_init() }
func file_proto_ssz_query_ssz_query_proto_init() {
	if File_proto_ssz_query_ssz_query_proto != nil {
		return
	}
	if !protoimpl.UnsafeEnabled {
		file_proto_ssz_query_ssz_query_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*FixedNestedContainer); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_proto_ssz_query_ssz_query_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*FixedTestContainer); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_proto_ssz_query_ssz_query_proto_rawDesc,
			NumEnums:      0,
			NumMessages:   2,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_proto_ssz_query_ssz_query_proto_goTypes,
		DependencyIndexes: file_proto_ssz_query_ssz_query_proto_depIdxs,
		MessageInfos:      file_proto_ssz_query_ssz_query_proto_msgTypes,
	}.Build()
	File_proto_ssz_query_ssz_query_proto = out.File
	file_proto_ssz_query_ssz_query_proto_rawDesc = nil
	file_proto_ssz_query_ssz_query_proto_goTypes = nil
	file_proto_ssz_query_ssz_query_proto_depIdxs = nil
}
@@ -1,43 +0,0 @@
syntax = "proto3";

package ssz_query;

import "proto/eth/ext/options.proto";

option go_package = "github.com/OffchainLabs/prysm/v6/proto/ssz_query";

// ===== FIXED-SIZE TEST CONTAINERS =====
// These containers are designed to test SSZ query functionality with comprehensive coverage
// of all fixed-size SSZ types according to the SSZ specification.

// FixedNestedContainer - nested container for testing nested field access
// Tests: nested container navigation, field offset calculations within nested structures
message FixedNestedContainer {
  uint64 value1 = 1; // Test: uint64 basic type, offset calculation in nested context
  bytes value2 = 2 [ (ethereum.eth.ext.ssz_size) = "32" ]; // Test: fixed-size bytes in nested container
}

// FixedTestContainer - comprehensive fixed-size container for SSZ query testing
// Tests: All basic fixed-size SSZ types, nested containers, vectors, offset/length calculations
// Total size: 333 bytes (4+8+1+32+40+192+56)
message FixedTestContainer {
  // Basic integer types - test different integer sizes and their SSZ serialization
  uint32 field_uint32 = 3; // Test: uint32 basic type, offset: 0
  uint64 field_uint64 = 4; // Test: uint64 basic type, offset: 4

  // Boolean type - test boolean serialization (1 byte in SSZ)
  bool field_bool = 5; // Test: boolean basic type, offset: 12

  // Fixed-size bytes - test byte array
  bytes field_bytes32 = 8 [ (ethereum.eth.ext.ssz_size) = "32" ]; // Test: 32-byte array, offset: 13

  // Nested container - test container nesting and field access
  FixedNestedContainer nested = 9; // Test: nested container navigation (8+32=40 bytes), offset: 45

  // Vector type - test fixed-size array of basic elements
  repeated uint64 vector_field = 10 [ (ethereum.eth.ext.ssz_size) = "24" ]; // Test: Vector[24] of uint64 (24*8=192 bytes), offset: 85

  // Additional bytes field - test field ordering and offset calculation
  bytes trailing_field = 11 [ (ethereum.eth.ext.ssz_size) = "56" ]; // Test: trailing field after vector, offset: 277
}
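As a sanity check on the offsets annotated above: each field starts where the previous one ends, so field_uint64 sits at 0+4=4, field_bool at 4+8=12, field_bytes32 at 12+1=13, nested at 13+32=45, vector_field at 45+40=85, trailing_field at 85+192=277, and the total is 277+56=333 bytes — the same numbers the deleted tests assert.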
@@ -1,238 +0,0 @@
// Code generated by fastssz. DO NOT EDIT.
package ssz_query

import (
	ssz "github.com/prysmaticlabs/fastssz"
)

// MarshalSSZ ssz marshals the FixedNestedContainer object
func (f *FixedNestedContainer) MarshalSSZ() ([]byte, error) {
	return ssz.MarshalSSZ(f)
}

// MarshalSSZTo ssz marshals the FixedNestedContainer object to a target array
func (f *FixedNestedContainer) MarshalSSZTo(buf []byte) (dst []byte, err error) {
	dst = buf

	// Field (0) 'Value1'
	dst = ssz.MarshalUint64(dst, f.Value1)

	// Field (1) 'Value2'
	if size := len(f.Value2); size != 32 {
		err = ssz.ErrBytesLengthFn("--.Value2", size, 32)
		return
	}
	dst = append(dst, f.Value2...)

	return
}

// UnmarshalSSZ ssz unmarshals the FixedNestedContainer object
func (f *FixedNestedContainer) UnmarshalSSZ(buf []byte) error {
	var err error
	size := uint64(len(buf))
	if size != 40 {
		return ssz.ErrSize
	}

	// Field (0) 'Value1'
	f.Value1 = ssz.UnmarshallUint64(buf[0:8])

	// Field (1) 'Value2'
	if cap(f.Value2) == 0 {
		f.Value2 = make([]byte, 0, len(buf[8:40]))
	}
	f.Value2 = append(f.Value2, buf[8:40]...)

	return err
}

// SizeSSZ returns the ssz encoded size in bytes for the FixedNestedContainer object
func (f *FixedNestedContainer) SizeSSZ() (size int) {
	size = 40
	return
}

// HashTreeRoot ssz hashes the FixedNestedContainer object
func (f *FixedNestedContainer) HashTreeRoot() ([32]byte, error) {
	return ssz.HashWithDefaultHasher(f)
}

// HashTreeRootWith ssz hashes the FixedNestedContainer object with a hasher
func (f *FixedNestedContainer) HashTreeRootWith(hh *ssz.Hasher) (err error) {
	indx := hh.Index()

	// Field (0) 'Value1'
	hh.PutUint64(f.Value1)

	// Field (1) 'Value2'
	if size := len(f.Value2); size != 32 {
		err = ssz.ErrBytesLengthFn("--.Value2", size, 32)
		return
	}
	hh.PutBytes(f.Value2)

	hh.Merkleize(indx)
	return
}

// MarshalSSZ ssz marshals the FixedTestContainer object
func (f *FixedTestContainer) MarshalSSZ() ([]byte, error) {
	return ssz.MarshalSSZ(f)
}

// MarshalSSZTo ssz marshals the FixedTestContainer object to a target array
func (f *FixedTestContainer) MarshalSSZTo(buf []byte) (dst []byte, err error) {
	dst = buf

	// Field (0) 'FieldUint32'
	dst = ssz.MarshalUint32(dst, f.FieldUint32)

	// Field (1) 'FieldUint64'
	dst = ssz.MarshalUint64(dst, f.FieldUint64)

	// Field (2) 'FieldBool'
	dst = ssz.MarshalBool(dst, f.FieldBool)

	// Field (3) 'FieldBytes32'
	if size := len(f.FieldBytes32); size != 32 {
		err = ssz.ErrBytesLengthFn("--.FieldBytes32", size, 32)
		return
	}
	dst = append(dst, f.FieldBytes32...)

	// Field (4) 'Nested'
	if f.Nested == nil {
		f.Nested = new(FixedNestedContainer)
	}
	if dst, err = f.Nested.MarshalSSZTo(dst); err != nil {
		return
	}

	// Field (5) 'VectorField'
	if size := len(f.VectorField); size != 24 {
		err = ssz.ErrVectorLengthFn("--.VectorField", size, 24)
		return
	}
	for ii := 0; ii < 24; ii++ {
		dst = ssz.MarshalUint64(dst, f.VectorField[ii])
	}

	// Field (6) 'TrailingField'
	if size := len(f.TrailingField); size != 56 {
		err = ssz.ErrBytesLengthFn("--.TrailingField", size, 56)
		return
	}
	dst = append(dst, f.TrailingField...)

	return
}

// UnmarshalSSZ ssz unmarshals the FixedTestContainer object
func (f *FixedTestContainer) UnmarshalSSZ(buf []byte) error {
	var err error
	size := uint64(len(buf))
	if size != 333 {
		return ssz.ErrSize
	}

	// Field (0) 'FieldUint32'
	f.FieldUint32 = ssz.UnmarshallUint32(buf[0:4])

	// Field (1) 'FieldUint64'
	f.FieldUint64 = ssz.UnmarshallUint64(buf[4:12])

	// Field (2) 'FieldBool'
	f.FieldBool, err = ssz.DecodeBool(buf[12:13])
	if err != nil {
		return err
	}

	// Field (3) 'FieldBytes32'
	if cap(f.FieldBytes32) == 0 {
		f.FieldBytes32 = make([]byte, 0, len(buf[13:45]))
	}
	f.FieldBytes32 = append(f.FieldBytes32, buf[13:45]...)

	// Field (4) 'Nested'
	if f.Nested == nil {
		f.Nested = new(FixedNestedContainer)
	}
	if err = f.Nested.UnmarshalSSZ(buf[45:85]); err != nil {
		return err
	}

	// Field (5) 'VectorField'
	f.VectorField = ssz.ExtendUint64(f.VectorField, 24)
	for ii := 0; ii < 24; ii++ {
		f.VectorField[ii] = ssz.UnmarshallUint64(buf[85:277][ii*8 : (ii+1)*8])
	}

	// Field (6) 'TrailingField'
	if cap(f.TrailingField) == 0 {
		f.TrailingField = make([]byte, 0, len(buf[277:333]))
	}
	f.TrailingField = append(f.TrailingField, buf[277:333]...)

	return err
}

// SizeSSZ returns the ssz encoded size in bytes for the FixedTestContainer object
func (f *FixedTestContainer) SizeSSZ() (size int) {
	size = 333
	return
}

// HashTreeRoot ssz hashes the FixedTestContainer object
func (f *FixedTestContainer) HashTreeRoot() ([32]byte, error) {
	return ssz.HashWithDefaultHasher(f)
}

// HashTreeRootWith ssz hashes the FixedTestContainer object with a hasher
func (f *FixedTestContainer) HashTreeRootWith(hh *ssz.Hasher) (err error) {
	indx := hh.Index()

	// Field (0) 'FieldUint32'
	hh.PutUint32(f.FieldUint32)

	// Field (1) 'FieldUint64'
	hh.PutUint64(f.FieldUint64)

	// Field (2) 'FieldBool'
	hh.PutBool(f.FieldBool)

	// Field (3) 'FieldBytes32'
	if size := len(f.FieldBytes32); size != 32 {
		err = ssz.ErrBytesLengthFn("--.FieldBytes32", size, 32)
		return
	}
	hh.PutBytes(f.FieldBytes32)

	// Field (4) 'Nested'
	if err = f.Nested.HashTreeRootWith(hh); err != nil {
		return
	}

	// Field (5) 'VectorField'
	{
		if size := len(f.VectorField); size != 24 {
			err = ssz.ErrVectorLengthFn("--.VectorField", size, 24)
			return
		}
		subIndx := hh.Index()
		for _, i := range f.VectorField {
			hh.AppendUint64(i)
		}
		hh.Merkleize(subIndx)
	}

	// Field (6) 'TrailingField'
	if size := len(f.TrailingField); size != 56 {
		err = ssz.ErrBytesLengthFn("--.TrailingField", size, 56)
		return
	}
	hh.PutBytes(f.TrailingField)

	hh.Merkleize(indx)
	return
}
@@ -151,14 +151,7 @@ func (s *PremineGenesisConfig) empty() (state.BeaconState, error) {
			return nil, err
		}
	case version.Deneb:
		e, err = state_native.InitializeFromProtoDeneb(&ethpb.BeaconStateDeneb{
			BlockRoots:       bRoots,
			StateRoots:       sRoots,
			RandaoMixes:      mixes,
			Balances:         []uint64{},
			InactivityScores: []uint64{},
			Validators:       []*ethpb.Validator{},
		})
		e, err = state_native.InitializeFromProtoDeneb(&ethpb.BeaconStateDeneb{})
		if err != nil {
			return nil, err
		}

@@ -1,4 +1,4 @@
version: v1.6.0-alpha.5
version: v1.6.0-alpha.4
style: full

specrefs:
@@ -17,11 +17,6 @@ exceptions:
    - CELLS_PER_EXT_BLOB#fulu
    - UPDATE_TIMEOUT#altair

    # Not implemented: gloas (future fork)
    - KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH_GLOAS#gloas
    - MAX_PAYLOAD_ATTESTATIONS#gloas
    - PTC_SIZE#gloas

  constants:
    # Constants in the KZG library
    - BLS_MODULUS#deneb
@@ -39,7 +34,6 @@ exceptions:
    - RANDOM_CHALLENGE_KZG_CELL_BATCH_DOMAIN#fulu

    # Not implemented
    - BASIS_POINTS#phase0
    - ENDIANNESS#phase0
    - MAX_CONCURRENT_REQUESTS#phase0
    - PARTICIPATION_FLAG_WEIGHTS#altair
@@ -48,57 +42,11 @@ exceptions:
    - UINT64_MAX#phase0
    - UINT64_MAX_SQRT#phase0

    # Not implemented: gloas (future fork)
    - BUILDER_PAYMENT_THRESHOLD_DENOMINATOR#gloas
    - BUILDER_PAYMENT_THRESHOLD_NUMERATOR#gloas
    - BUILDER_PENDING_WITHDRAWALS_LIMIT#gloas
    - BUILDER_WITHDRAWAL_PREFIX#gloas
    - DOMAIN_BEACON_BUILDER#gloas
    - DOMAIN_PTC_ATTESTER#gloas
    - PAYLOAD_STATUS_EMPTY#gloas
    - PAYLOAD_STATUS_FULL#gloas
    - PAYLOAD_STATUS_PENDING#gloas

  configs:
    # Not implemented (placeholders)
    - AGGREGRATE_DUE_BPS#phase0
    - ATTESTATION_DUE_BPS#phase0
    - CONTRIBUTION_DUE_BPS#altair
    - PROPOSER_REORG_CUTOFF_BPS#phase0
    - SLOT_DURATION_MS#phase0
    - SYNC_MESSAGE_DUE_BPS#altair

    # Not implemented: gloas (future fork)
    - AGGREGRATE_DUE_BPS_GLOAS#gloas
    - ATTESTATION_DUE_BPS_GLOAS#gloas
    - CONTRIBUTION_DUE_BPS_GLOAS#gloas
    - GLOAS_FORK_EPOCH#gloas
    - GLOAS_FORK_VERSION#gloas
    - MAX_REQUEST_PAYLOADS#gloas
    - PAYLOAD_ATTESTATION_DUE_BPS#gloas
    - SYNC_MESSAGE_DUE_BPS_GLOAS#gloas

  ssz_objects:
    # Not implemented
    - Eth1Block#phase0
    - MatrixEntry#fulu

    # Not implemented: gloas (future fork)
    - BeaconBlockBody#gloas
    - BeaconState#gloas
    - BuilderPendingPayment#gloas
    - BuilderPendingWithdrawal#gloas
    - DataColumnSidecar#gloas
    - ExecutionPayloadEnvelope#gloas
    - ExecutionPayloadHeader#gloas
    - ForkChoiceNode#gloas
    - IndexedPayloadAttestation#gloas
    - PayloadAttestation#gloas
    - PayloadAttestationData#gloas
    - PayloadAttestationMessage#gloas
    - SignedExecutionPayloadEnvelope#gloas
    - SignedExecutionPayloadHeader#gloas

  dataclasses:
    # Not implemented
    - BlobParameters#fulu
@@ -107,10 +55,6 @@ exceptions:
    - OptimisticStore#bellatrix
    - Store#phase0

    # Not implemented: gloas (future fork)
    - LatestMessage#gloas
    - Store#gloas

  functions:
    # Functions implemented by KZG library for EIP-4844
    - bit_reversal_permutation#deneb
@@ -186,7 +130,6 @@ exceptions:
    - get_matching_target_attestations#phase0
    - get_proposer_head#phase0
    - get_proposer_score#phase0
    - get_slot_component_duration_ms#phase0
    - get_slot_signature#phase0
    - get_unslashed_attesting_indices#phase0
    - get_voting_source#phase0
@@ -202,7 +145,6 @@ exceptions:
    - is_valid_merkle_branch#phase0
    - on_tick#phase0
    - on_tick_per_slot#phase0
    - seconds_to_milliseconds#phase0
    - store_target_checkpoint_state#phase0
    - update_latest_messages#phase0
    - validate_on_attestation#phase0
@@ -285,64 +227,3 @@ exceptions:
    - get_data_column_sidecars_from_column_sidecar#fulu
    - get_extended_sample_count#fulu
    - recover_matrix#fulu

    # Not implemented: gloas (future fork)
    - compute_balance_weighted_acceptance#gloas
    - compute_balance_weighted_selection#gloas
    - compute_fork_version#gloas
    - compute_proposer_indices#gloas
    - get_ancestor#gloas
    - get_attestation_participation_flag_indices#gloas
    - get_builder_payment_quorum_threshold#gloas
    - get_checkpoint_block#gloas
    - get_data_column_sidecars#gloas
    - get_data_column_sidecars_from_block#gloas
    - get_execution_payload_envelope_signature#gloas
    - get_execution_payload_header_signature#gloas
    - get_expected_withdrawals#gloas
    - get_forkchoice_store#gloas
    - get_head#gloas
    - get_indexed_payload_attestation#gloas
    - get_next_sync_committee_indices#gloas
    - get_node_children#gloas
    - get_parent_payload_status#gloas
    - get_payload_attestation_message_signature#gloas
    - get_payload_status_tiebreaker#gloas
    - get_ptc#gloas
    - get_ptc_assignment#gloas
    - get_weight#gloas
    - has_builder_withdrawal_credential#gloas
    - has_compounding_withdrawal_credential#gloas
    - is_attestation_same_slot#gloas
    - is_builder_payment_withdrawable#gloas
    - is_builder_withdrawal_credential#gloas
    - is_merge_transition_complete#gloas
    - is_parent_block_full#gloas
    - is_parent_node_full#gloas
    - is_payload_timely#gloas
    - is_supporting_vote#gloas
    - is_valid_indexed_payload_attestation#gloas
    - notify_ptc_messages#gloas
    - on_block#gloas
    - on_execution_payload#gloas
    - on_payload_attestation_message#gloas
    - prepare_execution_payload#gloas
    - process_attestation#gloas
    - process_block#gloas
    - process_builder_pending_payments#gloas
    - process_epoch#gloas
    - process_execution_payload#gloas
    - process_execution_payload_header#gloas
    - process_operations#gloas
    - process_payload_attestation#gloas
    - process_slot#gloas
    - process_withdrawals#gloas
    - remove_flag#gloas
    - should_extend_payload#gloas
    - update_latest_messages#gloas
    - upgrade_to_gloas#gloas
    - validate_merge_block#gloas
    - validate_on_attestation#gloas
    - verify_data_column_sidecar_inclusion_proof#gloas
    - verify_execution_payload_envelope_signature#gloas
    - verify_execution_payload_header_signature#gloas

@@ -18,13 +18,6 @@
    ALTAIR_FORK_VERSION: Version = '0x01000000'
    </spec>

- name: AGGREGRATE_DUE_BPS
  sources: []
  spec: |
    <spec config_var="AGGREGRATE_DUE_BPS" fork="phase0" hash="74073466">
    AGGREGRATE_DUE_BPS: uint64 = 6667
    </spec>

- name: ATTESTATION_PROPAGATION_SLOT_RANGE
  sources:
    - file: config/params/config.go
@@ -65,13 +58,6 @@
    ATTESTATION_SUBNET_PREFIX_BITS: int = 6
    </spec>

- name: ATTESTATION_DUE_BPS
  sources: []
  spec: |
    <spec config_var="ATTESTATION_DUE_BPS" fork="phase0" hash="929dd1c9">
    ATTESTATION_DUE_BPS: uint64 = 3333
    </spec>

- name: BALANCE_PER_ADDITIONAL_CUSTODY_GROUP
  sources:
    - file: config/params/config.go
@@ -543,13 +529,6 @@
    PROPOSER_SCORE_BOOST: uint64 = 40
    </spec>

- name: PROPOSER_REORG_CUTOFF_BPS
  sources: []
  spec: |
    <spec config_var="PROPOSER_REORG_CUTOFF_BPS" fork="phase0" hash="a487cc43">
    PROPOSER_REORG_CUTOFF_BPS: uint64 = 1667
    </spec>

- name: REORG_HEAD_WEIGHT_THRESHOLD
  sources:
    - file: config/params/config.go
@@ -610,13 +589,6 @@
    SECONDS_PER_SLOT: uint64 = 12
    </spec>

- name: SLOT_DURATION_MS
  sources: []
  spec: |
    <spec config_var="SLOT_DURATION_MS" fork="phase0" hash="b6d4ba6d">
    SLOT_DURATION_MS: uint64 = 12000
    </spec>

- name: SHARD_COMMITTEE_PERIOD
  sources:
    - file: config/params/config.go
@@ -676,18 +648,3 @@
    <spec config_var="VALIDATOR_CUSTODY_REQUIREMENT" fork="fulu" hash="4dfc4457">
    VALIDATOR_CUSTODY_REQUIREMENT = 8
    </spec>

- name: CONTRIBUTION_DUE_BPS
  sources: []
  spec: |
    <spec config_var="CONTRIBUTION_DUE_BPS" fork="altair" hash="a3808203">
    CONTRIBUTION_DUE_BPS: uint64 = 6667
    </spec>

- name: SYNC_MESSAGE_DUE_BPS
  sources: []
  spec: |
    <spec config_var="SYNC_MESSAGE_DUE_BPS" fork="altair" hash="791b29d8">
    SYNC_MESSAGE_DUE_BPS: uint64 = 3333
    </spec>


@@ -9,13 +9,6 @@
    BASE_REWARDS_PER_EPOCH: uint64 = 4
    </spec>

- name: BASIS_POINTS
  sources: []
  spec: |
    <spec constant_var="BASIS_POINTS" fork="phase0" hash="cb0c8561">
    BASIS_POINTS: uint64 = 10000
    </spec>

- name: BLS_MODULUS
  sources: []
  spec: |
@@ -3209,17 +3209,6 @@
    return (store.time - store.genesis_time) // SECONDS_PER_SLOT
    </spec>

- name: get_slot_component_duration_ms
  sources: []
  spec: |
    <spec fn="get_slot_component_duration_ms" fork="phase0" hash="b81504df">
    def get_slot_component_duration_ms(basis_points: uint64) -> uint64:
        """
        Calculate the duration of a slot component in milliseconds.
        """
        return basis_points * SLOT_DURATION_MS // BASIS_POINTS
    </spec>

- name: get_source_deltas
  sources:
    - file: beacon-chain/core/epoch/precompute/reward_penalty.go
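To make the basis-point arithmetic above concrete, using SLOT_DURATION_MS = 12000 and BASIS_POINTS = 10000 from the config entries earlier in this diff: ATTESTATION_DUE_BPS = 3333 gives 3333 * 12000 // 10000 = 3999 ms, and PROPOSER_REORG_CUTOFF_BPS = 1667 gives 2000 ms — effectively the SECONDS_PER_SLOT-based intervals (4 s and 2 s) that the is_proposing_on_time and on_block hunks below revert to.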
@@ -4251,12 +4240,12 @@
- name: is_proposing_on_time
  sources: []
  spec: |
    <spec fn="is_proposing_on_time" fork="phase0" hash="cadfde05">
    <spec fn="is_proposing_on_time" fork="phase0" hash="81d1985f">
    def is_proposing_on_time(store: Store) -> bool:
        seconds_since_genesis = store.time - store.genesis_time
        time_into_slot_ms = seconds_to_milliseconds(seconds_since_genesis) % SLOT_DURATION_MS
        proposer_reorg_cutoff_ms = get_slot_component_duration_ms(PROPOSER_REORG_CUTOFF_BPS)
        return time_into_slot_ms <= proposer_reorg_cutoff_ms
        # Use half `SECONDS_PER_SLOT // INTERVALS_PER_SLOT` as the proposer reorg deadline
        time_into_slot = (store.time - store.genesis_time) % SECONDS_PER_SLOT
        proposer_reorg_cutoff = SECONDS_PER_SLOT // INTERVALS_PER_SLOT // 2
        return time_into_slot <= proposer_reorg_cutoff
    </spec>

- name: is_shuffling_stable
@@ -4706,7 +4695,7 @@
    - file: beacon-chain/blockchain/receive_block.go
      search: func (s *Service) ReceiveBlock(
  spec: |
    <spec fn="on_block" fork="phase0" hash="0bd081f0">
    <spec fn="on_block" fork="phase0" hash="f44d049a">
    def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
        block = signed_block.message
        # Parent block must be known
@@ -4737,10 +4726,8 @@
        store.block_states[block_root] = state

        # Add block timeliness to the store
        seconds_since_genesis = store.time - store.genesis_time
        time_into_slot_ms = seconds_to_milliseconds(seconds_since_genesis) % SLOT_DURATION_MS
        attestation_threshold_ms = get_slot_component_duration_ms(ATTESTATION_DUE_BPS)
        is_before_attesting_interval = time_into_slot_ms < attestation_threshold_ms
        time_into_slot = (store.time - store.genesis_time) % SECONDS_PER_SLOT
        is_before_attesting_interval = time_into_slot < SECONDS_PER_SLOT // INTERVALS_PER_SLOT
        is_timely = get_current_slot(store) == block.slot and is_before_attesting_interval
        store.block_timeliness[hash_tree_root(block)] = is_timely

@@ -4761,7 +4748,7 @@
    - file: beacon-chain/blockchain/receive_block.go
      search: func (s *Service) ReceiveBlock(
  spec: |
    <spec fn="on_block" fork="bellatrix" hash="cceac63a">
    <spec fn="on_block" fork="bellatrix" hash="1b2d9640">
    def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
        """
        Run ``on_block`` upon receiving a new block.
@@ -4803,10 +4790,8 @@
        store.block_states[block_root] = state

        # Add block timeliness to the store
        seconds_since_genesis = store.time - store.genesis_time
        time_into_slot_ms = seconds_to_milliseconds(seconds_since_genesis) % SLOT_DURATION_MS
        attestation_threshold_ms = get_slot_component_duration_ms(ATTESTATION_DUE_BPS)
        is_before_attesting_interval = time_into_slot_ms < attestation_threshold_ms
        time_into_slot = (store.time - store.genesis_time) % SECONDS_PER_SLOT
        is_before_attesting_interval = time_into_slot < SECONDS_PER_SLOT // INTERVALS_PER_SLOT
        is_timely = get_current_slot(store) == block.slot and is_before_attesting_interval
        store.block_timeliness[hash_tree_root(block)] = is_timely

@@ -4827,7 +4812,7 @@
    - file: beacon-chain/blockchain/receive_block.go
      search: func (s *Service) ReceiveBlock(
  spec: |
    <spec fn="on_block" fork="capella" hash="b051b7c6">
    <spec fn="on_block" fork="capella" hash="14995ab0">
    def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
        """
        Run ``on_block`` upon receiving a new block.
@@ -4861,10 +4846,8 @@
        store.block_states[block_root] = state

        # Add block timeliness to the store
        seconds_since_genesis = store.time - store.genesis_time
        time_into_slot_ms = seconds_to_milliseconds(seconds_since_genesis) % SLOT_DURATION_MS
        attestation_threshold_ms = get_slot_component_duration_ms(ATTESTATION_DUE_BPS)
        is_before_attesting_interval = time_into_slot_ms < attestation_threshold_ms
        time_into_slot = (store.time - store.genesis_time) % SECONDS_PER_SLOT
        is_before_attesting_interval = time_into_slot < SECONDS_PER_SLOT // INTERVALS_PER_SLOT
        is_timely = get_current_slot(store) == block.slot and is_before_attesting_interval
        store.block_timeliness[hash_tree_root(block)] = is_timely

@@ -4885,7 +4868,7 @@
    - file: beacon-chain/blockchain/receive_block.go
      search: func (s *Service) ReceiveBlock(
  spec: |
    <spec fn="on_block" fork="deneb" hash="1f48df4d">
    <spec fn="on_block" fork="deneb" hash="34d79a7b">
    def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
        """
        Run ``on_block`` upon receiving a new block.
@@ -4909,7 +4892,9 @@

        # [New in Deneb:EIP4844]
        # Check if blob data is available
        # If not, this payload MAY be queued and subsequently considered when blob data becomes available
        # If not, this block MAY be queued and subsequently considered when blob data becomes available
        # *Note*: Extraneous or invalid Blobs (in addition to the expected/referenced valid blobs)
        # received on the p2p network MUST NOT invalidate a block that is otherwise valid and available
        assert is_data_available(hash_tree_root(block), block.body.blob_kzg_commitments)

        # Check the block is valid and compute the post-state
@@ -4924,10 +4909,8 @@
        store.block_states[block_root] = state

        # Add block timeliness to the store
        seconds_since_genesis = store.time - store.genesis_time
        time_into_slot_ms = seconds_to_milliseconds(seconds_since_genesis) % SLOT_DURATION_MS
        attestation_threshold_ms = get_slot_component_duration_ms(ATTESTATION_DUE_BPS)
        is_before_attesting_interval = time_into_slot_ms < attestation_threshold_ms
        time_into_slot = (store.time - store.genesis_time) % SECONDS_PER_SLOT
        is_before_attesting_interval = time_into_slot < SECONDS_PER_SLOT // INTERVALS_PER_SLOT
        is_timely = get_current_slot(store) == block.slot and is_before_attesting_interval
        store.block_timeliness[hash_tree_root(block)] = is_timely

@@ -4948,7 +4931,7 @@
    - file: beacon-chain/blockchain/receive_block.go
      search: func (s *Service) ReceiveBlock(
  spec: |
    <spec fn="on_block" fork="fulu" hash="a27a9edb">
    <spec fn="on_block" fork="fulu" hash="b01ca61f">
    def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
        """
        Run ``on_block`` upon receiving a new block.
@@ -4973,8 +4956,6 @@
        assert store.finalized_checkpoint.root == finalized_checkpoint_block

        # [Modified in Fulu:EIP7594]
        # Check if blob data is available
        # If not, this payload MAY be queued and subsequently considered when blob data becomes available
        assert is_data_available(hash_tree_root(block))

        # Check the block is valid and compute the post-state
@@ -4987,10 +4968,8 @@
        store.block_states[block_root] = state

        # Add block timeliness to the store
        seconds_since_genesis = store.time - store.genesis_time
        time_into_slot_ms = seconds_to_milliseconds(seconds_since_genesis) % SLOT_DURATION_MS
        attestation_threshold_ms = get_slot_component_duration_ms(ATTESTATION_DUE_BPS)
        is_before_attesting_interval = time_into_slot_ms < attestation_threshold_ms
        time_into_slot = (store.time - store.genesis_time) % SECONDS_PER_SLOT
        is_before_attesting_interval = time_into_slot < SECONDS_PER_SLOT // INTERVALS_PER_SLOT
        is_timely = get_current_slot(store) == block.slot and is_before_attesting_interval
        store.block_timeliness[hash_tree_root(block)] = is_timely

@@ -7561,20 +7540,6 @@
|
||||
assert block.state_root == hash_tree_root(state)
|
||||
</spec>
|
||||
|
||||
- name: seconds_to_milliseconds
|
||||
sources: []
|
||||
spec: |
|
||||
<spec fn="seconds_to_milliseconds" fork="phase0" hash="b2cc9743">
|
||||
def seconds_to_milliseconds(seconds: uint64) -> uint64:
|
||||
"""
|
||||
Convert seconds to milliseconds with overflow protection.
|
||||
Returns ``UINT64_MAX`` if the result would overflow.
|
||||
"""
|
||||
if seconds > UINT64_MAX // 1000:
|
||||
return UINT64_MAX
|
||||
return seconds * 1000
|
||||
</spec>
|
||||
|
||||
- name: store_target_checkpoint_state
|
||||
sources: []
|
||||
spec: |
|
||||
@@ -8607,16 +8572,17 @@
- file: beacon-chain/core/peerdas/p2p_interface.go
search: func VerifyDataColumnSidecarInclusionProof(
spec: |
<spec fn="verify_data_column_sidecar_inclusion_proof" fork="fulu" hash="aaa9b8d8">
<spec fn="verify_data_column_sidecar_inclusion_proof" fork="fulu" hash="a6757e5e">
def verify_data_column_sidecar_inclusion_proof(sidecar: DataColumnSidecar) -> bool:
"""
Verify if the given KZG commitments included in the given beacon block.
"""
gindex = get_subtree_index(get_generalized_index(BeaconBlockBody, "blob_kzg_commitments"))
return is_valid_merkle_branch(
leaf=hash_tree_root(sidecar.kzg_commitments),
branch=sidecar.kzg_commitments_inclusion_proof,
depth=KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH,
index=get_subtree_index(get_generalized_index(BeaconBlockBody, "blob_kzg_commitments")),
index=gindex,
root=sidecar.signed_block_header.message.body_root,
)
</spec>

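The only behavioral content in that hunk is hoisting the generalized-index lookup into a local gindex; the proof walk itself is unchanged. For orientation, a self-contained Go sketch of the is_valid_merkle_branch recomputation it relies on (SHA-256, sibling side chosen by the index bits; illustrative, not Prysm's implementation):

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
)

// isValidMerkleBranch recomputes the root from a leaf and its Merkle branch,
// hashing left or right at each level according to the bits of the index.
func isValidMerkleBranch(leaf [32]byte, branch [][32]byte, depth int, index uint64, root [32]byte) bool {
	value := leaf
	for i := 0; i < depth; i++ {
		if (index>>uint(i))&1 == 1 {
			value = sha256.Sum256(append(branch[i][:], value[:]...))
		} else {
			value = sha256.Sum256(append(value[:], branch[i][:]...))
		}
	}
	return bytes.Equal(value[:], root[:])
}

func main() {
	// Tiny depth-1 tree: root = H(leaf || sibling), leaf at index 0.
	leaf := [32]byte{1}
	sibling := [32]byte{2}
	root := sha256.Sum256(append(leaf[:], sibling[:]...))
	fmt.Println(isValidMerkleBranch(leaf, [][32]byte{sibling}, 1, 0, root)) // true
}
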
@@ -270,15 +270,11 @@ func (node *BeaconNode) Start(ctx context.Context) error {
fmt.Sprintf("--%s=%d", flags.BlockBatchLimitBurstFactor.Name, 8),
fmt.Sprintf("--%s=%d", flags.BlobBatchLimitBurstFactor.Name, 16),
fmt.Sprintf("--%s=%d", flags.BlobBatchLimit.Name, 256),
fmt.Sprintf("--%s=%d", flags.DataColumnBatchLimit.Name, 8192),
fmt.Sprintf("--%s=%d", flags.DataColumnBatchLimitBurstFactor.Name, 2),
fmt.Sprintf("--%s=%s", cmdshared.ChainConfigFileFlag.Name, cfgPath),
"--" + cmdshared.ValidatorMonitorIndicesFlag.Name + "=1",
"--" + cmdshared.ValidatorMonitorIndicesFlag.Name + "=2",
"--" + cmdshared.ForceClearDB.Name,
"--" + cmdshared.AcceptTosFlag.Name,
"--" + flags.SubscribeToAllSubnets.Name,
fmt.Sprintf("--%s=%d", features.DataColumnsWithholdCount.Name, 3),
}
if config.UsePprof {
args = append(args, "--pprof", fmt.Sprintf("--pprofport=%d", e2e.TestParams.Ports.PrysmBeaconNodePprofPort+index))

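The argument list above mixes two idioms: value flags built with fmt.Sprintf("--%s=%d", ...) and boolean flags built by prefixing "--". A compact sketch (flag names here are illustrative):

package main

import "fmt"

func main() {
	const (
		blobBatchLimit = "blob-batch-limit" // a value flag: --name=value
		forceClearDB   = "force-clear-db"   // a boolean flag: bare --name
	)
	args := []string{
		fmt.Sprintf("--%s=%d", blobBatchLimit, 256),
		"--" + forceClearDB,
	}
	fmt.Println(args) // [--blob-batch-limit=256 --force-clear-db]
}
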
@@ -22,7 +22,7 @@ func e2eMinimal(t *testing.T, cfg *params.BeaconChainConfig, cfgo ...types.E2ECo

// Run for 12 epochs if not in long-running to confirm long-running has no issues.
var err error
epochsToRun := 6
epochsToRun := 16
epochStr, longRunning := os.LookupEnv("E2E_EPOCHS")
if longRunning {
epochsToRun, err = strconv.Atoi(epochStr)
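
The epochsToRun default only applies when E2E_EPOCHS is unset; the env var switches the suite into its long-running mode. The lookup-and-parse pattern in isolation (a sketch; the real test reports the parse error through its test harness):

package main

import (
	"fmt"
	"os"
	"strconv"
)

func main() {
	epochsToRun := 6 // default for the short run
	if epochStr, longRunning := os.LookupEnv("E2E_EPOCHS"); longRunning {
		n, err := strconv.Atoi(epochStr)
		if err != nil {
			panic(err) // stand-in for the suite's require.NoError
		}
		epochsToRun = n
	}
	fmt.Println("running for", epochsToRun, "epochs")
}
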
@@ -37,6 +37,27 @@ func e2eMinimal(t *testing.T, cfg *params.BeaconChainConfig, cfgo ...types.E2ECo
tracingPort := e2eParams.TestParams.Ports.JaegerTracingPort
tracingEndpoint := fmt.Sprintf("127.0.0.1:%d", tracingPort)
evals := []types.Evaluator{
ev.PeersConnect,
ev.HealthzCheck,
ev.MetricsCheck,
ev.ValidatorsAreActive,
ev.ValidatorsParticipatingAtEpoch(2),
ev.FinalizationOccurs(3),
ev.VerifyBlockGraffiti,
ev.PeersCheck,
ev.ProposeVoluntaryExit,
ev.ValidatorsHaveExited,
ev.SubmitWithdrawal,
ev.ValidatorsHaveWithdrawn,
ev.ProcessesDepositsInBlocks,
ev.ActivatesDepositedValidators,
ev.DepositedValidatorsAreActive,
ev.ValidatorsVoteWithTheMajority,
ev.ColdStateCheckpoint,
ev.FinishedSyncing,
ev.AllNodesHaveSameHead,
ev.ValidatorSyncParticipation,
ev.FeeRecipientIsPresent,
//ev.TransactionsPresent, TODO: Re-enable Transaction evaluator once it tx pool issues are fixed.
}
evals = addIfForkSet(evals, cfg.AltairForkEpoch, ev.AltairForkTransition)
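
After the base list, evaluators are appended per fork via addIfForkSet. A hedged sketch of what such a helper can look like, assuming the usual convention that an unscheduled fork epoch is the far-future value (the helper body is a guess; only the call shape appears in the diff):

package main

import (
	"fmt"
	"math"
)

type Epoch uint64

type Evaluator struct{ Name string }

// addIfForkSet appends the evaluator only when the fork is actually scheduled;
// a far-future epoch conventionally means the fork is disabled.
func addIfForkSet(evals []Evaluator, forkEpoch Epoch, ev Evaluator) []Evaluator {
	if forkEpoch == Epoch(math.MaxUint64) {
		return evals
	}
	return append(evals, ev)
}

func main() {
	evals := []Evaluator{{Name: "peers_connect"}}
	evals = addIfForkSet(evals, 0, Evaluator{Name: "altair_fork_transition"})
	evals = addIfForkSet(evals, Epoch(math.MaxUint64), Evaluator{Name: "skipped"})
	fmt.Println(len(evals)) // 2
}
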
@@ -82,7 +103,7 @@ func e2eMainnet(t *testing.T, usePrysmSh, useMultiClient bool, cfg *params.Beaco
} else {
require.NoError(t, e2eParams.Init(t, e2eParams.StandardBeaconCount))
}
// Run for 14 epochs if not in long-running to confirm long-running has no issues.
// Run for 10 epochs if not in long-running to confirm long-running has no issues.
var err error
epochsToRun := 16
epochStr, longRunning := os.LookupEnv("E2E_EPOCHS")

@@ -12,7 +12,6 @@ import (

"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/genesis"
eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
e2e "github.com/OffchainLabs/prysm/v6/testing/endtoend/params"
@@ -29,14 +28,8 @@ const maxMemStatsBytes = 2000000000 // 2 GiB.
// MetricsCheck performs a check on metrics to make sure caches are functioning, and
// overall health is good. Not checking the first epoch so the sample size isn't too small.
var MetricsCheck = types.Evaluator{
Name: "metrics_check_epoch_%d",
Policy: func(currentEpoch primitives.Epoch) bool {
// Hack to allow slow block proposal times to pass E2E
if currentEpoch >= params.BeaconConfig().DenebForkEpoch {
return false
}
return policies.AfterNthEpoch(0)(currentEpoch)
},
Name: "metrics_check_epoch_%d",
Policy: policies.AfterNthEpoch(0),
Evaluation: metricsTest,
}

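The MetricsCheck rewrite drops the Deneb-era escape hatch and returns to a plain composable policy. The pattern, reduced to essentials (evaluator and afterNthEpoch are stand-ins for the e2e package's types, not its actual definitions):

package main

import "fmt"

type epoch = uint64

// evaluator pairs a scheduling policy with a named check.
type evaluator struct {
	name   string
	policy func(epoch) bool
}

// afterNthEpoch mirrors policies.AfterNthEpoch: run strictly after epoch n.
func afterNthEpoch(n epoch) func(epoch) bool {
	return func(e epoch) bool { return e > n }
}

func main() {
	metricsCheck := evaluator{name: "metrics_check", policy: afterNthEpoch(0)}
	for e := epoch(0); e < 3; e++ {
		fmt.Println(metricsCheck.name, e, metricsCheck.policy(e)) // false, true, true
	}
}
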
@@ -53,7 +53,8 @@ var ValidatorsParticipatingAtEpoch = func(epoch primitives.Epoch) types.Evaluato
var ValidatorSyncParticipation = types.Evaluator{
Name: "validator_sync_participation_%d",
Policy: func(e primitives.Epoch) bool {
return false
fEpoch := params.BeaconConfig().AltairForkEpoch
return policies.OnwardsNthEpoch(fEpoch)(e)
},
Evaluation: validatorsSyncParticipation,
}

@@ -9,6 +9,6 @@ import (
)

func TestEndToEnd_MinimalConfig(t *testing.T) {
r := e2eMinimal(t, types.InitForkCfg(version.Deneb, version.Deneb, params.E2ETestConfig()), types.WithCheckpointSync())
r := e2eMinimal(t, types.InitForkCfg(version.Bellatrix, version.Electra, params.E2ETestConfig()), types.WithCheckpointSync())
r.run()
}

@@ -94,9 +94,6 @@ type E2EConfig struct {

func GenesisFork() int {
cfg := params.BeaconConfig()
if cfg.DenebForkEpoch == 0 {
return version.Deneb
}
if cfg.CapellaForkEpoch == 0 {
return version.Capella
}

@@ -3,6 +3,7 @@ package client
import (
"fmt"
"strconv"
"sync/atomic"

"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
@@ -137,11 +138,9 @@ func (v *validator) LogSubmittedAtts(slot primitives.Slot) {

// LogSubmittedSyncCommitteeMessages logs info about submitted sync committee messages.
func (v *validator) LogSubmittedSyncCommitteeMessages() {
if count := v.syncCommitteeStats.totalMessagesSubmitted.Load(); count > 0 {
log.WithField("messages", count).
Debug("Submitted sync committee messages successfully to beacon node")

if v.syncCommitteeStats.totalMessagesSubmitted > 0 {
log.WithField("messages", v.syncCommitteeStats.totalMessagesSubmitted).Debug("Submitted sync committee messages successfully to beacon node")
// Reset the amount.
v.syncCommitteeStats.totalMessagesSubmitted.Store(0)
atomic.StoreUint64(&v.syncCommitteeStats.totalMessagesSubmitted, 0)
}
}

@@ -3,6 +3,7 @@ package client
import (
"context"
"fmt"
"sync/atomic"
"time"

"github.com/OffchainLabs/prysm/v6/beacon-chain/core/altair"
@@ -93,7 +94,7 @@ func (v *validator) SubmitSyncCommitteeMessage(ctx context.Context, slot primiti
"blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(msg.BlockRoot)),
"validatorIndex": msg.ValidatorIndex,
}).Info("Submitted new sync message")
v.syncCommitteeStats.totalMessagesSubmitted.Add(1)
atomic.AddUint64(&v.syncCommitteeStats.totalMessagesSubmitted, 1)
}

// SubmitSignedContributionAndProof submits the signed sync committee contribution and proof to the beacon chain.

@@ -13,7 +13,6 @@ import (
"strconv"
"strings"
"sync"
"sync/atomic"
"time"

"github.com/OffchainLabs/prysm/v6/api/client"
@@ -1572,5 +1571,5 @@ type voteStats struct {

// This tracks all validators' submissions for sync committees.
type syncCommitteeStats struct {
totalMessagesSubmitted atomic.Uint64
totalMessagesSubmitted uint64
}

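The last three hunks are a single refactor of syncCommitteeStats.totalMessagesSubmitted between a plain uint64 driven through the atomic free functions and the typed atomic.Uint64 (Go 1.19+) with Add/Load/Store methods. Both are race-safe; the typed form cannot be accessed non-atomically by accident. A side-by-side sketch:

package main

import (
	"fmt"
	"sync/atomic"
)

// statsLegacy uses a bare uint64; callers must remember to go through
// the atomic free functions on its address.
type statsLegacy struct{ total uint64 }

// statsTyped uses atomic.Uint64; non-atomic misuse won't compile,
// and 64-bit alignment is guaranteed by the type itself.
type statsTyped struct{ total atomic.Uint64 }

func main() {
	var a statsLegacy
	atomic.AddUint64(&a.total, 1)
	fmt.Println(atomic.LoadUint64(&a.total)) // 1
	atomic.StoreUint64(&a.total, 0)

	var b statsTyped
	b.total.Add(1)
	fmt.Println(b.total.Load()) // 1
	b.total.Store(0)
}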