Compare commits


49 Commits

Author SHA1 Message Date
Kasey Kirkham
2b8384ff0b adapt to all the divergent changes between peerDAS and peerdas-sync 2025-08-15 22:16:27 -05:00
Kasey
b0cc1ec0aa DataColumnSidecar backfill 2025-08-15 20:23:50 -05:00
Manu NALEPA
6e02170881 Add rate limiter check. 2025-08-15 10:28:55 +02:00
Manu NALEPA
bf6007c5f0 Fix Potuz's and Preston's comment. 2025-08-15 09:06:36 +02:00
Manu NALEPA
22a6820ede Add extra flags 2025-08-15 01:48:16 +02:00
Manu NALEPA
00ab5a1051 Add DataColumnStorage and SubscribeAllDataSubnets flag. 2025-08-15 00:25:34 +02:00
Manu NALEPA
41cf0ec59e Fix James' comment. 2025-08-14 17:04:03 +02:00
Manu NALEPA
39ca2eaae1 Merge branch 'develop' into peerdas-sync 2025-08-14 14:27:28 +02:00
Manu NALEPA
d5c56355f8 Fix Potuz's comment. 2025-08-14 14:03:08 +02:00
Manu NALEPA
5a3e450067 Fix Potuz's comment. 2025-08-14 14:01:20 +02:00
Manu NALEPA
e50472ab25 Fix typo. 2025-08-14 09:34:31 +02:00
Manu NALEPA
53d69d407b selectPeers: Avoid map with key but empty value. 2025-08-14 09:28:11 +02:00
Manu NALEPA
58687620f6 Fix James' comment. 2025-08-13 22:30:33 +02:00
Manu NALEPA
d36875332d Fix Preston's comment. 2025-08-13 16:00:13 +02:00
Manu NALEPA
0a9ff2dc8b Fix James' comment. 2025-08-13 15:04:51 +02:00
Manu NALEPA
cc8a5fc422 Revert "Fix James' comment."
This reverts commit a3f919205a.
2025-08-13 15:03:15 +02:00
Manu NALEPA
9dcb8be2df Fix Potuz's comment. 2025-08-13 15:01:31 +02:00
Manu NALEPA
51e465d690 Revert "Fix Potuz's comment."
This reverts commit c45230b455.
2025-08-13 14:15:27 +02:00
Manu NALEPA
ddbf9cb404 Implement TestFetchDataColumnSidecars. 2025-08-13 12:00:50 +02:00
Manu NALEPA
9ed1496c8f Fix James's comment. 2025-08-13 09:14:25 +02:00
Manu NALEPA
5b04cab118 Fix Preston's comment. 2025-08-13 01:11:22 +02:00
Manu NALEPA
0b681f6861 Update cmd/beacon-chain/flags/config.go
Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2025-08-13 01:00:19 +02:00
Manu NALEPA
88b363e3cf Merge branch 'develop' into peerdas-sync 2025-08-13 00:45:31 +02:00
Manu NALEPA
f521948b4d Fix flakiness in TestSelectPeers. 2025-08-12 23:14:56 +02:00
Manu NALEPA
a3f919205a Fix James' comment. 2025-08-12 22:44:26 +02:00
Manu NALEPA
474f458834 Fix James' comment. 2025-08-12 22:35:48 +02:00
Manu NALEPA
bd17dfefb9 Fix James' commit. 2025-08-12 22:17:23 +02:00
Manu NALEPA
3f361a79a0 Fix James's comment. 2025-08-12 22:10:33 +02:00
Manu NALEPA
09b7fa6bc9 Fix James' comment. 2025-08-12 22:08:33 +02:00
Manu NALEPA
76d974694c Implement TestCategorizeIndices. 2025-08-12 15:47:29 +02:00
Manu NALEPA
dca7f282e6 Implement TestSelectPeers. 2025-08-12 15:11:33 +02:00
Manu NALEPA
327309b5f7 Implement TestUpdateResults. 2025-08-12 13:45:45 +02:00
Manu NALEPA
8c44999d21 Implement TestFetchDataColumnSidecarsFromPeers. 2025-08-12 12:23:20 +02:00
Manu NALEPA
bb2fd617a5 Implement TestSendDataColumnSidecarsRequest. 2025-08-12 11:59:30 +02:00
Manu NALEPA
c31c1e2674 Fix Satyajit's comment. 2025-08-12 10:09:39 +02:00
Manu NALEPA
978ffa4780 Add tests for sendDataColumnSidecarsRequest. 2025-08-11 00:35:40 +02:00
Manu NALEPA
407bf6785f Fix Potuz's comment. 2025-08-10 23:07:56 +02:00
Manu NALEPA
c45230b455 Fix Potuz's comment. 2025-08-10 22:47:00 +02:00
Manu NALEPA
0af6591001 Fix Potuz's comment. 2025-08-10 22:41:00 +02:00
Manu NALEPA
1af249da31 Fix Potuz's comment. 2025-08-10 22:02:22 +02:00
Manu NALEPA
901f6b6e6c Fix Potuz's comment. 2025-08-10 21:59:17 +02:00
Manu NALEPA
a3cdda56d9 Fix Potuz's comment. 2025-08-10 21:53:54 +02:00
Manu NALEPA
cf3200fa06 Fix Potuz's comment. 2025-08-10 21:47:55 +02:00
Manu NALEPA
04cafa1959 Partially fix Potuz's comment. 2025-08-10 21:39:27 +02:00
Manu NALEPA
498b945a61 Fix Satyajit's comment. 2025-08-10 21:35:39 +02:00
Manu NALEPA
90317ba5b5 Fix Potuz's comment. 2025-08-09 01:24:00 +02:00
Manu NALEPA
b35358f440 Fix Potuz's comment. 2025-08-09 00:47:55 +02:00
Manu NALEPA
b926066495 Fix Potuz's comment. 2025-08-08 20:59:28 +02:00
Manu NALEPA
263ddf9a7b PeerDAS: Implement sync 2025-08-08 20:05:12 +02:00
279 changed files with 4563 additions and 22950 deletions

View File

@@ -1,6 +1,6 @@
name: 🐞 Bug report
description: Report a bug or problem with running Prysm
type: "Bug"
labels: ["Bug"]
body:
- type: markdown
attributes:

View File

@@ -1,43 +0,0 @@
name: Check Spec References
on: [push, pull_request]
jobs:
  check-specrefs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Check version consistency
        run: |
          WORKSPACE_VERSION=$(grep 'consensus_spec_version = ' WORKSPACE | sed 's/.*"\(.*\)"/\1/')
          ETHSPECIFY_VERSION=$(grep '^version:' specrefs/.ethspecify.yml | sed 's/version: //')
          if [ "$WORKSPACE_VERSION" != "$ETHSPECIFY_VERSION" ]; then
            echo "Version mismatch between WORKSPACE and ethspecify"
            echo " WORKSPACE: $WORKSPACE_VERSION"
            echo " specrefs/.ethspecify.yml: $ETHSPECIFY_VERSION"
            exit 1
          else
            echo "Versions match: $WORKSPACE_VERSION"
          fi
      - name: Install ethspecify
        run: python3 -mpip install ethspecify
      - name: Update spec references
        run: ethspecify process --path=specrefs
      - name: Check for differences
        run: |
          if ! git diff --exit-code specrefs >/dev/null; then
            echo "Spec references are out-of-date!"
            echo ""
            git --no-pager diff specrefs
            exit 1
          else
            echo "Spec references are up-to-date!"
          fi
      - name: Check spec references
        run: ethspecify check --path=specrefs

View File

@@ -2993,7 +2993,7 @@ There are two known issues with this release:
### Added
- Web3Signer support. See the [documentation](https://prysm.offchainlabs.com/docs/manage-wallet/web3signer/) for more
- Web3Signer support. See the [documentation](https://docs.prylabs.network/docs/next/wallet/web3signer) for more
details.
- Bellatrix support. See [kiln testnet instructions](https://hackmd.io/OqIoTiQvS9KOIataIFksBQ?view)
- Weak subjectivity sync / checkpoint sync. This is an experimental feature and may have unintended side effects for

View File

@@ -2,7 +2,7 @@
Prysm is a Go project with many complicated dependencies, including some C++ based libraries. There
are two parts to Prysm's dependency management: Go modules and Bazel-managed dependencies. Be sure
to read [Why Bazel?](https://prysm.offchainlabs.com/docs/install-prysm/install-with-bazel/#why-bazel) to fully
to read [Why Bazel?](https://github.com/OffchainLabs/documentation/issues/138) to fully
understand the reasoning behind an additional layer of build tooling via Bazel rather than a pure
"go build" project.

View File

@@ -253,16 +253,16 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.6.0-alpha.6"
consensus_spec_version = "v1.6.0-alpha.1"
load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests")
consensus_spec_tests(
name = "consensus_spec_tests",
flavors = {
"general": "sha256-7wkWuahuCO37uVYnxq8Badvi+jY907pBj68ixL8XDOI=",
"minimal": "sha256-Qy/f27N0LffS/ej7VhIubwDejD6LMK0VdenKkqtZVt4=",
"mainnet": "sha256-3H7mu5yE+FGz2Wr/nc8Nd9aEu93YoEpsYtn0zBSoeDE=",
"general": "sha256-o4t9p3R+fQHF4KOykGmwlG3zDw5wUdVWprkzId8aIsk=",
"minimal": "sha256-sU7ToI8t3MR8x0vVjC8ERmAHZDWpEmnAC9FWIpHi5x4=",
"mainnet": "sha256-YKS4wngg0LgI9Upp4MYJ77aG+8+e/G4YeqEIlp06LZw=",
},
version = consensus_spec_version,
)
@@ -278,7 +278,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-uvz3XfMTGfy3/BtQQoEp5XQOgrWgcH/5Zo/gR0iiP+k=",
integrity = "sha256-Nv4TEuEJPQIM4E6T9J0FOITsmappmXZjGtlhe1HEXnU=",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)

View File

@@ -1,9 +1,10 @@
package httprest
import (
"net/http"
"time"
"net/http"
"github.com/OffchainLabs/prysm/v6/api/server/middleware"
)

View File

@@ -29,24 +29,3 @@ func RunEvery(ctx context.Context, period time.Duration, f func()) {
}
}()
}
// RunEveryDynamic runs the provided command periodically with a dynamic interval.
// The interval is determined by calling the intervalFunc before each execution.
// It runs in a goroutine, and can be cancelled by finishing the supplied context.
func RunEveryDynamic(ctx context.Context, intervalFunc func() time.Duration, f func()) {
	go func() {
		for {
			// Get the next interval duration
			interval := intervalFunc()
			timer := time.NewTimer(interval)
			select {
			case <-timer.C:
				f()
			case <-ctx.Done():
				timer.Stop()
				return
			}
		}
	}()
}
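For orientation, a minimal usage sketch of the RunEveryDynamic function removed above; the backoff schedule and the async import path are illustrative assumptions, not part of this diff:

package main

import (
	"context"
	"time"

	"github.com/OffchainLabs/prysm/v6/async"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	start := time.Now()
	// Hypothetical schedule: poll every 50ms for the first 300ms, then back off to 200ms.
	async.RunEveryDynamic(ctx, func() time.Duration {
		if time.Since(start) < 300*time.Millisecond {
			return 50 * time.Millisecond
		}
		return 200 * time.Millisecond
	}, func() {
		// Periodic work goes here.
	})
	<-ctx.Done() // RunEveryDynamic returns immediately; ticks run in a goroutine.
}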

View File

@@ -38,51 +38,3 @@ func TestEveryRuns(t *testing.T) {
t.Error("Counter incremented after stop")
}
}
func TestEveryDynamicRuns(t *testing.T) {
	ctx, cancel := context.WithCancel(t.Context())
	i := int32(0)
	intervalCount := int32(0)
	// Start with 50ms intervals, then increase to 100ms after 2 calls
	async.RunEveryDynamic(ctx, func() time.Duration {
		count := atomic.LoadInt32(&intervalCount)
		atomic.AddInt32(&intervalCount, 1)
		if count < 2 {
			return 50 * time.Millisecond
		}
		return 100 * time.Millisecond
	}, func() {
		atomic.AddInt32(&i, 1)
	})
	// After 150ms, should have run at least 2 times (at 50ms and 100ms)
	time.Sleep(150 * time.Millisecond)
	count1 := atomic.LoadInt32(&i)
	if count1 < 2 {
		t.Errorf("Expected at least 2 runs after 150ms, got %d", count1)
	}
	// After another 150ms (total 300ms), should have run at least 3 times
	// (50ms, 100ms, 150ms, 250ms)
	time.Sleep(150 * time.Millisecond)
	count2 := atomic.LoadInt32(&i)
	if count2 < 3 {
		t.Errorf("Expected at least 3 runs after 300ms, got %d", count2)
	}
	cancel()
	// Sleep for a bit to let the cancel take place.
	time.Sleep(100 * time.Millisecond)
	last := atomic.LoadInt32(&i)
	// Sleep for a bit and ensure the value has not increased.
	time.Sleep(200 * time.Millisecond)
	if atomic.LoadInt32(&i) != last {
		t.Error("Counter incremented after stop")
	}
}

View File

@@ -102,6 +102,7 @@ func VerifyCellKZGProofBatch(commitmentsBytes []Bytes48, cellIndices []uint64, c
	for i := range cells {
		ckzgCells[i] = ckzg4844.Cell(cells[i])
	}
	return ckzg4844.VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, ckzgCells, proofsBytes)
}

View File

@@ -1,30 +1,10 @@
package kzg
import (
	"fmt"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	GoKZG "github.com/crate-crypto/go-kzg-4844"
	ckzg4844 "github.com/ethereum/c-kzg-4844/v2/bindings/go"
	"github.com/pkg/errors"
)
func bytesToBlob(blob []byte) *GoKZG.Blob {
	var ret GoKZG.Blob
	copy(ret[:], blob)
	return &ret
}
func bytesToCommitment(commitment []byte) (ret GoKZG.KZGCommitment) {
	copy(ret[:], commitment)
	return
}
func bytesToKZGProof(proof []byte) (ret GoKZG.KZGProof) {
	copy(ret[:], proof)
	return
}
// Verify performs single or batch verification of commitments depending on the number of given BlobSidecars.
func Verify(blobSidecars ...blocks.ROBlob) error {
if len(blobSidecars) == 0 {
@@ -47,121 +27,18 @@ func Verify(blobSidecars ...blocks.ROBlob) error {
return kzgContext.VerifyBlobKZGProofBatch(blobs, cmts, proofs)
}
// VerifyBlobKZGProofBatch verifies KZG proofs for multiple blobs using batch verification.
// This is more efficient than verifying each blob individually when len(blobs) > 1.
// For single blob verification, it uses the optimized single verification path.
func VerifyBlobKZGProofBatch(blobs [][]byte, commitments [][]byte, proofs [][]byte) error {
	if len(blobs) != len(commitments) || len(blobs) != len(proofs) {
		return errors.Errorf("number of blobs (%d), commitments (%d), and proofs (%d) must match", len(blobs), len(commitments), len(proofs))
	}
	if len(blobs) == 0 {
		return nil
	}
	// Optimize for single blob case - use single verification to avoid batch overhead
	if len(blobs) == 1 {
		return kzgContext.VerifyBlobKZGProof(
			bytesToBlob(blobs[0]),
			bytesToCommitment(commitments[0]),
			bytesToKZGProof(proofs[0]))
	}
	// Use batch verification for multiple blobs
	ckzgBlobs := make([]ckzg4844.Blob, len(blobs))
	ckzgCommitments := make([]ckzg4844.Bytes48, len(commitments))
	ckzgProofs := make([]ckzg4844.Bytes48, len(proofs))
	for i := range blobs {
		if len(blobs[i]) != len(ckzg4844.Blob{}) {
			return fmt.Errorf("blobs len (%d) differs from expected (%d)", len(blobs[i]), len(ckzg4844.Blob{}))
		}
		if len(commitments[i]) != len(ckzg4844.Bytes48{}) {
			return fmt.Errorf("commitments len (%d) differs from expected (%d)", len(commitments[i]), len(ckzg4844.Blob{}))
		}
		if len(proofs[i]) != len(ckzg4844.Bytes48{}) {
			return fmt.Errorf("proofs len (%d) differs from expected (%d)", len(proofs[i]), len(ckzg4844.Blob{}))
		}
		ckzgBlobs[i] = ckzg4844.Blob(blobs[i])
		ckzgCommitments[i] = ckzg4844.Bytes48(commitments[i])
		ckzgProofs[i] = ckzg4844.Bytes48(proofs[i])
	}
	valid, err := ckzg4844.VerifyBlobKZGProofBatch(ckzgBlobs, ckzgCommitments, ckzgProofs)
	if err != nil {
		return errors.Wrap(err, "batch verification")
	}
	if !valid {
		return errors.New("batch KZG proof verification failed")
	}
	return nil
}
func bytesToBlob(blob []byte) *GoKZG.Blob {
	var ret GoKZG.Blob
	copy(ret[:], blob)
	return &ret
}
// VerifyCellKZGProofBatchFromBlobData verifies cell KZG proofs in batch format directly from blob data.
// This is more efficient than reconstructing data column sidecars when you have the raw blob data and cell proofs.
// For PeerDAS/Fulu, the execution client provides cell proofs in flattened format via BlobsBundleV2.
// For single blob verification, it optimizes by computing cells once and verifying efficiently.
func VerifyCellKZGProofBatchFromBlobData(blobs [][]byte, commitments [][]byte, cellProofs [][]byte, numberOfColumns uint64) error {
	blobCount := uint64(len(blobs))
	expectedCellProofs := blobCount * numberOfColumns
	if uint64(len(cellProofs)) != expectedCellProofs {
		return errors.Errorf("expected %d cell proofs, got %d", expectedCellProofs, len(cellProofs))
	}
	if len(commitments) != len(blobs) {
		return errors.Errorf("number of commitments (%d) must match number of blobs (%d)", len(commitments), len(blobs))
	}
	if blobCount == 0 {
		return nil
	}
	// Handle multiple blobs - compute cells for all blobs
	allCells := make([]Cell, 0, expectedCellProofs)
	allCommitments := make([]Bytes48, 0, expectedCellProofs)
	allIndices := make([]uint64, 0, expectedCellProofs)
	allProofs := make([]Bytes48, 0, expectedCellProofs)
	for blobIndex := range blobs {
		if len(blobs[blobIndex]) != len(Blob{}) {
			return fmt.Errorf("blobs len (%d) differs from expected (%d)", len(blobs[blobIndex]), len(Blob{}))
		}
		// Convert blob to kzg.Blob type
		blob := Blob(blobs[blobIndex])
		// Compute cells for this blob
		cells, err := ComputeCells(&blob)
		if err != nil {
			return errors.Wrapf(err, "failed to compute cells for blob %d", blobIndex)
		}
		// Add cells and corresponding data for each column
		for columnIndex := range numberOfColumns {
			cellProofIndex := uint64(blobIndex)*numberOfColumns + columnIndex
			if len(commitments[blobIndex]) != len(Bytes48{}) {
				return fmt.Errorf("commitments len (%d) differs from expected (%d)", len(commitments[blobIndex]), len(Bytes48{}))
			}
			if len(cellProofs[cellProofIndex]) != len(Bytes48{}) {
				return fmt.Errorf("proofs len (%d) differs from expected (%d)", len(cellProofs[cellProofIndex]), len(Bytes48{}))
			}
			allCells = append(allCells, cells[columnIndex])
			allCommitments = append(allCommitments, Bytes48(commitments[blobIndex]))
			allIndices = append(allIndices, columnIndex)
			allProofs = append(allProofs, Bytes48(cellProofs[cellProofIndex]))
		}
	}
	// Batch verify all cells
	valid, err := VerifyCellKZGProofBatch(allCommitments, allIndices, allCells, allProofs)
	if err != nil {
		return errors.Wrap(err, "cell batch verification")
	}
	if !valid {
		return errors.New("cell KZG proof batch verification failed")
	}
	return nil
}
func bytesToCommitment(commitment []byte) (ret GoKZG.KZGCommitment) {
	copy(ret[:], commitment)
	return
}
func bytesToKZGProof(proof []byte) (ret GoKZG.KZGProof) {
	copy(ret[:], proof)
	return
}
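A hedged usage sketch of the batch verifier removed above, reusing the GenerateCommitmentAndProof and random.GetRandBlob helpers that appear in the tests below; it assumes the trusted setup has already been loaded via Start():

func verifyOneBlobSketch() error {
	blob := random.GetRandBlob(1)
	commitment, proof, err := GenerateCommitmentAndProof(blob)
	if err != nil {
		return err
	}
	// One commitment and one full-blob proof per blob; all three input lengths must match.
	if err := VerifyBlobKZGProofBatch([][]byte{blob[:]}, [][]byte{commitment[:]}, [][]byte{proof[:]}); err != nil {
		return err
	}
	// For the cell variant, proofs are flattened blob-major: the proof for
	// (blobIndex, columnIndex) sits at index blobIndex*numberOfColumns+columnIndex.
	return nil
}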

View File

@@ -37,7 +37,6 @@ func TestBytesToAny(t *testing.T) {
}
func TestGenerateCommitmentAndProof(t *testing.T) {
require.NoError(t, Start())
blob := random.GetRandBlob(123)
commitment, proof, err := GenerateCommitmentAndProof(blob)
require.NoError(t, err)
@@ -46,432 +45,3 @@ func TestGenerateCommitmentAndProof(t *testing.T) {
require.Equal(t, expectedCommitment, commitment)
require.Equal(t, expectedProof, proof)
}
func TestVerifyBlobKZGProofBatch(t *testing.T) {
// Initialize KZG for testing
require.NoError(t, Start())
t.Run("valid single blob batch", func(t *testing.T) {
blob := random.GetRandBlob(123)
commitment, proof, err := GenerateCommitmentAndProof(blob)
require.NoError(t, err)
blobs := [][]byte{blob[:]}
commitments := [][]byte{commitment[:]}
proofs := [][]byte{proof[:]}
err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
require.NoError(t, err)
})
t.Run("valid multiple blob batch", func(t *testing.T) {
blobCount := 3
blobs := make([][]byte, blobCount)
commitments := make([][]byte, blobCount)
proofs := make([][]byte, blobCount)
for i := 0; i < blobCount; i++ {
blob := random.GetRandBlob(int64(i))
commitment, proof, err := GenerateCommitmentAndProof(blob)
require.NoError(t, err)
blobs[i] = blob[:]
commitments[i] = commitment[:]
proofs[i] = proof[:]
}
err := VerifyBlobKZGProofBatch(blobs, commitments, proofs)
require.NoError(t, err)
})
t.Run("empty inputs should pass", func(t *testing.T) {
err := VerifyBlobKZGProofBatch([][]byte{}, [][]byte{}, [][]byte{})
require.NoError(t, err)
})
t.Run("mismatched input lengths", func(t *testing.T) {
blob := random.GetRandBlob(123)
commitment, proof, err := GenerateCommitmentAndProof(blob)
require.NoError(t, err)
// Test different mismatch scenarios
err = VerifyBlobKZGProofBatch(
[][]byte{blob[:]},
[][]byte{},
[][]byte{proof[:]},
)
require.ErrorContains(t, "number of blobs (1), commitments (0), and proofs (1) must match", err)
err = VerifyBlobKZGProofBatch(
[][]byte{blob[:], blob[:]},
[][]byte{commitment[:]},
[][]byte{proof[:], proof[:]},
)
require.ErrorContains(t, "number of blobs (2), commitments (1), and proofs (2) must match", err)
})
t.Run("invalid commitment should fail", func(t *testing.T) {
blob := random.GetRandBlob(123)
_, proof, err := GenerateCommitmentAndProof(blob)
require.NoError(t, err)
// Use a different blob's commitment (mismatch)
differentBlob := random.GetRandBlob(456)
wrongCommitment, _, err := GenerateCommitmentAndProof(differentBlob)
require.NoError(t, err)
blobs := [][]byte{blob[:]}
commitments := [][]byte{wrongCommitment[:]}
proofs := [][]byte{proof[:]}
err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
// Single blob optimization uses different error message
require.ErrorContains(t, "can't verify opening proof", err)
})
t.Run("invalid proof should fail", func(t *testing.T) {
blob := random.GetRandBlob(123)
commitment, _, err := GenerateCommitmentAndProof(blob)
require.NoError(t, err)
// Use wrong proof
invalidProof := make([]byte, 48) // All zeros
blobs := [][]byte{blob[:]}
commitments := [][]byte{commitment[:]}
proofs := [][]byte{invalidProof}
err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
require.ErrorContains(t, "short buffer", err)
})
t.Run("mixed valid and invalid proofs should fail", func(t *testing.T) {
// First blob - valid
blob1 := random.GetRandBlob(123)
commitment1, proof1, err := GenerateCommitmentAndProof(blob1)
require.NoError(t, err)
// Second blob - invalid proof
blob2 := random.GetRandBlob(456)
commitment2, _, err := GenerateCommitmentAndProof(blob2)
require.NoError(t, err)
invalidProof := make([]byte, 48) // All zeros
blobs := [][]byte{blob1[:], blob2[:]}
commitments := [][]byte{commitment1[:], commitment2[:]}
proofs := [][]byte{proof1[:], invalidProof}
err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
require.ErrorContains(t, "batch verification", err)
})
t.Run("batch KZG proof verification failed", func(t *testing.T) {
// Create multiple blobs with mismatched commitments and proofs to trigger batch verification failure
blob1 := random.GetRandBlob(123)
blob2 := random.GetRandBlob(456)
// Generate valid proof for blob1
commitment1, proof1, err := GenerateCommitmentAndProof(blob1)
require.NoError(t, err)
// Generate valid proof for blob2 but use wrong commitment (from blob1)
_, proof2, err := GenerateCommitmentAndProof(blob2)
require.NoError(t, err)
// Use blob2 data with blob1's commitment and blob2's proof - this should cause batch verification to fail
blobs := [][]byte{blob1[:], blob2[:]}
commitments := [][]byte{commitment1[:], commitment1[:]} // Wrong commitment for blob2
proofs := [][]byte{proof1[:], proof2[:]}
err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
require.ErrorContains(t, "batch KZG proof verification failed", err)
})
}
func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) {
// Initialize KZG for testing
require.NoError(t, Start())
t.Run("valid single blob cell verification", func(t *testing.T) {
numberOfColumns := uint64(128)
// Generate blob and commitment
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])
commitment, err := BlobToKZGCommitment(&blob)
require.NoError(t, err)
// Compute cells and proofs
cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob)
require.NoError(t, err)
// Create flattened cell proofs (like execution client format)
cellProofs := make([][]byte, numberOfColumns)
for i := range numberOfColumns {
cellProofs[i] = cellsAndProofs.Proofs[i][:]
}
blobs := [][]byte{blob[:]}
commitments := [][]byte{commitment[:]}
err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
require.NoError(t, err)
})
t.Run("valid multiple blob cell verification", func(t *testing.T) {
numberOfColumns := uint64(128)
blobCount := 2
blobs := make([][]byte, blobCount)
commitments := make([][]byte, blobCount)
var allCellProofs [][]byte
for i := range blobCount {
// Generate blob and commitment
randBlob := random.GetRandBlob(int64(i))
var blob Blob
copy(blob[:], randBlob[:])
commitment, err := BlobToKZGCommitment(&blob)
require.NoError(t, err)
// Compute cells and proofs
cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob)
require.NoError(t, err)
blobs[i] = blob[:]
commitments[i] = commitment[:]
// Add cell proofs for this blob
for j := range numberOfColumns {
allCellProofs = append(allCellProofs, cellsAndProofs.Proofs[j][:])
}
}
err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, allCellProofs, numberOfColumns)
require.NoError(t, err)
})
t.Run("empty inputs should pass", func(t *testing.T) {
err := VerifyCellKZGProofBatchFromBlobData([][]byte{}, [][]byte{}, [][]byte{}, 128)
require.NoError(t, err)
})
t.Run("mismatched blob and commitment count", func(t *testing.T) {
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])
err := VerifyCellKZGProofBatchFromBlobData(
[][]byte{blob[:]},
[][]byte{}, // Empty commitments
[][]byte{},
128,
)
require.ErrorContains(t, "expected 128 cell proofs", err)
})
t.Run("wrong cell proof count", func(t *testing.T) {
numberOfColumns := uint64(128)
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])
commitment, err := BlobToKZGCommitment(&blob)
require.NoError(t, err)
blobs := [][]byte{blob[:]}
commitments := [][]byte{commitment[:]}
// Wrong number of cell proofs - should be 128 for 1 blob, but provide 10
wrongCellProofs := make([][]byte, 10)
err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, wrongCellProofs, numberOfColumns)
require.ErrorContains(t, "expected 128 cell proofs, got 10", err)
})
t.Run("invalid cell proofs should fail", func(t *testing.T) {
numberOfColumns := uint64(128)
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])
commitment, err := BlobToKZGCommitment(&blob)
require.NoError(t, err)
blobs := [][]byte{blob[:]}
commitments := [][]byte{commitment[:]}
// Create invalid cell proofs (all zeros)
invalidCellProofs := make([][]byte, numberOfColumns)
for i := range numberOfColumns {
invalidCellProofs[i] = make([]byte, 48) // All zeros
}
err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, invalidCellProofs, numberOfColumns)
require.ErrorContains(t, "cell batch verification", err)
})
t.Run("mismatched commitment should fail", func(t *testing.T) {
numberOfColumns := uint64(128)
// Generate blob and correct cell proofs
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])
cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob)
require.NoError(t, err)
// Generate wrong commitment from different blob
randBlob2 := random.GetRandBlob(456)
var differentBlob Blob
copy(differentBlob[:], randBlob2[:])
wrongCommitment, err := BlobToKZGCommitment(&differentBlob)
require.NoError(t, err)
cellProofs := make([][]byte, numberOfColumns)
for i := range numberOfColumns {
cellProofs[i] = cellsAndProofs.Proofs[i][:]
}
blobs := [][]byte{blob[:]}
commitments := [][]byte{wrongCommitment[:]}
err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
require.ErrorContains(t, "cell KZG proof batch verification failed", err)
})
t.Run("invalid blob data that should cause ComputeCells to fail", func(t *testing.T) {
numberOfColumns := uint64(128)
// Create invalid blob (not properly formatted)
invalidBlobData := make([]byte, 10) // Too short
commitment := make([]byte, 48) // Dummy commitment
cellProofs := make([][]byte, numberOfColumns)
for i := range numberOfColumns {
cellProofs[i] = make([]byte, 48)
}
blobs := [][]byte{invalidBlobData}
commitments := [][]byte{commitment}
err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
require.NotNil(t, err)
require.ErrorContains(t, "blobs len (10) differs from expected (131072)", err)
})
t.Run("invalid commitment size should fail", func(t *testing.T) {
numberOfColumns := uint64(128)
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])
// Create invalid commitment (wrong size)
invalidCommitment := make([]byte, 32) // Should be 48 bytes
cellProofs := make([][]byte, numberOfColumns)
for i := range numberOfColumns {
cellProofs[i] = make([]byte, 48)
}
blobs := [][]byte{blob[:]}
commitments := [][]byte{invalidCommitment}
err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
require.ErrorContains(t, "commitments len (32) differs from expected (48)", err)
})
t.Run("invalid cell proof size should fail", func(t *testing.T) {
numberOfColumns := uint64(128)
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])
commitment, err := BlobToKZGCommitment(&blob)
require.NoError(t, err)
// Create invalid cell proofs (wrong size)
invalidCellProofs := make([][]byte, numberOfColumns)
for i := range numberOfColumns {
if i == 0 {
invalidCellProofs[i] = make([]byte, 32) // Wrong size - should be 48
} else {
invalidCellProofs[i] = make([]byte, 48)
}
}
blobs := [][]byte{blob[:]}
commitments := [][]byte{commitment[:]}
err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, invalidCellProofs, numberOfColumns)
require.ErrorContains(t, "proofs len (32) differs from expected (48)", err)
})
t.Run("multiple blobs with mixed invalid commitments", func(t *testing.T) {
numberOfColumns := uint64(128)
blobCount := 2
blobs := make([][]byte, blobCount)
commitments := make([][]byte, blobCount)
var allCellProofs [][]byte
// First blob - valid
randBlob1 := random.GetRandBlob(123)
var blob1 Blob
copy(blob1[:], randBlob1[:])
commitment1, err := BlobToKZGCommitment(&blob1)
require.NoError(t, err)
blobs[0] = blob1[:]
commitments[0] = commitment1[:]
// Second blob - use invalid commitment size
randBlob2 := random.GetRandBlob(456)
var blob2 Blob
copy(blob2[:], randBlob2[:])
blobs[1] = blob2[:]
commitments[1] = make([]byte, 32) // Wrong size
// Add cell proofs for both blobs
for i := 0; i < blobCount; i++ {
for j := uint64(0); j < numberOfColumns; j++ {
allCellProofs = append(allCellProofs, make([]byte, 48))
}
}
err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, allCellProofs, numberOfColumns)
require.ErrorContains(t, "commitments len (32) differs from expected (48)", err)
})
t.Run("multiple blobs with mixed invalid cell proof sizes", func(t *testing.T) {
numberOfColumns := uint64(128)
blobCount := 2
blobs := make([][]byte, blobCount)
commitments := make([][]byte, blobCount)
var allCellProofs [][]byte
for i := 0; i < blobCount; i++ {
randBlob := random.GetRandBlob(int64(i))
var blob Blob
copy(blob[:], randBlob[:])
commitment, err := BlobToKZGCommitment(&blob)
require.NoError(t, err)
blobs[i] = blob[:]
commitments[i] = commitment[:]
// Add cell proofs - make some invalid in the second blob
for j := uint64(0); j < numberOfColumns; j++ {
if i == 1 && j == 64 {
// Invalid proof size in middle of second blob's proofs
allCellProofs = append(allCellProofs, make([]byte, 20))
} else {
allCellProofs = append(allCellProofs, make([]byte, 48))
}
}
}
err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, allCellProofs, numberOfColumns)
require.ErrorContains(t, "proofs len (20) differs from expected (48)", err)
})
}

View File

@@ -133,7 +133,7 @@ func getStateVersionAndPayload(st state.BeaconState) (int, interfaces.ExecutionD
return preStateVersion, preStateHeader, nil
}
func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlock, avs das.AvailabilityStore) error {
func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlock, avs das.AvailabilityChecker) error {
ctx, span := trace.StartSpan(ctx, "blockChain.onBlockBatch")
defer span.End()
@@ -159,7 +159,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
}
// Fill in missing blocks
if err := s.fillInForkChoiceMissingBlocks(ctx, blks[0], preState.FinalizedCheckpoint(), preState.CurrentJustifiedCheckpoint()); err != nil {
if err := s.fillInForkChoiceMissingBlocks(ctx, blks[0], preState.CurrentJustifiedCheckpoint(), preState.FinalizedCheckpoint()); err != nil {
return errors.Wrap(err, "could not fill in missing blocks to forkchoice")
}
@@ -309,7 +309,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
}
func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.AvailabilityStore, roBlock consensusblocks.ROBlock) error {
func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.AvailabilityChecker, roBlock consensusblocks.ROBlock) error {
blockVersion := roBlock.Version()
block := roBlock.Block()
slot := block.Slot()

View File

@@ -30,10 +30,6 @@ import (
"github.com/sirupsen/logrus"
)
// ErrInvalidCheckpointArgs may be returned when the finalized checkpoint has an epoch greater than the justified checkpoint epoch.
// If you are seeing this error, make sure you haven't mixed up the order of the arguments in the method you are calling.
var ErrInvalidCheckpointArgs = errors.New("finalized checkpoint cannot be greater than justified checkpoint")
// CurrentSlot returns the current slot based on time.
func (s *Service) CurrentSlot() primitives.Slot {
return slots.CurrentSlot(s.genesisTime)
@@ -458,9 +454,6 @@ func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot primitives.
// This is useful for block tree visualizer and additional vote accounting.
func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock,
fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
if fCheckpoint.Epoch > jCheckpoint.Epoch {
return ErrInvalidCheckpointArgs
}
pendingNodes := make([]*forkchoicetypes.BlockAndCheckpoints, 0)
// Fork choice only matters from last finalized slot.

View File

@@ -375,81 +375,6 @@ func TestFillForkChoiceMissingBlocks_FinalizedSibling(t *testing.T) {
require.Equal(t, ErrNotDescendantOfFinalized.Error(), err.Error())
}
func TestFillForkChoiceMissingBlocks_ErrorCases(t *testing.T) {
tests := []struct {
name string
finalizedEpoch primitives.Epoch
justifiedEpoch primitives.Epoch
expectedError error
}{
{
name: "finalized epoch greater than justified epoch",
finalizedEpoch: 5,
justifiedEpoch: 3,
expectedError: ErrInvalidCheckpointArgs,
},
{
name: "valid case - finalized equal to justified",
finalizedEpoch: 3,
justifiedEpoch: 3,
expectedError: nil,
},
{
name: "valid case - finalized less than justified",
finalizedEpoch: 2,
justifiedEpoch: 3,
expectedError: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
service, tr := minimalTestService(t)
ctx, beaconDB := tr.ctx, tr.db
st, _ := util.DeterministicGenesisState(t, 64)
require.NoError(t, service.saveGenesisData(ctx, st))
// Create a simple block for testing
blk := util.NewBeaconBlock()
blk.Block.Slot = 10
blk.Block.ParentRoot = service.originBlockRoot[:]
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
util.SaveBlock(t, ctx, beaconDB, blk)
// Create checkpoints with test case epochs
finalizedCheckpoint := &ethpb.Checkpoint{
Epoch: tt.finalizedEpoch,
Root: service.originBlockRoot[:],
}
justifiedCheckpoint := &ethpb.Checkpoint{
Epoch: tt.justifiedEpoch,
Root: service.originBlockRoot[:],
}
// Set up forkchoice store to avoid other errors
fcp := &ethpb.Checkpoint{Epoch: 0, Root: service.originBlockRoot[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, 0, service.originBlockRoot, service.originBlockRoot, [32]byte{}, fcp, fcp)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
err = service.fillInForkChoiceMissingBlocks(
t.Context(), wsb, finalizedCheckpoint, justifiedCheckpoint)
if tt.expectedError != nil {
require.ErrorIs(t, err, tt.expectedError)
} else {
// For valid cases, we might get other errors (like block not being descendant of finalized)
// but we shouldn't get the checkpoint validation error
if err != nil && errors.Is(err, tt.expectedError) {
t.Errorf("Unexpected checkpoint validation error: %v", err)
}
}
})
}
}
// blockTree1 constructs the following tree:
//
// /- B1
@@ -2207,13 +2132,13 @@ func TestNoViableHead_Reboot(t *testing.T) {
// Forkchoice has the genesisRoot loaded at startup
require.Equal(t, genesisRoot, service.ensureRootNotZeros(service.cfg.ForkChoiceStore.CachedHeadRoot()))
// Service's store has the justified checkpoint root as headRoot (verified below through justified checkpoint comparison)
// Service's store has the finalized state as headRoot
headRoot, err := service.HeadRoot(ctx)
require.NoError(t, err)
require.NotEqual(t, bytesutil.ToBytes32(params.BeaconConfig().ZeroHash[:]), bytesutil.ToBytes32(headRoot)) // Ensure head is not zero
require.Equal(t, genesisRoot, bytesutil.ToBytes32(headRoot))
optimistic, err := service.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, true, optimistic) // Head is now optimistic when starting from justified checkpoint
require.Equal(t, false, optimistic)
// Check that the node's justified checkpoint does not agree with the
// last valid state's justified checkpoint

View File

@@ -40,8 +40,8 @@ var epochsSinceFinalityExpandCache = primitives.Epoch(4)
// BlockReceiver interface defines the methods of chain service for receiving and processing new blocks.
type BlockReceiver interface {
ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityStore) error
ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityStore) error
ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityChecker) error
ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityChecker) error
HasBlock(ctx context.Context, root [32]byte) bool
RecentBlockSlot(root [32]byte) (primitives.Slot, error)
BlockBeingSynced([32]byte) bool
@@ -70,7 +70,7 @@ type SlashingReceiver interface {
// 1. Validate block, apply state transition and update checkpoints
// 2. Apply fork choice to the processed block
// 3. Save latest head info
func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityStore) error {
func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityChecker) error {
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlock")
defer span.End()
// Return early if the block is blacklisted
@@ -244,7 +244,7 @@ func (s *Service) handleDA(
ctx context.Context,
block interfaces.SignedBeaconBlock,
blockRoot [fieldparams.RootLength]byte,
avs das.AvailabilityStore,
avs das.AvailabilityChecker,
) (elapsed time.Duration, err error) {
defer func(start time.Time) {
elapsed = time.Since(start)
@@ -333,7 +333,7 @@ func (s *Service) executePostFinalizationTasks(ctx context.Context, finalizedSta
// ReceiveBlockBatch processes the whole block batch at once, assuming the block batch is linear, transitioning
// the state, performing batch verification of all collected signatures and then performing the appropriate
// actions for a block post-transition.
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityStore) error {
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityChecker) error {
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlockBatch")
defer span.End()

View File

@@ -20,7 +20,7 @@ func (s *Service) setupForkchoice(st state.BeaconState) error {
return errors.Wrap(err, "could not set up forkchoice checkpoints")
}
if err := s.setupForkchoiceTree(st); err != nil {
return errors.Wrap(err, "could not set up forkchoice tree")
return errors.Wrap(err, "could not set up forkchoice root")
}
if err := s.initializeHead(s.ctx, st); err != nil {
return errors.Wrap(err, "could not initialize head from db")
@@ -30,24 +30,24 @@ func (s *Service) setupForkchoice(st state.BeaconState) error {
func (s *Service) startupHeadRoot() [32]byte {
headStr := features.Get().ForceHead
jp := s.CurrentJustifiedCheckpt()
jRoot := s.ensureRootNotZeros([32]byte(jp.Root))
cp := s.FinalizedCheckpt()
fRoot := s.ensureRootNotZeros([32]byte(cp.Root))
if headStr == "" {
return jRoot
return fRoot
}
if headStr == "head" {
root, err := s.cfg.BeaconDB.HeadBlockRoot()
if err != nil {
log.WithError(err).Error("Could not get head block root, starting with justified block as head")
return jRoot
log.WithError(err).Error("Could not get head block root, starting with finalized block as head")
return fRoot
}
log.Infof("Using Head root of %#x", root)
return root
}
root, err := bytesutil.DecodeHexWithLength(headStr, 32)
if err != nil {
log.WithError(err).Error("Could not parse head root, starting with justified block as head")
return jRoot
log.WithError(err).Error("Could not parse head root, starting with finalized block as head")
return fRoot
}
return [32]byte(root)
}

View File

@@ -32,7 +32,7 @@ func Test_startupHeadRoot(t *testing.T) {
})
defer resetCfg()
require.Equal(t, service.startupHeadRoot(), gr)
require.LogsContain(t, hook, "Could not get head block root, starting with justified block as head")
require.LogsContain(t, hook, "Could not get head block root, starting with finalized block as head")
})
st, _ := util.DeterministicGenesisState(t, 64)

View File

@@ -275,7 +275,7 @@ func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block interf
}
// ReceiveBlockBatch processes blocks in batches from initial-sync.
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBlock, _ das.AvailabilityStore) error {
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBlock, _ das.AvailabilityChecker) error {
if s.State == nil {
return ErrNilState
}
@@ -305,7 +305,7 @@ func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBl
}
// ReceiveBlock mocks ReceiveBlock method in chain service.
func (s *ChainService) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, _ [32]byte, _ das.AvailabilityStore) error {
func (s *ChainService) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, _ [32]byte, _ das.AvailabilityChecker) error {
if s.ReceiveBlockMockErr != nil {
return s.ReceiveBlockMockErr
}

View File

@@ -67,30 +67,6 @@ func (s *SyncCommitteeCache) Clear() {
s.cache = cache.NewFIFO(keyFn)
}
// CurrentPeriodPositions returns the current period positions of validator indices with respect to the
// sync committee. If any input validator index has no assignment, an empty list will be returned
// for that validator. If the input root does not exist in cache, `ErrNonExistingSyncCommitteeKey` is returned.
// Manual checking of the state for the index position is recommended when `ErrNonExistingSyncCommitteeKey` is returned.
func (s *SyncCommitteeCache) CurrentPeriodPositions(root [32]byte, indices []primitives.ValidatorIndex) ([][]primitives.CommitteeIndex, error) {
s.lock.RLock()
defer s.lock.RUnlock()
pos, err := s.positionsInCommittee(root, indices)
if err != nil {
return nil, err
}
result := make([][]primitives.CommitteeIndex, len(pos))
for i, p := range pos {
if p == nil {
result[i] = []primitives.CommitteeIndex{}
} else {
result[i] = p.currentPeriod
}
}
return result, nil
}
// CurrentPeriodIndexPosition returns the current period index position of a validator index with respect to the
// sync committee. If the input validator index has no assignment, an empty list will be returned.
// If the input root does not exist in cache, `ErrNonExistingSyncCommitteeKey` is returned.
@@ -128,7 +104,11 @@ func (s *SyncCommitteeCache) NextPeriodIndexPosition(root [32]byte, valIdx primi
return pos.nextPeriod, nil
}
func (s *SyncCommitteeCache) positionsInCommittee(root [32]byte, indices []primitives.ValidatorIndex) ([]*positionInCommittee, error) {
// Helper function for `CurrentPeriodIndexPosition` and `NextPeriodIndexPosition` to return a mapping
// of validator index to its position(s) in the sync committee.
func (s *SyncCommitteeCache) idxPositionInCommittee(
root [32]byte, valIdx primitives.ValidatorIndex,
) (*positionInCommittee, error) {
obj, exists, err := s.cache.GetByKey(key(root))
if err != nil {
return nil, err
@@ -141,33 +121,13 @@ func (s *SyncCommitteeCache) positionsInCommittee(root [32]byte, indices []primi
if !ok {
return nil, errNotSyncCommitteeIndexPosition
}
result := make([]*positionInCommittee, len(indices))
for i, idx := range indices {
idxInCommittee, ok := item.vIndexToPositionMap[idx]
if ok {
SyncCommitteeCacheHit.Inc()
result[i] = idxInCommittee
} else {
SyncCommitteeCacheMiss.Inc()
result[i] = nil
}
}
return result, nil
}
// Helper function for `CurrentPeriodIndexPosition` and `NextPeriodIndexPosition` to return a mapping
// of validator index to its position(s) in the sync committee.
func (s *SyncCommitteeCache) idxPositionInCommittee(
root [32]byte, valIdx primitives.ValidatorIndex,
) (*positionInCommittee, error) {
positions, err := s.positionsInCommittee(root, []primitives.ValidatorIndex{valIdx})
if err != nil {
return nil, err
}
if len(positions) == 0 {
idxInCommittee, ok := item.vIndexToPositionMap[valIdx]
if !ok {
SyncCommitteeCacheMiss.Inc()
return nil, nil
}
return positions[0], nil
SyncCommitteeCacheHit.Inc()
return idxInCommittee, nil
}
// UpdatePositionsInCommittee updates caching of validators position in sync committee in respect to
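With the batch lookup removed, a caller can loop over the retained per-index API instead; a rough sketch, where cache, root, and indices are placeholders:

func currentPositions(cache *SyncCommitteeCache, root [32]byte, indices []primitives.ValidatorIndex) ([][]primitives.CommitteeIndex, error) {
	positions := make([][]primitives.CommitteeIndex, len(indices))
	for i, idx := range indices {
		pos, err := cache.CurrentPeriodIndexPosition(root, idx)
		if err != nil {
			// On ErrNonExistingSyncCommitteeKey, check the state directly,
			// as the doc comment above recommends.
			return nil, err
		}
		positions[i] = pos
	}
	return positions, nil
}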

View File

@@ -16,11 +16,6 @@ func NewSyncCommittee() *FakeSyncCommitteeCache {
return &FakeSyncCommitteeCache{}
}
// CurrentPeriodPositions -- fake
func (s *FakeSyncCommitteeCache) CurrentPeriodPositions(root [32]byte, indices []primitives.ValidatorIndex) ([][]primitives.CommitteeIndex, error) {
return nil, nil
}
// CurrentEpochIndexPosition -- fake.
func (s *FakeSyncCommitteeCache) CurrentPeriodIndexPosition(root [32]byte, valIdx primitives.ValidatorIndex) ([]primitives.CommitteeIndex, error) {
return nil, nil

View File

@@ -5,7 +5,6 @@ import (
"sort"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/container/slice"
@@ -40,11 +39,11 @@ func ProcessAttesterSlashings(
ctx context.Context,
beaconState state.BeaconState,
slashings []ethpb.AttSlashing,
exitInfo *validators.ExitInfo,
slashFunc slashValidatorFunc,
) (state.BeaconState, error) {
var err error
for _, slashing := range slashings {
beaconState, err = ProcessAttesterSlashing(ctx, beaconState, slashing, exitInfo)
beaconState, err = ProcessAttesterSlashing(ctx, beaconState, slashing, slashFunc)
if err != nil {
return nil, err
}
@@ -57,7 +56,7 @@ func ProcessAttesterSlashing(
ctx context.Context,
beaconState state.BeaconState,
slashing ethpb.AttSlashing,
exitInfo *validators.ExitInfo,
slashFunc slashValidatorFunc,
) (state.BeaconState, error) {
if err := VerifyAttesterSlashing(ctx, beaconState, slashing); err != nil {
return nil, errors.Wrap(err, "could not verify attester slashing")
@@ -76,9 +75,10 @@ func ProcessAttesterSlashing(
return nil, err
}
if helpers.IsSlashableValidator(val.ActivationEpoch(), val.WithdrawableEpoch(), val.Slashed(), currentEpoch) {
beaconState, err = validators.SlashValidator(ctx, beaconState, primitives.ValidatorIndex(validatorIndex), exitInfo)
beaconState, err = slashFunc(ctx, beaconState, primitives.ValidatorIndex(validatorIndex))
if err != nil {
return nil, errors.Wrapf(err, "could not slash validator index %d", validatorIndex)
return nil, errors.Wrapf(err, "could not slash validator index %d",
validatorIndex)
}
slashedAny = true
}

View File

@@ -4,7 +4,6 @@ import (
"testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
v "github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
@@ -45,10 +44,11 @@ func TestProcessAttesterSlashings_DataNotSlashable(t *testing.T) {
Target: &ethpb.Checkpoint{Epoch: 1}},
})}}
var registry []*ethpb.Validator
currentSlot := primitives.Slot(0)
beaconState, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Validators: []*ethpb.Validator{{}},
Validators: registry,
Slot: currentSlot,
})
require.NoError(t, err)
@@ -62,15 +62,16 @@ func TestProcessAttesterSlashings_DataNotSlashable(t *testing.T) {
for i, s := range b.Block.Body.AttesterSlashings {
ss[i] = s
}
_, err = blocks.ProcessAttesterSlashings(t.Context(), beaconState, ss, v.ExitInformation(beaconState))
_, err = blocks.ProcessAttesterSlashings(t.Context(), beaconState, ss, v.SlashValidator)
assert.ErrorContains(t, "attestations are not slashable", err)
}
func TestProcessAttesterSlashings_IndexedAttestationFailedToVerify(t *testing.T) {
var registry []*ethpb.Validator
currentSlot := primitives.Slot(0)
beaconState, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Validators: []*ethpb.Validator{{}},
Validators: registry,
Slot: currentSlot,
})
require.NoError(t, err)
@@ -100,7 +101,7 @@ func TestProcessAttesterSlashings_IndexedAttestationFailedToVerify(t *testing.T)
for i, s := range b.Block.Body.AttesterSlashings {
ss[i] = s
}
_, err = blocks.ProcessAttesterSlashings(t.Context(), beaconState, ss, v.ExitInformation(beaconState))
_, err = blocks.ProcessAttesterSlashings(t.Context(), beaconState, ss, v.SlashValidator)
assert.ErrorContains(t, "validator indices count exceeds MAX_VALIDATORS_PER_COMMITTEE", err)
}
@@ -242,7 +243,7 @@ func TestProcessAttesterSlashings_AppliesCorrectStatus(t *testing.T) {
currentSlot := 2 * params.BeaconConfig().SlotsPerEpoch
require.NoError(t, tc.st.SetSlot(currentSlot))
newState, err := blocks.ProcessAttesterSlashings(t.Context(), tc.st, []ethpb.AttSlashing{tc.slashing}, v.ExitInformation(tc.st))
newState, err := blocks.ProcessAttesterSlashings(t.Context(), tc.st, []ethpb.AttSlashing{tc.slashing}, v.SlashValidator)
require.NoError(t, err)
newRegistry := newState.Validators()
@@ -264,83 +265,3 @@ func TestProcessAttesterSlashings_AppliesCorrectStatus(t *testing.T) {
})
}
}
func TestProcessAttesterSlashing_ExitEpochGetsUpdated(t *testing.T) {
st, keys := util.DeterministicGenesisStateElectra(t, 8)
bal, err := helpers.TotalActiveBalance(st)
require.NoError(t, err)
perEpochChurn := helpers.ActivationExitChurnLimit(primitives.Gwei(bal))
vals := st.Validators()
// We set the total effective balance of slashed validators
// higher than the churn limit for a single epoch.
vals[0].EffectiveBalance = uint64(perEpochChurn / 3)
vals[1].EffectiveBalance = uint64(perEpochChurn / 3)
vals[2].EffectiveBalance = uint64(perEpochChurn / 3)
vals[3].EffectiveBalance = uint64(perEpochChurn / 3)
require.NoError(t, st.SetValidators(vals))
sl1att1 := util.HydrateIndexedAttestationElectra(&ethpb.IndexedAttestationElectra{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 1},
},
AttestingIndices: []uint64{0, 1},
})
sl1att2 := util.HydrateIndexedAttestationElectra(&ethpb.IndexedAttestationElectra{
AttestingIndices: []uint64{0, 1},
})
slashing1 := &ethpb.AttesterSlashingElectra{
Attestation_1: sl1att1,
Attestation_2: sl1att2,
}
sl2att1 := util.HydrateIndexedAttestationElectra(&ethpb.IndexedAttestationElectra{
Data: &ethpb.AttestationData{
Source: &ethpb.Checkpoint{Epoch: 1},
},
AttestingIndices: []uint64{2, 3},
})
sl2att2 := util.HydrateIndexedAttestationElectra(&ethpb.IndexedAttestationElectra{
AttestingIndices: []uint64{2, 3},
})
slashing2 := &ethpb.AttesterSlashingElectra{
Attestation_1: sl2att1,
Attestation_2: sl2att2,
}
domain, err := signing.Domain(st.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorsRoot())
require.NoError(t, err)
signingRoot, err := signing.ComputeSigningRoot(sl1att1.GetData(), domain)
assert.NoError(t, err, "Could not get signing root of beacon block header")
sig0 := keys[0].Sign(signingRoot[:])
sig1 := keys[1].Sign(signingRoot[:])
aggregateSig := bls.AggregateSignatures([]bls.Signature{sig0, sig1})
sl1att1.Signature = aggregateSig.Marshal()
signingRoot, err = signing.ComputeSigningRoot(sl1att2.GetData(), domain)
assert.NoError(t, err, "Could not get signing root of beacon block header")
sig0 = keys[0].Sign(signingRoot[:])
sig1 = keys[1].Sign(signingRoot[:])
aggregateSig = bls.AggregateSignatures([]bls.Signature{sig0, sig1})
sl1att2.Signature = aggregateSig.Marshal()
signingRoot, err = signing.ComputeSigningRoot(sl2att1.GetData(), domain)
assert.NoError(t, err, "Could not get signing root of beacon block header")
sig0 = keys[2].Sign(signingRoot[:])
sig1 = keys[3].Sign(signingRoot[:])
aggregateSig = bls.AggregateSignatures([]bls.Signature{sig0, sig1})
sl2att1.Signature = aggregateSig.Marshal()
signingRoot, err = signing.ComputeSigningRoot(sl2att2.GetData(), domain)
assert.NoError(t, err, "Could not get signing root of beacon block header")
sig0 = keys[2].Sign(signingRoot[:])
sig1 = keys[3].Sign(signingRoot[:])
aggregateSig = bls.AggregateSignatures([]bls.Signature{sig0, sig1})
sl2att2.Signature = aggregateSig.Marshal()
exitInfo := v.ExitInformation(st)
assert.Equal(t, primitives.Epoch(0), exitInfo.HighestExitEpoch)
_, err = blocks.ProcessAttesterSlashings(t.Context(), st, []ethpb.AttSlashing{slashing1, slashing2}, exitInfo)
require.NoError(t, err)
assert.Equal(t, primitives.Epoch(6), exitInfo.HighestExitEpoch)
}

View File

@@ -191,7 +191,7 @@ func TestFuzzProcessProposerSlashings_10000(t *testing.T) {
fuzzer.Fuzz(p)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
require.NoError(t, err)
r, err := ProcessProposerSlashings(ctx, s, []*ethpb.ProposerSlashing{p}, v.ExitInformation(s))
r, err := ProcessProposerSlashings(ctx, s, []*ethpb.ProposerSlashing{p}, v.SlashValidator)
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and slashing: %v", r, err, state, p)
}
@@ -224,7 +224,7 @@ func TestFuzzProcessAttesterSlashings_10000(t *testing.T) {
fuzzer.Fuzz(a)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
require.NoError(t, err)
r, err := ProcessAttesterSlashings(ctx, s, []ethpb.AttSlashing{a}, v.ExitInformation(s))
r, err := ProcessAttesterSlashings(ctx, s, []ethpb.AttSlashing{a}, v.SlashValidator)
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and slashing: %v", r, err, state, a)
}
@@ -334,7 +334,7 @@ func TestFuzzProcessVoluntaryExits_10000(t *testing.T) {
fuzzer.Fuzz(e)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
require.NoError(t, err)
r, err := ProcessVoluntaryExits(ctx, s, []*ethpb.SignedVoluntaryExit{e}, v.ExitInformation(s))
r, err := ProcessVoluntaryExits(ctx, s, []*ethpb.SignedVoluntaryExit{e})
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and exit: %v", r, err, state, e)
}
@@ -351,7 +351,7 @@ func TestFuzzProcessVoluntaryExitsNoVerify_10000(t *testing.T) {
fuzzer.Fuzz(e)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
require.NoError(t, err)
r, err := ProcessVoluntaryExits(t.Context(), s, []*ethpb.SignedVoluntaryExit{e}, v.ExitInformation(s))
r, err := ProcessVoluntaryExits(t.Context(), s, []*ethpb.SignedVoluntaryExit{e})
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, e)
}

View File

@@ -94,7 +94,7 @@ func TestProcessAttesterSlashings_RegressionSlashableIndices(t *testing.T) {
for i, s := range b.Block.Body.AttesterSlashings {
ss[i] = s
}
newState, err := blocks.ProcessAttesterSlashings(t.Context(), beaconState, ss, v.ExitInformation(beaconState))
newState, err := blocks.ProcessAttesterSlashings(t.Context(), beaconState, ss, v.SlashValidator)
require.NoError(t, err)
newRegistry := newState.Validators()
if !newRegistry[expectedSlashedVal].Slashed {

View File

@@ -9,6 +9,7 @@ import (
v "github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
@@ -49,12 +50,13 @@ func ProcessVoluntaryExits(
ctx context.Context,
beaconState state.BeaconState,
exits []*ethpb.SignedVoluntaryExit,
exitInfo *v.ExitInfo,
) (state.BeaconState, error) {
// Avoid calculating the epoch churn if no exits exist.
if len(exits) == 0 {
return beaconState, nil
}
maxExitEpoch, churn := v.MaxExitEpochAndChurn(beaconState)
var exitEpoch primitives.Epoch
for idx, exit := range exits {
if exit == nil || exit.Exit == nil {
return nil, errors.New("nil voluntary exit in block body")
@@ -66,8 +68,15 @@ func ProcessVoluntaryExits(
if err := VerifyExitAndSignature(val, beaconState, exit); err != nil {
return nil, errors.Wrapf(err, "could not verify exit %d", idx)
}
beaconState, err = v.InitiateValidatorExit(ctx, beaconState, exit.Exit.ValidatorIndex, exitInfo)
if err != nil && !errors.Is(err, v.ErrValidatorAlreadyExited) {
beaconState, exitEpoch, err = v.InitiateValidatorExit(ctx, beaconState, exit.Exit.ValidatorIndex, maxExitEpoch, churn)
if err == nil {
if exitEpoch > maxExitEpoch {
maxExitEpoch = exitEpoch
churn = 1
} else if exitEpoch == maxExitEpoch {
churn++
}
} else if !errors.Is(err, v.ErrValidatorAlreadyExited) {
return nil, err
}
}
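The churn bookkeeping introduced in this hunk reduces to a small accumulator: track the highest exit epoch seen and how many exits share it. A sketch mirroring the update rule above, where exitEpochs stands in for the epoch returned by each successful InitiateValidatorExit call:

maxExitEpoch, churn := v.MaxExitEpochAndChurn(beaconState)
for _, exitEpoch := range exitEpochs {
	if exitEpoch > maxExitEpoch {
		maxExitEpoch = exitEpoch // a later exit epoch resets the per-epoch count
		churn = 1
	} else if exitEpoch == maxExitEpoch {
		churn++ // another exit consumed churn in the same epoch
	}
}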

View File

@@ -7,7 +7,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v6/config/params"
@@ -47,7 +46,7 @@ func TestProcessVoluntaryExits_NotActiveLongEnoughToExit(t *testing.T) {
}
want := "validator has not been active long enough to exit"
_, err = blocks.ProcessVoluntaryExits(t.Context(), state, b.Block.Body.VoluntaryExits, validators.ExitInformation(state))
_, err = blocks.ProcessVoluntaryExits(t.Context(), state, b.Block.Body.VoluntaryExits)
assert.ErrorContains(t, want, err)
}
@@ -77,7 +76,7 @@ func TestProcessVoluntaryExits_ExitAlreadySubmitted(t *testing.T) {
}
want := "validator with index 0 has already submitted an exit, which will take place at epoch: 10"
_, err = blocks.ProcessVoluntaryExits(t.Context(), state, b.Block.Body.VoluntaryExits, validators.ExitInformation(state))
_, err = blocks.ProcessVoluntaryExits(t.Context(), state, b.Block.Body.VoluntaryExits)
assert.ErrorContains(t, want, err)
}
@@ -125,7 +124,7 @@ func TestProcessVoluntaryExits_AppliesCorrectStatus(t *testing.T) {
},
}
newState, err := blocks.ProcessVoluntaryExits(t.Context(), state, b.Block.Body.VoluntaryExits, validators.ExitInformation(state))
newState, err := blocks.ProcessVoluntaryExits(t.Context(), state, b.Block.Body.VoluntaryExits)
require.NoError(t, err, "Could not process exits")
newRegistry := newState.Validators()
if newRegistry[0].ExitEpoch != helpers.ActivationExitEpoch(primitives.Epoch(state.Slot()/params.BeaconConfig().SlotsPerEpoch)) {

View File

@@ -7,9 +7,9 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/pkg/errors"
@@ -19,6 +19,11 @@ import (
// ErrCouldNotVerifyBlockHeader is returned when a block header's signature cannot be verified.
var ErrCouldNotVerifyBlockHeader = errors.New("could not verify beacon block header")
type slashValidatorFunc func(
ctx context.Context,
st state.BeaconState,
vid primitives.ValidatorIndex) (state.BeaconState, error)
// ProcessProposerSlashings is one of the operations performed
// on each processed beacon block to slash proposers based on
// slashing conditions if any slashable events occurred.
@@ -49,11 +54,11 @@ func ProcessProposerSlashings(
ctx context.Context,
beaconState state.BeaconState,
slashings []*ethpb.ProposerSlashing,
exitInfo *validators.ExitInfo,
slashFunc slashValidatorFunc,
) (state.BeaconState, error) {
var err error
for _, slashing := range slashings {
beaconState, err = ProcessProposerSlashing(ctx, beaconState, slashing, exitInfo)
beaconState, err = ProcessProposerSlashing(ctx, beaconState, slashing, slashFunc)
if err != nil {
return nil, err
}
@@ -66,7 +71,7 @@ func ProcessProposerSlashing(
ctx context.Context,
beaconState state.BeaconState,
slashing *ethpb.ProposerSlashing,
exitInfo *validators.ExitInfo,
slashFunc slashValidatorFunc,
) (state.BeaconState, error) {
var err error
if slashing == nil {
@@ -75,7 +80,7 @@ func ProcessProposerSlashing(
if err = VerifyProposerSlashing(beaconState, slashing); err != nil {
return nil, errors.Wrap(err, "could not verify proposer slashing")
}
beaconState, err = validators.SlashValidator(ctx, beaconState, slashing.Header_1.Header.ProposerIndex, exitInfo)
beaconState, err = slashFunc(ctx, beaconState, slashing.Header_1.Header.ProposerIndex)
if err != nil {
return nil, errors.Wrapf(err, "could not slash proposer index %d", slashing.Header_1.Header.ProposerIndex)
}

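A minimal sketch of the dependency-injection pattern introduced here: the slashing implementation is passed as a function value (`v.SlashValidator` in production, a stub in tests) instead of an ExitInfo struct. The types below are simplified stand-ins for the Prysm state and index types.

```go
package main

import (
	"context"
	"fmt"
)

// Simplified stand-ins for state.BeaconState and slashValidatorFunc.
type beaconState struct{ slashed map[uint64]bool }
type slashValidatorFunc func(ctx context.Context, st *beaconState, vid uint64) (*beaconState, error)

// processProposerSlashing mirrors the shape of ProcessProposerSlashing: the
// caller chooses the slashing implementation and passes it in.
func processProposerSlashing(ctx context.Context, st *beaconState, proposer uint64, slashFunc slashValidatorFunc) (*beaconState, error) {
	return slashFunc(ctx, st, proposer)
}

func main() {
	slash := func(_ context.Context, st *beaconState, vid uint64) (*beaconState, error) {
		st.slashed[vid] = true
		return st, nil
	}
	st := &beaconState{slashed: map[uint64]bool{}}
	st, err := processProposerSlashing(context.Background(), st, 7, slash)
	if err != nil {
		panic(err)
	}
	fmt.Println(st.slashed[7]) // true
}
```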
View File

@@ -50,7 +50,7 @@ func TestProcessProposerSlashings_UnmatchedHeaderSlots(t *testing.T) {
},
}
want := "mismatched header slots"
_, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, b.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
_, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, b.Block.Body.ProposerSlashings, v.SlashValidator)
assert.ErrorContains(t, want, err)
}
@@ -83,7 +83,7 @@ func TestProcessProposerSlashings_SameHeaders(t *testing.T) {
},
}
want := "expected slashing headers to differ"
_, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, b.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
_, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, b.Block.Body.ProposerSlashings, v.SlashValidator)
assert.ErrorContains(t, want, err)
}
@@ -133,7 +133,7 @@ func TestProcessProposerSlashings_ValidatorNotSlashable(t *testing.T) {
"validator with key %#x is not slashable",
bytesutil.ToBytes48(beaconState.Validators()[0].PublicKey),
)
_, err = blocks.ProcessProposerSlashings(t.Context(), beaconState, b.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
_, err = blocks.ProcessProposerSlashings(t.Context(), beaconState, b.Block.Body.ProposerSlashings, v.SlashValidator)
assert.ErrorContains(t, want, err)
}
@@ -172,7 +172,7 @@ func TestProcessProposerSlashings_AppliesCorrectStatus(t *testing.T) {
block := util.NewBeaconBlock()
block.Block.Body.ProposerSlashings = slashings
newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.SlashValidator)
require.NoError(t, err)
newStateVals := newState.Validators()
@@ -220,7 +220,7 @@ func TestProcessProposerSlashings_AppliesCorrectStatusAltair(t *testing.T) {
block := util.NewBeaconBlock()
block.Block.Body.ProposerSlashings = slashings
newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.SlashValidator)
require.NoError(t, err)
newStateVals := newState.Validators()
@@ -268,7 +268,7 @@ func TestProcessProposerSlashings_AppliesCorrectStatusBellatrix(t *testing.T) {
block := util.NewBeaconBlock()
block.Block.Body.ProposerSlashings = slashings
newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.SlashValidator)
require.NoError(t, err)
newStateVals := newState.Validators()
@@ -316,7 +316,7 @@ func TestProcessProposerSlashings_AppliesCorrectStatusCapella(t *testing.T) {
block := util.NewBeaconBlock()
block.Block.Body.ProposerSlashings = slashings
newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.SlashValidator)
require.NoError(t, err)
newStateVals := newState.Validators()

View File

@@ -84,8 +84,8 @@ func ProcessRegistryUpdates(ctx context.Context, st state.BeaconState) error {
// Handle validator ejections.
for _, idx := range eligibleForEjection {
var err error
// exit info is not used in electra
st, err = validators.InitiateValidatorExit(ctx, st, idx, &validators.ExitInfo{})
// exitQueueEpoch and churn arguments are not used in electra.
st, _, err = validators.InitiateValidatorExit(ctx, st, idx, 0 /*exitQueueEpoch*/, 0 /*churn*/)
if err != nil && !errors.Is(err, validators.ErrValidatorAlreadyExited) {
return fmt.Errorf("failed to initiate validator exit at index %d: %w", idx, err)
}

View File

@@ -4,7 +4,6 @@ import (
"context"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
v "github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
@@ -47,21 +46,18 @@ var (
// # [New in Electra:EIP7251]
// for_ops(body.execution_payload.consolidation_requests, process_consolidation_request)
func ProcessOperations(ctx context.Context, st state.BeaconState, block interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
var err error
func ProcessOperations(
ctx context.Context,
st state.BeaconState,
block interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
// 6110 validations are in VerifyOperationLengths
bb := block.Body()
// Electra extends the altair operations.
exitInfo := v.ExitInformation(st)
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
return nil, errors.Wrap(err, "could not update total active balance cache")
}
st, err = ProcessProposerSlashings(ctx, st, bb.ProposerSlashings(), exitInfo)
st, err := ProcessProposerSlashings(ctx, st, bb.ProposerSlashings(), v.SlashValidator)
if err != nil {
return nil, errors.Wrap(err, "could not process altair proposer slashing")
}
st, err = ProcessAttesterSlashings(ctx, st, bb.AttesterSlashings(), exitInfo)
st, err = ProcessAttesterSlashings(ctx, st, bb.AttesterSlashings(), v.SlashValidator)
if err != nil {
return nil, errors.Wrap(err, "could not process altair attester slashing")
}
@@ -72,7 +68,7 @@ func ProcessOperations(ctx context.Context, st state.BeaconState, block interfac
if _, err := ProcessDeposits(ctx, st, bb.Deposits()); err != nil { // new in electra
return nil, errors.Wrap(err, "could not process altair deposit")
}
st, err = ProcessVoluntaryExits(ctx, st, bb.VoluntaryExits(), exitInfo)
st, err = ProcessVoluntaryExits(ctx, st, bb.VoluntaryExits())
if err != nil {
return nil, errors.Wrap(err, "could not process voluntary exits")
}

View File

@@ -147,8 +147,9 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
if isFullExitRequest {
// Only exit validator if it has no pending withdrawals in the queue
if pendingBalanceToWithdraw == 0 {
maxExitEpoch, churn := validators.MaxExitEpochAndChurn(st)
var err error
st, err = validators.InitiateValidatorExit(ctx, st, vIdx, validators.ExitInformation(st))
st, _, err = validators.InitiateValidatorExit(ctx, st, vIdx, maxExitEpoch, churn)
if err != nil {
return nil, err
}

View File

@@ -99,7 +99,8 @@ func ProcessRegistryUpdates(ctx context.Context, st state.BeaconState) (state.Be
for _, idx := range eligibleForEjection {
// Here is fine to do a quadratic loop since this should
// barely happen
st, err = validators.InitiateValidatorExit(ctx, st, idx, validators.ExitInformation(st))
maxExitEpoch, churn := validators.MaxExitEpochAndChurn(st)
st, _, err = validators.InitiateValidatorExit(ctx, st, idx, maxExitEpoch, churn)
if err != nil && !errors.Is(err, validators.ErrValidatorAlreadyExited) {
return nil, errors.Wrapf(err, "could not initiate exit for validator %d", idx)
}

View File

@@ -16,10 +16,10 @@ func ProcessEpoch(ctx context.Context, state state.BeaconState) error {
if err := electra.ProcessEpoch(ctx, state); err != nil {
return errors.Wrap(err, "could not process epoch in fulu transition")
}
return ProcessProposerLookahead(ctx, state)
return processProposerLookahead(ctx, state)
}
func ProcessProposerLookahead(ctx context.Context, state state.BeaconState) error {
func processProposerLookahead(ctx context.Context, state state.BeaconState) error {
_, span := trace.StartSpan(ctx, "fulu.processProposerLookahead")
defer span.End()

View File

@@ -317,15 +317,23 @@ func ProposerAssignments(ctx context.Context, state state.BeaconState, epoch pri
}
proposerAssignments := make(map[primitives.ValidatorIndex][]primitives.Slot)
originalStateSlot := state.Slot()
for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ {
// Skip proposer assignment for genesis slot.
if slot == 0 {
continue
}
// Set the state's current slot.
if err := state.SetSlot(slot); err != nil {
return nil, err
}
// Determine the proposer index for the current slot.
i, err := BeaconProposerIndexAtSlot(ctx, state, slot)
i, err := BeaconProposerIndex(ctx, state)
if err != nil {
return nil, errors.Wrapf(err, "could not check proposer at slot %d", slot)
return nil, errors.Wrapf(err, "could not check proposer at slot %d", state.Slot())
}
// Append the slot to the proposer's assignments.
@@ -334,6 +342,12 @@ func ProposerAssignments(ctx context.Context, state state.BeaconState, epoch pri
}
proposerAssignments[i] = append(proposerAssignments[i], slot)
}
// Reset state back to its original slot.
if err := state.SetSlot(originalStateSlot); err != nil {
return nil, err
}
return proposerAssignments, nil
}

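A minimal sketch of the save/mutate/restore pattern the new ProposerAssignments code uses: the state's slot is advanced for each slot in the epoch so that `BeaconProposerIndex` (which reads the state's current slot) sees the right slot, and the original slot is restored afterwards. `mutableState` and `proposerFor` below are hypothetical stand-ins, not the Prysm types.

```go
package main

import "fmt"

// mutableState is a stand-in for state.BeaconState; only the slot matters here.
type mutableState struct{ slot uint64 }

func (s *mutableState) Slot() uint64        { return s.slot }
func (s *mutableState) SetSlot(slot uint64) { s.slot = slot }

// proposerFor is a placeholder for BeaconProposerIndex, which derives the
// proposer from the state's current slot rather than taking the slot as an argument.
func proposerFor(s *mutableState) uint64 { return s.Slot() % 4 }

func main() {
	st := &mutableState{slot: 64}
	original := st.Slot()

	assignments := map[uint64][]uint64{}
	start, perEpoch := uint64(64), uint64(8)
	for slot := start; slot < start+perEpoch; slot++ {
		st.SetSlot(slot)       // temporarily advance the state
		idx := proposerFor(st) // proposer is derived from state.Slot()
		assignments[idx] = append(assignments[idx], slot)
	}
	st.SetSlot(original) // restore the state's slot, as the diff does
	fmt.Println(assignments, st.Slot())
}
```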
View File

@@ -87,11 +87,6 @@ func TotalActiveBalance(s state.ReadOnlyBeaconState) (uint64, error) {
return total, nil
}
// UpdateTotalActiveBalanceCache updates the cache with the given total active balance.
func UpdateTotalActiveBalanceCache(s state.BeaconState, total uint64) error {
return balanceCache.AddTotalEffectiveBalance(s, total)
}
// IncreaseBalance increases validator with the given 'index' balance by 'delta' in Gwei.
//
// Spec pseudocode definition:

View File

@@ -297,30 +297,3 @@ func TestIncreaseBadBalance_NotOK(t *testing.T) {
require.ErrorContains(t, "addition overflows", helpers.IncreaseBalance(state, test.i, test.nb))
}
}
func TestUpdateTotalActiveBalanceCache(t *testing.T) {
helpers.ClearCache()
// Create a test state with some validators
validators := []*ethpb.Validator{
{EffectiveBalance: 32 * 1e9, ExitEpoch: params.BeaconConfig().FarFutureEpoch, ActivationEpoch: 0},
{EffectiveBalance: 32 * 1e9, ExitEpoch: params.BeaconConfig().FarFutureEpoch, ActivationEpoch: 0},
{EffectiveBalance: 31 * 1e9, ExitEpoch: params.BeaconConfig().FarFutureEpoch, ActivationEpoch: 0},
}
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Validators: validators,
Slot: 0,
})
require.NoError(t, err)
// Test updating cache with a specific total
testTotal := uint64(95 * 1e9) // 32 + 32 + 31 = 95
err = helpers.UpdateTotalActiveBalanceCache(state, testTotal)
require.NoError(t, err)
// Verify the cache was updated by retrieving the total active balance
// which should now return the cached value
cachedTotal, err := helpers.TotalActiveBalance(state)
require.NoError(t, err)
assert.Equal(t, testTotal, cachedTotal, "Cache should return the updated total")
}

View File

@@ -21,39 +21,6 @@ var (
syncCommitteeCache = cache.NewSyncCommittee()
)
// CurrentPeriodPositions returns committee indices of the current period sync committee for input validators.
func CurrentPeriodPositions(st state.BeaconState, indices []primitives.ValidatorIndex) ([][]primitives.CommitteeIndex, error) {
root, err := SyncPeriodBoundaryRoot(st)
if err != nil {
return nil, err
}
pos, err := syncCommitteeCache.CurrentPeriodPositions(root, indices)
if errors.Is(err, cache.ErrNonExistingSyncCommitteeKey) {
committee, err := st.CurrentSyncCommittee()
if err != nil {
return nil, err
}
// Fill in the cache on miss.
go func() {
if err := syncCommitteeCache.UpdatePositionsInCommittee(root, st); err != nil {
log.WithError(err).Error("Could not fill sync committee cache on miss")
}
}()
pos = make([][]primitives.CommitteeIndex, len(indices))
for i, idx := range indices {
pubkey := st.PubkeyAtIndex(idx)
pos[i] = findSubCommitteeIndices(pubkey[:], committee.Pubkeys)
}
return pos, nil
}
if err != nil {
return nil, err
}
return pos, nil
}
// IsCurrentPeriodSyncCommittee returns true if the input validator index belongs in the current period sync committee
// along with the sync committee root.
// 1. Checks if the public key exists in the sync committee cache

View File

@@ -17,38 +17,6 @@ import (
"github.com/OffchainLabs/prysm/v6/testing/require"
)
func TestCurrentPeriodPositions(t *testing.T) {
helpers.ClearCache()
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
Pubkeys: make([][]byte, params.BeaconConfig().SyncCommitteeSize),
}
for i := 0; i < len(validators); i++ {
k := make([]byte, 48)
copy(k, strconv.Itoa(i))
validators[i] = &ethpb.Validator{
PublicKey: k,
}
syncCommittee.Pubkeys[i] = bytesutil.PadTo(k, 48)
}
state, err := state_native.InitializeFromProtoAltair(&ethpb.BeaconStateAltair{
Validators: validators,
})
require.NoError(t, err)
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
require.NoError(t, err, helpers.SyncCommitteeCache().UpdatePositionsInCommittee([32]byte{}, state))
positions, err := helpers.CurrentPeriodPositions(state, []primitives.ValidatorIndex{0, 1})
require.NoError(t, err)
require.Equal(t, 2, len(positions))
require.Equal(t, 1, len(positions[0]))
assert.Equal(t, primitives.CommitteeIndex(0), positions[0][0])
require.Equal(t, 1, len(positions[1]))
assert.Equal(t, primitives.CommitteeIndex(1), positions[1][0])
}
func TestIsCurrentEpochSyncCommittee_UsingCache(t *testing.T) {
helpers.ClearCache()

View File

@@ -309,29 +309,23 @@ func beaconProposerIndexAtSlotFulu(state state.ReadOnlyBeaconState, slot primiti
if err != nil {
return 0, errors.Wrap(err, "could not get proposer lookahead")
}
spe := params.BeaconConfig().SlotsPerEpoch
if e == stateEpoch {
return lookAhead[slot%spe], nil
return lookAhead[slot%params.BeaconConfig().SlotsPerEpoch], nil
}
// The caller is requesting the proposer for the next epoch
return lookAhead[spe+slot%spe], nil
return lookAhead[slot%params.BeaconConfig().SlotsPerEpoch+params.BeaconConfig().SlotsPerEpoch], nil
}
// BeaconProposerIndexAtSlot returns proposer index at the given slot from the
// point of view of the given state as head state
func BeaconProposerIndexAtSlot(ctx context.Context, state state.ReadOnlyBeaconState, slot primitives.Slot) (primitives.ValidatorIndex, error) {
e := slots.ToEpoch(slot)
stateEpoch := slots.ToEpoch(state.Slot())
// Even if the state is post Fulu, we may request a past proposer index.
if state.Version() >= version.Fulu && e >= params.BeaconConfig().FuluForkEpoch {
// We can use the cached lookahead only for the current and the next epoch.
if e == stateEpoch || e == stateEpoch+1 {
return beaconProposerIndexAtSlotFulu(state, slot)
}
if state.Version() >= version.Fulu {
return beaconProposerIndexAtSlotFulu(state, slot)
}
e := slots.ToEpoch(slot)
// The cache uses the state root of the previous epoch - minimum_seed_lookahead last slot as key. (e.g. Starting epoch 1, slot 32, the key would be block root at slot 31)
// For simplicity, the node will skip caching of genesis epoch. If the passed state has not yet reached this slot then we do not check the cache.
if e <= stateEpoch && e > params.BeaconConfig().GenesisEpoch+params.BeaconConfig().MinSeedLookahead {
// For simplicity, the node will skip caching of genesis epoch.
if e > params.BeaconConfig().GenesisEpoch+params.BeaconConfig().MinSeedLookahead {
s, err := slots.EpochEnd(e - 1)
if err != nil {
return 0, err

View File
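A minimal sketch of the Fulu lookahead indexing after this change, assuming a two-epoch lookahead slice laid out current-epoch-first; the error string mirrors the one asserted in the tests that follow.

```go
package main

import "fmt"

const slotsPerEpoch = 32

// proposerFromLookahead mirrors the indexing in beaconProposerIndexAtSlotFulu:
// the lookahead slice holds two epochs of proposer indices, current epoch
// first, next epoch second.
func proposerFromLookahead(lookahead []uint64, stateEpoch, slot uint64) (uint64, error) {
	e := slot / slotsPerEpoch
	switch e {
	case stateEpoch:
		return lookahead[slot%slotsPerEpoch], nil
	case stateEpoch + 1:
		return lookahead[slotsPerEpoch+slot%slotsPerEpoch], nil
	default:
		return 0, fmt.Errorf("slot %d is not in the current epoch %d or the next epoch", slot, stateEpoch)
	}
}

func main() {
	lookahead := make([]uint64, 2*slotsPerEpoch)
	lookahead[0] = 15             // proposer for the first slot of the current epoch
	lookahead[slotsPerEpoch] = 42 // proposer for the first slot of the next epoch

	idx, _ := proposerFromLookahead(lookahead, 4, 4*slotsPerEpoch)
	fmt.Println(idx) // 15
	idx, _ = proposerFromLookahead(lookahead, 4, 5*slotsPerEpoch)
	fmt.Println(idx) // 42
	_, err := proposerFromLookahead(lookahead, 4, 6*slotsPerEpoch)
	fmt.Println(err) // out of the cached window
}
```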

@@ -1161,10 +1161,6 @@ func TestValidatorMaxEffectiveBalance(t *testing.T) {
}
func TestBeaconProposerIndexAtSlotFulu(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.FuluForkEpoch = 1
params.OverrideBeaconConfig(cfg)
lookahead := make([]uint64, 64)
lookahead[0] = 15
lookahead[1] = 16
@@ -1184,4 +1180,8 @@ func TestBeaconProposerIndexAtSlotFulu(t *testing.T) {
idx, err = helpers.BeaconProposerIndexAtSlot(t.Context(), st, 130)
require.NoError(t, err)
require.Equal(t, primitives.ValidatorIndex(42), idx)
_, err = helpers.BeaconProposerIndexAtSlot(t.Context(), st, 95)
require.ErrorContains(t, "slot 95 is not in the current epoch 3 or the next epoch", err)
_, err = helpers.BeaconProposerIndexAtSlot(t.Context(), st, 160)
require.ErrorContains(t, "slot 160 is not in the current epoch 3 or the next epoch", err)
}

View File

@@ -107,3 +107,29 @@ func computeInfoCacheKey(nodeID enode.ID, custodyGroupCount uint64) [nodeInfoCac
return key
}
// ColumnIndices is a map of column indices where the key is the column index and the value is a boolean.
// The boolean could indicate different things, e.g. whether the column is needed (in the context of satisfying custody requirements)
// or present (in the context of a custody check on disk or in cache).
type ColumnIndices map[uint64]bool
// CopyTrueIndices allows callers to get a copy of the given ColumnIndices, filtering out any keys
// where the value == `false`.
func CopyTrueIndices(src ColumnIndices) ColumnIndices {
dst := make(ColumnIndices, len(src))
for k, v := range src {
if v {
dst[k] = true
}
}
return dst
}
// ColumnIndicesFromSlice converts a slice of uint64 indices into the ColumnIndices equivalent.
func ColumnIndicesFromSlice(indices []uint64) ColumnIndices {
ci := make(ColumnIndices, len(indices))
for _, index := range indices {
ci[index] = true
}
return ci
}

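A small usage sketch of the new ColumnIndices helpers; the type and functions are reproduced from the diff so the example is self-contained.

```go
package main

import "fmt"

// ColumnIndices is reproduced here so the example compiles on its own; in the
// diff it lives in the peerdas package together with the two helpers below.
type ColumnIndices map[uint64]bool

func ColumnIndicesFromSlice(indices []uint64) ColumnIndices {
	ci := make(ColumnIndices, len(indices))
	for _, index := range indices {
		ci[index] = true
	}
	return ci
}

func CopyTrueIndices(src ColumnIndices) ColumnIndices {
	dst := make(ColumnIndices, len(src))
	for k, v := range src {
		if v {
			dst[k] = true
		}
	}
	return dst
}

func main() {
	needed := ColumnIndicesFromSlice([]uint64{1, 17, 42})
	needed[17] = false // e.g. the column was found on disk, so it is no longer needed

	remaining := CopyTrueIndices(needed)
	fmt.Println(len(remaining), remaining[1], remaining[17]) // 2 true false
}
```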
View File

@@ -8,7 +8,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/altair"
b "github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/electra"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition/interop"
v "github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
@@ -375,18 +374,15 @@ func ProcessBlockForStateRoot(
}
// This calls altair block operations.
func altairOperations(ctx context.Context, st state.BeaconState, beaconBlock interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
var err error
exitInfo := v.ExitInformation(st)
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
return nil, errors.Wrap(err, "could not update total active balance cache")
}
st, err = b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), exitInfo)
func altairOperations(
ctx context.Context,
st state.BeaconState,
beaconBlock interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
st, err := b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), v.SlashValidator)
if err != nil {
return nil, errors.Wrap(err, "could not process altair proposer slashing")
}
st, err = b.ProcessAttesterSlashings(ctx, st, beaconBlock.Body().AttesterSlashings(), exitInfo)
st, err = b.ProcessAttesterSlashings(ctx, st, beaconBlock.Body().AttesterSlashings(), v.SlashValidator)
if err != nil {
return nil, errors.Wrap(err, "could not process altair attester slashing")
}
@@ -397,7 +393,7 @@ func altairOperations(ctx context.Context, st state.BeaconState, beaconBlock int
if _, err := altair.ProcessDeposits(ctx, st, beaconBlock.Body().Deposits()); err != nil {
return nil, errors.Wrap(err, "could not process altair deposit")
}
st, err = b.ProcessVoluntaryExits(ctx, st, beaconBlock.Body().VoluntaryExits(), exitInfo)
st, err = b.ProcessVoluntaryExits(ctx, st, beaconBlock.Body().VoluntaryExits())
if err != nil {
return nil, errors.Wrap(err, "could not process voluntary exits")
}
@@ -405,18 +401,15 @@ func altairOperations(ctx context.Context, st state.BeaconState, beaconBlock int
}
// This calls phase 0 block operations.
func phase0Operations(ctx context.Context, st state.BeaconState, beaconBlock interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
var err error
exitInfo := v.ExitInformation(st)
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
return nil, errors.Wrap(err, "could not update total active balance cache")
}
st, err = b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), exitInfo)
func phase0Operations(
ctx context.Context,
st state.BeaconState,
beaconBlock interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
st, err := b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), v.SlashValidator)
if err != nil {
return nil, errors.Wrap(err, "could not process block proposer slashings")
}
st, err = b.ProcessAttesterSlashings(ctx, st, beaconBlock.Body().AttesterSlashings(), exitInfo)
st, err = b.ProcessAttesterSlashings(ctx, st, beaconBlock.Body().AttesterSlashings(), v.SlashValidator)
if err != nil {
return nil, errors.Wrap(err, "could not process block attester slashings")
}
@@ -427,9 +420,5 @@ func phase0Operations(ctx context.Context, st state.BeaconState, beaconBlock int
if _, err := altair.ProcessDeposits(ctx, st, beaconBlock.Body().Deposits()); err != nil {
return nil, errors.Wrap(err, "could not process deposits")
}
st, err = b.ProcessVoluntaryExits(ctx, st, beaconBlock.Body().VoluntaryExits(), exitInfo)
if err != nil {
return nil, errors.Wrap(err, "could not process voluntary exits")
}
return st, nil
return b.ProcessVoluntaryExits(ctx, st, beaconBlock.Body().VoluntaryExits())
}

View File

@@ -13,55 +13,34 @@ import (
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/math"
mathutil "github.com/OffchainLabs/prysm/v6/math"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/pkg/errors"
)
// ExitInfo provides information about validator exits in the state.
type ExitInfo struct {
HighestExitEpoch primitives.Epoch
Churn uint64
TotalActiveBalance uint64
}
// ErrValidatorAlreadyExited is an error raised when trying to process an exit of
// an already exited validator
var ErrValidatorAlreadyExited = errors.New("validator already exited")
// ExitInformation returns information about validator exits.
func ExitInformation(s state.BeaconState) *ExitInfo {
exitInfo := &ExitInfo{}
// MaxExitEpochAndChurn returns the maximum non-FAR_FUTURE_EPOCH exit
// epoch and the number of them
func MaxExitEpochAndChurn(s state.BeaconState) (maxExitEpoch primitives.Epoch, churn uint64) {
farFutureEpoch := params.BeaconConfig().FarFutureEpoch
currentEpoch := slots.ToEpoch(s.Slot())
totalActiveBalance := uint64(0)
err := s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
e := val.ExitEpoch()
if e != farFutureEpoch {
if e > exitInfo.HighestExitEpoch {
exitInfo.HighestExitEpoch = e
exitInfo.Churn = 1
} else if e == exitInfo.HighestExitEpoch {
exitInfo.Churn++
if e > maxExitEpoch {
maxExitEpoch = e
churn = 1
} else if e == maxExitEpoch {
churn++
}
}
// Calculate total active balance in the same loop
if helpers.IsActiveValidatorUsingTrie(val, currentEpoch) {
totalActiveBalance += val.EffectiveBalance()
}
return nil
})
_ = err
// Apply minimum balance as per spec
exitInfo.TotalActiveBalance = mathutil.Max(params.BeaconConfig().EffectiveBalanceIncrement, totalActiveBalance)
return exitInfo
return
}
// InitiateValidatorExit takes in validator index and updates
@@ -85,117 +64,59 @@ func ExitInformation(s state.BeaconState) *ExitInfo {
// # Set validator exit epoch and withdrawable epoch
// validator.exit_epoch = exit_queue_epoch
// validator.withdrawable_epoch = Epoch(validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY)
func InitiateValidatorExit(
ctx context.Context,
s state.BeaconState,
idx primitives.ValidatorIndex,
exitInfo *ExitInfo,
) (state.BeaconState, error) {
func InitiateValidatorExit(ctx context.Context, s state.BeaconState, idx primitives.ValidatorIndex, exitQueueEpoch primitives.Epoch, churn uint64) (state.BeaconState, primitives.Epoch, error) {
validator, err := s.ValidatorAtIndex(idx)
if err != nil {
return nil, err
return nil, 0, err
}
if validator.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
return s, ErrValidatorAlreadyExited
return s, validator.ExitEpoch, ErrValidatorAlreadyExited
}
// Compute exit queue epoch.
if s.Version() < version.Electra {
if err = initiateValidatorExitPreElectra(ctx, s, exitInfo); err != nil {
return nil, err
// Relevant spec code from phase0:
//
// exit_epochs = [v.exit_epoch for v in state.validators if v.exit_epoch != FAR_FUTURE_EPOCH]
// exit_queue_epoch = max(exit_epochs + [compute_activation_exit_epoch(get_current_epoch(state))])
// exit_queue_churn = len([v for v in state.validators if v.exit_epoch == exit_queue_epoch])
// if exit_queue_churn >= get_validator_churn_limit(state):
// exit_queue_epoch += Epoch(1)
exitableEpoch := helpers.ActivationExitEpoch(time.CurrentEpoch(s))
if exitableEpoch > exitQueueEpoch {
exitQueueEpoch = exitableEpoch
churn = 0
}
activeValidatorCount, err := helpers.ActiveValidatorCount(ctx, s, time.CurrentEpoch(s))
if err != nil {
return nil, 0, errors.Wrap(err, "could not get active validator count")
}
currentChurn := helpers.ValidatorExitChurnLimit(activeValidatorCount)
if churn >= currentChurn {
exitQueueEpoch, err = exitQueueEpoch.SafeAdd(1)
if err != nil {
return nil, 0, err
}
}
} else {
// [Modified in Electra:EIP7251]
// exit_queue_epoch = compute_exit_epoch_and_update_churn(state, validator.effective_balance)
var err error
exitInfo.HighestExitEpoch, err = s.ExitEpochAndUpdateChurn(primitives.Gwei(validator.EffectiveBalance))
exitQueueEpoch, err = s.ExitEpochAndUpdateChurn(primitives.Gwei(validator.EffectiveBalance))
if err != nil {
return nil, err
return nil, 0, err
}
}
validator.ExitEpoch = exitInfo.HighestExitEpoch
validator.WithdrawableEpoch, err = exitInfo.HighestExitEpoch.SafeAddEpoch(params.BeaconConfig().MinValidatorWithdrawabilityDelay)
validator.ExitEpoch = exitQueueEpoch
validator.WithdrawableEpoch, err = exitQueueEpoch.SafeAddEpoch(params.BeaconConfig().MinValidatorWithdrawabilityDelay)
if err != nil {
return nil, err
return nil, 0, err
}
if err := s.UpdateValidatorAtIndex(idx, validator); err != nil {
return nil, err
return nil, 0, err
}
return s, nil
}
// InitiateValidatorExitForTotalBal has the same functionality as InitiateValidatorExit,
// the only difference being how total active balance is obtained. In InitiateValidatorExit
// it is calculated inside the function and in InitiateValidatorExitForTotalBal it's a
// function argument.
func InitiateValidatorExitForTotalBal(
ctx context.Context,
s state.BeaconState,
idx primitives.ValidatorIndex,
exitInfo *ExitInfo,
totalActiveBalance primitives.Gwei,
) (state.BeaconState, error) {
validator, err := s.ValidatorAtIndex(idx)
if err != nil {
return nil, err
}
if validator.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
return s, ErrValidatorAlreadyExited
}
// Compute exit queue epoch.
if s.Version() < version.Electra {
if err = initiateValidatorExitPreElectra(ctx, s, exitInfo); err != nil {
return nil, err
}
} else {
// [Modified in Electra:EIP7251]
// exit_queue_epoch = compute_exit_epoch_and_update_churn(state, validator.effective_balance)
var err error
exitInfo.HighestExitEpoch, err = s.ExitEpochAndUpdateChurnForTotalBal(totalActiveBalance, primitives.Gwei(validator.EffectiveBalance))
if err != nil {
return nil, err
}
}
validator.ExitEpoch = exitInfo.HighestExitEpoch
validator.WithdrawableEpoch, err = exitInfo.HighestExitEpoch.SafeAddEpoch(params.BeaconConfig().MinValidatorWithdrawabilityDelay)
if err != nil {
return nil, err
}
if err := s.UpdateValidatorAtIndex(idx, validator); err != nil {
return nil, err
}
return s, nil
}
func initiateValidatorExitPreElectra(ctx context.Context, s state.BeaconState, exitInfo *ExitInfo) error {
// Relevant spec code from phase0:
//
// exit_epochs = [v.exit_epoch for v in state.validators if v.exit_epoch != FAR_FUTURE_EPOCH]
// exit_queue_epoch = max(exit_epochs + [compute_activation_exit_epoch(get_current_epoch(state))])
// exit_queue_churn = len([v for v in state.validators if v.exit_epoch == exit_queue_epoch])
// if exit_queue_churn >= get_validator_churn_limit(state):
// exit_queue_epoch += Epoch(1)
exitableEpoch := helpers.ActivationExitEpoch(time.CurrentEpoch(s))
if exitableEpoch > exitInfo.HighestExitEpoch {
exitInfo.HighestExitEpoch = exitableEpoch
exitInfo.Churn = 0
}
activeValidatorCount, err := helpers.ActiveValidatorCount(ctx, s, time.CurrentEpoch(s))
if err != nil {
return errors.Wrap(err, "could not get active validator count")
}
currentChurn := helpers.ValidatorExitChurnLimit(activeValidatorCount)
if exitInfo.Churn >= currentChurn {
exitInfo.HighestExitEpoch, err = exitInfo.HighestExitEpoch.SafeAdd(1)
if err != nil {
return err
}
exitInfo.Churn = 1
} else {
exitInfo.Churn = exitInfo.Churn + 1
}
return nil
return s, exitQueueEpoch, nil
}
// SlashValidator slashes the malicious validator's balance and awards
@@ -231,12 +152,9 @@ func initiateValidatorExitPreElectra(ctx context.Context, s state.BeaconState, e
func SlashValidator(
ctx context.Context,
s state.BeaconState,
slashedIdx primitives.ValidatorIndex,
exitInfo *ExitInfo,
) (state.BeaconState, error) {
var err error
s, err = InitiateValidatorExitForTotalBal(ctx, s, slashedIdx, exitInfo, primitives.Gwei(exitInfo.TotalActiveBalance))
slashedIdx primitives.ValidatorIndex) (state.BeaconState, error) {
maxExitEpoch, churn := MaxExitEpochAndChurn(s)
s, _, err := InitiateValidatorExit(ctx, s, slashedIdx, maxExitEpoch, churn)
if err != nil && !errors.Is(err, ErrValidatorAlreadyExited) {
return nil, errors.Wrapf(err, "could not initiate validator %d exit", slashedIdx)
}

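A minimal sketch of the pre-Electra exit-queue computation that InitiateValidatorExit now performs with caller-supplied arguments: given the highest existing exit epoch and its churn (from `MaxExitEpochAndChurn`), the queue epoch is bumped when the churn limit is already saturated. The helper below is a simplified stand-in, not the Prysm function.

```go
package main

import "fmt"

// exitQueueEpoch mirrors the pre-Electra branch of InitiateValidatorExit,
// minus state access and overflow-safe arithmetic.
func exitQueueEpoch(maxExitEpoch, activationExitEpoch, churn, churnLimit uint64) uint64 {
	queueEpoch := maxExitEpoch
	if activationExitEpoch > queueEpoch {
		queueEpoch = activationExitEpoch
		churn = 0
	}
	if churn >= churnLimit {
		queueEpoch++
	}
	return queueEpoch
}

func main() {
	// No prior exits beyond the activation-exit epoch and churn below the limit:
	fmt.Println(exitQueueEpoch(0, 5, 0, 4)) // 5
	// Churn at the limit: the exit is pushed one epoch further out.
	fmt.Println(exitQueueEpoch(7, 5, 4, 4)) // 8
}
```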
View File

@@ -49,11 +49,9 @@ func TestInitiateValidatorExit_AlreadyExited(t *testing.T) {
}}
state, err := state_native.InitializeFromProtoPhase0(base)
require.NoError(t, err)
exitInfo := &validators.ExitInfo{HighestExitEpoch: 199, Churn: 1}
newState, err := validators.InitiateValidatorExit(t.Context(), state, 0, exitInfo)
newState, epoch, err := validators.InitiateValidatorExit(t.Context(), state, 0, 199, 1)
require.ErrorIs(t, err, validators.ErrValidatorAlreadyExited)
assert.Equal(t, primitives.Epoch(199), exitInfo.HighestExitEpoch)
assert.Equal(t, uint64(1), exitInfo.Churn)
require.Equal(t, exitEpoch, epoch)
v, err := newState.ValidatorAtIndex(0)
require.NoError(t, err)
assert.Equal(t, exitEpoch, v.ExitEpoch, "Already exited")
@@ -70,11 +68,9 @@ func TestInitiateValidatorExit_ProperExit(t *testing.T) {
}}
state, err := state_native.InitializeFromProtoPhase0(base)
require.NoError(t, err)
exitInfo := &validators.ExitInfo{HighestExitEpoch: exitedEpoch + 2, Churn: 1}
newState, err := validators.InitiateValidatorExit(t.Context(), state, idx, exitInfo)
newState, epoch, err := validators.InitiateValidatorExit(t.Context(), state, idx, exitedEpoch+2, 1)
require.NoError(t, err)
assert.Equal(t, exitedEpoch+2, exitInfo.HighestExitEpoch)
assert.Equal(t, uint64(2), exitInfo.Churn)
require.Equal(t, exitedEpoch+2, epoch)
v, err := newState.ValidatorAtIndex(idx)
require.NoError(t, err)
assert.Equal(t, exitedEpoch+2, v.ExitEpoch, "Exit epoch was not the highest")
@@ -92,11 +88,9 @@ func TestInitiateValidatorExit_ChurnOverflow(t *testing.T) {
}}
state, err := state_native.InitializeFromProtoPhase0(base)
require.NoError(t, err)
exitInfo := &validators.ExitInfo{HighestExitEpoch: exitedEpoch + 2, Churn: 4}
newState, err := validators.InitiateValidatorExit(t.Context(), state, idx, exitInfo)
newState, epoch, err := validators.InitiateValidatorExit(t.Context(), state, idx, exitedEpoch+2, 4)
require.NoError(t, err)
assert.Equal(t, exitedEpoch+3, exitInfo.HighestExitEpoch)
assert.Equal(t, uint64(1), exitInfo.Churn)
require.Equal(t, exitedEpoch+3, epoch)
// Because of exit queue overflow,
// validator who init exited has to wait one more epoch.
@@ -116,8 +110,7 @@ func TestInitiateValidatorExit_WithdrawalOverflows(t *testing.T) {
}}
state, err := state_native.InitializeFromProtoPhase0(base)
require.NoError(t, err)
exitInfo := &validators.ExitInfo{HighestExitEpoch: params.BeaconConfig().FarFutureEpoch - 1, Churn: 1}
_, err = validators.InitiateValidatorExit(t.Context(), state, 1, exitInfo)
_, _, err = validators.InitiateValidatorExit(t.Context(), state, 1, params.BeaconConfig().FarFutureEpoch-1, 1)
require.ErrorContains(t, "addition overflows", err)
}
@@ -153,11 +146,12 @@ func TestInitiateValidatorExit_ProperExit_Electra(t *testing.T) {
require.NoError(t, err)
require.Equal(t, primitives.Gwei(0), ebtc)
newState, err := validators.InitiateValidatorExit(t.Context(), state, idx, &validators.ExitInfo{}) // exit info is not used in electra
newState, epoch, err := validators.InitiateValidatorExit(t.Context(), state, idx, 0, 0) // exitQueueEpoch and churn are not used in electra
require.NoError(t, err)
// Expect that the exit epoch is the next available epoch with max seed lookahead.
want := helpers.ActivationExitEpoch(exitedEpoch + 1)
require.Equal(t, want, epoch)
v, err := newState.ValidatorAtIndex(idx)
require.NoError(t, err)
assert.Equal(t, want, v.ExitEpoch, "Exit epoch was not the highest")
@@ -196,7 +190,7 @@ func TestSlashValidator_OK(t *testing.T) {
require.NoError(t, err, "Could not get proposer")
proposerBal, err := state.BalanceAtIndex(proposer)
require.NoError(t, err)
slashedState, err := validators.SlashValidator(t.Context(), state, slashedIdx, validators.ExitInformation(state))
slashedState, err := validators.SlashValidator(t.Context(), state, slashedIdx)
require.NoError(t, err, "Could not slash validator")
require.Equal(t, true, slashedState.Version() == version.Phase0)
@@ -250,7 +244,7 @@ func TestSlashValidator_Electra(t *testing.T) {
require.NoError(t, err, "Could not get proposer")
proposerBal, err := state.BalanceAtIndex(proposer)
require.NoError(t, err)
slashedState, err := validators.SlashValidator(t.Context(), state, slashedIdx, validators.ExitInformation(state))
slashedState, err := validators.SlashValidator(t.Context(), state, slashedIdx)
require.NoError(t, err, "Could not slash validator")
require.Equal(t, true, slashedState.Version() == version.Electra)
@@ -511,8 +505,8 @@ func TestValidatorMaxExitEpochAndChurn(t *testing.T) {
for _, tt := range tests {
s, err := state_native.InitializeFromProtoPhase0(tt.state)
require.NoError(t, err)
exitInfo := validators.ExitInformation(s)
require.Equal(t, tt.wantedEpoch, exitInfo.HighestExitEpoch)
require.Equal(t, tt.wantedChurn, exitInfo.Churn)
epoch, churn := validators.MaxExitEpochAndChurn(s)
require.Equal(t, tt.wantedEpoch, epoch)
require.Equal(t, tt.wantedChurn, churn)
}
}

View File

@@ -4,6 +4,7 @@ go_library(
name = "go_default_library",
srcs = [
"availability_blobs.go",
"availability_columns.go",
"blob_cache.go",
"data_column_cache.go",
"iface.go",
@@ -12,6 +13,7 @@ go_library(
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/das",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/db/filesystem:go_default_library",
"//beacon-chain/verification:go_default_library",
"//config/fieldparams:go_default_library",
@@ -21,6 +23,7 @@ go_library(
"//runtime/logging:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
@@ -30,6 +33,7 @@ go_test(
name = "go_default_test",
srcs = [
"availability_blobs_test.go",
"availability_columns_test.go",
"blob_cache_test.go",
"data_column_cache_test.go",
],
@@ -45,6 +49,7 @@ go_test(
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

View File

@@ -29,7 +29,7 @@ type LazilyPersistentStoreBlob struct {
verifier BlobBatchVerifier
}
var _ AvailabilityStore = &LazilyPersistentStoreBlob{}
var _ AvailabilityChecker = &LazilyPersistentStoreBlob{}
// BlobBatchVerifier enables LazyAvailabilityStore to manage the verification process
// going from ROBlob->VerifiedROBlob, while avoiding the decision of which individual verifications

View File

@@ -0,0 +1,208 @@
package das
import (
"context"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/ethereum/go-ethereum/p2p/enode"
errors "github.com/pkg/errors"
)
// LazilyPersistentStoreColumn is an implementation of AvailabilityStore to be used when batch syncing data columns.
// This implementation will hold any data columns passed to Persist until the IsDataAvailable is called for their
// block, at which time they will undergo full verification and be saved to the disk.
type LazilyPersistentStoreColumn struct {
store *filesystem.DataColumnStorage
nodeID enode.ID
cache *dataColumnCache
newDataColumnsVerifier verification.NewDataColumnsVerifier
custodyGroupCount uint64
}
var _ AvailabilityChecker = &LazilyPersistentStoreColumn{}
// DataColumnsVerifier enables LazilyPersistentStoreColumn to manage the verification process
// going from RODataColumn->VerifiedRODataColumn, while avoiding the decision of which individual verifications
// to run and in what order. Since LazilyPersistentStoreColumn always tries to verify and save data columns only when
// they are all available, the interface takes a slice of data column sidecars.
type DataColumnsVerifier interface {
VerifiedRODataColumns(ctx context.Context, blk blocks.ROBlock, scs []blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error)
}
// NewLazilyPersistentStoreColumn creates a new LazilyPersistentStoreColumn.
// WARNING: The resulting LazilyPersistentStoreColumn is NOT thread-safe.
func NewLazilyPersistentStoreColumn(
store *filesystem.DataColumnStorage,
nodeID enode.ID,
newDataColumnsVerifier verification.NewDataColumnsVerifier,
custodyGroupCount uint64,
) *LazilyPersistentStoreColumn {
return &LazilyPersistentStoreColumn{
store: store,
nodeID: nodeID,
cache: newDataColumnCache(),
newDataColumnsVerifier: newDataColumnsVerifier,
custodyGroupCount: custodyGroupCount,
}
}
// Persist adds columns to the working column cache. Columns stored in this cache will be persisted
// for at least as long as the node is running. Once IsDataAvailable succeeds, all columns referenced
// by the given block are guaranteed to be persisted for the remainder of the retention period.
func (s *LazilyPersistentStoreColumn) Persist(current primitives.Slot, sidecars ...blocks.RODataColumn) error {
if len(sidecars) == 0 {
return nil
}
// It is safe to retrieve the first sidecar.
firstSidecar := sidecars[0]
if len(sidecars) > 1 {
firstRoot := firstSidecar.BlockRoot()
for _, sidecar := range sidecars[1:] {
if sidecar.BlockRoot() != firstRoot {
return errMixedRoots
}
}
}
firstSidecarEpoch, currentEpoch := slots.ToEpoch(firstSidecar.Slot()), slots.ToEpoch(current)
if !params.WithinDAPeriod(firstSidecarEpoch, currentEpoch) {
return nil
}
key := cacheKey{slot: firstSidecar.Slot(), root: firstSidecar.BlockRoot()}
entry := s.cache.ensure(key)
for _, sidecar := range sidecars {
if err := entry.stash(&sidecar); err != nil {
return errors.Wrap(err, "stash DataColumnSidecar")
}
}
return nil
}
// IsDataAvailable returns nil if all the commitments in the given block are persisted to the db and have been verified.
// DataColumnsSidecars already in the db are assumed to have been previously verified against the block.
func (s *LazilyPersistentStoreColumn) IsDataAvailable(ctx context.Context, currentSlot primitives.Slot, block blocks.ROBlock) error {
blockCommitments, err := s.fullCommitmentsToCheck(s.nodeID, block, currentSlot)
if err != nil {
return errors.Wrapf(err, "full commitments to check with block root `%#x` and current slot `%d`", block.Root(), currentSlot)
}
// Return early for blocks that do not have any commitments.
if blockCommitments.count() == 0 {
return nil
}
// Get the root of the block.
blockRoot := block.Root()
// Build the cache key for the block.
key := cacheKey{slot: block.Block().Slot(), root: blockRoot}
// Retrieve the cache entry for the block, or create an empty one if it doesn't exist.
entry := s.cache.ensure(key)
// Delete the cache entry for the block at the end.
defer s.cache.delete(key)
// Set the disk summary for the block in the cache entry.
entry.setDiskSummary(s.store.Summary(blockRoot))
// Verify we have all the expected sidecars, and fail fast if any are missing or inconsistent.
// We don't try to salvage problematic batches because this indicates a misbehaving peer and we'd rather
// ignore their response and decrease their peer score.
roDataColumns, err := entry.filter(blockRoot, blockCommitments)
if err != nil {
return errors.Wrap(err, "entry filter")
}
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#datacolumnsidecarsbyrange-v1
verifier := s.newDataColumnsVerifier(roDataColumns, verification.ByRangeRequestDataColumnSidecarRequirements)
if err := verifier.ValidFields(); err != nil {
return errors.Wrap(err, "valid fields")
}
if err := verifier.SidecarInclusionProven(); err != nil {
return errors.Wrap(err, "sidecar inclusion proven")
}
if err := verifier.SidecarKzgProofVerified(); err != nil {
return errors.Wrap(err, "sidecar KZG proof verified")
}
verifiedRoDataColumns, err := verifier.VerifiedRODataColumns()
if err != nil {
return errors.Wrap(err, "verified RO data columns - should never happen")
}
if err := s.store.Save(verifiedRoDataColumns); err != nil {
return errors.Wrap(err, "save data column sidecars")
}
return nil
}
// fullCommitmentsToCheck returns the commitments to check for a given block.
func (s *LazilyPersistentStoreColumn) fullCommitmentsToCheck(nodeID enode.ID, block blocks.ROBlock, currentSlot primitives.Slot) (*safeCommitmentsArray, error) {
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
// Return early for blocks that are pre-Fulu.
if block.Version() < version.Fulu {
return &safeCommitmentsArray{}, nil
}
// Compute the block epoch.
blockSlot := block.Block().Slot()
blockEpoch := slots.ToEpoch(blockSlot)
// Compute the current epoch.
currentEpoch := slots.ToEpoch(currentSlot)
// Return early if the request is out of the MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS window.
if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
return &safeCommitmentsArray{}, nil
}
// Retrieve the KZG commitments for the block.
kzgCommitments, err := block.Block().Body().BlobKzgCommitments()
if err != nil {
return nil, errors.Wrap(err, "blob KZG commitments")
}
// Return early if there are no commitments in the block.
if len(kzgCommitments) == 0 {
return &safeCommitmentsArray{}, nil
}
// Retrieve peer info.
samplingSize := max(s.custodyGroupCount, samplesPerSlot)
peerInfo, _, err := peerdas.Info(nodeID, samplingSize)
if err != nil {
return nil, errors.Wrap(err, "peer info")
}
// Create a safe commitments array for the custody columns.
commitmentsArray := &safeCommitmentsArray{}
commitmentsArraySize := uint64(len(commitmentsArray))
for column := range peerInfo.CustodyColumns {
if column >= commitmentsArraySize {
return nil, errors.Errorf("custody column index %d too high (max allowed %d) - should never happen", column, commitmentsArraySize)
}
commitmentsArray[column] = kzgCommitments
}
return commitmentsArray, nil
}

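A minimal sketch of the custody filtering at the end of `fullCommitmentsToCheck`: the block's KZG commitments are recorded only at the column indices the node custodies, so IsDataAvailable later checks exactly those columns. Types below are simplified stand-ins for the commitments array and peer info.

```go
package main

import "fmt"

const numberOfColumns = 128

// populateCustodyCommitments mirrors the tail of fullCommitmentsToCheck with
// a plain map standing in for peerInfo.CustodyColumns.
func populateCustodyCommitments(kzgCommitments [][]byte, custodyColumns map[uint64]bool) ([numberOfColumns][][]byte, error) {
	var commitmentsArray [numberOfColumns][][]byte
	for column := range custodyColumns {
		if column >= numberOfColumns {
			return commitmentsArray, fmt.Errorf("custody column index %d too high (max allowed %d)", column, numberOfColumns)
		}
		commitmentsArray[column] = kzgCommitments
	}
	return commitmentsArray, nil
}

func main() {
	kzg := [][]byte{make([]byte, 48), make([]byte, 48)}
	custody := map[uint64]bool{1: true, 17: true, 42: true}

	arr, err := populateCustodyCommitments(kzg, custody)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(arr[1]), len(arr[2]), len(arr[42])) // 2 0 2
}
```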
View File

@@ -0,0 +1,302 @@
package das
import (
"context"
"testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/ethereum/go-ethereum/p2p/enode"
)
var commitments = [][]byte{
bytesutil.PadTo([]byte("a"), 48),
bytesutil.PadTo([]byte("b"), 48),
bytesutil.PadTo([]byte("c"), 48),
bytesutil.PadTo([]byte("d"), 48),
}
func TestPersist(t *testing.T) {
t.Run("no sidecars", func(t *testing.T) {
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
err := lazilyPersistentStoreColumns.Persist(0)
require.NoError(t, err)
require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
})
t.Run("mixed roots", func(t *testing.T) {
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
dataColumnParamsByBlockRoot := []util.DataColumnParam{
{Slot: 1, Index: 1},
{Slot: 2, Index: 2},
}
roSidecars, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
err := lazilyPersistentStoreColumns.Persist(0, roSidecars...)
require.ErrorIs(t, err, errMixedRoots)
require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
})
t.Run("outside DA period", func(t *testing.T) {
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
dataColumnParamsByBlockRoot := []util.DataColumnParam{
{Slot: 1, Index: 1},
}
roSidecars, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
err := lazilyPersistentStoreColumns.Persist(1_000_000, roSidecars...)
require.NoError(t, err)
require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
})
t.Run("nominal", func(t *testing.T) {
const slot = 42
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
dataColumnParamsByBlockRoot := []util.DataColumnParam{
{Slot: slot, Index: 1},
{Slot: slot, Index: 5},
}
roSidecars, roDataColumns := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
err := lazilyPersistentStoreColumns.Persist(slot, roSidecars...)
require.NoError(t, err)
require.Equal(t, 1, len(lazilyPersistentStoreColumns.cache.entries))
key := cacheKey{slot: slot, root: roDataColumns[0].BlockRoot()}
entry, ok := lazilyPersistentStoreColumns.cache.entries[key]
require.Equal(t, true, ok)
// A call to Persist does NOT save the sidecars to disk.
require.Equal(t, uint64(0), entry.diskSummary.Count())
require.DeepSSZEqual(t, roDataColumns[0], *entry.scs[1])
require.DeepSSZEqual(t, roDataColumns[1], *entry.scs[5])
for i, roDataColumn := range entry.scs {
if map[int]bool{1: true, 5: true}[i] {
continue
}
require.IsNil(t, roDataColumn)
}
})
}
func TestIsDataAvailable(t *testing.T) {
newDataColumnsVerifier := func(dataColumnSidecars []blocks.RODataColumn, _ []verification.Requirement) verification.DataColumnsVerifier {
return &mockDataColumnsVerifier{t: t, dataColumnSidecars: dataColumnSidecars}
}
ctx := t.Context()
t.Run("without commitments", func(t *testing.T) {
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, newDataColumnsVerifier, 0)
err := lazilyPersistentStoreColumns.IsDataAvailable(ctx, 0 /*current slot*/, signedRoBlock)
require.NoError(t, err)
})
t.Run("with commitments", func(t *testing.T) {
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
signedBeaconBlockFulu.Block.Body.BlobKzgCommitments = commitments
signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)
block := signedRoBlock.Block()
slot := block.Slot()
proposerIndex := block.ProposerIndex()
parentRoot := block.ParentRoot()
stateRoot := block.StateRoot()
bodyRoot, err := block.Body().HashTreeRoot()
require.NoError(t, err)
root := signedRoBlock.Root()
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, newDataColumnsVerifier, 0)
indices := [...]uint64{1, 17, 19, 42, 75, 87, 102, 117}
dataColumnsParams := make([]util.DataColumnParam, 0, len(indices))
for _, index := range indices {
dataColumnParams := util.DataColumnParam{
Index: index,
KzgCommitments: commitments,
Slot: slot,
ProposerIndex: proposerIndex,
ParentRoot: parentRoot[:],
StateRoot: stateRoot[:],
BodyRoot: bodyRoot[:],
}
dataColumnsParams = append(dataColumnsParams, dataColumnParams)
}
_, verifiedRoDataColumns := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnsParams)
key := cacheKey{root: root}
entry := lazilyPersistentStoreColumns.cache.ensure(key)
defer lazilyPersistentStoreColumns.cache.delete(key)
for _, verifiedRoDataColumn := range verifiedRoDataColumns {
err := entry.stash(&verifiedRoDataColumn.RODataColumn)
require.NoError(t, err)
}
err = lazilyPersistentStoreColumns.IsDataAvailable(ctx, slot, signedRoBlock)
require.NoError(t, err)
actual, err := dataColumnStorage.Get(root, indices[:])
require.NoError(t, err)
summary := dataColumnStorage.Summary(root)
require.Equal(t, uint64(len(indices)), summary.Count())
require.DeepSSZEqual(t, verifiedRoDataColumns, actual)
})
}
func TestFullCommitmentsToCheck(t *testing.T) {
windowSlots, err := slots.EpochEnd(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
require.NoError(t, err)
testCases := []struct {
name string
commitments [][]byte
block func(*testing.T) blocks.ROBlock
slot primitives.Slot
}{
{
name: "Pre-Fulu block",
block: func(t *testing.T) blocks.ROBlock {
return newSignedRoBlock(t, util.NewBeaconBlockElectra())
},
},
{
name: "Commitments outside data availability window",
block: func(t *testing.T) blocks.ROBlock {
beaconBlockElectra := util.NewBeaconBlockElectra()
// Block is from slot 0, "current slot" is window size +1 (so outside the window)
beaconBlockElectra.Block.Body.BlobKzgCommitments = commitments
return newSignedRoBlock(t, beaconBlockElectra)
},
slot: windowSlots + 1,
},
{
name: "Commitments within data availability window",
block: func(t *testing.T) blocks.ROBlock {
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
signedBeaconBlockFulu.Block.Body.BlobKzgCommitments = commitments
signedBeaconBlockFulu.Block.Slot = 100
return newSignedRoBlock(t, signedBeaconBlockFulu)
},
commitments: commitments,
slot: 100,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
numberOfColumns := params.BeaconConfig().NumberOfColumns
b := tc.block(t)
s := NewLazilyPersistentStoreColumn(nil, enode.ID{}, nil, numberOfColumns)
commitmentsArray, err := s.fullCommitmentsToCheck(enode.ID{}, b, tc.slot)
require.NoError(t, err)
for _, commitments := range commitmentsArray {
require.DeepEqual(t, tc.commitments, commitments)
}
})
}
}
func newSignedRoBlock(t *testing.T, signedBeaconBlock interface{}) blocks.ROBlock {
sb, err := blocks.NewSignedBeaconBlock(signedBeaconBlock)
require.NoError(t, err)
rb, err := blocks.NewROBlock(sb)
require.NoError(t, err)
return rb
}
type mockDataColumnsVerifier struct {
t *testing.T
dataColumnSidecars []blocks.RODataColumn
validCalled, SidecarInclusionProvenCalled, SidecarKzgProofVerifiedCalled bool
}
var _ verification.DataColumnsVerifier = &mockDataColumnsVerifier{}
func (m *mockDataColumnsVerifier) VerifiedRODataColumns() ([]blocks.VerifiedRODataColumn, error) {
require.Equal(m.t, true, m.validCalled && m.SidecarInclusionProvenCalled && m.SidecarKzgProofVerifiedCalled)
verifiedDataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, len(m.dataColumnSidecars))
for _, dataColumnSidecar := range m.dataColumnSidecars {
verifiedDataColumnSidecar := blocks.NewVerifiedRODataColumn(dataColumnSidecar)
verifiedDataColumnSidecars = append(verifiedDataColumnSidecars, verifiedDataColumnSidecar)
}
return verifiedDataColumnSidecars, nil
}
func (m *mockDataColumnsVerifier) SatisfyRequirement(verification.Requirement) {}
func (m *mockDataColumnsVerifier) ValidFields() error {
m.validCalled = true
return nil
}
func (m *mockDataColumnsVerifier) CorrectSubnet(dataColumnSidecarSubTopic string, expectedTopics []string) error {
return nil
}
func (m *mockDataColumnsVerifier) NotFromFutureSlot() error { return nil }
func (m *mockDataColumnsVerifier) SlotAboveFinalized() error { return nil }
func (m *mockDataColumnsVerifier) ValidProposerSignature(ctx context.Context) error { return nil }
func (m *mockDataColumnsVerifier) SidecarParentSeen(parentSeen func([fieldparams.RootLength]byte) bool) error {
return nil
}
func (m *mockDataColumnsVerifier) SidecarParentValid(badParent func([fieldparams.RootLength]byte) bool) error {
return nil
}
func (m *mockDataColumnsVerifier) SidecarParentSlotLower() error { return nil }
func (m *mockDataColumnsVerifier) SidecarDescendsFromFinalized() error { return nil }
func (m *mockDataColumnsVerifier) SidecarInclusionProven() error {
m.SidecarInclusionProvenCalled = true
return nil
}
func (m *mockDataColumnsVerifier) SidecarKzgProofVerified() error {
m.SidecarKzgProofVerifiedCalled = true
return nil
}
func (m *mockDataColumnsVerifier) SidecarProposerExpected(ctx context.Context) error { return nil }

View File

@@ -15,5 +15,12 @@ import (
// durably persisted before returning a non-error value.
type AvailabilityStore interface {
IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
AvailabilityChecker
Persist(current primitives.Slot, blobSidecar ...blocks.ROBlob) error
}
// AvailabilityChecker is the minimum interface needed to check if data is available for a block.
// We should prefer this interface over AvailabilityStore in places where we don't need to persist blob data.
type AvailabilityChecker interface {
IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
}

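A minimal sketch of how the narrower `AvailabilityChecker` interface can be consumed: a caller that only verifies availability depends on `IsDataAvailable` alone, so either a full store or a lightweight checker satisfies it. The types below are simplified stand-ins for the Prysm slot and block types.

```go
package main

import (
	"context"
	"fmt"
)

// Simplified stand-ins for primitives.Slot and blocks.ROBlock.
type slot uint64
type roBlock struct{}

// availabilityChecker mirrors the new AvailabilityChecker interface: a
// consumer that only checks availability no longer needs Persist.
type availabilityChecker interface {
	IsDataAvailable(ctx context.Context, current slot, b roBlock) error
}

type alwaysAvailable struct{}

func (alwaysAvailable) IsDataAvailable(context.Context, slot, roBlock) error { return nil }

// importBlock only needs to know whether the block's data is available, so it
// takes the narrow interface rather than a full availability store.
func importBlock(ctx context.Context, c availabilityChecker, current slot, b roBlock) error {
	if err := c.IsDataAvailable(ctx, current, b); err != nil {
		return fmt.Errorf("data not available: %w", err)
	}
	return nil
}

func main() {
	err := importBlock(context.Background(), alwaysAvailable{}, 0, roBlock{})
	fmt.Println(err) // <nil>
}
```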
View File

@@ -13,7 +13,7 @@ type MockAvailabilityStore struct {
PersistBlobsCallback func(current primitives.Slot, blobSidecar ...blocks.ROBlob) error
}
var _ AvailabilityStore = &MockAvailabilityStore{}
var _ AvailabilityChecker = &MockAvailabilityStore{}
// IsDataAvailable satisfies the corresponding method of the AvailabilityStore interface in a way that is useful for tests.
func (m *MockAvailabilityStore) IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {

View File

@@ -116,43 +116,19 @@ func (l *periodicEpochLayout) pruneBefore(before primitives.Epoch) (*pruneSummar
}
// Roll up summaries and clean up per-epoch directories.
rollup := &pruneSummary{}
// Track which period directories might be empty after epoch removal
periodsToCheck := make(map[string]struct{})
for epoch, sum := range sums {
rollup.blobsPruned += sum.blobsPruned
rollup.failedRemovals = append(rollup.failedRemovals, sum.failedRemovals...)
rmdir := l.epochDir(epoch)
periodDir := l.periodDir(epoch)
if len(sum.failedRemovals) == 0 {
if err := l.fs.Remove(rmdir); err != nil {
log.WithField("dir", rmdir).WithError(err).Error("Failed to remove epoch directory while pruning")
} else {
periodsToCheck[periodDir] = struct{}{}
}
} else {
log.WithField("dir", rmdir).WithField("numFailed", len(sum.failedRemovals)).WithError(err).Error("Unable to remove epoch directory due to pruning failures")
}
}
// Clean up empty period directories
for periodDir := range periodsToCheck {
entries, err := afero.ReadDir(l.fs, periodDir)
if err != nil {
log.WithField("dir", periodDir).WithError(err).Debug("Failed to read period directory contents")
continue
}
// Only attempt to remove if directory is empty
if len(entries) == 0 {
if err := l.fs.Remove(periodDir); err != nil {
log.WithField("dir", periodDir).WithError(err).Error("Failed to remove empty period directory")
}
}
}
return rollup, nil
}

View File

@@ -4,7 +4,6 @@ import (
"encoding/binary"
"os"
"testing"
"time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v6/config/params"
@@ -196,48 +195,3 @@ func TestLayoutPruneBefore(t *testing.T) {
})
}
}
func TestLayoutByEpochPruneBefore(t *testing.T) {
roots := testRoots(10)
cases := []struct {
name string
pruned []testIdent
remain []testIdent
err error
sum pruneSummary
}{
{
name: "single epoch period cleanup",
pruned: []testIdent{
{offset: 0, blobIdent: blobIdent{root: roots[0], epoch: 367076, index: 0}},
},
remain: []testIdent{
{offset: 0, blobIdent: blobIdent{root: roots[1], epoch: 371176, index: 0}}, // Different period
},
sum: pruneSummary{blobsPruned: 1},
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
fs, bs := NewEphemeralBlobStorageAndFs(t, WithLayout(LayoutNameByEpoch))
pruned := testSetupBlobIdentPaths(t, fs, bs, c.pruned)
remain := testSetupBlobIdentPaths(t, fs, bs, c.remain)
time.Sleep(1 * time.Second)
for _, id := range pruned {
_, err := fs.Stat(bs.layout.sszPath(id))
require.Equal(t, true, os.IsNotExist(err))
dirs := bs.layout.blockParentDirs(id)
for i := len(dirs) - 1; i > 0; i-- {
_, err = fs.Stat(dirs[i])
require.Equal(t, true, os.IsNotExist(err))
}
}
for _, id := range remain {
_, err := fs.Stat(bs.layout.sszPath(id))
require.NoError(t, err)
}
})
}
}

View File

@@ -318,7 +318,6 @@ func startBaseServices(cliCtx *cli.Context, beacon *BeaconNode, depositAddress s
}
beacon.BlobStorage.WarmCache()
beacon.DataColumnStorage.WarmCache()
log.Debugln("Starting Slashing DB")
if err := beacon.startSlasherDB(cliCtx, clearer); err != nil {
@@ -941,7 +940,6 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
FinalizationFetcher: chainService,
BlockReceiver: chainService,
BlobReceiver: chainService,
DataColumnReceiver: chainService,
AttestationReceiver: chainService,
GenesisTimeFetcher: chainService,
GenesisFetcher: chainService,
@@ -969,7 +967,6 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
Router: router,
ClockWaiter: b.clockWaiter,
BlobStorage: b.BlobStorage,
DataColumnStorage: b.DataColumnStorage,
TrackedValidatorsCache: b.trackedValidatorsCache,
PayloadIDCache: b.payloadIDCache,
LCStore: b.lcStore,
@@ -1111,7 +1108,7 @@ func (b *BeaconNode) registerPrunerService(cliCtx *cli.Context) error {
func (b *BeaconNode) RegisterBackfillService(cliCtx *cli.Context, bfs *backfill.Store) error {
pa := peers.NewAssigner(b.fetchP2P().Peers(), b.forkChoicer)
bf, err := backfill.NewService(cliCtx.Context, bfs, b.BlobStorage, b.clockWaiter, b.fetchP2P(), pa, b.BackfillOpts...)
bf, err := backfill.NewService(cliCtx.Context, bfs, b.BlobStorage, b.DataColumnStorage, b.clockWaiter, b.fetchP2P(), pa, b.BackfillOpts...)
if err != nil {
return errors.Wrap(err, "error initializing backfill service")
}

View File

@@ -195,6 +195,7 @@ go_test(
"@com_github_multiformats_go_multiaddr//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],

View File

@@ -155,7 +155,6 @@ func (s *Service) custodyGroupCountFromPeerENR(pid peer.ID) uint64 {
log := log.WithFields(logrus.Fields{
"peerID": pid,
"defaultValue": custodyRequirement,
"agent": agentString(pid, s.Host()),
})
// Retrieve the ENR of the peer.

View File

@@ -8,7 +8,6 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
testp2p "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/consensus-types/wrapper"
@@ -270,7 +269,6 @@ func TestCustodyGroupCountFromPeer(t *testing.T) {
service := &Service{
peers: peers,
metaData: tc.metadata,
host: testp2p.NewTestP2P(t).Host(),
}
// Retrieve the custody count from the remote peer.
@@ -331,7 +329,6 @@ func TestCustodyGroupCountFromPeerENR(t *testing.T) {
service := &Service{
peers: peers,
host: testp2p.NewTestP2P(t).Host(),
}
actual := service.custodyGroupCountFromPeerENR(pid)

View File

@@ -443,27 +443,20 @@ func (s *Service) findPeers(ctx context.Context, missingPeerCount uint) ([]*enod
return peersToDial, ctx.Err()
}
node := iterator.Node()
// Remove duplicates, keeping the node with higher seq.
existing, ok := nodeByNodeID[node.ID()]
if ok && existing.Seq() >= node.Seq() {
continue // keep existing and skip.
}
// Treat nodes that exist in nodeByNodeID with higher seq numbers as new peers
// Skip peer not matching the filter.
node := iterator.Node()
if !s.filterPeer(node) {
if ok {
// this means the existing peer with the lower sequence number is no longer valid
delete(nodeByNodeID, existing.ID())
missingPeerCount++
}
continue
}
// We found a new peer. Decrease the missing peer count.
// Remove duplicates, keeping the node with higher seq.
existing, ok := nodeByNodeID[node.ID()]
if ok && existing.Seq() > node.Seq() {
continue
}
nodeByNodeID[node.ID()] = node
// We found a new peer. Decrease the missing peer count.
missingPeerCount--
}
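As a standalone illustration of the keep-highest-seq rule above, the following minimal sketch substitutes a tiny struct for *enode.Node; names here are illustrative only.

package main

import "fmt"

// node stands in for *enode.Node with just the fields the deduplication cares about.
type node struct {
	id  string
	seq uint64
}

// dedupeBySeq keeps, for each node ID, only the record with the highest
// sequence number, mirroring the nodeByNodeID map logic above.
func dedupeBySeq(in []node) map[string]node {
	byID := make(map[string]node)
	for _, n := range in {
		if existing, ok := byID[n.id]; ok && existing.seq >= n.seq {
			continue // keep the existing, newer-or-equal record
		}
		byID[n.id] = n
	}
	return byID
}

func main() {
	fmt.Println(dedupeBySeq([]node{{"a", 1}, {"a", 3}, {"a", 2}, {"b", 1}})) // map[a:{a 3} b:{b 1}]
}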
@@ -684,7 +677,7 @@ func (s *Service) filterPeer(node *enode.Node) bool {
peerData, multiAddrs, err := convertToAddrInfo(node)
if err != nil {
log.WithError(err).WithField("node", node.String()).Debug("Could not convert to peer data")
log.WithError(err).Debug("Could not convert to peer data")
return false
}
@@ -851,7 +844,7 @@ func convertToMultiAddr(nodes []*enode.Node) []ma.Multiaddr {
func convertToAddrInfo(node *enode.Node) (*peer.AddrInfo, []ma.Multiaddr, error) {
multiAddrs, err := retrieveMultiAddrsFromNode(node)
if err != nil {
return nil, nil, errors.Wrap(err, "retrieve multiaddrs from node")
return nil, nil, err
}
if len(multiAddrs) == 0 {

View File

@@ -1,11 +1,9 @@
package p2p
import (
"bytes"
"context"
"crypto/ecdsa"
"crypto/rand"
"crypto/sha256"
"fmt"
mathRand "math/rand"
"net"
@@ -60,81 +58,6 @@ func createAddrAndPrivKey(t *testing.T) (net.IP, *ecdsa.PrivateKey) {
return ipAddr, pkey
}
// createTestNodeWithID creates a LocalNode for testing with a deterministic private key.
// This is needed for deduplication tests, where we need the same node ID across different sequence numbers.
func createTestNodeWithID(t *testing.T, id string) *enode.LocalNode {
// Create a deterministic reader based on the ID for consistent key generation
h := sha256.New()
h.Write([]byte(id))
seedBytes := h.Sum(nil)
// Create a deterministic reader using the seed
deterministicReader := bytes.NewReader(seedBytes)
// Generate the private key using the same approach as the production code
privKey, _, err := crypto.GenerateSecp256k1Key(deterministicReader)
require.NoError(t, err)
// Convert to ECDSA private key for enode usage
ecdsaPrivKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(privKey)
require.NoError(t, err)
db, err := enode.OpenDB("")
require.NoError(t, err)
t.Cleanup(func() { db.Close() })
localNode := enode.NewLocalNode(db, ecdsaPrivKey)
// Set basic properties
localNode.SetStaticIP(net.ParseIP("127.0.0.1"))
localNode.Set(enr.TCP(3000))
localNode.Set(enr.UDP(3000))
localNode.Set(enr.WithEntry(eth2EnrKey, make([]byte, 16)))
return localNode
}
// createTestNodeRandom creates a LocalNode for testing using the existing createAddrAndPrivKey function
func createTestNodeRandom(t *testing.T) *enode.LocalNode {
_, privKey := createAddrAndPrivKey(t)
db, err := enode.OpenDB("")
require.NoError(t, err)
t.Cleanup(func() { db.Close() })
localNode := enode.NewLocalNode(db, privKey)
// Set basic properties
localNode.SetStaticIP(net.ParseIP("127.0.0.1"))
localNode.Set(enr.TCP(3000))
localNode.Set(enr.UDP(3000))
localNode.Set(enr.WithEntry(eth2EnrKey, make([]byte, 16)))
return localNode
}
// setNodeSeq updates a LocalNode to have the specified sequence number
func setNodeSeq(localNode *enode.LocalNode, seq uint64) {
// Force set the sequence number - we need to update the record seq-1 times
// because it starts at 1
currentSeq := localNode.Node().Seq()
for currentSeq < seq {
localNode.Set(enr.WithEntry("dummy", currentSeq))
currentSeq++
}
}
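A hedged usage sketch of this helper, assuming each changed ENR entry increments the record's sequence number by exactly one:

// Inside a test body: bump a fresh node's record by three sequence numbers.
localNode := createTestNodeRandom(t)
before := localNode.Node().Seq()
setNodeSeq(localNode, before+3)
require.Equal(t, before+3, localNode.Node().Seq())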
// setNodeSubnets sets the attestation subnets for a LocalNode
func setNodeSubnets(localNode *enode.LocalNode, attSubnets []uint64) {
if len(attSubnets) > 0 {
bitV := bitfield.NewBitvector64()
for _, subnet := range attSubnets {
bitV.SetBitAt(subnet, true)
}
localNode.Set(enr.WithEntry(attSubnetEnrKey, &bitV))
}
}
func TestCreateListener(t *testing.T) {
port := 1024
ipAddr, pkey := createAddrAndPrivKey(t)
@@ -318,7 +241,7 @@ func TestCreateLocalNode(t *testing.T) {
// Check fork is set.
fork := new([]byte)
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(eth2EnrKey, fork)))
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(eth2ENRKey, fork)))
require.NotEmpty(t, *fork)
// Check att subnets.
@@ -569,7 +492,7 @@ func TestMultipleDiscoveryAddresses(t *testing.T) {
node := enode.NewLocalNode(db, key)
node.Set(enr.IPv4{127, 0, 0, 1})
node.Set(enr.IPv6{0x20, 0x01, 0x48, 0x60, 0, 0, 0x20, 0x01, 0, 0, 0, 0, 0, 0, 0x00, 0x68})
s := &Service{dv5Listener: testp2p.NewMockListener(node, nil)}
s := &Service{dv5Listener: mockListener{localNode: node}}
multiAddresses, err := s.DiscoveryAddresses()
require.NoError(t, err)
@@ -594,7 +517,7 @@ func TestDiscoveryV5_SeqNumber(t *testing.T) {
node := enode.NewLocalNode(db, key)
node.Set(enr.IPv4{127, 0, 0, 1})
currentSeq := node.Seq()
s := &Service{dv5Listener: testp2p.NewMockListener(node, nil)}
s := &Service{dv5Listener: mockListener{localNode: node}}
_, err = s.DiscoveryAddresses()
require.NoError(t, err)
newSeq := node.Seq()
@@ -606,7 +529,7 @@ func TestDiscoveryV5_SeqNumber(t *testing.T) {
nodeTwo.Set(enr.IPv6{0x20, 0x01, 0x48, 0x60, 0, 0, 0x20, 0x01, 0, 0, 0, 0, 0, 0, 0x00, 0x68})
seqTwo := nodeTwo.Seq()
assert.NotEqual(t, seqTwo, newSeq)
sTwo := &Service{dv5Listener: testp2p.NewMockListener(nodeTwo, nil)}
sTwo := &Service{dv5Listener: mockListener{localNode: nodeTwo}}
_, err = sTwo.DiscoveryAddresses()
require.NoError(t, err)
assert.Equal(t, seqTwo+1, nodeTwo.Seq())
@@ -963,289 +886,3 @@ func TestRefreshPersistentSubnets(t *testing.T) {
// Reset the config.
params.OverrideBeaconConfig(defaultCfg)
}
func TestFindPeers_NodeDeduplication(t *testing.T) {
params.SetupTestConfigCleanup(t)
cache.SubnetIDs.EmptyAllCaches()
defer cache.SubnetIDs.EmptyAllCaches()
ctx := t.Context()
// Create LocalNodes and manipulate sequence numbers
localNode1 := createTestNodeWithID(t, "node1")
localNode2 := createTestNodeWithID(t, "node2")
localNode3 := createTestNodeWithID(t, "node3")
// Create different sequence versions of node1
setNodeSeq(localNode1, 1)
node1_seq1 := localNode1.Node()
setNodeSeq(localNode1, 2)
node1_seq2 := localNode1.Node() // Same ID, higher seq
setNodeSeq(localNode1, 3)
node1_seq3 := localNode1.Node() // Same ID, even higher seq
// Other nodes with seq 1
node2_seq1 := localNode2.Node()
node3_seq1 := localNode3.Node()
tests := []struct {
name string
nodes []*enode.Node
missingPeers uint
expectedCount int
description string
eval func(t *testing.T, result []*enode.Node)
}{
{
name: "No duplicates - all unique nodes",
nodes: []*enode.Node{
node2_seq1,
node3_seq1,
},
missingPeers: 2,
expectedCount: 2,
description: "Should return all unique nodes without deduplication",
eval: nil, // No special validation needed
},
{
name: "Duplicate with lower seq comes first - should replace",
nodes: []*enode.Node{
node1_seq1,
node1_seq2, // Higher seq, should replace
node2_seq1, // Different node added after duplicates are processed
},
missingPeers: 2, // Need 2 peers so we process all nodes
expectedCount: 2, // Should get node1 (with higher seq) and node2
description: "Should keep node with higher sequence number when duplicate found",
eval: func(t *testing.T, result []*enode.Node) {
// Should have node2 and node1 with higher seq (node1_seq2)
foundNode1WithHigherSeq := false
for _, node := range result {
if node.ID() == node1_seq2.ID() {
require.Equal(t, node1_seq2.Seq(), node.Seq(), "Node1 should have higher seq")
foundNode1WithHigherSeq = true
}
}
require.Equal(t, true, foundNode1WithHigherSeq, "Should have node1 with higher seq")
},
},
{
name: "Duplicate with higher seq comes first - should keep existing",
nodes: []*enode.Node{
node1_seq3, // Higher seq
node1_seq2, // Lower seq, should be skipped (continue branch)
node1_seq1, // Even lower seq, should also be skipped (continue branch)
node2_seq1, // Different node added after duplicates are processed
},
missingPeers: 2,
expectedCount: 2,
description: "Should keep existing node when it has higher sequence number and skip all lower seq duplicates",
eval: func(t *testing.T, result []*enode.Node) {
// Should have kept the node with highest seq (node1_seq3)
foundNode1WithHigherSeq := false
for _, node := range result {
if node.ID() == node1_seq3.ID() {
require.Equal(t, node1_seq3.Seq(), node.Seq(), "Node1 should have highest seq")
foundNode1WithHigherSeq = true
}
}
require.Equal(t, true, foundNode1WithHigherSeq, "Should have node1 with highest seq")
},
},
{
name: "Multiple duplicates with increasing seq",
nodes: []*enode.Node{
node1_seq1,
node1_seq2, // Should replace seq1
node1_seq3, // Should replace seq2
node2_seq1, // Different node added after duplicates are processed
},
missingPeers: 2,
expectedCount: 2,
description: "Should keep updating to highest sequence number",
eval: func(t *testing.T, result []*enode.Node) {
// Should have the node with highest seq (node1_seq3)
foundNode1WithHigherSeq := false
for _, node := range result {
if node.ID() == node1_seq3.ID() {
require.Equal(t, node1_seq3.Seq(), node.Seq(), "Node1 should have highest seq")
foundNode1WithHigherSeq = true
}
}
require.Equal(t, true, foundNode1WithHigherSeq, "Should have node1 with highest seq")
},
},
{
name: "Duplicate with equal seq comes after - should skip",
nodes: []*enode.Node{
node1_seq2, // First occurrence
node1_seq2, // Same exact node instance, should be skipped (continue branch for >= case)
node2_seq1, // Different node
},
missingPeers: 2,
expectedCount: 2,
description: "Should skip duplicate with equal sequence number",
eval: func(t *testing.T, result []*enode.Node) {
// Should have exactly one instance of node1_seq2 and one instance of node2_seq1
foundNode1 := false
foundNode2 := false
for _, node := range result {
if node.ID() == node1_seq2.ID() {
require.Equal(t, node1_seq2.Seq(), node.Seq(), "Node1 should have the expected seq")
require.Equal(t, false, foundNode1, "Should have only one instance of node1") // Ensure no duplicates
foundNode1 = true
}
if node.ID() == node2_seq1.ID() {
foundNode2 = true
}
}
require.Equal(t, true, foundNode1, "Should have node1")
require.Equal(t, true, foundNode2, "Should have node2")
},
},
{
name: "Mix of unique and duplicate nodes",
nodes: []*enode.Node{
node1_seq1,
node2_seq1,
node1_seq2, // Should replace node1_seq1
node3_seq1,
node1_seq3, // Should replace node1_seq2
},
missingPeers: 3,
expectedCount: 3,
description: "Should handle mix of unique nodes and duplicates correctly",
eval: nil, // Basic count validation is sufficient
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
fakePeer := testp2p.NewTestP2P(t)
s := &Service{
cfg: &Config{
MaxPeers: 30,
},
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
peers: peers.NewStatus(ctx, &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{},
}),
host: fakePeer.BHost,
}
localNode := createTestNodeRandom(t)
mockIter := testp2p.NewMockIterator(tt.nodes)
s.dv5Listener = testp2p.NewMockListener(localNode, mockIter)
ctxWithTimeout, cancel := context.WithTimeout(ctx, 1*time.Second)
defer cancel()
result, err := s.findPeers(ctxWithTimeout, tt.missingPeers)
require.NoError(t, err, tt.description)
require.Equal(t, tt.expectedCount, len(result), tt.description)
if tt.eval != nil {
tt.eval(t, result)
}
})
}
}
// callbackIterator allows us to execute callbacks at specific points during iteration
type callbackIterator struct {
nodes []*enode.Node
index int
callbacks map[int]func() // map from index to callback function
}
func (c *callbackIterator) Next() bool {
// Execute the callback for this index, if one exists, before checking whether we can continue
if callback, exists := c.callbacks[c.index]; exists {
callback()
}
return c.index < len(c.nodes)
}
func (c *callbackIterator) Node() *enode.Node {
if c.index >= len(c.nodes) {
return nil
}
node := c.nodes[c.index]
c.index++
return node
}
func (c *callbackIterator) Close() {
// Nothing to clean up for this simple implementation
}
func TestFindPeers_received_bad_existing_node(t *testing.T) {
// This test triggers delete(nodeByNodeID, node.ID()) in subnets.go by:
// 1. Processing node1_seq1 first (passes filterPeer, gets added to the map).
// 2. Marking the peer as bad via the callback before node1_seq2 is processed.
// 3. Processing node1_seq2 (fails filterPeer, triggering the delete since ok == true).
params.SetupTestConfigCleanup(t)
cache.SubnetIDs.EmptyAllCaches()
defer cache.SubnetIDs.EmptyAllCaches()
// Create LocalNode with same ID but different sequences
localNode1 := createTestNodeWithID(t, "testnode")
node1_seq1 := localNode1.Node() // Get current node
currentSeq := node1_seq1.Seq()
setNodeSeq(localNode1, currentSeq+1) // Increment sequence by 1
node1_seq2 := localNode1.Node() // This should have higher seq
// Additional node to ensure we have enough peers to process
localNode2 := createTestNodeWithID(t, "othernode")
node2 := localNode2.Node()
fakePeer := testp2p.NewTestP2P(t)
service := &Service{
cfg: &Config{
MaxPeers: 30,
},
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
peers: peers.NewStatus(t.Context(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{},
}),
host: fakePeer.BHost,
}
// Create iterator with callback that marks peer as bad before processing node1_seq2
iter := &callbackIterator{
nodes: []*enode.Node{node1_seq1, node1_seq2, node2},
index: 0,
callbacks: map[int]func(){
1: func() { // Before processing node1_seq2 (index 1)
// Mark peer as bad before processing node1_seq2
peerData, _, _ := convertToAddrInfo(node1_seq2)
if peerData != nil {
service.peers.Add(node1_seq2.Record(), peerData.ID, nil, network.DirUnknown)
// Mark as bad peer - need enough increments to exceed threshold (6)
for i := 0; i < 10; i++ {
service.peers.Scorers().BadResponsesScorer().Increment(peerData.ID)
}
}
},
},
}
localNode := createTestNodeRandom(t)
service.dv5Listener = testp2p.NewMockListener(localNode, iter)
// Run findPeers - node1_seq1 gets processed first, then callback marks peer bad, then node1_seq2 fails
ctxWithTimeout, cancel := context.WithTimeout(t.Context(), 1*time.Second)
defer cancel()
result, err := service.findPeers(ctxWithTimeout, 3)
require.NoError(t, err)
require.Equal(t, 1, len(result))
}

View File

@@ -13,17 +13,10 @@ import (
"github.com/sirupsen/logrus"
)
var (
errForkScheduleMismatch = errors.New("peer fork schedule incompatible")
errCurrentDigestMismatch = errors.Wrap(errForkScheduleMismatch, "current_fork_digest mismatch")
errNextVersionMismatch = errors.Wrap(errForkScheduleMismatch, "next_fork_version mismatch")
errNextDigestMismatch = errors.Wrap(errForkScheduleMismatch, "nfd (next fork digest) mismatch")
)
var errEth2ENRDigestMismatch = errors.New("fork digest of peer does not match local value")
const (
eth2EnrKey = "eth2" // The `eth2` ENR entry advertizes the node's view of the fork schedule with an ssz-encoded ENRForkID value.
nfdEnrKey = "nfd" // The `nfd` ENR entry separately advertizes the "next fork digest" aspect of the fork schedule.
)
// ENR key used for Ethereum consensus-related fork data.
var eth2ENRKey = params.BeaconNetworkConfig().ETH2Key
// ForkDigest returns the current fork digest of
// the node according to the local clock.
@@ -40,86 +33,44 @@ func (s *Service) currentForkDigest() ([4]byte, error) {
// Compares fork ENRs between an incoming peer's record and our node's
// local record values for current and next fork version/epoch.
func compareForkENR(self, peer *enr.Record) error {
peerEntry, err := forkEntry(peer)
peerForkENR, err := forkEntry(peer)
if err != nil {
return err
}
selfEntry, err := forkEntry(self)
currentForkENR, err := forkEntry(self)
if err != nil {
return err
}
peerString, err := SerializeENR(peer)
enrString, err := SerializeENR(peer)
if err != nil {
return err
}
// Clients SHOULD connect to peers with current_fork_digest, next_fork_version,
// and next_fork_epoch that match local values.
if !bytes.Equal(peerEntry.CurrentForkDigest, selfEntry.CurrentForkDigest) {
return errors.Wrapf(errCurrentDigestMismatch,
if !bytes.Equal(peerForkENR.CurrentForkDigest, currentForkENR.CurrentForkDigest) {
return errors.Wrapf(errEth2ENRDigestMismatch,
"fork digest of peer with ENR %s: %v, does not match local value: %v",
peerString,
peerEntry.CurrentForkDigest,
selfEntry.CurrentForkDigest,
enrString,
peerForkENR.CurrentForkDigest,
currentForkENR.CurrentForkDigest,
)
}
// Clients MAY connect to peers with the same current_fork_version but a
// different next_fork_version/next_fork_epoch. Unless ENRForkID is manually
// updated to match before the earlier next_fork_epoch of the two clients,
// these types of connecting clients will be unable to interact successfully
// starting at the earlier next_fork_epoch.
if peerEntry.NextForkEpoch != selfEntry.NextForkEpoch {
if peerForkENR.NextForkEpoch != currentForkENR.NextForkEpoch {
log.WithFields(logrus.Fields{
"peerNextForkEpoch": peerEntry.NextForkEpoch,
"peerNextForkVersion": peerEntry.NextForkVersion,
"peerENR": peerString,
"peerNextForkEpoch": peerForkENR.NextForkEpoch,
"peerENR": enrString,
}).Trace("Peer matches fork digest but has different next fork epoch")
// We allow the connection even though the peer has a different view of the next fork epoch.
// This can happen when a peer has not yet upgraded ahead of a fork or a BPO schedule change,
// so we let the connection continue until the fork boundary.
return nil
}
// Since we agree on the next fork epoch, we also require the next fork version to be in agreement.
if !bytes.Equal(peerEntry.NextForkVersion, selfEntry.NextForkVersion) {
return errors.Wrapf(errNextVersionMismatch,
"next fork version of peer with ENR %s: %#x, does not match local value: %#x",
peerString, peerEntry.NextForkVersion, selfEntry.NextForkVersion)
}
// Fulu adds the following to the spec:
// ---
// A new entry is added to the ENR under the key nfd, short for next fork digest. This entry
// communicates the digest of the next scheduled fork, regardless of whether it is a regular
// or a Blob-Parameters-Only fork. This new entry MUST be added once FULU_FORK_EPOCH is assigned
// any value other than FAR_FUTURE_EPOCH. Adding this entry prior to the Fulu fork will not
// impact peering as nodes will ignore unknown ENR entries and nfd mismatches do not cause
// disconnects.
// When discovering and interfacing with peers, nodes MUST evaluate nfd alongside their existing
// consideration of the ENRForkID::next_* fields under the eth2 key, to form a more accurate
// view of the peer's intended next fork for the purposes of sustained peering. If there is a
// mismatch, the node MUST NOT disconnect before the fork boundary, but it MAY disconnect
// at/after the fork boundary.
// Nodes unprepared to follow the Fulu fork will be unaware of nfd entries. However, their
// existing comparison of eth2 entries (concretely next_fork_epoch) is sufficient to detect
// upcoming divergence.
// ---
// Because this is a new inbound connection, we lean on the pre-Fulu point that clients
// MAY connect to peers with the same current_fork_version but a different
// next_fork_version/next_fork_epoch, which implies we can choose not to connect to them
// when these don't match.
//
// Given that the next_fork_epoch matches, we will require the next_fork_digest to match.
if !params.FuluEnabled() {
return nil
}
peerNFD, selfNFD := nfd(peer), nfd(self)
if peerNFD != selfNFD {
return errors.Wrapf(errNextDigestMismatch,
"next fork digest of peer with ENR %s: %v, does not match local value: %v",
peerString, peerNFD, selfNFD)
if !bytes.Equal(peerForkENR.NextForkVersion, currentForkENR.NextForkVersion) {
log.WithFields(logrus.Fields{
"peerNextForkVersion": peerForkENR.NextForkVersion,
"peerENR": enrString,
}).Trace("Peer matches fork digest but has different next fork version")
}
return nil
}
@@ -151,7 +102,7 @@ func updateENR(node *enode.LocalNode, entry, next params.NetworkScheduleEntry) e
if err != nil {
return err
}
forkEntry := enr.WithEntry(eth2EnrKey, enc)
forkEntry := enr.WithEntry(eth2ENRKey, enc)
node.Set(forkEntry)
return nil
}
@@ -160,7 +111,7 @@ func updateENR(node *enode.LocalNode, entry, next params.NetworkScheduleEntry) e
// under the Ethereum consensus EnrKey
func forkEntry(record *enr.Record) (*pb.ENRForkID, error) {
sszEncodedForkEntry := make([]byte, 16)
entry := enr.WithEntry(eth2EnrKey, &sszEncodedForkEntry)
entry := enr.WithEntry(eth2ENRKey, &sszEncodedForkEntry)
err := record.Load(entry)
if err != nil {
return nil, err
@@ -171,15 +122,3 @@ func forkEntry(record *enr.Record) (*pb.ENRForkID, error) {
}
return forkEntry, nil
}
// nfd retrieves the value of the `nfd` ("next fork digest") key from an ENR record.
func nfd(record *enr.Record) [4]byte {
digest := [4]byte{}
entry := enr.WithEntry(nfdEnrKey, &digest)
if err := record.Load(entry); err != nil {
// Treat a missing nfd entry as an empty digest.
// We do this to avoid errors when checking peers that have not upgraded for fulu.
return [4]byte{}
}
return digest
}

View File

@@ -16,12 +16,14 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/sirupsen/logrus"
logTest "github.com/sirupsen/logrus/hooks/test"
)
func TestCompareForkENR(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096
params.BeaconConfig().InitializeForkSchedule()
logrus.SetLevel(logrus.TraceLevel)
db, err := enode.OpenDB("")
assert.NoError(t, err)
@@ -59,10 +61,10 @@ func TestCompareForkENR(t *testing.T) {
require.NoError(t, updateENR(peer, currentCopy, next))
return peer.Node()
},
expectErr: errCurrentDigestMismatch,
expectErr: errEth2ENRDigestMismatch,
},
{
name: "next_fork_epoch match, next_fork_version mismatch",
name: "next fork version mismatch",
node: func(t *testing.T) *enode.Node {
// Create a peer with the same current fork digest and next fork version/epoch.
peer := enode.NewLocalNode(db, k)
@@ -73,44 +75,25 @@ func TestCompareForkENR(t *testing.T) {
require.NoError(t, updateENR(peer, current, nextCopy))
return peer.Node()
},
expectErr: errNextVersionMismatch,
expectLog: "Peer matches fork digest but has different next fork version",
},
{
name: "next fork epoch mismatch, next fork digest mismatch",
name: "next fork epoch mismatch",
node: func(t *testing.T) *enode.Node {
// Create a peer with the same current fork digest and next fork version/epoch.
peer := enode.NewLocalNode(db, k)
nextCopy := next
// next epoch does not match, and neither does the next fork digest.
nextCopy.Epoch = nextCopy.Epoch + 1
nfd := [4]byte{0xFF, 0xFF, 0xFF, 0xFF}
require.NotEqual(t, next.ForkDigest, nfd)
//peer.Set(enr.WithEntry(nfdEnrKey, nfd[:]))
nextCopy.ForkDigest = nfd
require.NoError(t, updateENR(peer, current, nextCopy))
return peer.Node()
},
// no error because we allow a different next fork version / digest if the next fork epoch does not match
},
{
name: "next fork epoch -match-, next fork digest mismatch",
node: func(t *testing.T) *enode.Node {
peer := enode.NewLocalNode(db, k)
nextCopy := next
nfd := [4]byte{0xFF, 0xFF, 0xFF, 0xFF}
// next epoch *does match*, but the next fork digest doesn't - so we should get an error.
require.NotEqual(t, next.ForkDigest, nfd)
nextCopy.ForkDigest = nfd
//peer.Set(enr.WithEntry(nfdEnrKey, nfd[:]))
require.NoError(t, updateENR(peer, current, nextCopy))
return peer.Node()
},
expectErr: errNextDigestMismatch,
expectLog: "Peer matches fork digest but has different next fork epoch",
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
hook := logTest.NewGlobal()
peer := c.node(t)
err := compareForkENR(self.Node().Record(), peer.Record())
if c.expectErr != nil {
@@ -118,27 +101,13 @@ func TestCompareForkENR(t *testing.T) {
} else {
require.NoError(t, err, "Expected no error comparing fork ENRs")
}
if c.expectLog != "" {
require.LogsContain(t, hook, c.expectLog, "Expected log message not found")
}
})
}
}
func TestNfdSetAndLoad(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096
params.BeaconConfig().InitializeForkSchedule()
db, err := enode.OpenDB("")
assert.NoError(t, err)
_, k := createAddrAndPrivKey(t)
clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot)
current := params.GetNetworkScheduleEntry(clock.CurrentEpoch())
next := params.NextNetworkScheduleEntry(clock.CurrentEpoch())
next.ForkDigest = [4]byte{0xFF, 0xFF, 0xFF, 0xFF} // Ensure a unique digest for testing.
self := enode.NewLocalNode(db, k)
require.NoError(t, updateENR(self, current, next))
n := nfd(self.Node().Record())
assert.Equal(t, next.ForkDigest, n, "Expected nfd to match next fork digest")
}
func TestDiscv5_AddRetrieveForkEntryENR(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.BeaconConfig().InitializeForkSchedule()
@@ -153,7 +122,7 @@ func TestDiscv5_AddRetrieveForkEntryENR(t *testing.T) {
}
enc, err := enrForkID.MarshalSSZ()
require.NoError(t, err)
entry := enr.WithEntry(eth2EnrKey, enc)
entry := enr.WithEntry(eth2ENRKey, enc)
temp := t.TempDir()
randNum := rand.Int()
tempPath := path.Join(temp, strconv.Itoa(randNum))

View File

@@ -18,7 +18,6 @@ var (
"lodestar",
"js-libp2p",
"rust-libp2p",
"erigon/caplin",
}
p2pPeerCount = promauto.NewGaugeVec(prometheus.GaugeOpts{
Name: "p2p_peer_count",

View File

@@ -42,7 +42,7 @@ func (a *Assigner) freshPeers() ([]peer.ID, error) {
if flags.Get().MinimumSyncPeers < required {
required = flags.Get().MinimumSyncPeers
}
_, peers := a.ps.BestFinalized(params.BeaconConfig().MaxPeersToSync, a.fc.FinalizedCheckpoint().Epoch)
_, peers := a.ps.BestFinalized(-1, a.fc.FinalizedCheckpoint().Epoch)
if len(peers) < required {
log.WithFields(logrus.Fields{
"suitable": len(peers),
@@ -52,27 +52,33 @@ func (a *Assigner) freshPeers() ([]peer.ID, error) {
return peers, nil
}
type AssignmentFilter func([]peer.ID) []peer.ID
// Assign uses the "BestFinalized" method to select the best peers that agree on a canonical block
// for the configured finalized epoch. At most `n` peers will be returned. The `busy` param can be used
// to filter out peers that we know we don't want to connect to, for instance if we are trying to limit
// the number of outbound requests to each peer from a given component.
func (a *Assigner) Assign(busy map[peer.ID]bool, n int) ([]peer.ID, error) {
func (a *Assigner) Assign(filter AssignmentFilter) ([]peer.ID, error) {
best, err := a.freshPeers()
if err != nil {
return nil, err
}
return pickBest(busy, n, best), nil
return filter(best), nil
}
func pickBest(busy map[peer.ID]bool, n int, best []peer.ID) []peer.ID {
ps := make([]peer.ID, 0, n)
for _, p := range best {
if len(ps) == n {
return ps
}
if !busy[p] {
ps = append(ps, p)
// NotBusy returns a filter that yields at most n peer.IDs that are not in the `busy` map.
// A non-positive n (e.g. -1) returns all peers that are not busy.
func NotBusy(busy map[peer.ID]bool, n int) AssignmentFilter {
return func(peers []peer.ID) []peer.ID {
ps := make([]peer.ID, 0)
for _, p := range peers {
if n > 0 && len(ps) == n {
return ps
}
if !busy[p] {
ps = append(ps, p)
}
}
return ps
}
return ps
}
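A hedged sketch of a call site composing the filter with Assign; assignIdlePeers is a hypothetical helper, assuming NotBusy and Assigner are exported from the peers package as this diff suggests.

// assignIdlePeers requests up to n peers that are not already busy with
// another request. A non-positive n returns every idle peer.
func assignIdlePeers(a *peers.Assigner, busy map[peer.ID]bool, n int) ([]peer.ID, error) {
	return a.Assign(peers.NotBusy(busy, n))
}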

View File

@@ -18,8 +18,9 @@ func TestPickBest(t *testing.T) {
expected []peer.ID
}{
{
name: "",
n: 0,
name: "don't limit",
n: 0,
expected: best,
},
{
name: "none busy",
@@ -88,7 +89,8 @@ func TestPickBest(t *testing.T) {
if c.best == nil {
c.best = best
}
pb := pickBest(c.busy, c.n, c.best)
filt := NotBusy(c.busy, c.n)
pb := filt(c.best)
require.Equal(t, len(c.expected), len(pb))
for i := range c.expected {
require.Equal(t, c.expected[i], pb[i])

View File

@@ -770,7 +770,7 @@ func (p *Status) BestFinalized(maxPeers int, ourFinalizedEpoch primitives.Epoch)
}
// Trim potential peers to at most maxPeers.
if len(potentialPIDs) > maxPeers {
if maxPeers > 0 && len(potentialPIDs) > maxPeers {
potentialPIDs = potentialPIDs[:maxPeers]
}

View File

@@ -10,7 +10,6 @@ import (
pubsub "github.com/libp2p/go-libp2p-pubsub"
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -41,68 +40,49 @@ func (s *Service) setAllForkDigests() {
}
}
var (
errNotReadyToSubscribe = fmt.Errorf("not ready to subscribe, service is not initialized")
errMissingLeadingSlash = fmt.Errorf("topic is missing leading slash")
errTopicMissingProtocolVersion = fmt.Errorf("topic is missing protocol version (eth2)")
errTopicPathWrongPartCount = fmt.Errorf("topic path has wrong part count")
errDigestInvalid = fmt.Errorf("digest is invalid")
errDigestUnexpected = fmt.Errorf("digest is unexpected")
errSnappySuffixMissing = fmt.Errorf("snappy suffix is missing")
errTopicNotFound = fmt.Errorf("topic not found in gossip topic mappings")
)
// CanSubscribe returns true if the topic is of interest and we could subscribe to it.
func (s *Service) CanSubscribe(topic string) bool {
if err := s.checkSubscribable(topic); err != nil {
if !errors.Is(err, errNotReadyToSubscribe) {
logrus.WithError(err).WithField("topic", topic).Debug("CanSubscribe failed")
}
return false
}
return true
}
func (s *Service) checkSubscribable(topic string) error {
if !s.isInitialized() {
return errNotReadyToSubscribe
return false
}
parts := strings.Split(topic, "/")
if len(parts) != 5 {
return errTopicPathWrongPartCount
return false
}
// The topic must start with a slash, which means the first part will be empty.
if parts[0] != "" {
return errMissingLeadingSlash
return false
}
protocol, rawDigest, suffix := parts[1], parts[2], parts[4]
if protocol != "eth2" {
return errTopicMissingProtocolVersion
}
if suffix != encoder.ProtocolSuffixSSZSnappy {
return errSnappySuffixMissing
if parts[1] != "eth2" {
return false
}
var digest [4]byte
dl, err := hex.Decode(digest[:], []byte(rawDigest))
if err != nil {
return errors.Wrapf(errDigestInvalid, "%v", err)
dl, err := hex.Decode(digest[:], []byte(parts[2]))
if err == nil && dl != 4 {
err = fmt.Errorf("expected 4 bytes, got %d", dl)
}
if dl != 4 {
return errors.Wrapf(errDigestInvalid, "wrong byte length")
if err != nil {
log.WithError(err).WithField("topic", topic).WithField("digest", parts[2]).Error("CanSubscribe failed to parse message")
return false
}
if _, ok := s.allForkDigests[digest]; !ok {
return errDigestUnexpected
log.WithField("topic", topic).WithField("digest", fmt.Sprintf("%#x", digest)).Error("CanSubscribe failed to find digest in allForkDigests")
return false
}
if parts[4] != encoder.ProtocolSuffixSSZSnappy {
return false
}
// Check the incoming topic matches any topic mapping. This includes a check for part[3].
for gt := range gossipTopicMappings {
if _, err := scanfcheck(strings.Join(parts[0:4], "/"), gt); err == nil {
return nil
return true
}
}
return errTopicNotFound
return false
}
// FilterIncomingSubscriptions is invoked for all RPCs containing subscription notifications.
@@ -120,22 +100,7 @@ func (s *Service) FilterIncomingSubscriptions(peerID peer.ID, subs []*pubsubpb.R
return nil, pubsub.ErrTooManySubscriptions
}
return pubsub.FilterSubscriptions(subs, s.logCheckSubscribableError(peerID)), nil
}
func (s *Service) logCheckSubscribableError(pid peer.ID) func(string) bool {
return func(topic string) bool {
if err := s.checkSubscribable(topic); err != nil {
if !errors.Is(err, errNotReadyToSubscribe) {
log.WithError(err).WithFields(logrus.Fields{
"peerID": pid,
"topic": topic,
}).Debug("Peer subscription rejected")
}
return false
}
return true
}
return pubsub.FilterSubscriptions(subs, s.CanSubscribe), nil
}
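For reference, a well-formed gossip topic has five slash-separated parts: /eth2/<4-byte-hex-digest>/<name>/ssz_snappy. The standalone sketch below applies the same structural checks as checkSubscribable, minus the fork-digest and topic-mapping lookups that need service state; validTopic is an illustrative name.

package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

// validTopic checks the structural shape of a gossip topic string.
func validTopic(topic string) bool {
	parts := strings.Split(topic, "/")
	if len(parts) != 5 || parts[0] != "" || parts[1] != "eth2" {
		return false
	}
	if len(parts[2]) != 8 { // 4 bytes, hex-encoded
		return false
	}
	var digest [4]byte
	if _, err := hex.Decode(digest[:], []byte(parts[2])); err != nil {
		return false
	}
	return parts[4] == "ssz_snappy"
}

func main() {
	fmt.Println(validTopic("/eth2/0a1b2c3d/beacon_block/ssz_snappy")) // true
	fmt.Println(validTopic("/eth2/xyz/beacon_block/ssz_snappy"))      // false
}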
// scanfcheck uses fmt.Sscanf to check that a given string matches expected format. This method

View File

@@ -13,13 +13,13 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
testp2p "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
prysmTime "github.com/OffchainLabs/prysm/v6/time"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/libp2p/go-libp2p"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/peer"
@@ -30,6 +30,48 @@ import (
const testPingInterval = 100 * time.Millisecond
type mockListener struct {
localNode *enode.LocalNode
}
func (m mockListener) Self() *enode.Node {
return m.localNode.Node()
}
func (mockListener) Close() {
// no-op
}
func (mockListener) Lookup(enode.ID) []*enode.Node {
panic("implement me")
}
func (mockListener) ReadRandomNodes(_ []*enode.Node) int {
panic("implement me")
}
func (mockListener) Resolve(*enode.Node) *enode.Node {
panic("implement me")
}
func (mockListener) Ping(*enode.Node) error {
panic("implement me")
}
func (mockListener) RequestENR(*enode.Node) (*enode.Node, error) {
panic("implement me")
}
func (mockListener) LocalNode() *enode.LocalNode {
panic("implement me")
}
func (mockListener) RandomNodes() enode.Iterator {
panic("implement me")
}
func (mockListener) RebootListener() error { panic("implement me") }
func createHost(t *testing.T, port uint) (host.Host, *ecdsa.PrivateKey, net.IP) {
_, pkey := createAddrAndPrivKey(t)
ipAddr := net.ParseIP("127.0.0.1")
@@ -45,7 +87,7 @@ func TestService_Stop_SetsStartedToFalse(t *testing.T) {
s, err := NewService(t.Context(), &Config{StateNotifier: &mock.MockStateNotifier{}, DB: testDB.SetupDB(t)})
require.NoError(t, err)
s.started = true
s.dv5Listener = testp2p.NewMockListener(nil, nil)
s.dv5Listener = &mockListener{}
assert.NoError(t, s.Stop())
assert.Equal(t, false, s.started)
}
@@ -71,7 +113,7 @@ func TestService_Start_OnlyStartsOnce(t *testing.T) {
}
s, err := NewService(t.Context(), cfg)
require.NoError(t, err)
s.dv5Listener = testp2p.NewMockListener(nil, nil)
s.dv5Listener = &mockListener{}
s.custodyInfo = &custodyInfo{}
exitRoutine := make(chan bool)
go func() {
@@ -91,14 +133,14 @@ func TestService_Start_OnlyStartsOnce(t *testing.T) {
func TestService_Status_NotRunning(t *testing.T) {
params.SetupTestConfigCleanup(t)
s := &Service{started: false}
s.dv5Listener = testp2p.NewMockListener(nil, nil)
s.dv5Listener = &mockListener{}
assert.ErrorContains(t, "not running", s.Status(), "Status returned wrong error")
}
func TestService_Status_NoGenesisTimeSet(t *testing.T) {
params.SetupTestConfigCleanup(t)
s := &Service{started: true}
s.dv5Listener = testp2p.NewMockListener(nil, nil)
s.dv5Listener = &mockListener{}
assert.ErrorContains(t, "no genesis time set", s.Status(), "Status returned wrong error")
s.genesisTime = time.Now()

View File

@@ -27,6 +27,8 @@ import (
"github.com/prysmaticlabs/go-bitfield"
)
const nfdEnrKey = "nfd" // The ENR record key for "nfd" (Next Fork Digest).
var (
attestationSubnetCount = params.BeaconConfig().AttestationSubnetCount
syncCommsSubnetCount = params.BeaconConfig().SyncCommitteeSubnetCount
@@ -134,24 +136,6 @@ func (s *Service) FindAndDialPeersWithSubnets(
return nil
}
// updateDefectiveSubnets updates the defective subnets map when a node with matching subnets is found.
// It decrements the defective count for each subnet the node satisfies and removes subnets
// that are fully satisfied (count reaches 0).
func updateDefectiveSubnets(
nodeSubnets map[uint64]bool,
defectiveSubnets map[uint64]int,
) {
for subnet := range defectiveSubnets {
if !nodeSubnets[subnet] {
continue
}
defectiveSubnets[subnet]--
if defectiveSubnets[subnet] == 0 {
delete(defectiveSubnets, subnet)
}
}
}
// findPeersWithSubnets finds peers subscribed to defective subnets in batches
// until enough peers are found or the context is canceled.
// It returns new peers found during the search.
@@ -187,7 +171,6 @@ func (s *Service) findPeersWithSubnets(
// Crawl the network for peers subscribed to the defective subnets.
nodeByNodeID := make(map[enode.ID]*enode.Node)
for len(defectiveSubnets) > 0 && iterator.Next() {
if err := ctx.Err(); err != nil {
// Convert the map to a slice.
@@ -199,28 +182,14 @@ func (s *Service) findPeersWithSubnets(
return peersToDial, err
}
// Get all needed subnets that the node is subscribed to.
// Skip nodes that are not subscribed to any of the defective subnets.
node := iterator.Node()
// Remove duplicates, keeping the node with higher seq.
existing, ok := nodeByNodeID[node.ID()]
if ok && existing.Seq() >= node.Seq() {
continue // keep existing and skip.
}
// Treat nodes that exist in nodeByNodeID with higher seq numbers as new peers
// Skip peer not matching the filter.
if !s.filterPeer(node) {
if ok {
// this means the existing peer with the lower sequence number is no longer valid
delete(nodeByNodeID, existing.ID())
// Note: We choose not to roll back changes to the defective subnets map, in favor of calling s.defectiveSubnets once again after dialing peers.
// This case should rarely happen and is handled by a second iteration in FindAndDialPeersWithSubnets.
}
continue
}
// Get all needed subnets that the node is subscribed to.
// Skip nodes that are not subscribed to any of the defective subnets.
nodeSubnets, err := filter(node)
if err != nil {
return nil, errors.Wrap(err, "filter node")
@@ -229,14 +198,30 @@ func (s *Service) findPeersWithSubnets(
continue
}
// We found a new peer. Modify the defective subnets map
// and the filter accordingly.
// Remove duplicates, keeping the node with higher seq.
existing, ok := nodeByNodeID[node.ID()]
if ok && existing.Seq() > node.Seq() {
continue
}
nodeByNodeID[node.ID()] = node
updateDefectiveSubnets(nodeSubnets, defectiveSubnets)
filter, err = s.nodeFilter(topicFormat, defectiveSubnets)
if err != nil {
return nil, errors.Wrap(err, "node filter")
// We found a new peer. Modify the defective subnets map
// and the filter accordingly.
for subnet := range defectiveSubnets {
if !nodeSubnets[subnet] {
continue
}
defectiveSubnets[subnet]--
if defectiveSubnets[subnet] == 0 {
delete(defectiveSubnets, subnet)
}
filter, err = s.nodeFilter(topicFormat, defectiveSubnets)
if err != nil {
return nil, errors.Wrap(err, "node filter")
}
}
}
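The bookkeeping in the loop above, shown as a standalone, runnable sketch; satisfy is an illustrative name. Deleting map entries while ranging over the same map is well-defined in Go.

package main

import "fmt"

// satisfy decrements the needed-peer count for every defective subnet the
// node serves, deleting subnets whose requirement drops to zero.
func satisfy(nodeSubnets map[uint64]bool, defective map[uint64]int) {
	for subnet := range defective {
		if !nodeSubnets[subnet] {
			continue
		}
		defective[subnet]--
		if defective[subnet] == 0 {
			delete(defective, subnet)
		}
	}
}

func main() {
	defective := map[uint64]int{1: 2, 2: 1}
	satisfy(map[uint64]bool{1: true, 2: true}, defective)
	fmt.Println(defective) // map[1:1]
}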

View File

@@ -10,19 +10,14 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
testp2p "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v6/config/params"
ecdsaprysm "github.com/OffchainLabs/prysm/v6/crypto/ecdsa"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/network"
"github.com/prysmaticlabs/go-bitfield"
)
@@ -546,552 +541,3 @@ func TestInitializePersistentSubnets(t *testing.T) {
assert.Equal(t, 2, len(subs))
assert.Equal(t, true, expTime.After(time.Now()))
}
func TestFindPeersWithSubnets_NodeDeduplication(t *testing.T) {
params.SetupTestConfigCleanup(t)
cache.SubnetIDs.EmptyAllCaches()
defer cache.SubnetIDs.EmptyAllCaches()
ctx := context.Background()
db := testDB.SetupDB(t)
localNode1 := createTestNodeWithID(t, "node1")
localNode2 := createTestNodeWithID(t, "node2")
localNode3 := createTestNodeWithID(t, "node3")
// Create different sequence versions of node1 with subnet 1
setNodeSubnets(localNode1, []uint64{1})
setNodeSeq(localNode1, 1)
node1_seq1_subnet1 := localNode1.Node()
setNodeSeq(localNode1, 2)
node1_seq2_subnet1 := localNode1.Node() // Same ID, higher seq
setNodeSeq(localNode1, 3)
node1_seq3_subnet1 := localNode1.Node() // Same ID, even higher seq
// Node2 with different sequences and subnets
setNodeSubnets(localNode2, []uint64{1})
node2_seq1_subnet1 := localNode2.Node()
setNodeSubnets(localNode2, []uint64{2}) // Different subnet
setNodeSeq(localNode2, 2)
node2_seq2_subnet2 := localNode2.Node()
// Node3 with multiple subnets
setNodeSubnets(localNode3, []uint64{1, 2})
node3_seq1_subnet1_2 := localNode3.Node()
tests := []struct {
name string
nodes []*enode.Node
defectiveSubnets map[uint64]int
expectedCount int
description string
eval func(t *testing.T, result []*enode.Node) // Custom validation function
}{
{
name: "No duplicates - unique nodes with same subnet",
nodes: []*enode.Node{
node2_seq1_subnet1,
node3_seq1_subnet1_2,
},
defectiveSubnets: map[uint64]int{1: 2},
expectedCount: 2,
description: "Should return all unique nodes subscribed to subnet",
eval: nil, // No special validation needed
},
{
name: "Duplicate with lower seq first - should replace",
nodes: []*enode.Node{
node1_seq1_subnet1,
node1_seq2_subnet1, // Higher seq, should replace
node2_seq1_subnet1, // Different node to ensure we process enough nodes
},
defectiveSubnets: map[uint64]int{1: 2}, // Need 2 peers for subnet 1
expectedCount: 2,
description: "Should replace with higher seq node for same subnet",
eval: func(t *testing.T, result []*enode.Node) {
found := false
for _, node := range result {
if node.ID() == node1_seq2_subnet1.ID() && node.Seq() == node1_seq2_subnet1.Seq() {
found = true
break
}
}
require.Equal(t, true, found, "Should have node with higher seq")
},
},
{
name: "Duplicate with higher seq first - should keep existing",
nodes: []*enode.Node{
node1_seq3_subnet1, // Higher seq
node1_seq2_subnet1, // Lower seq, should be skipped (continue branch)
node1_seq1_subnet1, // Even lower seq, should also be skipped (continue branch)
node2_seq1_subnet1, // Different node
},
defectiveSubnets: map[uint64]int{1: 2},
expectedCount: 2,
description: "Should keep existing node with higher seq and skip lower seq duplicates",
eval: func(t *testing.T, result []*enode.Node) {
found := false
for _, node := range result {
if node.ID() == node1_seq3_subnet1.ID() && node.Seq() == node1_seq3_subnet1.Seq() {
found = true
break
}
}
require.Equal(t, true, found, "Should have node with highest seq")
},
},
{
name: "Multiple updates for same node",
nodes: []*enode.Node{
node1_seq1_subnet1,
node1_seq2_subnet1, // Should replace seq1
node1_seq3_subnet1, // Should replace seq2
node2_seq1_subnet1, // Different node
},
defectiveSubnets: map[uint64]int{1: 2},
expectedCount: 2,
description: "Should keep updating to highest seq",
eval: func(t *testing.T, result []*enode.Node) {
found := false
for _, node := range result {
if node.ID() == node1_seq3_subnet1.ID() && node.Seq() == node1_seq3_subnet1.Seq() {
found = true
break
}
}
require.Equal(t, true, found, "Should have node with highest seq")
},
},
{
name: "Duplicate with equal seq in subnets - should skip",
nodes: []*enode.Node{
node1_seq2_subnet1, // First occurrence
node1_seq2_subnet1, // Same exact node instance, should be skipped (continue branch)
node2_seq1_subnet1, // Different node
},
defectiveSubnets: map[uint64]int{1: 2},
expectedCount: 2,
description: "Should skip duplicate with equal sequence number in subnet search",
eval: func(t *testing.T, result []*enode.Node) {
foundNode1 := false
foundNode2 := false
node1Count := 0
for _, node := range result {
if node.ID() == node1_seq2_subnet1.ID() {
require.Equal(t, node1_seq2_subnet1.Seq(), node.Seq(), "Node1 should have expected seq")
foundNode1 = true
node1Count++
}
if node.ID() == node2_seq1_subnet1.ID() {
foundNode2 = true
}
}
require.Equal(t, true, foundNode1, "Should have node1")
require.Equal(t, true, foundNode2, "Should have node2")
require.Equal(t, 1, node1Count, "Should have exactly one instance of node1")
},
},
{
name: "Mix with different subnets",
nodes: []*enode.Node{
node2_seq1_subnet1,
node2_seq2_subnet2, // Higher seq but different subnet
node3_seq1_subnet1_2,
},
defectiveSubnets: map[uint64]int{1: 2, 2: 1},
expectedCount: 2, // node2 (latest) and node3
description: "Should handle nodes with different subnet subscriptions",
eval: nil, // Basic count validation is sufficient
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
gFlags := new(flags.GlobalFlags)
gFlags.MinimumPeersPerSubnet = 1
flags.Init(gFlags)
defer flags.Init(new(flags.GlobalFlags))
fakePeer := testp2p.NewTestP2P(t)
s := &Service{
cfg: &Config{
MaxPeers: 30,
DB: db,
},
genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
peers: peers.NewStatus(ctx, &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{},
}),
host: fakePeer.BHost,
}
localNode := createTestNodeRandom(t)
mockIter := testp2p.NewMockIterator(tt.nodes)
s.dv5Listener = testp2p.NewMockListener(localNode, mockIter)
digest, err := s.currentForkDigest()
require.NoError(t, err)
ctxWithTimeout, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
defer cancel()
result, err := s.findPeersWithSubnets(
ctxWithTimeout,
AttestationSubnetTopicFormat,
digest,
1,
tt.defectiveSubnets,
)
require.NoError(t, err, tt.description)
require.Equal(t, tt.expectedCount, len(result), tt.description)
if tt.eval != nil {
tt.eval(t, result)
}
})
}
}
func TestFindPeersWithSubnets_FilterPeerRemoval(t *testing.T) {
params.SetupTestConfigCleanup(t)
cache.SubnetIDs.EmptyAllCaches()
defer cache.SubnetIDs.EmptyAllCaches()
ctx := context.Background()
db := testDB.SetupDB(t)
localNode1 := createTestNodeWithID(t, "node1")
localNode2 := createTestNodeWithID(t, "node2")
localNode3 := createTestNodeWithID(t, "node3")
// Create versions of node1 with subnet 1
setNodeSubnets(localNode1, []uint64{1})
setNodeSeq(localNode1, 1)
node1_seq1_valid_subnet1 := localNode1.Node()
// Create bad version (higher seq)
setNodeSeq(localNode1, 2)
node1_seq2_bad_subnet1 := localNode1.Node()
// Create another valid version
setNodeSeq(localNode1, 3)
node1_seq3_valid_subnet1 := localNode1.Node()
// Node2 with subnet 1
setNodeSubnets(localNode2, []uint64{1})
node2_seq1_valid_subnet1 := localNode2.Node()
// Node3 with subnet 1 and 2
setNodeSubnets(localNode3, []uint64{1, 2})
node3_seq1_valid_subnet1_2 := localNode3.Node()
tests := []struct {
name string
nodes []*enode.Node
defectiveSubnets map[uint64]int
expectedCount int
description string
eval func(t *testing.T, result []*enode.Node)
}{
{
name: "Valid node in subnet followed by bad version - should remove",
nodes: []*enode.Node{
node1_seq1_valid_subnet1, // First add valid node with subnet 1
node1_seq2_bad_subnet1, // Invalid version with higher seq - should delete
node2_seq1_valid_subnet1, // Different valid node with subnet 1
},
defectiveSubnets: map[uint64]int{1: 2}, // Need 2 peers for subnet 1
expectedCount: 1, // Only node2 should remain
description: "Should remove node from map when bad version arrives, even if it has required subnet",
eval: func(t *testing.T, result []*enode.Node) {
foundNode1 := false
foundNode2 := false
for _, node := range result {
if node.ID() == node1_seq1_valid_subnet1.ID() {
foundNode1 = true
}
if node.ID() == node2_seq1_valid_subnet1.ID() {
foundNode2 = true
}
}
require.Equal(t, false, foundNode1, "Node1 should have been removed despite having subnet")
require.Equal(t, true, foundNode2, "Node2 should be present")
},
},
{
name: "Bad node with subnet stays bad even with higher seq",
nodes: []*enode.Node{
node1_seq2_bad_subnet1, // First bad node - not added
node1_seq3_valid_subnet1, // Higher seq but same bad peer ID
node2_seq1_valid_subnet1, // Different valid node
},
defectiveSubnets: map[uint64]int{1: 2},
expectedCount: 1, // Only node2 (node1 remains bad)
description: "Bad peer with subnet remains bad even with higher seq",
eval: func(t *testing.T, result []*enode.Node) {
foundNode1 := false
foundNode2 := false
for _, node := range result {
if node.ID() == node1_seq3_valid_subnet1.ID() {
foundNode1 = true
}
if node.ID() == node2_seq1_valid_subnet1.ID() {
foundNode2 = true
}
}
require.Equal(t, false, foundNode1, "Node1 should remain bad despite having subnet")
require.Equal(t, true, foundNode2, "Node2 should be present")
},
},
{
name: "Mixed valid and bad nodes with subnets",
nodes: []*enode.Node{
node1_seq1_valid_subnet1, // Add valid node1 with subnet
node2_seq1_valid_subnet1, // Add valid node2 with subnet
node1_seq2_bad_subnet1, // Invalid update for node1 - should remove
node3_seq1_valid_subnet1_2, // Add valid node3 with multiple subnets
},
defectiveSubnets: map[uint64]int{1: 3}, // Need 3 peers for subnet 1
expectedCount: 2, // Only node2 and node3 should remain
description: "Should handle removal of nodes with subnets when they become bad",
eval: func(t *testing.T, result []*enode.Node) {
foundNode1 := false
foundNode2 := false
foundNode3 := false
for _, node := range result {
if node.ID() == node1_seq1_valid_subnet1.ID() {
foundNode1 = true
}
if node.ID() == node2_seq1_valid_subnet1.ID() {
foundNode2 = true
}
if node.ID() == node3_seq1_valid_subnet1_2.ID() {
foundNode3 = true
}
}
require.Equal(t, false, foundNode1, "Node1 should have been removed")
require.Equal(t, true, foundNode2, "Node2 should be present")
require.Equal(t, true, foundNode3, "Node3 should be present")
},
},
{
name: "Node with subnet marked bad stays bad for all sequences",
nodes: []*enode.Node{
node1_seq1_valid_subnet1, // Add valid node1 with subnet
node1_seq2_bad_subnet1, // Bad update - should remove and mark bad
node1_seq3_valid_subnet1, // Higher seq but still same bad peer ID
node2_seq1_valid_subnet1, // Different valid node
},
defectiveSubnets: map[uint64]int{1: 2},
expectedCount: 1, // Only node2 (node1 stays bad)
description: "Once marked bad, subnet peer stays bad for all sequences",
eval: func(t *testing.T, result []*enode.Node) {
foundNode1 := false
foundNode2 := false
for _, node := range result {
if node.ID() == node1_seq3_valid_subnet1.ID() {
foundNode1 = true
}
if node.ID() == node2_seq1_valid_subnet1.ID() {
foundNode2 = true
}
}
require.Equal(t, false, foundNode1, "Node1 should stay bad")
require.Equal(t, true, foundNode2, "Node2 should be present")
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Initialize flags for subnet operations
gFlags := new(flags.GlobalFlags)
gFlags.MinimumPeersPerSubnet = 1
flags.Init(gFlags)
defer flags.Init(new(flags.GlobalFlags))
// Create test P2P instance
fakePeer := testp2p.NewTestP2P(t)
// Create mock service
s := &Service{
cfg: &Config{
MaxPeers: 30,
DB: db,
},
genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
peers: peers.NewStatus(ctx, &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{},
}),
host: fakePeer.BHost,
}
// Mark specific node versions as "bad" to simulate filterPeer failures
for _, node := range tt.nodes {
if node == node1_seq2_bad_subnet1 {
// Get peer ID from the node to mark it as bad
peerData, _, _ := convertToAddrInfo(node)
if peerData != nil {
s.peers.Add(node.Record(), peerData.ID, nil, network.DirUnknown)
// Mark as bad peer - this will make filterPeer return false
s.peers.Scorers().BadResponsesScorer().Increment(peerData.ID)
s.peers.Scorers().BadResponsesScorer().Increment(peerData.ID)
s.peers.Scorers().BadResponsesScorer().Increment(peerData.ID)
}
}
}
localNode := createTestNodeRandom(t)
mockIter := testp2p.NewMockIterator(tt.nodes)
s.dv5Listener = testp2p.NewMockListener(localNode, mockIter)
digest, err := s.currentForkDigest()
require.NoError(t, err)
ctxWithTimeout, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
defer cancel()
result, err := s.findPeersWithSubnets(
ctxWithTimeout,
AttestationSubnetTopicFormat,
digest,
1,
tt.defectiveSubnets,
)
require.NoError(t, err, tt.description)
require.Equal(t, tt.expectedCount, len(result), tt.description)
if tt.eval != nil {
tt.eval(t, result)
}
})
}
}
// callbackIteratorForSubnets allows us to execute callbacks at specific points during iteration
type callbackIteratorForSubnets struct {
nodes []*enode.Node
index int
callbacks map[int]func() // map from index to callback function
}
func (c *callbackIteratorForSubnets) Next() bool {
// If a callback is registered for the current index, run it before checking whether we can continue
if callback, exists := c.callbacks[c.index]; exists {
callback()
}
return c.index < len(c.nodes)
}
func (c *callbackIteratorForSubnets) Node() *enode.Node {
if c.index >= len(c.nodes) {
return nil
}
node := c.nodes[c.index]
c.index++
return node
}
func (c *callbackIteratorForSubnets) Close() {
// Nothing to clean up for this simple implementation
}
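For orientation, a minimal driver sketch for this iterator (hypothetical code, not part of the change; nodeA and nodeB are placeholders): Next fires any callback registered for the current index and reports whether a node remains, while Node returns that node and advances the index.

	iter := &callbackIteratorForSubnets{
		nodes: []*enode.Node{nodeA, nodeB},
		callbacks: map[int]func(){
			// Runs inside the Next call made just before nodeB is returned.
			1: func() { /* e.g. mark a peer as bad */ },
		},
	}
	defer iter.Close()
	for iter.Next() {
		node := iter.Node() // nodes[index], then index++
		_ = node
	}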
func TestFindPeersWithSubnets_received_bad_existing_node(t *testing.T) {
// This test successfully triggers delete(nodeByNodeID, node.ID()) in subnets.go by:
// 1. Processing node1_seq1 first (it passes filterPeer and is added to the map)
// 2. The callback marking the peer as bad before node1_seq2 is processed
// 3. Processing node1_seq2 (it fails filterPeer, triggering the delete since ok=true)
params.SetupTestConfigCleanup(t)
cache.SubnetIDs.EmptyAllCaches()
defer cache.SubnetIDs.EmptyAllCaches()
ctx := context.Background()
db := testDB.SetupDB(t)
// Create LocalNode with same ID but different sequences
localNode1 := createTestNodeWithID(t, "testnode")
setNodeSubnets(localNode1, []uint64{1})
node1_seq1 := localNode1.Node() // Get current node
currentSeq := node1_seq1.Seq()
setNodeSeq(localNode1, currentSeq+1) // Increment sequence by 1
node1_seq2 := localNode1.Node() // This should have higher seq
// Additional node to ensure we have enough peers to process
localNode2 := createTestNodeWithID(t, "othernode")
setNodeSubnets(localNode2, []uint64{1})
node2 := localNode2.Node()
gFlags := new(flags.GlobalFlags)
gFlags.MinimumPeersPerSubnet = 1
flags.Init(gFlags)
defer flags.Init(new(flags.GlobalFlags))
fakePeer := testp2p.NewTestP2P(t)
service := &Service{
cfg: &Config{
MaxPeers: 30,
DB: db,
},
genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
peers: peers.NewStatus(ctx, &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{},
}),
host: fakePeer.BHost,
}
// Create iterator with callback that marks peer as bad before processing node1_seq2
iter := &callbackIteratorForSubnets{
nodes: []*enode.Node{node1_seq1, node1_seq2, node2},
index: 0,
callbacks: map[int]func(){
1: func() { // Before processing node1_seq2 (index 1)
// Mark peer as bad before processing node1_seq2
peerData, _, _ := convertToAddrInfo(node1_seq2)
if peerData != nil {
service.peers.Add(node1_seq2.Record(), peerData.ID, nil, network.DirUnknown)
// Mark as bad peer - need enough increments to exceed threshold (6)
for i := 0; i < 10; i++ {
service.peers.Scorers().BadResponsesScorer().Increment(peerData.ID)
}
}
},
},
}
localNode := createTestNodeRandom(t)
service.dv5Listener = testp2p.NewMockListener(localNode, iter)
digest, err := service.currentForkDigest()
require.NoError(t, err)
// Run findPeersWithSubnets - node1_seq1 gets processed first, then callback marks peer bad, then node1_seq2 fails
ctxWithTimeout, cancel := context.WithTimeout(ctx, 1*time.Second)
defer cancel()
result, err := service.findPeersWithSubnets(
ctxWithTimeout,
AttestationSubnetTopicFormat,
digest,
1,
map[uint64]int{1: 2}, // Need 2 peers for subnet 1
)
require.NoError(t, err)
require.Equal(t, 1, len(result))
require.Equal(t, localNode2.Node().ID(), result[0].ID()) // only node2 should remain
}
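Both subnet tests above mark a peer as bad by repeatedly incrementing the bad-responses scorer. As a hedged sketch, those increments could be factored into a helper like this (hypothetical name; it only uses calls already present in these tests, and assumes peer.ID from go-libp2p is imported):

	// markPeerBad records `increments` bad responses against pid; once the
	// scorer threshold is exceeded (6, per the comment in the callback above),
	// filterPeer treats the peer as bad.
	func markPeerBad(s *Service, pid peer.ID, increments int) {
		for i := 0; i < increments; i++ {
			s.peers.Scorers().BadResponsesScorer().Increment(pid)
		}
	}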

View File

@@ -7,7 +7,6 @@ go_library(
"fuzz_p2p.go",
"mock_broadcaster.go",
"mock_host.go",
"mock_listener.go",
"mock_metadataprovider.go",
"mock_peermanager.go",
"mock_peersprovider.go",

View File

@@ -1,128 +0,0 @@
package testing
import (
"github.com/ethereum/go-ethereum/p2p/enode"
)
// MockListener is a mock implementation of the Listener and ListenerRebooter interfaces
// that can be used in tests. It provides configurable behavior for all methods.
type MockListener struct {
LocalNodeFunc func() *enode.LocalNode
SelfFunc func() *enode.Node
RandomNodesFunc func() enode.Iterator
LookupFunc func(enode.ID) []*enode.Node
ResolveFunc func(*enode.Node) *enode.Node
PingFunc func(*enode.Node) error
RequestENRFunc func(*enode.Node) (*enode.Node, error)
RebootFunc func() error
CloseFunc func()
// Default implementations
localNode *enode.LocalNode
iterator enode.Iterator
}
// NewMockListener creates a new MockListener with default implementations
func NewMockListener(localNode *enode.LocalNode, iterator enode.Iterator) *MockListener {
return &MockListener{
localNode: localNode,
iterator: iterator,
}
}
func (m *MockListener) LocalNode() *enode.LocalNode {
if m.LocalNodeFunc != nil {
return m.LocalNodeFunc()
}
return m.localNode
}
func (m *MockListener) Self() *enode.Node {
if m.SelfFunc != nil {
return m.SelfFunc()
}
if m.localNode != nil {
return m.localNode.Node()
}
return nil
}
func (m *MockListener) RandomNodes() enode.Iterator {
if m.RandomNodesFunc != nil {
return m.RandomNodesFunc()
}
return m.iterator
}
func (m *MockListener) Lookup(id enode.ID) []*enode.Node {
if m.LookupFunc != nil {
return m.LookupFunc(id)
}
return nil
}
func (m *MockListener) Resolve(node *enode.Node) *enode.Node {
if m.ResolveFunc != nil {
return m.ResolveFunc(node)
}
return nil
}
func (m *MockListener) Ping(node *enode.Node) error {
if m.PingFunc != nil {
return m.PingFunc(node)
}
return nil
}
func (m *MockListener) RequestENR(node *enode.Node) (*enode.Node, error) {
if m.RequestENRFunc != nil {
return m.RequestENRFunc(node)
}
return nil, nil
}
func (m *MockListener) RebootListener() error {
if m.RebootFunc != nil {
return m.RebootFunc()
}
return nil
}
func (m *MockListener) Close() {
if m.CloseFunc != nil {
m.CloseFunc()
}
}
// MockIterator is a mock implementation of enode.Iterator for testing
type MockIterator struct {
Nodes []*enode.Node
Position int
Closed bool
}
func NewMockIterator(nodes []*enode.Node) *MockIterator {
return &MockIterator{
Nodes: nodes,
}
}
func (m *MockIterator) Next() bool {
if m.Closed || m.Position >= len(m.Nodes) {
return false
}
m.Position++
return true
}
func (m *MockIterator) Node() *enode.Node {
if m.Position == 0 || m.Position > len(m.Nodes) {
return nil
}
return m.Nodes[m.Position-1]
}
func (m *MockIterator) Close() {
m.Closed = true
}
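A short usage sketch of the one-based Position contract (hypothetical driver code): Node is only valid after a successful Next, and returns nil before the first Next or once the iterator is exhausted or closed.

	it := NewMockIterator([]*enode.Node{nodeA, nodeB}) // placeholders
	defer it.Close()                                   // later Next calls return false
	for it.Next() {
		n := it.Node() // Nodes[Position-1]
		_ = n
	}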

View File

@@ -50,7 +50,6 @@ const (
// TestP2P represents a p2p implementation that can be used for testing.
type TestP2P struct {
mu sync.Mutex
t *testing.T
BHost host.Host
EnodeID enode.ID
@@ -64,7 +63,6 @@ type TestP2P struct {
custodyInfoMut sync.RWMutex // protects custodyGroupCount and earliestAvailableSlot
earliestAvailableSlot primitives.Slot
custodyGroupCount uint64
enr *enr.Record
}
// NewTestP2P initializes a new p2p test service.
@@ -105,7 +103,6 @@ func NewTestP2P(t *testing.T, userOptions ...config.Option) *TestP2P {
pubsub: ps,
joinedTopics: map[string]*pubsub.Topic{},
peers: peerStatuses,
enr: new(enr.Record),
}
}
@@ -244,8 +241,6 @@ func (p *TestP2P) SetStreamHandler(topic string, handler network.StreamHandler)
// JoinTopic will join PubSub topic, if not already joined.
func (p *TestP2P) JoinTopic(topic string, opts ...pubsub.TopicOpt) (*pubsub.Topic, error) {
p.mu.Lock()
defer p.mu.Unlock()
if _, ok := p.joinedTopics[topic]; !ok {
joinedTopic, err := p.pubsub.Join(topic, opts...)
if err != nil {
@@ -315,8 +310,8 @@ func (p *TestP2P) Host() host.Host {
}
// ENR returns the enr of the local peer.
func (p *TestP2P) ENR() *enr.Record {
return p.enr
func (*TestP2P) ENR() *enr.Record {
return new(enr.Record)
}
// NodeID returns the node id of the local peer.

View File

@@ -1230,7 +1230,6 @@ func (s *Service) prysmBeaconEndpoints(
methods: []string{http.MethodGet},
},
{
// Warning: no longer supported post Fulu fork
template: "/prysm/v1/beacon/blobs",
name: namespace + ".PublishBlobs",
middleware: []middleware.Middleware{

View File

@@ -18,7 +18,6 @@ go_library(
"//api/server:go_default_library",
"//api/server/structs:go_default_library",
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/blockchain/kzg:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositsnapshot:go_default_library",
"//beacon-chain/core/altair:go_default_library",
@@ -61,6 +60,7 @@ go_library(
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ethereum_go_ethereum//crypto/kzg4844:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
@@ -72,7 +72,6 @@ go_library(
go_test(
name = "go_default_test",
srcs = [
"handlers_equivocation_test.go",
"handlers_pool_test.go",
"handlers_state_test.go",
"handlers_test.go",
@@ -84,7 +83,6 @@ go_test(
"//api:go_default_library",
"//api/server:go_default_library",
"//api/server/structs:go_default_library",
"//beacon-chain/blockchain/kzg:go_default_library",
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/cache/depositsnapshot:go_default_library",
"//beacon-chain/core/signing:go_default_library",
@@ -125,6 +123,7 @@ go_test(
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",

View File

@@ -13,7 +13,6 @@ import (
"github.com/OffchainLabs/prysm/v6/api"
"github.com/OffchainLabs/prysm/v6/api/server/structs"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache/depositsnapshot"
corehelpers "github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
@@ -33,6 +32,7 @@ import (
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto/kzg4844"
"github.com/pkg/errors"
ssz "github.com/prysmaticlabs/fastssz"
"github.com/sirupsen/logrus"
@@ -334,26 +334,26 @@ func (s *Server) GetBlockAttestationsV2(w http.ResponseWriter, r *http.Request)
consensusAtts := blk.Block().Body().Attestations()
v := blk.Block().Version()
attStructs := make([]interface{}, len(consensusAtts))
var attStructs []interface{}
if v >= version.Electra {
for index, att := range consensusAtts {
for _, att := range consensusAtts {
a, ok := att.(*eth.AttestationElectra)
if !ok {
httputil.HandleError(w, fmt.Sprintf("unable to convert consensus attestations electra of type %T", att), http.StatusInternalServerError)
return
}
attStruct := structs.AttElectraFromConsensus(a)
attStructs[index] = attStruct
attStructs = append(attStructs, attStruct)
}
} else {
for index, att := range consensusAtts {
for _, att := range consensusAtts {
a, ok := att.(*eth.Attestation)
if !ok {
httputil.HandleError(w, fmt.Sprintf("unable to convert consensus attestation of type %T", att), http.StatusInternalServerError)
return
}
attStruct := structs.AttFromConsensus(a)
attStructs[index] = attStruct
attStructs = append(attStructs, attStruct)
}
}
@@ -701,7 +701,7 @@ func (s *Server) publishBlockSSZ(ctx context.Context, w http.ResponseWriter, r *
// Validate and optionally broadcast sidecars on equivocation.
if err := s.validateBroadcast(ctx, r, genericBlock); err != nil {
if errors.Is(err, errEquivocatedBlock) {
b, err := blocks.NewSignedBeaconBlock(genericBlock.Block)
b, err := blocks.NewSignedBeaconBlock(genericBlock)
if err != nil {
httputil.HandleError(w, err.Error(), http.StatusBadRequest)
return
@@ -855,7 +855,7 @@ func (s *Server) publishBlock(ctx context.Context, w http.ResponseWriter, r *htt
// Validate and optionally broadcast sidecars on equivocation.
if err := s.validateBroadcast(ctx, r, genericBlock); err != nil {
if errors.Is(err, errEquivocatedBlock) {
b, err := blocks.NewSignedBeaconBlock(genericBlock.Block)
b, err := blocks.NewSignedBeaconBlock(genericBlock)
if err != nil {
httputil.HandleError(w, err.Error(), http.StatusBadRequest)
return
@@ -942,13 +942,14 @@ func decodePhase0JSON(body []byte) (*eth.GenericSignedBeaconBlock, error) {
// broadcastSidecarsIfSupported broadcasts blob sidecars when an equivocated block occurs.
func broadcastSidecarsIfSupported(ctx context.Context, s *Server, b interfaces.SignedBeaconBlock, gb *eth.GenericSignedBeaconBlock, versionHeader string) error {
switch versionHeader {
case version.String(version.Fulu):
return s.broadcastSeenBlockSidecars(ctx, b, gb.GetFulu().Blobs, gb.GetFulu().KzgProofs)
case version.String(version.Electra):
return s.broadcastSeenBlockSidecars(ctx, b, gb.GetElectra().Blobs, gb.GetElectra().KzgProofs)
case version.String(version.Deneb):
return s.broadcastSeenBlockSidecars(ctx, b, gb.GetDeneb().Blobs, gb.GetDeneb().KzgProofs)
default:
// other forks before Deneb do not support blob sidecars
// forks after Fulu do not support blob sidecars; they use data columns instead, so there is no need to rebroadcast
return nil
}
}
@@ -1052,7 +1053,7 @@ func (s *Server) validateConsensus(ctx context.Context, b *eth.GenericSignedBeac
return nil
}
if err := s.validateBlobs(blk, blobs, proofs); err != nil {
if err := s.validateBlobSidecars(blk, blobs, proofs); err != nil {
return err
}
@@ -1066,41 +1067,23 @@ func (s *Server) validateEquivocation(blk interfaces.ReadOnlyBeaconBlock) error
return nil
}
func (s *Server) validateBlobs(blk interfaces.SignedBeaconBlock, blobs [][]byte, proofs [][]byte) error {
func (s *Server) validateBlobSidecars(blk interfaces.SignedBeaconBlock, blobs [][]byte, proofs [][]byte) error {
if blk.Version() < version.Deneb {
return nil
}
numberOfColumns := params.BeaconConfig().NumberOfColumns
commitments, err := blk.Block().Body().BlobKzgCommitments()
kzgs, err := blk.Block().Body().BlobKzgCommitments()
if err != nil {
return errors.Wrap(err, "could not get blob kzg commitments")
}
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(blk.Block().Slot())
if len(blobs) > maxBlobsPerBlock {
return fmt.Errorf("number of blobs over max, %d > %d", len(blobs), maxBlobsPerBlock)
if len(blobs) != len(proofs) || len(blobs) != len(kzgs) {
return errors.New("number of blobs, proofs, and commitments do not match")
}
if blk.Version() >= version.Fulu {
// For Fulu blocks, proofs are cell proofs (blobs * numberOfColumns)
expectedProofsCount := uint64(len(blobs)) * numberOfColumns
if uint64(len(proofs)) != expectedProofsCount || len(blobs) != len(commitments) {
return fmt.Errorf("number of blobs (%d), cell proofs (%d), and commitments (%d) do not match (expected %d cell proofs)", len(blobs), len(proofs), len(commitments), expectedProofsCount)
}
// For Fulu blocks, proofs are cell proofs from execution client's BlobsBundleV2
// Verify cell proofs directly without reconstructing data column sidecars
if err := kzg.VerifyCellKZGProofBatchFromBlobData(blobs, commitments, proofs, numberOfColumns); err != nil {
return errors.Wrap(err, "could not verify cell proofs")
}
} else {
// For pre-Fulu blocks, proofs are blob proofs (1:1 with blobs)
if len(blobs) != len(proofs) || len(blobs) != len(commitments) {
return errors.Errorf("number of blobs (%d), proofs (%d), and commitments (%d) do not match", len(blobs), len(proofs), len(commitments))
}
// Use batch verification for better performance
if err := kzg.VerifyBlobKZGProofBatch(blobs, commitments, proofs); err != nil {
return errors.Wrap(err, "could not verify blob proofs")
for i, blob := range blobs {
b := kzg4844.Blob(blob)
if err := kzg4844.VerifyBlobProof(&b, kzg4844.Commitment(kzgs[i]), kzg4844.Proof(proofs[i])); err != nil {
return errors.Wrap(err, "could not verify blob proof")
}
}
return nil
}
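The rewritten validateBlobSidecars verifies each proof individually through go-ethereum's kzg4844 package, matching the pointer-blob signature used above. A minimal standalone round trip, assuming a recent go-ethereum (the zero blob is a valid, if trivial, placeholder payload):

	package main

	import (
		"fmt"

		"github.com/ethereum/go-ethereum/crypto/kzg4844"
	)

	func main() {
		var blob kzg4844.Blob // all-zero blob: every field element is valid
		commitment, err := kzg4844.BlobToCommitment(&blob)
		if err != nil {
			panic(err)
		}
		proof, err := kzg4844.ComputeBlobProof(&blob, commitment)
		if err != nil {
			panic(err)
		}
		// Mirrors the per-blob check in validateBlobSidecars.
		if err := kzg4844.VerifyBlobProof(&blob, commitment, proof); err != nil {
			panic(err)
		}
		fmt.Println("blob proof verified")
	}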
@@ -1644,8 +1627,6 @@ func (s *Server) broadcastSeenBlockSidecars(
if err != nil {
return err
}
// Broadcast blob sidecars with forkchoice checking
for _, sc := range scs {
r, err := sc.SignedBlockHeader.Header.HashTreeRoot()
if err != nil {

View File

@@ -1,35 +0,0 @@
package beacon
import (
"encoding/json"
"testing"
"github.com/OffchainLabs/prysm/v6/api/server/structs"
rpctesting "github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/eth/shared/testing"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/testing/require"
)
// TestBlocks_NewSignedBeaconBlock_EquivocationFix tests that blocks.NewSignedBeaconBlock
// correctly handles the fixed case where genericBlock.Block is passed instead of genericBlock
func TestBlocks_NewSignedBeaconBlock_EquivocationFix(t *testing.T) {
// Parse the Phase0 JSON block
var block structs.SignedBeaconBlock
err := json.Unmarshal([]byte(rpctesting.Phase0Block), &block)
require.NoError(t, err)
// Convert to generic format
genericBlock, err := block.ToGeneric()
require.NoError(t, err)
// Test the FIX: pass genericBlock.Block instead of genericBlock
// This is what our fix changed in handlers.go line 704 and 858
_, err = blocks.NewSignedBeaconBlock(genericBlock.Block)
require.NoError(t, err, "NewSignedBeaconBlock should work with genericBlock.Block")
// Test the BROKEN version: pass genericBlock directly (this should fail)
_, err = blocks.NewSignedBeaconBlock(genericBlock)
if err == nil {
t.Errorf("NewSignedBeaconBlock should fail with whole genericBlock but succeeded")
}
}

View File

@@ -14,7 +14,6 @@ import (
"github.com/OffchainLabs/prysm/v6/api"
"github.com/OffchainLabs/prysm/v6/api/server/structs"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
chainMock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache/depositsnapshot"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
@@ -41,6 +40,7 @@ import (
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
"github.com/OffchainLabs/prysm/v6/time/slots"
GoKZG "github.com/crate-crypto/go-kzg-4844"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"
ssz "github.com/prysmaticlabs/fastssz"
@@ -910,100 +910,6 @@ func TestGetBlockAttestations(t *testing.T) {
})
})
})
t.Run("empty-attestations", func(t *testing.T) {
t.Run("v1", func(t *testing.T) {
b := util.NewBeaconBlock()
b.Block.Body.Attestations = []*eth.Attestation{} // Explicitly set empty attestations
sb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
mockChainService := &chainMock.ChainService{
FinalizedRoots: map[[32]byte]bool{},
}
s := &Server{
OptimisticModeFetcher: mockChainService,
FinalizationFetcher: mockChainService,
Blocker: &testutil.MockBlocker{BlockToReturn: sb},
}
request := httptest.NewRequest(http.MethodGet, "http://foo.example/eth/v1/beacon/blocks/{block_id}/attestations", nil)
request.SetPathValue("block_id", "head")
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetBlockAttestations(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetBlockAttestationsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
// Ensure data is empty array, not null
require.NotNil(t, resp.Data)
assert.Equal(t, 0, len(resp.Data))
})
t.Run("v2-pre-electra", func(t *testing.T) {
b := util.NewBeaconBlock()
b.Block.Body.Attestations = []*eth.Attestation{} // Explicitly set empty attestations
sb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
mockChainService := &chainMock.ChainService{
FinalizedRoots: map[[32]byte]bool{},
}
s := &Server{
OptimisticModeFetcher: mockChainService,
FinalizationFetcher: mockChainService,
Blocker: &testutil.MockBlocker{BlockToReturn: sb},
}
request := httptest.NewRequest(http.MethodGet, "http://foo.example/eth/v2/beacon/blocks/{block_id}/attestations", nil)
request.SetPathValue("block_id", "head")
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetBlockAttestationsV2(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetBlockAttestationsV2Response{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
// Ensure data is "[]", not null
require.NotNil(t, resp.Data)
assert.Equal(t, string(json.RawMessage("[]")), string(resp.Data))
})
t.Run("v2-electra", func(t *testing.T) {
eb := util.NewBeaconBlockFulu()
eb.Block.Body.Attestations = []*eth.AttestationElectra{} // Explicitly set empty attestations
esb, err := blocks.NewSignedBeaconBlock(eb)
require.NoError(t, err)
mockChainService := &chainMock.ChainService{
FinalizedRoots: map[[32]byte]bool{},
}
s := &Server{
OptimisticModeFetcher: mockChainService,
FinalizationFetcher: mockChainService,
Blocker: &testutil.MockBlocker{BlockToReturn: esb},
}
request := httptest.NewRequest(http.MethodGet, "http://foo.example/eth/v2/beacon/blocks/{block_id}/attestations", nil)
request.SetPathValue("block_id", "head")
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetBlockAttestationsV2(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := &structs.GetBlockAttestationsV2Response{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
// Ensure data is "[]", not null
require.NotNil(t, resp.Data)
assert.Equal(t, string(json.RawMessage("[]")), string(resp.Data))
assert.Equal(t, "fulu", resp.Version)
})
})
}
func TestGetBlindedBlock(t *testing.T) {
@@ -4875,329 +4781,25 @@ func TestServer_broadcastBlobSidecars(t *testing.T) {
require.LogsContain(t, hook, "Broadcasted blob sidecar for already seen block")
}
func Test_validateBlobs(t *testing.T) {
require.NoError(t, kzg.Start())
func Test_validateBlobSidecars(t *testing.T) {
blob := util.GetRandBlob(123)
// Generate proper commitment and proof for the blob
var kzgBlob kzg.Blob
copy(kzgBlob[:], blob[:])
commitment, err := kzg.BlobToKZGCommitment(&kzgBlob)
require.NoError(t, err)
proof, err := kzg.ComputeBlobKZGProof(&kzgBlob, commitment)
require.NoError(t, err)
commitment := GoKZG.KZGCommitment{180, 218, 156, 194, 59, 20, 10, 189, 186, 254, 132, 93, 7, 127, 104, 172, 238, 240, 237, 70, 83, 89, 1, 152, 99, 0, 165, 65, 143, 62, 20, 215, 230, 14, 205, 95, 28, 245, 54, 25, 160, 16, 178, 31, 232, 207, 38, 85}
proof := GoKZG.KZGProof{128, 110, 116, 170, 56, 111, 126, 87, 229, 234, 211, 42, 110, 150, 129, 206, 73, 142, 167, 243, 90, 149, 240, 240, 236, 204, 143, 182, 229, 249, 81, 27, 153, 171, 83, 70, 144, 250, 42, 1, 188, 215, 71, 235, 30, 7, 175, 86}
blk := util.NewBeaconBlockDeneb()
blk.Block.Body.BlobKzgCommitments = [][]byte{commitment[:]}
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
s := &Server{}
require.NoError(t, s.validateBlobs(b, [][]byte{blob[:]}, [][]byte{proof[:]}))
require.NoError(t, s.validateBlobSidecars(b, [][]byte{blob[:]}, [][]byte{proof[:]}))
require.ErrorContains(t, "number of blobs (1), proofs (0), and commitments (1) do not match", s.validateBlobs(b, [][]byte{blob[:]}, [][]byte{}))
require.ErrorContains(t, "number of blobs, proofs, and commitments do not match", s.validateBlobSidecars(b, [][]byte{blob[:]}, [][]byte{}))
sk, err := bls.RandKey()
require.NoError(t, err)
blk.Block.Body.BlobKzgCommitments = [][]byte{sk.PublicKey().Marshal()}
b, err = blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.ErrorContains(t, "could not verify blob proofs", s.validateBlobs(b, [][]byte{blob[:]}, [][]byte{proof[:]}))
blobs := [][]byte{}
commitments := [][]byte{}
proofs := [][]byte{}
for i := 0; i < 10; i++ {
blobs = append(blobs, blob[:])
commitments = append(commitments, commitment[:])
proofs = append(proofs, proof[:])
}
t.Run("pre-Deneb block should return early", func(t *testing.T) {
// Create a pre-Deneb block (e.g., Capella)
blk := util.NewBeaconBlockCapella()
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
s := &Server{}
// Should return nil for pre-Deneb blocks regardless of blobs
require.NoError(t, s.validateBlobs(b, [][]byte{}, [][]byte{}))
require.NoError(t, s.validateBlobs(b, blobs[:1], proofs[:1]))
})
t.Run("Deneb block with valid single blob", func(t *testing.T) {
blk := util.NewBeaconBlockDeneb()
blk.Block.Body.BlobKzgCommitments = [][]byte{commitment[:]}
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
s := &Server{}
require.NoError(t, s.validateBlobs(b, [][]byte{blob[:]}, [][]byte{proof[:]}))
})
t.Run("Deneb block with max blobs (6)", func(t *testing.T) {
cfg := params.BeaconConfig().Copy()
defer params.OverrideBeaconConfig(cfg)
testCfg := params.BeaconConfig().Copy()
testCfg.DenebForkEpoch = 0
testCfg.ElectraForkEpoch = 100
testCfg.DeprecatedMaxBlobsPerBlock = 6
params.OverrideBeaconConfig(testCfg)
blk := util.NewBeaconBlockDeneb()
blk.Block.Slot = 10 // Deneb slot
blk.Block.Body.BlobKzgCommitments = commitments[:6]
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
s := &Server{}
// Should pass with exactly 6 blobs
require.NoError(t, s.validateBlobs(b, blobs[:6], proofs[:6]))
})
t.Run("Deneb block exceeding max blobs", func(t *testing.T) {
cfg := params.BeaconConfig().Copy()
defer params.OverrideBeaconConfig(cfg)
testCfg := params.BeaconConfig().Copy()
testCfg.DenebForkEpoch = 0
testCfg.ElectraForkEpoch = 100
testCfg.DeprecatedMaxBlobsPerBlock = 6
params.OverrideBeaconConfig(testCfg)
blk := util.NewBeaconBlockDeneb()
blk.Block.Slot = 10 // Deneb slot
blk.Block.Body.BlobKzgCommitments = commitments[:7]
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
s := &Server{}
// Should fail with 7 blobs when max is 6
err = s.validateBlobs(b, blobs[:7], proofs[:7])
require.ErrorContains(t, "number of blobs over max, 7 > 6", err)
})
t.Run("Electra block with valid blobs", func(t *testing.T) {
cfg := params.BeaconConfig().Copy()
defer params.OverrideBeaconConfig(cfg)
// Set up Electra config with max 9 blobs
testCfg := params.BeaconConfig().Copy()
testCfg.DenebForkEpoch = 0
testCfg.ElectraForkEpoch = 5
testCfg.DeprecatedMaxBlobsPerBlock = 6
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
params.OverrideBeaconConfig(testCfg)
blk := util.NewBeaconBlockElectra()
blk.Block.Slot = 160 // Electra slot (epoch 5+)
blk.Block.Body.BlobKzgCommitments = commitments[:9]
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
s := &Server{}
// Should pass with 9 blobs in Electra
require.NoError(t, s.validateBlobs(b, blobs[:9], proofs[:9]))
})
t.Run("Electra block exceeding max blobs", func(t *testing.T) {
cfg := params.BeaconConfig().Copy()
defer params.OverrideBeaconConfig(cfg)
// Set up Electra config with max 9 blobs
testCfg := params.BeaconConfig().Copy()
testCfg.DenebForkEpoch = 0
testCfg.ElectraForkEpoch = 5
testCfg.DeprecatedMaxBlobsPerBlock = 6
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
params.OverrideBeaconConfig(testCfg)
blk := util.NewBeaconBlockElectra()
blk.Block.Slot = 160 // Electra slot
blk.Block.Body.BlobKzgCommitments = commitments[:10]
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
s := &Server{}
// Should fail with 10 blobs when max is 9
err = s.validateBlobs(b, blobs[:10], proofs[:10])
require.ErrorContains(t, "number of blobs over max, 10 > 9", err)
})
t.Run("Fulu block with valid cell proofs", func(t *testing.T) {
cfg := params.BeaconConfig().Copy()
defer params.OverrideBeaconConfig(cfg)
testCfg := params.BeaconConfig().Copy()
testCfg.DenebForkEpoch = 0
testCfg.ElectraForkEpoch = 5
testCfg.FuluForkEpoch = 10
testCfg.DeprecatedMaxBlobsPerBlock = 6
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
testCfg.NumberOfColumns = 128 // Standard PeerDAS configuration
params.OverrideBeaconConfig(testCfg)
// Create Fulu block with proper cell proofs
blk := util.NewBeaconBlockFulu()
blk.Block.Slot = 320 // Epoch 10 (Fulu fork)
// Generate valid commitments and cell proofs for testing
blobCount := 2
commitments := make([][]byte, blobCount)
fuluBlobs := make([][]byte, blobCount)
var kzgBlobs []kzg.Blob
for i := 0; i < blobCount; i++ {
blob := util.GetRandBlob(int64(i))
fuluBlobs[i] = blob[:]
var kzgBlob kzg.Blob
copy(kzgBlob[:], blob[:])
kzgBlobs = append(kzgBlobs, kzgBlob)
// Generate commitment
commitment, err := kzg.BlobToKZGCommitment(&kzgBlob)
require.NoError(t, err)
commitments[i] = commitment[:]
}
blk.Block.Body.BlobKzgCommitments = commitments
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
// Generate cell proofs for the blobs (flattened format like execution client)
numberOfColumns := params.BeaconConfig().NumberOfColumns
cellProofs := make([][]byte, uint64(blobCount)*numberOfColumns)
for blobIdx := 0; blobIdx < blobCount; blobIdx++ {
cellsAndProofs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlobs[blobIdx])
require.NoError(t, err)
for colIdx := uint64(0); colIdx < numberOfColumns; colIdx++ {
cellProofIdx := uint64(blobIdx)*numberOfColumns + colIdx
cellProofs[cellProofIdx] = cellsAndProofs.Proofs[colIdx][:]
}
}
s := &Server{}
// Should use cell batch verification for Fulu blocks
require.NoError(t, s.validateBlobs(b, fuluBlobs, cellProofs))
})
t.Run("Fulu block with invalid cell proof count", func(t *testing.T) {
cfg := params.BeaconConfig().Copy()
defer params.OverrideBeaconConfig(cfg)
testCfg := params.BeaconConfig().Copy()
testCfg.DenebForkEpoch = 0
testCfg.ElectraForkEpoch = 5
testCfg.FuluForkEpoch = 10
testCfg.NumberOfColumns = 128
params.OverrideBeaconConfig(testCfg)
blk := util.NewBeaconBlockFulu()
blk.Block.Slot = 320 // Epoch 10 (Fulu fork)
// Create valid commitments but wrong number of cell proofs
blobCount := 2
commitments := make([][]byte, blobCount)
fuluBlobs := make([][]byte, blobCount)
for i := 0; i < blobCount; i++ {
blob := util.GetRandBlob(int64(i))
fuluBlobs[i] = blob[:]
var kzgBlob kzg.Blob
copy(kzgBlob[:], blob[:])
commitment, err := kzg.BlobToKZGCommitment(&kzgBlob)
require.NoError(t, err)
commitments[i] = commitment[:]
}
blk.Block.Body.BlobKzgCommitments = commitments
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
// Wrong number of cell proofs (should be blobCount * numberOfColumns)
wrongCellProofs := make([][]byte, 10) // Too few proofs
s := &Server{}
err = s.validateBlobs(b, fuluBlobs, wrongCellProofs)
require.ErrorContains(t, "do not match", err)
})
t.Run("Deneb block with invalid blob proof", func(t *testing.T) {
blob := util.GetRandBlob(123)
invalidProof := make([]byte, 48) // All zeros - invalid proof
sk, err := bls.RandKey()
require.NoError(t, err)
blk := util.NewBeaconBlockDeneb()
blk.Block.Body.BlobKzgCommitments = [][]byte{sk.PublicKey().Marshal()}
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
s := &Server{}
err = s.validateBlobs(b, [][]byte{blob[:]}, [][]byte{invalidProof})
require.ErrorContains(t, "could not verify blob proofs", err)
})
t.Run("empty blobs and proofs should pass", func(t *testing.T) {
blk := util.NewBeaconBlockDeneb()
blk.Block.Body.BlobKzgCommitments = [][]byte{}
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
s := &Server{}
require.NoError(t, s.validateBlobs(b, [][]byte{}, [][]byte{}))
})
t.Run("BlobSchedule with progressive increases (BPO)", func(t *testing.T) {
cfg := params.BeaconConfig().Copy()
defer params.OverrideBeaconConfig(cfg)
// Set up config with BlobSchedule (BPO - Blob Parameter Only forks)
testCfg := params.BeaconConfig().Copy()
testCfg.DenebForkEpoch = 0
testCfg.ElectraForkEpoch = 100
testCfg.FuluForkEpoch = 200
testCfg.DeprecatedMaxBlobsPerBlock = 6
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
// Define blob schedule with progressive increases
testCfg.BlobSchedule = []params.BlobScheduleEntry{
{Epoch: 0, MaxBlobsPerBlock: 3}, // Start with 3 blobs
{Epoch: 10, MaxBlobsPerBlock: 5}, // Increase to 5 at epoch 10
{Epoch: 20, MaxBlobsPerBlock: 7}, // Increase to 7 at epoch 20
{Epoch: 30, MaxBlobsPerBlock: 9}, // Increase to 9 at epoch 30
}
params.OverrideBeaconConfig(testCfg)
s := &Server{}
// Test epoch 0-9: max 3 blobs
t.Run("epoch 0-9: max 3 blobs", func(t *testing.T) {
blk := util.NewBeaconBlockDeneb()
blk.Block.Slot = 5 // Epoch 0
blk.Block.Body.BlobKzgCommitments = commitments[:3]
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, s.validateBlobs(b, blobs[:3], proofs[:3]))
// Should fail with 4 blobs
blk.Block.Body.BlobKzgCommitments = commitments[:4]
b, err = blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
err = s.validateBlobs(b, blobs[:4], proofs[:4])
require.ErrorContains(t, "number of blobs over max, 4 > 3", err)
})
// Test epoch 30+: max 9 blobs
t.Run("epoch 30+: max 9 blobs", func(t *testing.T) {
blk := util.NewBeaconBlockDeneb()
blk.Block.Slot = 960 // Epoch 30
blk.Block.Body.BlobKzgCommitments = commitments[:9]
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, s.validateBlobs(b, blobs[:9], proofs[:9]))
// Should fail with 10 blobs
blk.Block.Body.BlobKzgCommitments = commitments[:10]
b, err = blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
err = s.validateBlobs(b, blobs[:10], proofs[:10])
require.ErrorContains(t, "number of blobs over max, 10 > 9", err)
})
})
require.ErrorContains(t, "could not verify blob proof: can't verify opening proof", s.validateBlobSidecars(b, [][]byte{blob[:]}, [][]byte{proof[:]}))
}
func TestGetPendingConsolidations(t *testing.T) {

View File

@@ -68,8 +68,7 @@ func (rs *BlockRewardService) GetBlockRewardsData(ctx context.Context, blk inter
Code: http.StatusInternalServerError,
}
}
exitInfo := validators.ExitInformation(st)
st, err = coreblocks.ProcessAttesterSlashings(ctx, st, blk.Body().AttesterSlashings(), exitInfo)
st, err = coreblocks.ProcessAttesterSlashings(ctx, st, blk.Body().AttesterSlashings(), validators.SlashValidator)
if err != nil {
return nil, &httputil.DefaultJsonError{
Message: "Could not get attester slashing rewards: " + err.Error(),
@@ -83,7 +82,7 @@ func (rs *BlockRewardService) GetBlockRewardsData(ctx context.Context, blk inter
Code: http.StatusInternalServerError,
}
}
st, err = coreblocks.ProcessProposerSlashings(ctx, st, blk.Body().ProposerSlashings(), exitInfo)
st, err = coreblocks.ProcessProposerSlashings(ctx, st, blk.Body().ProposerSlashings(), validators.SlashValidator)
if err != nil {
return nil, &httputil.DefaultJsonError{
Message: "Could not get proposer slashing rewards: " + err.Error(),

View File

@@ -1029,8 +1029,8 @@ func (s *Server) GetProposerDuties(w http.ResponseWriter, r *http.Request) {
httputil.HandleError(w, fmt.Sprintf("Could not get head state: %v ", err), http.StatusInternalServerError)
return
}
// Notice that even for Fulu requests for the next epoch, we are only advancing the state to the start of the current epoch.
if st.Slot() < epochStartSlot {
// Advance the state with empty transitions up to the requested epoch start slot for pre-Fulu states only. Fulu states use the proposer lookahead field instead.
if st.Slot() < epochStartSlot && st.Version() != version.Fulu {
headRoot, err := s.HeadFetcher.HeadRoot(ctx)
if err != nil {
httputil.HandleError(w, fmt.Sprintf("Could not get head root: %v ", err), http.StatusInternalServerError)

View File

@@ -2645,6 +2645,78 @@ func TestGetProposerDuties(t *testing.T) {
})
}
func TestGetProposerDuties_FuluState(t *testing.T) {
helpers.ClearCache()
// Create a Fulu state with slot 0 (before epoch 1 start slot which is 32)
fuluState, err := util.NewBeaconStateFulu()
require.NoError(t, err)
require.NoError(t, fuluState.SetSlot(0)) // Set to slot 0
// Create some validators for the test
depChainStart := params.BeaconConfig().MinGenesisActiveValidatorCount
deposits, _, err := util.DeterministicDepositsAndKeys(depChainStart)
require.NoError(t, err)
validators := make([]*ethpbalpha.Validator, len(deposits))
for i, deposit := range deposits {
validators[i] = &ethpbalpha.Validator{
PublicKey: deposit.Data.PublicKey,
ActivationEpoch: 0,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
WithdrawalCredentials: make([]byte, 32),
}
}
require.NoError(t, fuluState.SetValidators(validators))
// Set up block roots
genesis := util.NewBeaconBlock()
genesisRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
roots := make([][]byte, fieldparams.BlockRootsLength)
roots[0] = genesisRoot[:]
require.NoError(t, fuluState.SetBlockRoots(roots))
chainSlot := primitives.Slot(0)
chain := &mockChain.ChainService{
State: fuluState, Root: genesisRoot[:], Slot: &chainSlot,
}
db := dbutil.SetupDB(t)
require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), genesisRoot))
s := &Server{
Stater: &testutil.MockStater{StatesBySlot: map[primitives.Slot]state.BeaconState{0: fuluState}},
HeadFetcher: chain,
TimeFetcher: chain,
OptimisticModeFetcher: chain,
SyncChecker: &mockSync.Sync{IsSyncing: false},
PayloadIDCache: cache.NewPayloadIDCache(),
TrackedValidatorsCache: cache.NewTrackedValidatorsCache(),
BeaconDB: db,
}
// Request epoch 1 duties, which should require advancing from slot 0 to slot 32
// But for Fulu state, this advancement should be skipped
request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/proposer/{epoch}", nil)
request.SetPathValue("epoch", "1")
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetProposerDuties(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
// Verify the state was not advanced - it should still be at slot 0
// This is the key assertion for the regression test
assert.Equal(t, primitives.Slot(0), fuluState.Slot(), "Fulu state should not have been advanced")
resp := &structs.GetProposerDutiesResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
// Should still return proposer duties despite not advancing the state
assert.Equal(t, true, len(resp.Data) > 0, "Should return proposer duties even without state advancement")
}
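The slot arithmetic behind this test, as a hedged aside (assuming mainnet's 32 slots per epoch):

	// Epoch 1 starts at slot 1 * 32 = 32, so a pre-Fulu state at slot 0 would
	// need 32 empty-slot transitions; the guard shown in the handler diff
	// above skips that work for Fulu states, which carry proposer lookahead.
	const slotsPerEpoch = 32 // mainnet SLOTS_PER_EPOCH (assumption)
	epochStartSlot := uint64(1) * slotsPerEpoch // == 32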
func TestGetSyncCommitteeDuties(t *testing.T) {
helpers.ClearCache()
params.SetupTestConfigCleanup(t)

View File

@@ -186,7 +186,6 @@ func (s *Server) GetChainHead(w http.ResponseWriter, r *http.Request) {
httputil.WriteJson(w, response)
}
// Warning: no longer supported post Fulu blobs
func (s *Server) PublishBlobs(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "beacon.PublishBlobs")
defer span.End()
@@ -216,15 +215,6 @@ func (s *Server) PublishBlobs(w http.ResponseWriter, r *http.Request) {
httputil.HandleError(w, "Could not decode blob sidecar: "+err.Error(), http.StatusBadRequest)
return
}
scEpoch := slots.ToEpoch(sc.SignedBlockHeader.Header.Slot)
if scEpoch < params.BeaconConfig().DenebForkEpoch {
httputil.HandleError(w, "Blob sidecars not supported for pre deneb", http.StatusBadRequest)
return
}
if scEpoch > params.BeaconConfig().FuluForkEpoch {
httputil.HandleError(w, "Blob sidecars not supported for post fulu blobs", http.StatusBadRequest)
return
}
readOnlySc, err := blocks.NewROBlobWithRoot(sc, bytesutil.ToBytes32(root))
if err != nil {

View File

@@ -1017,11 +1017,6 @@ func TestPublishBlobs_BadBlockRoot(t *testing.T) {
}
func TestPublishBlobs(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.DenebForkEpoch = 0 // Set Deneb fork to epoch 0 so slot 0 is valid
params.OverrideBeaconConfig(cfg)
server := &Server{
BlobReceiver: &chainMock.ChainService{},
Broadcaster: &mockp2p.MockBroadcaster{},
@@ -1037,94 +1032,3 @@ func TestPublishBlobs(t *testing.T) {
assert.Equal(t, len(server.BlobReceiver.(*chainMock.ChainService).Blobs), 1)
assert.Equal(t, server.Broadcaster.(*mockp2p.MockBroadcaster).BroadcastCalled.Load(), true)
}
func TestPublishBlobs_PreDeneb(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.DenebForkEpoch = 10 // Set Deneb fork to epoch 10, so slot 0 is pre-Deneb
params.OverrideBeaconConfig(cfg)
server := &Server{
BlobReceiver: &chainMock.ChainService{},
Broadcaster: &mockp2p.MockBroadcaster{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(rpctesting.PublishBlobsRequest)))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
server.PublishBlobs(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
assert.StringContains(t, "Blob sidecars not supported for pre deneb", writer.Body.String())
assert.Equal(t, len(server.BlobReceiver.(*chainMock.ChainService).Blobs), 0)
assert.Equal(t, server.Broadcaster.(*mockp2p.MockBroadcaster).BroadcastCalled.Load(), false)
}
func TestPublishBlobs_PostFulu(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.DenebForkEpoch = 0
cfg.FuluForkEpoch = 0 // Set Fulu fork to epoch 0
params.OverrideBeaconConfig(cfg)
server := &Server{
BlobReceiver: &chainMock.ChainService{},
Broadcaster: &mockp2p.MockBroadcaster{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
// Create a custom request with a slot in epoch 1 (which is > FuluForkEpoch of 0)
postFuluRequest := fmt.Sprintf(`{
"block_root" : "0x0000000000000000000000000000000000000000000000000000000000000000",
"blob_sidecars" : {
"sidecars" : [
{
"blob" : "%s",
"index" : "0",
"kzg_commitment" : "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"kzg_commitment_inclusion_proof" : [
"0x0000000000000000000000000000000000000000000000000000000000000000",
"0x0000000000000000000000000000000000000000000000000000000000000000",
"0x0000000000000000000000000000000000000000000000000000000000000000",
"0x0000000000000000000000000000000000000000000000000000000000000000",
"0x0000000000000000000000000000000000000000000000000000000000000000",
"0x0000000000000000000000000000000000000000000000000000000000000000",
"0x0000000000000000000000000000000000000000000000000000000000000000",
"0x0000000000000000000000000000000000000000000000000000000000000000",
"0x0000000000000000000000000000000000000000000000000000000000000000",
"0x0000000000000000000000000000000000000000000000000000000000000000",
"0x0000000000000000000000000000000000000000000000000000000000000000",
"0x0000000000000000000000000000000000000000000000000000000000000000",
"0x0000000000000000000000000000000000000000000000000000000000000000",
"0x0000000000000000000000000000000000000000000000000000000000000000",
"0x0000000000000000000000000000000000000000000000000000000000000000",
"0x0000000000000000000000000000000000000000000000000000000000000000",
"0x0000000000000000000000000000000000000000000000000000000000000000"
],
"kzg_proof" : "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"signed_block_header" : {
"message" : {
"body_root" : "0x0000000000000000000000000000000000000000000000000000000000000000",
"parent_root" : "0x0000000000000000000000000000000000000000000000000000000000000000",
"proposer_index" : "0",
"slot" : "33",
"state_root" : "0x0000000000000000000000000000000000000000000000000000000000000000"
},
"signature" : "0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
}
}
]
}
}`, rpctesting.Blob)
request := httptest.NewRequest(http.MethodPost, "http://foo.example", bytes.NewReader([]byte(postFuluRequest)))
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
server.PublishBlobs(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
assert.StringContains(t, "Blob sidecars not supported for post fulu blobs", writer.Body.String())
assert.Equal(t, len(server.BlobReceiver.(*chainMock.ChainService).Blobs), 0)
assert.Equal(t, server.Broadcaster.(*mockp2p.MockBroadcaster).BroadcastCalled.Load(), false)
}
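For the record, the slot choice in the removed test follows from the same epoch arithmetic (again assuming 32 slots per epoch):

	// slots.ToEpoch(33) == 33/32 == 1, so with FuluForkEpoch = 0 the removed
	// guard scEpoch > FuluForkEpoch fired and the request was rejected.
	scEpoch := uint64(33) / 32 // == 1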

View File

@@ -19,7 +19,7 @@ func TestServer_GetBeaconConfig(t *testing.T) {
conf := params.BeaconConfig()
confType := reflect.TypeOf(conf).Elem()
numFields := confType.NumField()
// Count only exported fields, as unexported fields are not included in the config
exportedFields := 0
for i := 0; i < numFields; i++ {

View File

@@ -1,5 +1,3 @@
# gazelle:ignore
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
@@ -39,7 +37,6 @@ go_library(
"//api/client/builder:go_default_library",
"//async/event:go_default_library",
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/blockchain/kzg:go_default_library",
"//beacon-chain/builder:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositsnapshot:go_default_library",
@@ -50,7 +47,6 @@ go_library(
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition:go_default_library",
@@ -67,8 +63,8 @@ go_library(
"//beacon-chain/rpc/core:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//beacon-chain/sync:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
@@ -85,7 +81,7 @@ go_library(
"//crypto/rand:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz:go_default_library",
"//genesis:go_default_library",
"//genesis:go_default_library",
"//math:go_default_library",
"//monitoring/tracing:go_default_library",
"//monitoring/tracing/trace:go_default_library",
@@ -185,6 +181,7 @@ common_deps = [
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
]
# gazelle:ignore
go_test(
name = "go_default_test",
timeout = "moderate",

View File

@@ -29,19 +29,12 @@ func TestConstructGenericBeaconBlock(t *testing.T) {
require.NoError(t, err)
r1, err := eb.Block.HashTreeRoot()
require.NoError(t, err)
bundle := &enginev1.BlobsBundleV2{
KzgCommitments: [][]byte{{1, 2, 3}},
Proofs: [][]byte{{4, 5, 6}},
Blobs: [][]byte{{7, 8, 9}},
}
result, err := vs.constructGenericBeaconBlock(b, bundle, primitives.ZeroWei())
result, err := vs.constructGenericBeaconBlock(b, nil, primitives.ZeroWei())
require.NoError(t, err)
r2, err := result.GetFulu().Block.HashTreeRoot()
require.NoError(t, err)
require.Equal(t, r1, r2)
require.Equal(t, result.IsBlinded, false)
require.DeepEqual(t, bundle.Blobs, result.GetFulu().GetBlobs())
require.DeepEqual(t, bundle.Proofs, result.GetFulu().GetKzgProofs())
})
// Test for Electra version

View File

@@ -15,11 +15,9 @@ import (
blockfeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/block"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/operation"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/kv"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
@@ -60,31 +58,28 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
if err != nil {
log.WithError(err).Error("Could not convert slot to time")
}
log := log.WithField("slot", req.Slot)
log.WithField("sinceSlotStartTime", time.Since(t)).Info("Begin building block")
log.WithFields(logrus.Fields{
"slot": req.Slot,
"sinceSlotStartTime": time.Since(t),
}).Info("Begin building block")
// A syncing validator should not produce a block.
if vs.SyncChecker.Syncing() {
log.Error("Fail to build block: node is syncing")
return nil, status.Error(codes.Unavailable, "Syncing to latest head, not ready to respond")
}
// An optimistic validator MUST NOT produce a block (i.e., sign across the DOMAIN_BEACON_PROPOSER domain).
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().BellatrixForkEpoch {
if err := vs.optimisticStatus(ctx); err != nil {
log.WithError(err).Error("Fail to build block: node is optimistic")
return nil, status.Errorf(codes.Unavailable, "Validator is not ready to propose: %v", err)
}
}
head, parentRoot, err := vs.getParentState(ctx, req.Slot)
if err != nil {
log.WithError(err).Error("Fail to build block: could not get parent state")
return nil, err
}
sBlk, err := getEmptyBlock(req.Slot)
if err != nil {
log.WithError(err).Error("Fail to build block: could not get empty block")
return nil, status.Errorf(codes.Internal, "Could not prepare block: %v", err)
}
// Set slot, graffiti, randao reveal, and parent root.
@@ -106,7 +101,8 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
}
resp, err := vs.BuildBlockParallel(ctx, sBlk, head, req.SkipMevBoost, builderBoostFactor)
log = log.WithFields(logrus.Fields{
log := log.WithFields(logrus.Fields{
"slot": req.Slot,
"sinceSlotStartTime": time.Since(t),
"validator": sBlk.Block().ProposerIndex(),
})
@@ -279,11 +275,6 @@ func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.Signed
//
// ProposeBeaconBlock handles the proposal of beacon blocks.
func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSignedBeaconBlock) (*ethpb.ProposeResponse, error) {
var (
blobSidecars []*ethpb.BlobSidecar
dataColumnSidecars []*ethpb.DataColumnSidecar
)
ctx, span := trace.StartSpan(ctx, "ProposerServer.ProposeBeaconBlock")
defer span.End()
@@ -309,10 +300,11 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
return &ethpb.ProposeResponse{BlockRoot: root[:]}, nil
}
var sidecars []*ethpb.BlobSidecar
if block.IsBlinded() {
block, blobSidecars, err = vs.handleBlindedBlock(ctx, block)
block, sidecars, err = vs.handleBlindedBlock(ctx, block)
} else if block.Version() >= version.Deneb {
blobSidecars, dataColumnSidecars, err = vs.handleUnblindedBlock(block, req)
sidecars, err = vs.blobSidecarsFromUnblindedBlock(block, req)
}
if err != nil {
return nil, status.Errorf(codes.Internal, "%s: %v", "handle block failed", err)
@@ -331,9 +323,10 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
errChan <- nil
}()
if err := vs.broadcastAndReceiveSidecars(ctx, block, root, blobSidecars, dataColumnSidecars); err != nil {
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive sidecars: %v", err)
if err := vs.broadcastAndReceiveBlobs(ctx, sidecars, root); err != nil {
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive blobs: %v", err)
}
wg.Wait()
if err := <-errChan; err != nil {
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive block: %v", err)
@@ -342,35 +335,12 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
return &ethpb.ProposeResponse{BlockRoot: root[:]}, nil
}
// broadcastAndReceiveSidecars broadcasts and receives sidecars.
func (vs *Server) broadcastAndReceiveSidecars(
ctx context.Context,
block interfaces.SignedBeaconBlock,
root [fieldparams.RootLength]byte,
blobSidecars []*ethpb.BlobSidecar,
dataColumnSideCars []*ethpb.DataColumnSidecar,
) error {
if block.Version() >= version.Fulu {
if err := vs.broadcastAndReceiveDataColumns(ctx, dataColumnSideCars, root); err != nil {
return errors.Wrap(err, "broadcast and receive data columns")
}
return nil
}
if err := vs.broadcastAndReceiveBlobs(ctx, blobSidecars, root); err != nil {
return errors.Wrap(err, "broadcast and receive blobs")
}
return nil
}
// handleBlindedBlock processes blinded beacon blocks (pre-Fulu only).
// Post-Fulu blinded blocks are handled directly in ProposeBeaconBlock.
func (vs *Server) handleBlindedBlock(ctx context.Context, block interfaces.SignedBeaconBlock) (interfaces.SignedBeaconBlock, []*ethpb.BlobSidecar, error) {
if block.Version() < version.Bellatrix {
return nil, nil, errors.New("pre-Bellatrix blinded block")
}
if vs.BlockBuilder == nil || !vs.BlockBuilder.Configured() {
return nil, nil, errors.New("unconfigured block builder")
}
@@ -397,34 +367,16 @@ func (vs *Server) handleBlindedBlock(ctx context.Context, block interfaces.Signe
return copiedBlock, sidecars, nil
}
func (vs *Server) handleUnblindedBlock(
block interfaces.SignedBeaconBlock,
req *ethpb.GenericSignedBeaconBlock,
) ([]*ethpb.BlobSidecar, []*ethpb.DataColumnSidecar, error) {
func (vs *Server) blobSidecarsFromUnblindedBlock(block interfaces.SignedBeaconBlock, req *ethpb.GenericSignedBeaconBlock) ([]*ethpb.BlobSidecar, error) {
rawBlobs, proofs, err := blobsAndProofs(req)
if err != nil {
return nil, nil, err
return nil, err
}
if block.Version() >= version.Fulu {
dataColumnSideCars, err := peerdas.ConstructDataColumnSidecars(block, rawBlobs, proofs)
if err != nil {
return nil, nil, errors.Wrap(err, "construct data column sidecars")
}
return nil, dataColumnSideCars, nil
}
blobSidecars, err := BuildBlobSidecars(block, rawBlobs, proofs)
if err != nil {
return nil, nil, errors.Wrap(err, "build blob sidecars")
}
return blobSidecars, nil, nil
return BuildBlobSidecars(block, rawBlobs, proofs)
}
// broadcastReceiveBlock broadcasts a block and handles its reception.
func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, root [fieldparams.RootLength]byte) error {
func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, root [32]byte) error {
protoBlock, err := block.Proto()
if err != nil {
return errors.Wrap(err, "protobuf conversion failed")
@@ -440,14 +392,18 @@ func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.Si
}
// broadcastAndReceiveBlobs handles the broadcasting and reception of blob sidecars.
func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethpb.BlobSidecar, root [fieldparams.RootLength]byte) error {
func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethpb.BlobSidecar, root [32]byte) error {
eg, eCtx := errgroup.WithContext(ctx)
for subIdx, sc := range sidecars {
for i, sc := range sidecars {
// Copy the iteration instance to a local variable to give each go-routine its own copy to play with.
// See https://golang.org/doc/faq#closures_and_goroutines for more details.
subIdx := i
sCar := sc
eg.Go(func() error {
if err := vs.P2P.BroadcastBlob(eCtx, uint64(subIdx), sc); err != nil {
if err := vs.P2P.BroadcastBlob(eCtx, uint64(subIdx), sCar); err != nil {
return errors.Wrap(err, "broadcast blob failed")
}
readOnlySc, err := blocks.NewROBlobWithRoot(sc, root)
readOnlySc, err := blocks.NewROBlobWithRoot(sCar, root)
if err != nil {
return errors.Wrap(err, "ROBlob creation failed")
}
@@ -465,53 +421,6 @@ func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethp
return eg.Wait()
}
// broadcastAndReceiveDataColumns handles the broadcasting and reception of data columns sidecars.
func (vs *Server) broadcastAndReceiveDataColumns(
ctx context.Context,
sidecars []*ethpb.DataColumnSidecar,
root [fieldparams.RootLength]byte,
) error {
verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(sidecars))
eg, _ := errgroup.WithContext(ctx)
for _, sidecar := range sidecars {
roDataColumn, err := blocks.NewRODataColumnWithRoot(sidecar, root)
if err != nil {
return errors.Wrap(err, "new read-only data column with root")
}
// We build this block ourselves, so we can upgrade the read only data column sidecar into a verified one.
verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
verifiedRODataColumns = append(verifiedRODataColumns, verifiedRODataColumn)
eg.Go(func() error {
// Compute the subnet index based on the column index.
subnet := peerdas.ComputeSubnetForDataColumnSidecar(sidecar.Index)
if err := vs.P2P.BroadcastDataColumnSidecar(root, subnet, sidecar); err != nil {
return errors.Wrap(err, "broadcast data column")
}
return nil
})
}
if err := eg.Wait(); err != nil {
return errors.Wrap(err, "wait for data columns to be broadcasted")
}
if err := vs.DataColumnReceiver.ReceiveDataColumns(verifiedRODataColumns); err != nil {
return errors.Wrap(err, "receive data column")
}
for _, verifiedRODataColumn := range verifiedRODataColumns {
vs.OperationNotifier.OperationFeed().Send(&feed.Event{
Type: operation.DataColumnSidecarReceived,
Data: &operation.DataColumnSidecarReceivedData{DataColumn: &verifiedRODataColumn}, // #nosec G601
})
}
return nil
}
// Deprecated: The gRPC API will remain the default and fully supported through v8 (expected in 2026) but will be eventually removed in favor of REST API.
//
// PrepareBeaconProposer caches and updates the fee recipient for the given proposer.
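For context on broadcastAndReceiveDataColumns above: the gossip subnet for a data column sidecar is derived from its column index. A rough sketch of that mapping, assuming the PeerDAS-style modulo rule and a 128-subnet count; peerdas.ComputeSubnetForDataColumnSidecar is the authoritative implementation and may differ in detail:

```go
package main

import "fmt"

// dataColumnSidecarSubnetCount mirrors the spec constant
// DATA_COLUMN_SIDECAR_SUBNET_COUNT; the value 128 is assumed here.
const dataColumnSidecarSubnetCount = 128

// computeSubnetForDataColumnSidecar is a hedged stand-in for
// peerdas.ComputeSubnetForDataColumnSidecar: a column index maps to its
// gossip subnet by a simple modulo over the subnet count.
func computeSubnetForDataColumnSidecar(columnIndex uint64) uint64 {
	return columnIndex % dataColumnSidecarSubnetCount
}

func main() {
	for _, idx := range []uint64{0, 64, 127, 128, 200} {
		fmt.Printf("column %d -> subnet %d\n", idx, computeSubnetForDataColumnSidecar(idx))
	}
}
```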

View File

@@ -1,10 +1,8 @@
package validator
import (
"bytes"
"context"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
@@ -17,7 +15,6 @@ import (
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/pkg/errors"
"github.com/prysmaticlabs/go-bitfield"
)
func (vs *Server) setSyncAggregate(ctx context.Context, blk interfaces.SignedBeaconBlock) {
@@ -54,28 +51,21 @@ func (vs *Server) getSyncAggregate(ctx context.Context, slot primitives.Slot, ro
if vs.SyncCommitteePool == nil {
return nil, errors.New("sync committee pool is nil")
}
poolContributions, err := vs.SyncCommitteePool.SyncCommitteeContributions(slot)
// Contributions have to match the input root
contributions, err := vs.SyncCommitteePool.SyncCommitteeContributions(slot)
if err != nil {
return nil, err
}
// Contributions have to match the input root
proposerContributions := proposerSyncContributions(poolContributions).filterByBlockRoot(root)
proposerContributions := proposerSyncContributions(contributions).filterByBlockRoot(root)
aggregatedContributions, err := vs.aggregatedSyncCommitteeMessages(ctx, slot, root, poolContributions)
if err != nil {
return nil, errors.Wrap(err, "could not get aggregated sync committee messages")
}
proposerContributions = append(proposerContributions, aggregatedContributions...)
subcommitteeCount := params.BeaconConfig().SyncCommitteeSubnetCount
// Each sync subcommittee is 128 bits and the sync committee is 512 bits for mainnet.
var bitsHolder [][]byte
for i := uint64(0); i < subcommitteeCount; i++ {
for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSubnetCount; i++ {
bitsHolder = append(bitsHolder, ethpb.NewSyncCommitteeAggregationBits())
}
sigsHolder := make([]bls.Signature, 0, params.BeaconConfig().SyncCommitteeSize/subcommitteeCount)
sigsHolder := make([]bls.Signature, 0, params.BeaconConfig().SyncCommitteeSize/params.BeaconConfig().SyncCommitteeSubnetCount)
for i := uint64(0); i < subcommitteeCount; i++ {
for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSubnetCount; i++ {
cs := proposerContributions.filterBySubIndex(i)
aggregates, err := synccontribution.Aggregate(cs)
if err != nil {
@@ -117,107 +107,3 @@ func (vs *Server) getSyncAggregate(ctx context.Context, slot primitives.Slot, ro
SyncCommitteeSignature: syncSigBytes[:],
}, nil
}
func (vs *Server) aggregatedSyncCommitteeMessages(
ctx context.Context,
slot primitives.Slot,
root [32]byte,
poolContributions []*ethpb.SyncCommitteeContribution,
) ([]*ethpb.SyncCommitteeContribution, error) {
subcommitteeCount := params.BeaconConfig().SyncCommitteeSubnetCount
subcommitteeSize := params.BeaconConfig().SyncCommitteeSize / subcommitteeCount
sigsPerSubcommittee := make([][][]byte, subcommitteeCount)
bitsPerSubcommittee := make([]bitfield.Bitfield, subcommitteeCount)
for i := uint64(0); i < subcommitteeCount; i++ {
sigsPerSubcommittee[i] = make([][]byte, 0, subcommitteeSize)
bitsPerSubcommittee[i] = ethpb.NewSyncCommitteeAggregationBits()
}
// Get committee position(s) for each message's validator index.
scMessages, err := vs.SyncCommitteePool.SyncCommitteeMessages(slot)
if err != nil {
return nil, errors.Wrap(err, "could not get sync committee messages")
}
messageIndices := make([]primitives.ValidatorIndex, 0, len(scMessages))
messageSigs := make([][]byte, 0, len(scMessages))
for _, msg := range scMessages {
if bytes.Equal(root[:], msg.BlockRoot) {
messageIndices = append(messageIndices, msg.ValidatorIndex)
messageSigs = append(messageSigs, msg.Signature)
}
}
st, err := vs.HeadFetcher.HeadState(ctx)
if err != nil {
return nil, errors.Wrap(err, "could not get head state")
}
positions, err := helpers.CurrentPeriodPositions(st, messageIndices)
if err != nil {
return nil, errors.Wrap(err, "could not get sync committee positions")
}
// Based on committee position(s), set the appropriate subcommittee bit and signature.
for i, ci := range positions {
for _, index := range ci {
k := uint64(index)
subnetIndex := k / subcommitteeSize
indexMod := k % subcommitteeSize
// Existing aggregated contributions from the pool intersecting with aggregates
// created from single sync committee messages can result in bit intersections
// that fail to produce the best possible final aggregate. Ignoring bits that are
// already set in pool contributions makes intersections impossible.
intersects := false
for _, poolContrib := range poolContributions {
if poolContrib.SubcommitteeIndex == subnetIndex && poolContrib.AggregationBits.BitAt(indexMod) {
intersects = true
}
}
if !intersects && !bitsPerSubcommittee[subnetIndex].BitAt(indexMod) {
bitsPerSubcommittee[subnetIndex].SetBitAt(indexMod, true)
sigsPerSubcommittee[subnetIndex] = append(sigsPerSubcommittee[subnetIndex], messageSigs[i])
}
}
}
// Aggregate.
result := make([]*ethpb.SyncCommitteeContribution, 0, subcommitteeCount)
for i := uint64(0); i < subcommitteeCount; i++ {
aggregatedSig := make([]byte, 96)
aggregatedSig[0] = 0xC0
if len(sigsPerSubcommittee[i]) != 0 {
contrib, err := aggregateSyncSubcommitteeMessages(slot, root, i, bitsPerSubcommittee[i], sigsPerSubcommittee[i])
if err != nil {
// Skip aggregating this subcommittee
log.WithError(err).Errorf("Could not aggregate sync messages for subcommittee %d", i)
continue
}
result = append(result, contrib)
}
}
return result, nil
}
func aggregateSyncSubcommitteeMessages(
slot primitives.Slot,
root [32]byte,
subcommitteeIndex uint64,
bits bitfield.Bitfield,
sigs [][]byte,
) (*ethpb.SyncCommitteeContribution, error) {
var err error
uncompressedSigs := make([]bls.Signature, len(sigs))
for i, sig := range sigs {
uncompressedSigs[i], err = bls.SignatureFromBytesNoValidation(sig)
if err != nil {
return nil, errors.Wrap(err, "could not create signature from bytes")
}
}
return &ethpb.SyncCommitteeContribution{
Slot: slot,
BlockRoot: root[:],
SubcommitteeIndex: subcommitteeIndex,
AggregationBits: bits.Bytes(),
Signature: bls.AggregateSignatures(uncompressedSigs).Marshal(),
}, nil
}
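The index arithmetic in aggregatedSyncCommitteeMessages above maps a validator's sync committee position to a subcommittee and a bit within it. A standalone sketch with mainnet-sized constants assumed (512 committee members, 4 subnets, so 512 / 4 = 128 bits per subcommittee); params.BeaconConfig() remains the source of truth:

```go
package main

import "fmt"

// Mainnet-style values assumed: SYNC_COMMITTEE_SIZE = 512 and
// SYNC_COMMITTEE_SUBNET_COUNT = 4, so each subcommittee covers
// 512 / 4 = 128 positions, one aggregation bit per position.
const (
	syncCommitteeSize        = 512
	syncCommitteeSubnetCount = 4
	subcommitteeSize         = syncCommitteeSize / syncCommitteeSubnetCount // 128
)

// positionToSubcommittee shows the arithmetic used above when folding a
// validator's committee position into a per-subcommittee aggregation bit.
func positionToSubcommittee(position uint64) (subnetIndex, bitIndex uint64) {
	return position / subcommitteeSize, position % subcommitteeSize
}

func main() {
	for _, p := range []uint64{0, 127, 128, 130, 511} {
		subnet, bit := positionToSubcommittee(p)
		fmt.Printf("position %3d -> subcommittee %d, bit %d\n", p, subnet, bit)
	}
}
```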

View File

@@ -3,67 +3,13 @@ package validator
import (
"testing"
chainmock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/synccommittee"
mockSync "github.com/OffchainLabs/prysm/v6/beacon-chain/sync/initial-sync/testing"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/crypto/bls"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
"github.com/prysmaticlabs/go-bitfield"
)
func TestProposer_GetSyncAggregate_OK(t *testing.T) {
st, err := util.NewBeaconStateAltair()
require.NoError(t, err)
proposerServer := &Server{
HeadFetcher: &chainmock.ChainService{State: st},
SyncChecker: &mockSync.Sync{IsSyncing: false},
SyncCommitteePool: synccommittee.NewStore(),
}
r := params.BeaconConfig().ZeroHash
conts := []*ethpb.SyncCommitteeContribution{
{Slot: 1, SubcommitteeIndex: 0, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b0001}, BlockRoot: r[:]},
{Slot: 1, SubcommitteeIndex: 0, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1001}, BlockRoot: r[:]},
{Slot: 1, SubcommitteeIndex: 0, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1110}, BlockRoot: r[:]},
{Slot: 1, SubcommitteeIndex: 1, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b0001}, BlockRoot: r[:]},
{Slot: 1, SubcommitteeIndex: 1, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1001}, BlockRoot: r[:]},
{Slot: 1, SubcommitteeIndex: 1, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1110}, BlockRoot: r[:]},
{Slot: 1, SubcommitteeIndex: 2, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b0001}, BlockRoot: r[:]},
{Slot: 1, SubcommitteeIndex: 2, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1001}, BlockRoot: r[:]},
{Slot: 1, SubcommitteeIndex: 2, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1110}, BlockRoot: r[:]},
{Slot: 1, SubcommitteeIndex: 3, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b0001}, BlockRoot: r[:]},
{Slot: 1, SubcommitteeIndex: 3, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1001}, BlockRoot: r[:]},
{Slot: 1, SubcommitteeIndex: 3, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1110}, BlockRoot: r[:]},
{Slot: 2, SubcommitteeIndex: 0, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b10101010}, BlockRoot: r[:]},
{Slot: 2, SubcommitteeIndex: 1, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b10101010}, BlockRoot: r[:]},
{Slot: 2, SubcommitteeIndex: 2, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b10101010}, BlockRoot: r[:]},
{Slot: 2, SubcommitteeIndex: 3, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b10101010}, BlockRoot: r[:]},
}
for _, cont := range conts {
require.NoError(t, proposerServer.SyncCommitteePool.SaveSyncCommitteeContribution(cont))
}
aggregate, err := proposerServer.getSyncAggregate(t.Context(), 1, bytesutil.ToBytes32(conts[0].BlockRoot))
require.NoError(t, err)
require.DeepEqual(t, bitfield.Bitvector32{0xf, 0xf, 0xf, 0xf}, aggregate.SyncCommitteeBits)
aggregate, err = proposerServer.getSyncAggregate(t.Context(), 2, bytesutil.ToBytes32(conts[0].BlockRoot))
require.NoError(t, err)
require.DeepEqual(t, bitfield.Bitvector32{0xaa, 0xaa, 0xaa, 0xaa}, aggregate.SyncCommitteeBits)
aggregate, err = proposerServer.getSyncAggregate(t.Context(), 3, bytesutil.ToBytes32(conts[0].BlockRoot))
require.NoError(t, err)
require.DeepEqual(t, bitfield.NewBitvector32(), aggregate.SyncCommitteeBits)
}
func TestServer_SetSyncAggregate_EmptyCase(t *testing.T) {
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlockAltair())
require.NoError(t, err)
@@ -79,123 +25,3 @@ func TestServer_SetSyncAggregate_EmptyCase(t *testing.T) {
}
require.DeepEqual(t, want, agg)
}
func TestProposer_GetSyncAggregate_IncludesSyncCommitteeMessages(t *testing.T) {
// TEST SETUP
// - validator 0 is selected twice in subcommittee 0 (indexes [0,1])
// - validator 1 is selected once in subcommittee 0 (index 2)
// - validator 2 is selected twice in subcommittee 1 (indexes [0,1])
// - validator 3 is selected once in subcommittee 1 (index 2)
// - sync committee aggregates in the pool have index 3 set for both subcommittees
subcommitteeSize := params.BeaconConfig().SyncCommitteeSize / params.BeaconConfig().SyncCommitteeSubnetCount
helpers.ClearCache()
st, err := util.NewBeaconStateAltair()
require.NoError(t, err)
vals := make([]*ethpb.Validator, 4)
vals[0] = &ethpb.Validator{PublicKey: bytesutil.PadTo([]byte{0xf0}, 48)}
vals[1] = &ethpb.Validator{PublicKey: bytesutil.PadTo([]byte{0xf1}, 48)}
vals[2] = &ethpb.Validator{PublicKey: bytesutil.PadTo([]byte{0xf2}, 48)}
vals[3] = &ethpb.Validator{PublicKey: bytesutil.PadTo([]byte{0xf3}, 48)}
require.NoError(t, st.SetValidators(vals))
sc := &ethpb.SyncCommittee{
Pubkeys: make([][]byte, params.BeaconConfig().SyncCommitteeSize),
}
sc.Pubkeys[0] = vals[0].PublicKey
sc.Pubkeys[1] = vals[0].PublicKey
sc.Pubkeys[2] = vals[1].PublicKey
sc.Pubkeys[subcommitteeSize] = vals[2].PublicKey
sc.Pubkeys[subcommitteeSize+1] = vals[2].PublicKey
sc.Pubkeys[subcommitteeSize+2] = vals[3].PublicKey
require.NoError(t, st.SetCurrentSyncCommittee(sc))
proposerServer := &Server{
HeadFetcher: &chainmock.ChainService{State: st},
SyncChecker: &mockSync.Sync{IsSyncing: false},
SyncCommitteePool: synccommittee.NewStore(),
}
r := params.BeaconConfig().ZeroHash
msgs := []*ethpb.SyncCommitteeMessage{
{Slot: 1, BlockRoot: r[:], ValidatorIndex: 0, Signature: bls.NewAggregateSignature().Marshal()},
{Slot: 1, BlockRoot: r[:], ValidatorIndex: 1, Signature: bls.NewAggregateSignature().Marshal()},
{Slot: 1, BlockRoot: r[:], ValidatorIndex: 2, Signature: bls.NewAggregateSignature().Marshal()},
{Slot: 1, BlockRoot: r[:], ValidatorIndex: 3, Signature: bls.NewAggregateSignature().Marshal()},
}
for _, msg := range msgs {
require.NoError(t, proposerServer.SyncCommitteePool.SaveSyncCommitteeMessage(msg))
}
subcommittee0AggBits := ethpb.NewSyncCommitteeAggregationBits()
subcommittee0AggBits.SetBitAt(3, true)
subcommittee1AggBits := ethpb.NewSyncCommitteeAggregationBits()
subcommittee1AggBits.SetBitAt(3, true)
conts := []*ethpb.SyncCommitteeContribution{
{Slot: 1, SubcommitteeIndex: 0, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: subcommittee0AggBits, BlockRoot: r[:]},
{Slot: 1, SubcommitteeIndex: 1, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: subcommittee1AggBits, BlockRoot: r[:]},
}
for _, cont := range conts {
require.NoError(t, proposerServer.SyncCommitteePool.SaveSyncCommitteeContribution(cont))
}
// The final sync aggregates must have indexes [0,1,2,3] set for both subcommittees
sa, err := proposerServer.getSyncAggregate(t.Context(), 1, r)
require.NoError(t, err)
assert.Equal(t, true, sa.SyncCommitteeBits.BitAt(0))
assert.Equal(t, true, sa.SyncCommitteeBits.BitAt(1))
assert.Equal(t, true, sa.SyncCommitteeBits.BitAt(2))
assert.Equal(t, true, sa.SyncCommitteeBits.BitAt(3))
assert.Equal(t, true, sa.SyncCommitteeBits.BitAt(subcommitteeSize))
assert.Equal(t, true, sa.SyncCommitteeBits.BitAt(subcommitteeSize+1))
assert.Equal(t, true, sa.SyncCommitteeBits.BitAt(subcommitteeSize+2))
assert.Equal(t, true, sa.SyncCommitteeBits.BitAt(subcommitteeSize+3))
}
func Test_aggregatedSyncCommitteeMessages_NoIntersectionWithPoolContributions(t *testing.T) {
helpers.ClearCache()
st, err := util.NewBeaconStateAltair()
require.NoError(t, err)
vals := make([]*ethpb.Validator, 4)
vals[0] = &ethpb.Validator{PublicKey: bytesutil.PadTo([]byte{0xf0}, 48)}
vals[1] = &ethpb.Validator{PublicKey: bytesutil.PadTo([]byte{0xf1}, 48)}
vals[2] = &ethpb.Validator{PublicKey: bytesutil.PadTo([]byte{0xf2}, 48)}
vals[3] = &ethpb.Validator{PublicKey: bytesutil.PadTo([]byte{0xf3}, 48)}
require.NoError(t, st.SetValidators(vals))
sc := &ethpb.SyncCommittee{
Pubkeys: make([][]byte, params.BeaconConfig().SyncCommitteeSize),
}
sc.Pubkeys[0] = vals[0].PublicKey
sc.Pubkeys[1] = vals[1].PublicKey
sc.Pubkeys[2] = vals[2].PublicKey
sc.Pubkeys[3] = vals[3].PublicKey
require.NoError(t, st.SetCurrentSyncCommittee(sc))
proposerServer := &Server{
HeadFetcher: &chainmock.ChainService{State: st},
SyncChecker: &mockSync.Sync{IsSyncing: false},
SyncCommitteePool: synccommittee.NewStore(),
}
r := params.BeaconConfig().ZeroHash
msgs := []*ethpb.SyncCommitteeMessage{
{Slot: 1, BlockRoot: r[:], ValidatorIndex: 0, Signature: bls.NewAggregateSignature().Marshal()},
{Slot: 1, BlockRoot: r[:], ValidatorIndex: 1, Signature: bls.NewAggregateSignature().Marshal()},
{Slot: 1, BlockRoot: r[:], ValidatorIndex: 2, Signature: bls.NewAggregateSignature().Marshal()},
{Slot: 1, BlockRoot: r[:], ValidatorIndex: 3, Signature: bls.NewAggregateSignature().Marshal()},
}
for _, msg := range msgs {
require.NoError(t, proposerServer.SyncCommitteePool.SaveSyncCommitteeMessage(msg))
}
subcommitteeAggBits := ethpb.NewSyncCommitteeAggregationBits()
subcommitteeAggBits.SetBitAt(3, true)
cont := &ethpb.SyncCommitteeContribution{
Slot: 1,
SubcommitteeIndex: 0,
Signature: bls.NewAggregateSignature().Marshal(),
AggregationBits: subcommitteeAggBits,
BlockRoot: r[:],
}
aggregated, err := proposerServer.aggregatedSyncCommitteeMessages(t.Context(), 1, r, []*ethpb.SyncCommitteeContribution{cont})
require.NoError(t, err)
require.Equal(t, 1, len(aggregated))
assert.Equal(t, false, aggregated[0].AggregationBits.BitAt(3))
}

View File

@@ -4,23 +4,16 @@ import (
"context"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
v "github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
)
func (vs *Server) getSlashings(ctx context.Context, head state.BeaconState) ([]*ethpb.ProposerSlashing, []ethpb.AttSlashing) {
var err error
exitInfo := v.ExitInformation(head)
if err := helpers.UpdateTotalActiveBalanceCache(head, exitInfo.TotalActiveBalance); err != nil {
log.WithError(err).Warn("Could not update total active balance cache")
}
proposerSlashings := vs.SlashingsPool.PendingProposerSlashings(ctx, head, false /*noLimit*/)
validProposerSlashings := make([]*ethpb.ProposerSlashing, 0, len(proposerSlashings))
for _, slashing := range proposerSlashings {
_, err = blocks.ProcessProposerSlashing(ctx, head, slashing, exitInfo)
_, err := blocks.ProcessProposerSlashing(ctx, head, slashing, v.SlashValidator)
if err != nil {
log.WithError(err).Warn("Could not validate proposer slashing for block inclusion")
continue
@@ -30,7 +23,7 @@ func (vs *Server) getSlashings(ctx context.Context, head state.BeaconState) ([]*
attSlashings := vs.SlashingsPool.PendingAttesterSlashings(ctx, head, false /*noLimit*/)
validAttSlashings := make([]ethpb.AttSlashing, 0, len(attSlashings))
for _, slashing := range attSlashings {
_, err = blocks.ProcessAttesterSlashing(ctx, head, slashing, exitInfo)
_, err := blocks.ProcessAttesterSlashing(ctx, head, slashing, v.SlashValidator)
if err != nil {
log.WithError(err).Warn("Could not validate attester slashing for block inclusion")
continue

View File

@@ -6,7 +6,6 @@ import (
"testing"
"time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/builder"
builderTest "github.com/OffchainLabs/prysm/v6/beacon-chain/builder/testing"
@@ -895,9 +894,6 @@ func injectSlashings(t *testing.T, st state.BeaconState, keys []bls.SecretKey, s
}
func TestProposer_ProposeBlock_OK(t *testing.T) {
// Initialize KZG for Fulu blocks
require.NoError(t, kzg.Start())
tests := []struct {
name string
block func([32]byte) *ethpb.GenericSignedBeaconBlock
@@ -1102,131 +1098,6 @@ func TestProposer_ProposeBlock_OK(t *testing.T) {
},
err: "blob KZG commitments don't match number of blobs or KZG proofs",
},
{
name: "fulu block no blob",
block: func(parent [32]byte) *ethpb.GenericSignedBeaconBlock {
sb := &ethpb.SignedBeaconBlockContentsFulu{
Block: &ethpb.SignedBeaconBlockFulu{
Block: &ethpb.BeaconBlockElectra{Slot: 5, ParentRoot: parent[:], Body: util.HydrateBeaconBlockBodyElectra(&ethpb.BeaconBlockBodyElectra{})},
},
}
blk := &ethpb.GenericSignedBeaconBlock_Fulu{Fulu: sb}
return &ethpb.GenericSignedBeaconBlock{Block: blk, IsBlinded: false}
},
},
{
name: "fulu block with single blob and cell proofs",
block: func(parent [32]byte) *ethpb.GenericSignedBeaconBlock {
numberOfColumns := uint64(128)
// For Fulu, we have cell proofs (blobs * numberOfColumns)
cellProofs := make([][]byte, numberOfColumns)
for i := uint64(0); i < numberOfColumns; i++ {
cellProofs[i] = bytesutil.PadTo([]byte{byte(i)}, 48)
}
// Blob must be exactly 131072 bytes
blob := make([]byte, 131072)
blob[0] = 0x01
sb := &ethpb.SignedBeaconBlockContentsFulu{
Block: &ethpb.SignedBeaconBlockFulu{
Block: &ethpb.BeaconBlockElectra{
Slot: 5, ParentRoot: parent[:],
Body: util.HydrateBeaconBlockBodyElectra(&ethpb.BeaconBlockBodyElectra{
BlobKzgCommitments: [][]byte{bytesutil.PadTo([]byte("kc"), 48)},
}),
},
},
KzgProofs: cellProofs,
Blobs: [][]byte{blob},
}
blk := &ethpb.GenericSignedBeaconBlock_Fulu{Fulu: sb}
return &ethpb.GenericSignedBeaconBlock{Block: blk, IsBlinded: false}
},
},
{
name: "fulu block with multiple blobs and cell proofs",
block: func(parent [32]byte) *ethpb.GenericSignedBeaconBlock {
numberOfColumns := uint64(128)
blobCount := 3
// For Fulu, we have cell proofs (blobs * numberOfColumns)
cellProofs := make([][]byte, uint64(blobCount)*numberOfColumns)
for i := range cellProofs {
cellProofs[i] = bytesutil.PadTo([]byte{byte(i % 256)}, 48)
}
// Create properly sized blobs (131072 bytes each)
blobs := make([][]byte, blobCount)
for i := 0; i < blobCount; i++ {
blob := make([]byte, 131072)
blob[0] = byte(i + 1)
blobs[i] = blob
}
sb := &ethpb.SignedBeaconBlockContentsFulu{
Block: &ethpb.SignedBeaconBlockFulu{
Block: &ethpb.BeaconBlockElectra{
Slot: 5, ParentRoot: parent[:],
Body: util.HydrateBeaconBlockBodyElectra(&ethpb.BeaconBlockBodyElectra{
BlobKzgCommitments: [][]byte{
bytesutil.PadTo([]byte("kc"), 48),
bytesutil.PadTo([]byte("kc1"), 48),
bytesutil.PadTo([]byte("kc2"), 48),
},
}),
},
},
KzgProofs: cellProofs,
Blobs: blobs,
}
blk := &ethpb.GenericSignedBeaconBlock_Fulu{Fulu: sb}
return &ethpb.GenericSignedBeaconBlock{Block: blk, IsBlinded: false}
},
},
{
name: "fulu block wrong cell proof count (should be blobs * 128)",
block: func(parent [32]byte) *ethpb.GenericSignedBeaconBlock {
// Wrong number of cell proofs - should be 2 * 128 = 256, but providing only 2
// Create properly sized blobs
blob1 := make([]byte, 131072)
blob1[0] = 0x01
blob2 := make([]byte, 131072)
blob2[0] = 0x02
sb := &ethpb.SignedBeaconBlockContentsFulu{
Block: &ethpb.SignedBeaconBlockFulu{
Block: &ethpb.BeaconBlockElectra{
Slot: 5, ParentRoot: parent[:],
Body: util.HydrateBeaconBlockBodyElectra(&ethpb.BeaconBlockBodyElectra{
BlobKzgCommitments: [][]byte{
bytesutil.PadTo([]byte("kc"), 48),
bytesutil.PadTo([]byte("kc1"), 48),
},
}),
},
},
KzgProofs: [][]byte{{0x01}, {0x02}}, // Wrong: should be 256 cell proofs
Blobs: [][]byte{blob1, blob2},
}
blk := &ethpb.GenericSignedBeaconBlock_Fulu{Fulu: sb}
return &ethpb.GenericSignedBeaconBlock{Block: blk, IsBlinded: false}
},
err: "blobs and cells proofs mismatch",
},
{
name: "blind fulu block with blob commitments",
block: func(parent [32]byte) *ethpb.GenericSignedBeaconBlock {
blockToPropose := util.NewBlindedBeaconBlockFulu()
blockToPropose.Message.Slot = 5
blockToPropose.Message.ParentRoot = parent[:]
txRoot, err := ssz.TransactionsRoot([][]byte{})
require.NoError(t, err)
withdrawalsRoot, err := ssz.WithdrawalSliceRoot([]*enginev1.Withdrawal{}, fieldparams.MaxWithdrawalsPerPayload)
require.NoError(t, err)
blockToPropose.Message.Body.ExecutionPayloadHeader.TransactionsRoot = txRoot[:]
blockToPropose.Message.Body.ExecutionPayloadHeader.WithdrawalsRoot = withdrawalsRoot[:]
blockToPropose.Message.Body.BlobKzgCommitments = [][]byte{bytesutil.PadTo([]byte{0x01}, 48)}
blk := &ethpb.GenericSignedBeaconBlock_BlindedFulu{BlindedFulu: blockToPropose}
return &ethpb.GenericSignedBeaconBlock{Block: blk}
},
useBuilder: true,
err: "commitment value doesn't match block", // Known issue with mock builder cell proof mismatch
},
}
for _, tt := range tests {
@@ -1240,29 +1111,15 @@ func TestProposer_ProposeBlock_OK(t *testing.T) {
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
db := dbutil.SetupDB(t)
// Create cell proofs for Fulu blocks (128 proofs per blob)
numberOfColumns := uint64(128)
cellProofs := make([][]byte, numberOfColumns)
for i := uint64(0); i < numberOfColumns; i++ {
cellProofs[i] = bytesutil.PadTo([]byte{byte(i)}, 48)
}
// Create properly sized blob for mock builder
mockBlob := make([]byte, 131072)
mockBlob[0] = 0x03
// Use the same commitment as in the blind block test
mockCommitment := bytesutil.PadTo([]byte{0x01}, 48)
proposerServer := &Server{
BlockReceiver: c,
BlockNotifier: c.BlockNotifier(),
P2P: mockp2p.NewTestP2P(t),
BlockBuilder: &builderTest.MockBuilderService{HasConfigured: tt.useBuilder, PayloadCapella: emptyPayloadCapella(), PayloadDeneb: emptyPayloadDeneb(),
BlobBundle: &enginev1.BlobsBundle{KzgCommitments: [][]byte{mockCommitment}, Proofs: [][]byte{{0x02}}, Blobs: [][]byte{{0x03}}},
BlobBundleV2: &enginev1.BlobsBundleV2{KzgCommitments: [][]byte{mockCommitment}, Proofs: cellProofs, Blobs: [][]byte{mockBlob}}},
BeaconDB: db,
BlobReceiver: c,
DataColumnReceiver: c, // Add DataColumnReceiver for Fulu blocks
OperationNotifier: c.OperationNotifier(),
BlobBundle: &enginev1.BlobsBundle{KzgCommitments: [][]byte{bytesutil.PadTo([]byte{0x01}, 48)}, Proofs: [][]byte{{0x02}}, Blobs: [][]byte{{0x03}}}},
BeaconDB: db,
BlobReceiver: c,
OperationNotifier: c.OperationNotifier(),
}
blockToPropose := tt.block(bsRoot)
res, err := proposerServer.ProposeBeaconBlock(t.Context(), blockToPropose)
@@ -3096,6 +2953,49 @@ func TestProposer_DeleteAttsInPool_Aggregated(t *testing.T) {
assert.Equal(t, 0, len(atts), "Did not delete unaggregated attestation")
}
func TestProposer_GetSyncAggregate_OK(t *testing.T) {
proposerServer := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: false},
SyncCommitteePool: synccommittee.NewStore(),
}
r := params.BeaconConfig().ZeroHash
conts := []*ethpb.SyncCommitteeContribution{
{Slot: 1, SubcommitteeIndex: 0, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b0001}, BlockRoot: r[:]},
{Slot: 1, SubcommitteeIndex: 0, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1001}, BlockRoot: r[:]},
{Slot: 1, SubcommitteeIndex: 0, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1110}, BlockRoot: r[:]},
{Slot: 1, SubcommitteeIndex: 1, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b0001}, BlockRoot: r[:]},
{Slot: 1, SubcommitteeIndex: 1, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1001}, BlockRoot: r[:]},
{Slot: 1, SubcommitteeIndex: 1, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1110}, BlockRoot: r[:]},
{Slot: 1, SubcommitteeIndex: 2, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b0001}, BlockRoot: r[:]},
{Slot: 1, SubcommitteeIndex: 2, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1001}, BlockRoot: r[:]},
{Slot: 1, SubcommitteeIndex: 2, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1110}, BlockRoot: r[:]},
{Slot: 1, SubcommitteeIndex: 3, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b0001}, BlockRoot: r[:]},
{Slot: 1, SubcommitteeIndex: 3, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1001}, BlockRoot: r[:]},
{Slot: 1, SubcommitteeIndex: 3, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b1110}, BlockRoot: r[:]},
{Slot: 2, SubcommitteeIndex: 0, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b10101010}, BlockRoot: r[:]},
{Slot: 2, SubcommitteeIndex: 1, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b10101010}, BlockRoot: r[:]},
{Slot: 2, SubcommitteeIndex: 2, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b10101010}, BlockRoot: r[:]},
{Slot: 2, SubcommitteeIndex: 3, Signature: bls.NewAggregateSignature().Marshal(), AggregationBits: []byte{0b10101010}, BlockRoot: r[:]},
}
for _, cont := range conts {
require.NoError(t, proposerServer.SyncCommitteePool.SaveSyncCommitteeContribution(cont))
}
aggregate, err := proposerServer.getSyncAggregate(t.Context(), 1, bytesutil.ToBytes32(conts[0].BlockRoot))
require.NoError(t, err)
require.DeepEqual(t, bitfield.Bitvector32{0xf, 0xf, 0xf, 0xf}, aggregate.SyncCommitteeBits)
aggregate, err = proposerServer.getSyncAggregate(t.Context(), 2, bytesutil.ToBytes32(conts[0].BlockRoot))
require.NoError(t, err)
require.DeepEqual(t, bitfield.Bitvector32{0xaa, 0xaa, 0xaa, 0xaa}, aggregate.SyncCommitteeBits)
aggregate, err = proposerServer.getSyncAggregate(t.Context(), 3, bytesutil.ToBytes32(conts[0].BlockRoot))
require.NoError(t, err)
require.DeepEqual(t, bitfield.NewBitvector32(), aggregate.SyncCommitteeBits)
}
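The Fulu test cases earlier in this file hinge on a simple count rule: a block carrying N blobs must carry N * 128 cell proofs (so 2 blobs need 256 proofs, which is why the "wrong cell proof count" case fails with only 2). A small sketch of that check, with the 128-columns-per-blob figure taken as an assumption:

```go
package main

import "fmt"

// numberOfColumns mirrors the per-blob cell count used in the tests above;
// the real value comes from the beacon chain configuration.
const numberOfColumns = 128

// validateCellProofCount reproduces the counting rule exercised by the Fulu
// proposal tests: cell proofs must equal blobs * numberOfColumns.
func validateCellProofCount(blobCount, proofCount int) error {
	if want := blobCount * numberOfColumns; proofCount != want {
		return fmt.Errorf("blobs and cells proofs mismatch: got %d proofs, want %d", proofCount, want)
	}
	return nil
}

func main() {
	fmt.Println(validateCellProofCount(1, 128)) // <nil>
	fmt.Println(validateCellProofCount(3, 384)) // <nil>
	fmt.Println(validateCellProofCount(2, 2))   // mismatch: 2 blobs need 2 * 128 = 256 proofs
}
```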
func TestProposer_PrepareBeaconProposer(t *testing.T) {
type args struct {
request *ethpb.PrepareBeaconProposerRequest

View File

@@ -69,7 +69,6 @@ type Server struct {
SyncCommitteePool synccommittee.Pool
BlockReceiver blockchain.BlockReceiver
BlobReceiver blockchain.BlobReceiver
DataColumnReceiver blockchain.DataColumnReceiver
MockEth1Votes bool
Eth1BlockFetcher execution.POWBlockFetcher
PendingDepositsFetcher depositsnapshot.PendingDepositsFetcher

View File

@@ -89,7 +89,6 @@ type Config struct {
AttestationReceiver blockchain.AttestationReceiver
BlockReceiver blockchain.BlockReceiver
BlobReceiver blockchain.BlobReceiver
DataColumnReceiver blockchain.DataColumnReceiver
ExecutionChainService execution.Chain
ChainStartFetcher execution.ChainStartFetcher
ExecutionChainInfoFetcher execution.ChainInfoFetcher
@@ -121,7 +120,6 @@ type Config struct {
Router *http.ServeMux
ClockWaiter startup.ClockWaiter
BlobStorage *filesystem.BlobStorage
DataColumnStorage *filesystem.DataColumnStorage
TrackedValidatorsCache *cache.TrackedValidatorsCache
PayloadIDCache *cache.PayloadIDCache
LCStore *lightClient.Store
@@ -198,7 +196,6 @@ func NewService(ctx context.Context, cfg *Config) *Service {
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
BlobStorage: s.cfg.BlobStorage,
DataColumnStorage: s.cfg.DataColumnStorage,
}
rewardFetcher := &rewards.BlockRewardService{Replayer: ch, DB: s.cfg.BeaconDB}
coreService := &core.Service{
@@ -239,7 +236,6 @@ func NewService(ctx context.Context, cfg *Config) *Service {
P2P: s.cfg.Broadcaster,
BlockReceiver: s.cfg.BlockReceiver,
BlobReceiver: s.cfg.BlobReceiver,
DataColumnReceiver: s.cfg.DataColumnReceiver,
MockEth1Votes: s.cfg.MockEth1Votes,
Eth1BlockFetcher: s.cfg.ExecutionChainService,
PendingDepositsFetcher: s.cfg.PendingDepositFetcher,

View File

@@ -8,7 +8,6 @@ go_library(
"mock_blocker.go",
"mock_exec_chain_info_fetcher.go",
"mock_genesis_timefetcher.go",
"mock_sidecars.go",
"mock_stater.go",
],
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/testutil",

View File

@@ -1,44 +0,0 @@
package testutil
import ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
// CreateDataColumnSidecar generates a filled dummy data column sidecar
func CreateDataColumnSidecar(index uint64, data []byte) *ethpb.DataColumnSidecar {
return &ethpb.DataColumnSidecar{
Index: index,
Column: [][]byte{data},
SignedBlockHeader: &ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
Slot: 1,
ProposerIndex: 1,
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
},
Signature: make([]byte, 96),
},
KzgCommitments: [][]byte{make([]byte, 48)},
KzgProofs: [][]byte{make([]byte, 48)},
KzgCommitmentsInclusionProof: [][]byte{make([]byte, 32)},
}
}
// CreateBlobSidecar generates a filled dummy data blob sidecar
func CreateBlobSidecar(index uint64, blob []byte) *ethpb.BlobSidecar {
return &ethpb.BlobSidecar{
Index: index,
Blob: blob,
SignedBlockHeader: &ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
Slot: 1,
ProposerIndex: 1,
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
},
Signature: make([]byte, 96),
},
KzgCommitment: make([]byte, 48),
KzgProof: make([]byte, 48),
}
}

View File

@@ -67,13 +67,6 @@ func WithNower(n Nower) ClockOpt {
}
}
// WithTimeAsNow will create a Nower based on the given time.Time and set it as the Now() implementation.
func WithTimeAsNow(t time.Time) ClockOpt {
return func(g *Clock) {
g.now = func() time.Time { return t }
}
}
// NewClock constructs a Clock value from a genesis timestamp (t) and a Genesis Validator Root (vr).
// The WithNower ClockOpt can be used in tests to specify an alternate `time.Now` implementation,
// for instance to return a value for `Now` spanning a certain number of slots from genesis time, to control the current slot.
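WithTimeAsNow and WithNower are functional options over the clock's notion of "now"; the deletion above keeps only WithNower, which subsumes the fixed-time case. A generic sketch of that pattern, with illustrative names rather than the actual startup package API:

```go
package main

import (
	"fmt"
	"time"
)

// Nower returns the current time; tests can substitute a deterministic one.
type Nower func() time.Time

// Clock is a minimal stand-in for a genesis-anchored clock.
type Clock struct {
	genesis time.Time
	now     Nower
}

type ClockOpt func(*Clock)

// WithNower overrides the Now implementation, e.g. to pin tests to a slot.
func WithNower(n Nower) ClockOpt {
	return func(c *Clock) { c.now = n }
}

// WithTimeAsNow is just a special case of WithNower, which is why it can be
// dropped without losing any capability.
func WithTimeAsNow(t time.Time) ClockOpt {
	return WithNower(func() time.Time { return t })
}

func NewClock(genesis time.Time, opts ...ClockOpt) *Clock {
	c := &Clock{genesis: genesis, now: time.Now}
	for _, o := range opts {
		o(c)
	}
	return c
}

func main() {
	genesis := time.Unix(1_606_824_023, 0) // illustrative genesis timestamp
	c := NewClock(genesis, WithTimeAsNow(genesis.Add(36*time.Second)))
	fmt.Println("seconds since genesis:", c.now().Sub(c.genesis).Seconds()) // 36, i.e. 3 slots at 12s each
}
```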

View File

@@ -265,7 +265,6 @@ type WriteOnlyEth1Data interface {
AppendEth1DataVotes(val *ethpb.Eth1Data) error
SetEth1DepositIndex(val uint64) error
ExitEpochAndUpdateChurn(exitBalance primitives.Gwei) (primitives.Epoch, error)
ExitEpochAndUpdateChurnForTotalBal(totalActiveBalance primitives.Gwei, exitBalance primitives.Gwei) (primitives.Epoch, error)
}
// WriteOnlyValidators defines a struct which only has write access to validators methods.

View File

@@ -44,27 +44,11 @@ func (b *BeaconState) ExitEpochAndUpdateChurn(exitBalance primitives.Gwei) (prim
return 0, err
}
return b.exitEpochAndUpdateChurn(primitives.Gwei(activeBal), exitBalance)
}
// ExitEpochAndUpdateChurnForTotalBal has the same functionality as ExitEpochAndUpdateChurn,
// the only difference being how total active balance is obtained. In ExitEpochAndUpdateChurn
// it is calculated inside the function and in ExitEpochAndUpdateChurnForTotalBal it's a
// function argument.
func (b *BeaconState) ExitEpochAndUpdateChurnForTotalBal(totalActiveBalance primitives.Gwei, exitBalance primitives.Gwei) (primitives.Epoch, error) {
if b.version < version.Electra {
return 0, errNotSupported("ExitEpochAndUpdateChurnForTotalBal", b.version)
}
return b.exitEpochAndUpdateChurn(totalActiveBalance, exitBalance)
}
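ExitEpochAndUpdateChurnForTotalBal lets a caller that already holds the total active balance (as getSlashings does via ExitInformation) pass it in rather than recompute it on every call. A generic sketch of that hoisting pattern, using made-up helper names and an assumed churn-limit quotient:

```go
package main

import "fmt"

// Gwei is a stand-in for the primitives.Gwei type.
type Gwei uint64

// totalActiveBalance pretends to be the expensive per-state computation.
func totalActiveBalance(balances []Gwei) Gwei {
	var sum Gwei
	for _, b := range balances {
		sum += b
	}
	return sum
}

// churnLimit is an illustrative per-epoch churn derived from the total
// balance; the quotient is an assumption, not the real config value.
func churnLimit(total Gwei) Gwei {
	const quotient = 65536
	if limit := total / quotient; limit > 0 {
		return limit
	}
	return 1
}

// processExit takes the precomputed total so callers in a loop pay the
// balance computation only once, mirroring the ForTotalBal variant above.
func processExit(total Gwei, exitBalance Gwei) Gwei {
	limit := churnLimit(total)
	if exitBalance < limit {
		return exitBalance
	}
	return limit
}

func main() {
	balances := []Gwei{32e9, 32e9, 32e9, 32e9}
	total := totalActiveBalance(balances) // hoisted out of the loop
	for _, exit := range []Gwei{1e9, 5e9} {
		fmt.Println("consumable this epoch:", processExit(total, exit))
	}
}
```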
func (b *BeaconState) exitEpochAndUpdateChurn(totalActiveBalance primitives.Gwei, exitBalance primitives.Gwei) (primitives.Epoch, error) {
b.lock.Lock()
defer b.lock.Unlock()
earliestExitEpoch := max(b.earliestExitEpoch, helpers.ActivationExitEpoch(slots.ToEpoch(b.slot)))
perEpochChurn := helpers.ActivationExitChurnLimit(totalActiveBalance) // Guaranteed to be non-zero.
perEpochChurn := helpers.ActivationExitChurnLimit(primitives.Gwei(activeBal)) // Guaranteed to be non-zero.
// New epoch for exits
var exitBalanceToConsume primitives.Gwei

View File

@@ -5,8 +5,10 @@ go_library(
srcs = [
"batch_verifier.go",
"block_batcher.go",
"broadcast_bls_changes.go",
"context.go",
"custody.go",
"data_column_assignment.go",
"data_column_sidecars.go",
"data_columns_reconstruct.go",
"deadlines.go",
@@ -136,6 +138,7 @@ go_library(
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
"@com_github_hashicorp_golang_lru//:go_default_library",
"@com_github_libp2p_go_libp2p//core:go_default_library",
"@com_github_libp2p_go_libp2p//core/host:go_default_library",
@@ -164,6 +167,7 @@ go_test(
"batch_verifier_test.go",
"blobs_test.go",
"block_batcher_test.go",
"broadcast_bls_changes_test.go",
"context_test.go",
"custody_test.go",
"data_column_sidecars_test.go",
@@ -192,7 +196,6 @@ go_test(
"slot_aware_cache_test.go",
"subscriber_beacon_aggregate_proof_test.go",
"subscriber_beacon_blocks_test.go",
"subscriber_data_column_sidecar_test.go",
"subscriber_test.go",
"subscription_topic_handler_test.go",
"sync_fuzz_test.go",
@@ -265,7 +268,6 @@ go_test(
"//crypto/rand:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz/equality:go_default_library",
"//genesis:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/attestation:go_default_library",

View File

@@ -6,6 +6,8 @@ go_library(
"batch.go",
"batcher.go",
"blobs.go",
"columns.go",
"fulu_transition.go",
"log.go",
"metrics.go",
"pool.go",
@@ -18,6 +20,7 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/das:go_default_library",
"//beacon-chain/db:go_default_library",
@@ -66,6 +69,7 @@ go_test(
"//beacon-chain/das:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/filesystem:go_default_library",
"//beacon-chain/p2p/peers:go_default_library",
"//beacon-chain/p2p/testing:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state:go_default_library",

View File

@@ -6,7 +6,6 @@ import (
"sort"
"time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/das"
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
@@ -38,8 +37,10 @@ func (s batchState) String() string {
return "import_complete"
case batchEndSequence:
return "end_sequence"
case batchBlobSync:
return "blob_sync"
case batchSyncBlobs:
return "sync_blobs"
case batchSyncColumns:
return "sync_columns"
default:
return "unknown"
}
@@ -50,7 +51,9 @@ const (
batchInit
batchSequenced
batchErrRetryable
batchBlobSync
batchErrFatal
batchSyncBlobs
batchSyncColumns
batchImportable
batchImportComplete
batchEndSequence
@@ -72,9 +75,12 @@ type batch struct {
err error
state batchState
busy peer.ID
nextReqCols []uint64
blockPid peer.ID
blobPid peer.ID
columnPid peer.ID
bs *blobSync
cs *columnSync
}
func (b batch) logFields() logrus.Fields {
@@ -93,6 +99,9 @@ func (b batch) logFields() logrus.Fields {
if b.retries > 0 {
f["retryAfter"] = b.retryAfter.String()
}
if b.state == batchSyncColumns {
f["nextColumns"] = fmt.Sprintf("%v", b.nextReqCols)
}
return f
}
@@ -136,22 +145,29 @@ func (b batch) blobRequest() *eth.BlobSidecarsByRangeRequest {
}
}
func (b batch) withResults(results verifiedROBlocks, bs *blobSync) batch {
func (b batch) postBlockSync(results verifiedROBlocks, bs *blobSync, cs *columnSync) batch {
b.results = results
b.bs = bs
b.cs = cs
if bs.blobsNeeded() > 0 {
return b.withState(batchBlobSync)
return b.withState(batchSyncBlobs)
}
if len(cs.columnsNeeded()) > 0 {
return b.withState(batchSyncColumns)
}
return b.withState(batchImportable)
}
func (b batch) postBlobSync() batch {
func (b batch) postSidecarSync() batch {
if b.blobsNeeded() > 0 {
log.WithFields(b.logFields()).WithField("blobsMissing", b.blobsNeeded()).Error("Batch still missing blobs after downloading from peer")
b.bs = nil
b.results = []blocks.ROBlock{}
return b.withState(batchErrRetryable)
}
if len(b.cs.columnsNeeded()) > 0 {
return b.withState(batchSyncColumns)
}
return b.withState(batchImportable)
}
@@ -187,6 +203,11 @@ func (b batch) withRetryableError(err error) batch {
return b.withState(batchErrRetryable)
}
func (b batch) withFatalError(err error) batch {
b.err = errors.Wrap(err, "fatal error in batch")
return b.withState(batchErrFatal)
}
func (b batch) blobsNeeded() int {
return b.bs.blobsNeeded()
}
@@ -195,8 +216,8 @@ func (b batch) blobResponseValidator() sync.BlobResponseValidation {
return b.bs.validateNext
}
func (b batch) availabilityStore() das.AvailabilityStore {
return b.bs.store
func (b batch) validatingColumnRequest() *validatingColumnRequest {
return b.cs.newValidatingColumnRequest(b.nextReqCols)
}
var batchBlockUntil = func(ctx context.Context, untilRetry time.Duration, b batch) error {
@@ -223,6 +244,18 @@ func (b batch) waitUntilReady(ctx context.Context) error {
return nil
}
func (b batch) workComplete() bool {
return b.state == batchImportable
}
func (b batch) selectPeer(ranking *sync.ColumnScarcityRanking, busy map[peer.ID]bool) (peer.ID, []uint64, error) {
if b.state == batchSyncColumns {
return ranking.ForColumns(b.cs.columnsNeeded(), busy)
}
peer, err := ranking.ForBlocks(busy)
return peer, nil, err
}
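The batch type above is a small state machine: postBlockSync routes a batch to blob sync, column sync, or straight to importable, and postSidecarSync re-checks once sidecars arrive. A simplified sketch of that routing order, with blobsNeeded/columnsNeeded stubbed as plain parameters:

```go
package main

import "fmt"

type batchState int

const (
	batchSyncBlobs batchState = iota
	batchSyncColumns
	batchImportable
	batchErrRetryable
)

// nextState mirrors the decision order in postBlockSync above: outstanding
// blobs take priority, then outstanding columns, otherwise the batch is
// ready to import.
func nextState(blobsNeeded int, columnsNeeded []uint64) batchState {
	if blobsNeeded > 0 {
		return batchSyncBlobs
	}
	if len(columnsNeeded) > 0 {
		return batchSyncColumns
	}
	return batchImportable
}

func main() {
	fmt.Println(nextState(2, nil))                 // 0 -> sync blobs first
	fmt.Println(nextState(0, []uint64{7, 12, 64})) // 1 -> then columns
	fmt.Println(nextState(0, nil))                 // 2 -> importable
}
```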
func sortBatchDesc(bb []batch) {
sort.Slice(bb, func(i, j int) bool {
return bb[i].end > bb[j].end

View File

@@ -48,7 +48,7 @@ func newBlobSync(current primitives.Slot, vbs verifiedROBlocks, cfg *blobSyncCon
type blobVerifierMap map[[32]byte][]verification.BlobVerifier
type blobSync struct {
store das.AvailabilityStore
store *das.LazilyPersistentStoreBlob
expected []blobSummary
next int
bbv *blobBatchVerifier

Some files were not shown because too many files have changed in this diff.