Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 21:38:05 -05:00)
Compare commits
57 Commits
reward-ove...findPeersW
| Author | SHA1 | Date |
|---|---|---|
| | 663b53829c | |
| | 4b09dd4aa5 | |
| | 1dab5a9f8a | |
| | d681232fe6 | |
| | 1d24f89c96 | |
| | 967193e6a2 | |
| | 9e40551852 | |
| | a8cab58f7e | |
| | df86f57507 | |
| | 9b0a3e9632 | |
| | 5e079aa62c | |
| | 5c68ec5c39 | |
| | 5410232bef | |
| | 3f5c4df7e0 | |
| | 5c348dff59 | |
| | 8136ff7c3a | |
| | f690af81fa | |
| | 029b896c79 | |
| | e1117a7de2 | |
| | 39b2a02f66 | |
| | 4e8a710b64 | |
| | 7191a5bcdf | |
| | d335a52c49 | |
| | c7401f5e75 | |
| | 0057cc57b5 | |
| | b1dc5e485d | |
| | f035da6fc5 | |
| | 854f4bc9a3 | |
| | 1933adedbf | |
| | 278b796e43 | |
| | 8e52d0c3c6 | |
| | d339e09509 | |
| | 8ec460223c | |
| | 349d9d2fd0 | |
| | e0aecb9c32 | |
| | 4a1ab70929 | |
| | 240cd1d058 | |
| | 26d8b6b786 | |
| | 92c359456e | |
| | d48ed44c4c | |
| | dbab624f3d | |
| | 83e9cc3dbb | |
| | ee03c7cce2 | |
| | c5135f6995 | |
| | 29aedac113 | |
| | 08fb3812b7 | |
| | 07738dd9a4 | |
| | 53df29b07f | |
| | 00cf1f2507 | |
| | 6528fb9cea | |
| | 5021131811 | |
| | 26cec9d9c7 | |
| | 4ed90a02ef | |
| | 7d528c75bb | |
| | e7b2953d5a | |
| | acf35e849e | |
| | c826d334a1 | |
2 .github/ISSUE_TEMPLATE/bug_report.yml (vendored)
@@ -1,6 +1,6 @@
 name: 🐞 Bug report
 description: Report a bug or problem with running Prysm
-labels: ["Bug"]
+type: "Bug"
 body:
   - type: markdown
     attributes:
43 .github/workflows/check-specrefs.yml (vendored, new file)
@@ -0,0 +1,43 @@
+name: Check Spec References
+on: [push, pull_request]
+
+jobs:
+  check-specrefs:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - name: Check version consistency
+        run: |
+          WORKSPACE_VERSION=$(grep 'consensus_spec_version = ' WORKSPACE | sed 's/.*"\(.*\)"/\1/')
+          ETHSPECIFY_VERSION=$(grep '^version:' specrefs/.ethspecify.yml | sed 's/version: //')
+          if [ "$WORKSPACE_VERSION" != "$ETHSPECIFY_VERSION" ]; then
+            echo "Version mismatch between WORKSPACE and ethspecify"
+            echo "  WORKSPACE: $WORKSPACE_VERSION"
+            echo "  specrefs/.ethspecify.yml: $ETHSPECIFY_VERSION"
+            exit 1
+          else
+            echo "Versions match: $WORKSPACE_VERSION"
+          fi
+
+      - name: Install ethspecify
+        run: python3 -mpip install ethspecify
+
+      - name: Update spec references
+        run: ethspecify process --path=specrefs
+
+      - name: Check for differences
+        run: |
+          if ! git diff --exit-code specrefs >/dev/null; then
+            echo "Spec references are out-of-date!"
+            echo ""
+            git --no-pager diff specrefs
+            exit 1
+          else
+            echo "Spec references are up-to-date!"
+          fi
+
+      - name: Check spec references
+        run: ethspecify check --path=specrefs
@@ -2993,7 +2993,7 @@ There are two known issues with this release:
 
 ### Added
 
-- Web3Signer support. See the [documentation](https://docs.prylabs.network/docs/next/wallet/web3signer) for more
+- Web3Signer support. See the [documentation](https://prysm.offchainlabs.com/docs/manage-wallet/web3signer/) for more
   details.
 - Bellatrix support. See [kiln testnet instructions](https://hackmd.io/OqIoTiQvS9KOIataIFksBQ?view)
 - Weak subjectivity sync / checkpoint sync. This is an experimental feature and may have unintended side effects for
@@ -2,7 +2,7 @@
 
 Prysm is go project with many complicated dependencies, including some c++ based libraries. There
 are two parts to Prysm's dependency management. Go modules and bazel managed dependencies. Be sure
-to read [Why Bazel?](https://github.com/OffchainLabs/documentation/issues/138) to fully
+to read [Why Bazel?](https://prysm.offchainlabs.com/docs/install-prysm/install-with-bazel/#why-bazel) to fully
 understand the reasoning behind an additional layer of build tooling via Bazel rather than a pure
 "go build" project.
10 WORKSPACE
@@ -253,16 +253,16 @@ filegroup(
     url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
 )
 
-consensus_spec_version = "v1.6.0-alpha.1"
+consensus_spec_version = "v1.6.0-alpha.6"
 
 load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests")
 
 consensus_spec_tests(
     name = "consensus_spec_tests",
     flavors = {
-        "general": "sha256-o4t9p3R+fQHF4KOykGmwlG3zDw5wUdVWprkzId8aIsk=",
-        "minimal": "sha256-sU7ToI8t3MR8x0vVjC8ERmAHZDWpEmnAC9FWIpHi5x4=",
-        "mainnet": "sha256-YKS4wngg0LgI9Upp4MYJ77aG+8+e/G4YeqEIlp06LZw=",
+        "general": "sha256-7wkWuahuCO37uVYnxq8Badvi+jY907pBj68ixL8XDOI=",
+        "minimal": "sha256-Qy/f27N0LffS/ej7VhIubwDejD6LMK0VdenKkqtZVt4=",
+        "mainnet": "sha256-3H7mu5yE+FGz2Wr/nc8Nd9aEu93YoEpsYtn0zBSoeDE=",
     },
     version = consensus_spec_version,
 )

@@ -278,7 +278,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-Nv4TEuEJPQIM4E6T9J0FOITsmappmXZjGtlhe1HEXnU=",
+    integrity = "sha256-uvz3XfMTGfy3/BtQQoEp5XQOgrWgcH/5Zo/gR0iiP+k=",
     strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
     url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
 )
@@ -1,9 +1,8 @@
 package httprest
 
 import (
-    "time"
-
     "net/http"
+    "time"
 
     "github.com/OffchainLabs/prysm/v6/api/server/middleware"
 )
@@ -27,7 +27,7 @@ go_library(
         "receive_block.go",
         "receive_data_column.go",
         "service.go",
-        "setup_forchoice.go",
+        "setup_forkchoice.go",
         "tracked_proposer.go",
         "weak_subjectivity_checks.go",
     ],
@@ -102,7 +102,6 @@ func VerifyCellKZGProofBatch(commitmentsBytes []Bytes48, cellIndices []uint64, c
     for i := range cells {
         ckzgCells[i] = ckzg4844.Cell(cells[i])
     }
-
     return ckzg4844.VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, ckzgCells, proofsBytes)
 }
 
@@ -1,32 +1,14 @@
 package kzg
 
 import (
+    "fmt"
+
     "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
     GoKZG "github.com/crate-crypto/go-kzg-4844"
+    ckzg4844 "github.com/ethereum/c-kzg-4844/v2/bindings/go"
+    "github.com/pkg/errors"
 )
 
-// Verify performs single or batch verification of commitments depending on the number of given BlobSidecars.
-func Verify(sidecars ...blocks.ROBlob) error {
-    if len(sidecars) == 0 {
-        return nil
-    }
-    if len(sidecars) == 1 {
-        return kzgContext.VerifyBlobKZGProof(
-            bytesToBlob(sidecars[0].Blob),
-            bytesToCommitment(sidecars[0].KzgCommitment),
-            bytesToKZGProof(sidecars[0].KzgProof))
-    }
-    blobs := make([]GoKZG.Blob, len(sidecars))
-    cmts := make([]GoKZG.KZGCommitment, len(sidecars))
-    proofs := make([]GoKZG.KZGProof, len(sidecars))
-    for i, sidecar := range sidecars {
-        blobs[i] = *bytesToBlob(sidecar.Blob)
-        cmts[i] = bytesToCommitment(sidecar.KzgCommitment)
-        proofs[i] = bytesToKZGProof(sidecar.KzgProof)
-    }
-    return kzgContext.VerifyBlobKZGProofBatch(blobs, cmts, proofs)
-}
-
 func bytesToBlob(blob []byte) *GoKZG.Blob {
     var ret GoKZG.Blob
     copy(ret[:], blob)
@@ -42,3 +24,144 @@ func bytesToKZGProof(proof []byte) (ret GoKZG.KZGProof) {
     copy(ret[:], proof)
     return
 }
+
+// Verify performs single or batch verification of commitments depending on the number of given BlobSidecars.
+func Verify(blobSidecars ...blocks.ROBlob) error {
+    if len(blobSidecars) == 0 {
+        return nil
+    }
+    if len(blobSidecars) == 1 {
+        return kzgContext.VerifyBlobKZGProof(
+            bytesToBlob(blobSidecars[0].Blob),
+            bytesToCommitment(blobSidecars[0].KzgCommitment),
+            bytesToKZGProof(blobSidecars[0].KzgProof))
+    }
+    blobs := make([]GoKZG.Blob, len(blobSidecars))
+    cmts := make([]GoKZG.KZGCommitment, len(blobSidecars))
+    proofs := make([]GoKZG.KZGProof, len(blobSidecars))
+    for i, sidecar := range blobSidecars {
+        blobs[i] = *bytesToBlob(sidecar.Blob)
+        cmts[i] = bytesToCommitment(sidecar.KzgCommitment)
+        proofs[i] = bytesToKZGProof(sidecar.KzgProof)
+    }
+    return kzgContext.VerifyBlobKZGProofBatch(blobs, cmts, proofs)
+}
+
+// VerifyBlobKZGProofBatch verifies KZG proofs for multiple blobs using batch verification.
+// This is more efficient than verifying each blob individually when len(blobs) > 1.
+// For single blob verification, it uses the optimized single verification path.
+func VerifyBlobKZGProofBatch(blobs [][]byte, commitments [][]byte, proofs [][]byte) error {
+    if len(blobs) != len(commitments) || len(blobs) != len(proofs) {
+        return errors.Errorf("number of blobs (%d), commitments (%d), and proofs (%d) must match", len(blobs), len(commitments), len(proofs))
+    }
+
+    if len(blobs) == 0 {
+        return nil
+    }
+
+    // Optimize for single blob case - use single verification to avoid batch overhead
+    if len(blobs) == 1 {
+        return kzgContext.VerifyBlobKZGProof(
+            bytesToBlob(blobs[0]),
+            bytesToCommitment(commitments[0]),
+            bytesToKZGProof(proofs[0]))
+    }
+
+    // Use batch verification for multiple blobs
+    ckzgBlobs := make([]ckzg4844.Blob, len(blobs))
+    ckzgCommitments := make([]ckzg4844.Bytes48, len(commitments))
+    ckzgProofs := make([]ckzg4844.Bytes48, len(proofs))
+
+    for i := range blobs {
+        if len(blobs[i]) != len(ckzg4844.Blob{}) {
+            return fmt.Errorf("blobs len (%d) differs from expected (%d)", len(blobs[i]), len(ckzg4844.Blob{}))
+        }
+        if len(commitments[i]) != len(ckzg4844.Bytes48{}) {
+            return fmt.Errorf("commitments len (%d) differs from expected (%d)", len(commitments[i]), len(ckzg4844.Bytes48{}))
+        }
+        if len(proofs[i]) != len(ckzg4844.Bytes48{}) {
+            return fmt.Errorf("proofs len (%d) differs from expected (%d)", len(proofs[i]), len(ckzg4844.Bytes48{}))
+        }
+        ckzgBlobs[i] = ckzg4844.Blob(blobs[i])
+        ckzgCommitments[i] = ckzg4844.Bytes48(commitments[i])
+        ckzgProofs[i] = ckzg4844.Bytes48(proofs[i])
+    }
+
+    valid, err := ckzg4844.VerifyBlobKZGProofBatch(ckzgBlobs, ckzgCommitments, ckzgProofs)
+    if err != nil {
+        return errors.Wrap(err, "batch verification")
+    }
+    if !valid {
+        return errors.New("batch KZG proof verification failed")
+    }
+
+    return nil
+}
+
+// VerifyCellKZGProofBatchFromBlobData verifies cell KZG proofs in batch format directly from blob data.
+// This is more efficient than reconstructing data column sidecars when you have the raw blob data and cell proofs.
+// For PeerDAS/Fulu, the execution client provides cell proofs in flattened format via BlobsBundleV2.
+// For single blob verification, it optimizes by computing cells once and verifying efficiently.
+func VerifyCellKZGProofBatchFromBlobData(blobs [][]byte, commitments [][]byte, cellProofs [][]byte, numberOfColumns uint64) error {
+    blobCount := uint64(len(blobs))
+    expectedCellProofs := blobCount * numberOfColumns
+
+    if uint64(len(cellProofs)) != expectedCellProofs {
+        return errors.Errorf("expected %d cell proofs, got %d", expectedCellProofs, len(cellProofs))
+    }
+
+    if len(commitments) != len(blobs) {
+        return errors.Errorf("number of commitments (%d) must match number of blobs (%d)", len(commitments), len(blobs))
+    }
+
+    if blobCount == 0 {
+        return nil
+    }
+
+    // Handle multiple blobs - compute cells for all blobs
+    allCells := make([]Cell, 0, expectedCellProofs)
+    allCommitments := make([]Bytes48, 0, expectedCellProofs)
+    allIndices := make([]uint64, 0, expectedCellProofs)
+    allProofs := make([]Bytes48, 0, expectedCellProofs)
+
+    for blobIndex := range blobs {
+        if len(blobs[blobIndex]) != len(Blob{}) {
+            return fmt.Errorf("blobs len (%d) differs from expected (%d)", len(blobs[blobIndex]), len(Blob{}))
+        }
+        // Convert blob to kzg.Blob type
+        blob := Blob(blobs[blobIndex])
+
+        // Compute cells for this blob
+        cells, err := ComputeCells(&blob)
+        if err != nil {
+            return errors.Wrapf(err, "failed to compute cells for blob %d", blobIndex)
+        }
+
+        // Add cells and corresponding data for each column
+        for columnIndex := range numberOfColumns {
+            cellProofIndex := uint64(blobIndex)*numberOfColumns + columnIndex
+            if len(commitments[blobIndex]) != len(Bytes48{}) {
+                return fmt.Errorf("commitments len (%d) differs from expected (%d)", len(commitments[blobIndex]), len(Bytes48{}))
+            }
+            if len(cellProofs[cellProofIndex]) != len(Bytes48{}) {
+                return fmt.Errorf("proofs len (%d) differs from expected (%d)", len(cellProofs[cellProofIndex]), len(Bytes48{}))
+            }
+            allCells = append(allCells, cells[columnIndex])
+            allCommitments = append(allCommitments, Bytes48(commitments[blobIndex]))
+            allIndices = append(allIndices, columnIndex)
+            allProofs = append(allProofs, Bytes48(cellProofs[cellProofIndex]))
+        }
+    }
+
+    // Batch verify all cells
+    valid, err := VerifyCellKZGProofBatch(allCommitments, allIndices, allCells, allProofs)
+    if err != nil {
+        return errors.Wrap(err, "cell batch verification")
+    }
+    if !valid {
+        return errors.New("cell KZG proof batch verification failed")
+    }
+
+    return nil
+}
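For orientation, here is a minimal sketch of how a caller might drive the two new entry points. The import path, the `verifyBundle` helper, and the `fulu` flag are illustrative assumptions, not part of this commit; the key detail it shows is the flattened cell-proof layout, where the proof for blob `b` and column `c` lives at index `b*numberOfColumns+c`.

```go
package main

import (
	// Assumed import path for the kzg package changed in this diff.
	"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
)

// verifyBundle is a hypothetical helper, not part of this commit.
func verifyBundle(blobs, commitments, blobProofs, cellProofs [][]byte, fulu bool) error {
	// NUMBER_OF_COLUMNS from the mainnet preset; a real caller would read it
	// from the beacon config rather than hard-coding it.
	const numberOfColumns = uint64(128)

	if !fulu {
		// Deneb through Electra: one blob proof per blob.
		return kzg.VerifyBlobKZGProofBatch(blobs, commitments, blobProofs)
	}
	// Fulu / PeerDAS: numberOfColumns cell proofs per blob, flattened so the
	// proof for (blob b, column c) sits at index b*numberOfColumns+c.
	return kzg.VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
}
```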
@@ -22,8 +22,8 @@ func GenerateCommitmentAndProof(blob GoKZG.Blob) (GoKZG.KZGCommitment, GoKZG.KZG
 }
 
 func TestVerify(t *testing.T) {
-    sidecars := make([]blocks.ROBlob, 0)
-    require.NoError(t, Verify(sidecars...))
+    blobSidecars := make([]blocks.ROBlob, 0)
+    require.NoError(t, Verify(blobSidecars...))
 }
 
 func TestBytesToAny(t *testing.T) {
@@ -37,6 +37,7 @@ func TestBytesToAny(t *testing.T) {
 }
 
 func TestGenerateCommitmentAndProof(t *testing.T) {
+    require.NoError(t, Start())
     blob := random.GetRandBlob(123)
    commitment, proof, err := GenerateCommitmentAndProof(blob)
    require.NoError(t, err)
@@ -45,3 +46,432 @@ func TestGenerateCommitmentAndProof(t *testing.T) {
     require.Equal(t, expectedCommitment, commitment)
     require.Equal(t, expectedProof, proof)
 }
+
+func TestVerifyBlobKZGProofBatch(t *testing.T) {
+    // Initialize KZG for testing
+    require.NoError(t, Start())
+
+    t.Run("valid single blob batch", func(t *testing.T) {
+        blob := random.GetRandBlob(123)
+        commitment, proof, err := GenerateCommitmentAndProof(blob)
+        require.NoError(t, err)
+
+        blobs := [][]byte{blob[:]}
+        commitments := [][]byte{commitment[:]}
+        proofs := [][]byte{proof[:]}
+
+        err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
+        require.NoError(t, err)
+    })
+
+    t.Run("valid multiple blob batch", func(t *testing.T) {
+        blobCount := 3
+        blobs := make([][]byte, blobCount)
+        commitments := make([][]byte, blobCount)
+        proofs := make([][]byte, blobCount)
+
+        for i := 0; i < blobCount; i++ {
+            blob := random.GetRandBlob(int64(i))
+            commitment, proof, err := GenerateCommitmentAndProof(blob)
+            require.NoError(t, err)
+
+            blobs[i] = blob[:]
+            commitments[i] = commitment[:]
+            proofs[i] = proof[:]
+        }
+
+        err := VerifyBlobKZGProofBatch(blobs, commitments, proofs)
+        require.NoError(t, err)
+    })
+
+    t.Run("empty inputs should pass", func(t *testing.T) {
+        err := VerifyBlobKZGProofBatch([][]byte{}, [][]byte{}, [][]byte{})
+        require.NoError(t, err)
+    })
+
+    t.Run("mismatched input lengths", func(t *testing.T) {
+        blob := random.GetRandBlob(123)
+        commitment, proof, err := GenerateCommitmentAndProof(blob)
+        require.NoError(t, err)
+
+        // Test different mismatch scenarios
+        err = VerifyBlobKZGProofBatch(
+            [][]byte{blob[:]},
+            [][]byte{},
+            [][]byte{proof[:]},
+        )
+        require.ErrorContains(t, "number of blobs (1), commitments (0), and proofs (1) must match", err)
+
+        err = VerifyBlobKZGProofBatch(
+            [][]byte{blob[:], blob[:]},
+            [][]byte{commitment[:]},
+            [][]byte{proof[:], proof[:]},
+        )
+        require.ErrorContains(t, "number of blobs (2), commitments (1), and proofs (2) must match", err)
+    })
+
+    t.Run("invalid commitment should fail", func(t *testing.T) {
+        blob := random.GetRandBlob(123)
+        _, proof, err := GenerateCommitmentAndProof(blob)
+        require.NoError(t, err)
+
+        // Use a different blob's commitment (mismatch)
+        differentBlob := random.GetRandBlob(456)
+        wrongCommitment, _, err := GenerateCommitmentAndProof(differentBlob)
+        require.NoError(t, err)
+
+        blobs := [][]byte{blob[:]}
+        commitments := [][]byte{wrongCommitment[:]}
+        proofs := [][]byte{proof[:]}
+
+        err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
+        // Single blob optimization uses different error message
+        require.ErrorContains(t, "can't verify opening proof", err)
+    })
+
+    t.Run("invalid proof should fail", func(t *testing.T) {
+        blob := random.GetRandBlob(123)
+        commitment, _, err := GenerateCommitmentAndProof(blob)
+        require.NoError(t, err)
+
+        // Use wrong proof
+        invalidProof := make([]byte, 48) // All zeros
+
+        blobs := [][]byte{blob[:]}
+        commitments := [][]byte{commitment[:]}
+        proofs := [][]byte{invalidProof}
+
+        err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
+        require.ErrorContains(t, "short buffer", err)
+    })
+
+    t.Run("mixed valid and invalid proofs should fail", func(t *testing.T) {
+        // First blob - valid
+        blob1 := random.GetRandBlob(123)
+        commitment1, proof1, err := GenerateCommitmentAndProof(blob1)
+        require.NoError(t, err)
+
+        // Second blob - invalid proof
+        blob2 := random.GetRandBlob(456)
+        commitment2, _, err := GenerateCommitmentAndProof(blob2)
+        require.NoError(t, err)
+        invalidProof := make([]byte, 48) // All zeros
+
+        blobs := [][]byte{blob1[:], blob2[:]}
+        commitments := [][]byte{commitment1[:], commitment2[:]}
+        proofs := [][]byte{proof1[:], invalidProof}
+
+        err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
+        require.ErrorContains(t, "batch verification", err)
+    })
+
+    t.Run("batch KZG proof verification failed", func(t *testing.T) {
+        // Create multiple blobs with mismatched commitments and proofs to trigger batch verification failure
+        blob1 := random.GetRandBlob(123)
+        blob2 := random.GetRandBlob(456)
+
+        // Generate valid proof for blob1
+        commitment1, proof1, err := GenerateCommitmentAndProof(blob1)
+        require.NoError(t, err)
+
+        // Generate valid proof for blob2 but use wrong commitment (from blob1)
+        _, proof2, err := GenerateCommitmentAndProof(blob2)
+        require.NoError(t, err)
+
+        // Use blob2 data with blob1's commitment and blob2's proof - this should cause batch verification to fail
+        blobs := [][]byte{blob1[:], blob2[:]}
+        commitments := [][]byte{commitment1[:], commitment1[:]} // Wrong commitment for blob2
+        proofs := [][]byte{proof1[:], proof2[:]}
+
+        err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
+        require.ErrorContains(t, "batch KZG proof verification failed", err)
+    })
+}
+
+func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) {
+    // Initialize KZG for testing
+    require.NoError(t, Start())
+
+    t.Run("valid single blob cell verification", func(t *testing.T) {
+        numberOfColumns := uint64(128)
+
+        // Generate blob and commitment
+        randBlob := random.GetRandBlob(123)
+        var blob Blob
+        copy(blob[:], randBlob[:])
+        commitment, err := BlobToKZGCommitment(&blob)
+        require.NoError(t, err)
+
+        // Compute cells and proofs
+        cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob)
+        require.NoError(t, err)
+
+        // Create flattened cell proofs (like execution client format)
+        cellProofs := make([][]byte, numberOfColumns)
+        for i := range numberOfColumns {
+            cellProofs[i] = cellsAndProofs.Proofs[i][:]
+        }
+
+        blobs := [][]byte{blob[:]}
+        commitments := [][]byte{commitment[:]}
+
+        err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
+        require.NoError(t, err)
+    })
+
+    t.Run("valid multiple blob cell verification", func(t *testing.T) {
+        numberOfColumns := uint64(128)
+        blobCount := 2
+
+        blobs := make([][]byte, blobCount)
+        commitments := make([][]byte, blobCount)
+        var allCellProofs [][]byte
+
+        for i := range blobCount {
+            // Generate blob and commitment
+            randBlob := random.GetRandBlob(int64(i))
+            var blob Blob
+            copy(blob[:], randBlob[:])
+            commitment, err := BlobToKZGCommitment(&blob)
+            require.NoError(t, err)
+
+            // Compute cells and proofs
+            cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob)
+            require.NoError(t, err)
+
+            blobs[i] = blob[:]
+            commitments[i] = commitment[:]
+
+            // Add cell proofs for this blob
+            for j := range numberOfColumns {
+                allCellProofs = append(allCellProofs, cellsAndProofs.Proofs[j][:])
+            }
+        }
+
+        err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, allCellProofs, numberOfColumns)
+        require.NoError(t, err)
+    })
+
+    t.Run("empty inputs should pass", func(t *testing.T) {
+        err := VerifyCellKZGProofBatchFromBlobData([][]byte{}, [][]byte{}, [][]byte{}, 128)
+        require.NoError(t, err)
+    })
+
+    t.Run("mismatched blob and commitment count", func(t *testing.T) {
+        randBlob := random.GetRandBlob(123)
+        var blob Blob
+        copy(blob[:], randBlob[:])
+
+        err := VerifyCellKZGProofBatchFromBlobData(
+            [][]byte{blob[:]},
+            [][]byte{}, // Empty commitments
+            [][]byte{},
+            128,
+        )
+        require.ErrorContains(t, "expected 128 cell proofs", err)
+    })
+
+    t.Run("wrong cell proof count", func(t *testing.T) {
+        numberOfColumns := uint64(128)
+
+        randBlob := random.GetRandBlob(123)
+        var blob Blob
+        copy(blob[:], randBlob[:])
+        commitment, err := BlobToKZGCommitment(&blob)
+        require.NoError(t, err)
+
+        blobs := [][]byte{blob[:]}
+        commitments := [][]byte{commitment[:]}
+
+        // Wrong number of cell proofs - should be 128 for 1 blob, but provide 10
+        wrongCellProofs := make([][]byte, 10)
+
+        err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, wrongCellProofs, numberOfColumns)
+        require.ErrorContains(t, "expected 128 cell proofs, got 10", err)
+    })
+
+    t.Run("invalid cell proofs should fail", func(t *testing.T) {
+        numberOfColumns := uint64(128)
+
+        randBlob := random.GetRandBlob(123)
+        var blob Blob
+        copy(blob[:], randBlob[:])
+        commitment, err := BlobToKZGCommitment(&blob)
+        require.NoError(t, err)
+
+        blobs := [][]byte{blob[:]}
+        commitments := [][]byte{commitment[:]}
+
+        // Create invalid cell proofs (all zeros)
+        invalidCellProofs := make([][]byte, numberOfColumns)
+        for i := range numberOfColumns {
+            invalidCellProofs[i] = make([]byte, 48) // All zeros
+        }
+
+        err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, invalidCellProofs, numberOfColumns)
+        require.ErrorContains(t, "cell batch verification", err)
+    })
+
+    t.Run("mismatched commitment should fail", func(t *testing.T) {
+        numberOfColumns := uint64(128)
+
+        // Generate blob and correct cell proofs
+        randBlob := random.GetRandBlob(123)
+        var blob Blob
+        copy(blob[:], randBlob[:])
+        cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob)
+        require.NoError(t, err)
+
+        // Generate wrong commitment from different blob
+        randBlob2 := random.GetRandBlob(456)
+        var differentBlob Blob
+        copy(differentBlob[:], randBlob2[:])
+        wrongCommitment, err := BlobToKZGCommitment(&differentBlob)
+        require.NoError(t, err)
+
+        cellProofs := make([][]byte, numberOfColumns)
+        for i := range numberOfColumns {
+            cellProofs[i] = cellsAndProofs.Proofs[i][:]
+        }
+
+        blobs := [][]byte{blob[:]}
+        commitments := [][]byte{wrongCommitment[:]}
+
+        err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
+        require.ErrorContains(t, "cell KZG proof batch verification failed", err)
+    })
+
+    t.Run("invalid blob data that should cause ComputeCells to fail", func(t *testing.T) {
+        numberOfColumns := uint64(128)
+
+        // Create invalid blob (not properly formatted)
+        invalidBlobData := make([]byte, 10) // Too short
+        commitment := make([]byte, 48)      // Dummy commitment
+        cellProofs := make([][]byte, numberOfColumns)
+        for i := range numberOfColumns {
+            cellProofs[i] = make([]byte, 48)
+        }
+
+        blobs := [][]byte{invalidBlobData}
+        commitments := [][]byte{commitment}
+
+        err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
+        require.NotNil(t, err)
+        require.ErrorContains(t, "blobs len (10) differs from expected (131072)", err)
+    })
+
+    t.Run("invalid commitment size should fail", func(t *testing.T) {
+        numberOfColumns := uint64(128)
+
+        randBlob := random.GetRandBlob(123)
+        var blob Blob
+        copy(blob[:], randBlob[:])
+
+        // Create invalid commitment (wrong size)
+        invalidCommitment := make([]byte, 32) // Should be 48 bytes
+        cellProofs := make([][]byte, numberOfColumns)
+        for i := range numberOfColumns {
+            cellProofs[i] = make([]byte, 48)
+        }
+
+        blobs := [][]byte{blob[:]}
+        commitments := [][]byte{invalidCommitment}
+
+        err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
+        require.ErrorContains(t, "commitments len (32) differs from expected (48)", err)
+    })
+
+    t.Run("invalid cell proof size should fail", func(t *testing.T) {
+        numberOfColumns := uint64(128)
+
+        randBlob := random.GetRandBlob(123)
+        var blob Blob
+        copy(blob[:], randBlob[:])
+        commitment, err := BlobToKZGCommitment(&blob)
+        require.NoError(t, err)
+
+        // Create invalid cell proofs (wrong size)
+        invalidCellProofs := make([][]byte, numberOfColumns)
+        for i := range numberOfColumns {
+            if i == 0 {
+                invalidCellProofs[i] = make([]byte, 32) // Wrong size - should be 48
+            } else {
+                invalidCellProofs[i] = make([]byte, 48)
+            }
+        }
+
+        blobs := [][]byte{blob[:]}
+        commitments := [][]byte{commitment[:]}
+
+        err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, invalidCellProofs, numberOfColumns)
+        require.ErrorContains(t, "proofs len (32) differs from expected (48)", err)
+    })
+
+    t.Run("multiple blobs with mixed invalid commitments", func(t *testing.T) {
+        numberOfColumns := uint64(128)
+        blobCount := 2
+
+        blobs := make([][]byte, blobCount)
+        commitments := make([][]byte, blobCount)
+        var allCellProofs [][]byte
+
+        // First blob - valid
+        randBlob1 := random.GetRandBlob(123)
+        var blob1 Blob
+        copy(blob1[:], randBlob1[:])
+        commitment1, err := BlobToKZGCommitment(&blob1)
+        require.NoError(t, err)
+        blobs[0] = blob1[:]
+        commitments[0] = commitment1[:]
+
+        // Second blob - use invalid commitment size
+        randBlob2 := random.GetRandBlob(456)
+        var blob2 Blob
+        copy(blob2[:], randBlob2[:])
+        blobs[1] = blob2[:]
+        commitments[1] = make([]byte, 32) // Wrong size
+
+        // Add cell proofs for both blobs
+        for i := 0; i < blobCount; i++ {
+            for j := uint64(0); j < numberOfColumns; j++ {
+                allCellProofs = append(allCellProofs, make([]byte, 48))
+            }
+        }
+
+        err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, allCellProofs, numberOfColumns)
+        require.ErrorContains(t, "commitments len (32) differs from expected (48)", err)
+    })
+
+    t.Run("multiple blobs with mixed invalid cell proof sizes", func(t *testing.T) {
+        numberOfColumns := uint64(128)
+        blobCount := 2
+
+        blobs := make([][]byte, blobCount)
+        commitments := make([][]byte, blobCount)
+        var allCellProofs [][]byte
+
+        for i := 0; i < blobCount; i++ {
+            randBlob := random.GetRandBlob(int64(i))
+            var blob Blob
+            copy(blob[:], randBlob[:])
+            commitment, err := BlobToKZGCommitment(&blob)
+            require.NoError(t, err)
+
+            blobs[i] = blob[:]
+            commitments[i] = commitment[:]
+
+            // Add cell proofs - make some invalid in the second blob
+            for j := uint64(0); j < numberOfColumns; j++ {
+                if i == 1 && j == 64 {
+                    // Invalid proof size in middle of second blob's proofs
+                    allCellProofs = append(allCellProofs, make([]byte, 20))
+                } else {
+                    allCellProofs = append(allCellProofs, make([]byte, 48))
+                }
+            }
+        }
+
+        err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, allCellProofs, numberOfColumns)
+        require.ErrorContains(t, "proofs len (20) differs from expected (48)", err)
+    })
+}
@@ -159,7 +159,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
     }
 
     // Fill in missing blocks
-    if err := s.fillInForkChoiceMissingBlocks(ctx, blks[0], preState.CurrentJustifiedCheckpoint(), preState.FinalizedCheckpoint()); err != nil {
+    if err := s.fillInForkChoiceMissingBlocks(ctx, blks[0], preState.FinalizedCheckpoint(), preState.CurrentJustifiedCheckpoint()); err != nil {
         return errors.Wrap(err, "could not fill in missing blocks to forkchoice")
     }
 
@@ -240,9 +240,10 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
             }
         }
 
-        if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), b); err != nil {
-            return errors.Wrapf(err, "could not validate sidecar availability at slot %d", b.Block().Slot())
+        if err := s.areSidecarsAvailable(ctx, avs, b); err != nil {
+            return errors.Wrapf(err, "could not validate sidecar availability for block %#x at slot %d", b.Root(), b.Block().Slot())
         }
 
         args := &forkchoicetypes.BlockAndCheckpoints{Block: b,
             JustifiedCheckpoint: jCheckpoints[i],
             FinalizedCheckpoint: fCheckpoints[i]}
@@ -308,6 +309,30 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
     return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
 }
 
+func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.AvailabilityStore, roBlock consensusblocks.ROBlock) error {
+    blockVersion := roBlock.Version()
+    block := roBlock.Block()
+    slot := block.Slot()
+
+    if blockVersion >= version.Fulu {
+        if err := s.areDataColumnsAvailable(ctx, roBlock.Root(), block); err != nil {
+            return errors.Wrapf(err, "are data columns available for block %#x with slot %d", roBlock.Root(), slot)
+        }
+
+        return nil
+    }
+
+    if blockVersion >= version.Deneb {
+        if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), roBlock); err != nil {
+            return errors.Wrapf(err, "could not validate sidecar availability at slot %d", slot)
+        }
+
+        return nil
+    }
+
+    return nil
+}
+
 func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.BeaconState) error {
     e := coreTime.CurrentEpoch(st)
     if err := helpers.UpdateCommitteeCache(ctx, st, e); err != nil {
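The new `areSidecarsAvailable` dispatches on the block's fork version, newest first, which relies on the version constants being monotonically ordered (Deneb before Fulu). A reduced sketch of that assumption, with `blockVersion` as in the function above:

```go
// Reduced sketch of the dispatch above; correctness depends on
// version.Deneb < version.Fulu in the version package.
switch {
case blockVersion >= version.Fulu:
	// PeerDAS era: availability means enough data-column sidecars are stored.
case blockVersion >= version.Deneb:
	// Blob era: availability is delegated to the das.AvailabilityStore.
default:
	// Pre-Deneb blocks carry no sidecars, so there is nothing to check.
}
```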
@@ -584,7 +609,7 @@ func (s *Service) runLateBlockTasks() {
 // It returns a map where each key represents a missing BlobSidecar index.
 // An empty map means we have all indices; a non-empty map can be used to compare incoming
 // BlobSidecars against the set of known missing sidecars.
-func missingBlobIndices(bs *filesystem.BlobStorage, root [fieldparams.RootLength]byte, expected [][]byte, slot primitives.Slot) (map[uint64]bool, error) {
+func missingBlobIndices(store *filesystem.BlobStorage, root [fieldparams.RootLength]byte, expected [][]byte, slot primitives.Slot) (map[uint64]bool, error) {
     maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
     if len(expected) == 0 {
         return nil, nil
@@ -592,7 +617,7 @@ func missingBlobIndices(bs *filesystem.BlobStorage, root [fieldparams.RootLength
     if len(expected) > maxBlobsPerBlock {
         return nil, errMaxBlobsExceeded
     }
-    indices := bs.Summary(root)
+    indices := store.Summary(root)
     missing := make(map[uint64]bool, len(expected))
     for i := range expected {
         if len(expected[i]) > 0 && !indices.HasIndex(uint64(i)) {
@@ -607,7 +632,7 @@ func missingBlobIndices(bs *filesystem.BlobStorage, root [fieldparams.RootLength
 // It returns a map where each key represents a missing DataColumnSidecar index.
 // An empty map means we have all indices; a non-empty map can be used to compare incoming
 // DataColumns against the set of known missing sidecars.
-func missingDataColumnIndices(bs *filesystem.DataColumnStorage, root [fieldparams.RootLength]byte, expected map[uint64]bool) (map[uint64]bool, error) {
+func missingDataColumnIndices(store *filesystem.DataColumnStorage, root [fieldparams.RootLength]byte, expected map[uint64]bool) (map[uint64]bool, error) {
     if len(expected) == 0 {
         return nil, nil
     }
@@ -619,7 +644,7 @@ func missingDataColumnIndices(bs *filesystem.DataColumnStorage, root [fieldparam
     }
 
     // Get a summary of the data columns stored in the database.
-    summary := bs.Summary(root)
+    summary := store.Summary(root)
 
     // Check all expected data columns against the summary.
     missing := make(map[uint64]bool)
@@ -717,7 +742,7 @@ func (s *Service) areDataColumnsAvailable(
     summary := s.dataColumnStorage.Summary(root)
     storedDataColumnsCount := summary.Count()
 
-    minimumColumnCountToReconstruct := peerdas.MinimumColumnsCountToReconstruct()
+    minimumColumnCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
 
     // As soon as we have enough data column sidecars, we can reconstruct the missing ones.
     // We don't need to wait for the rest of the data columns to declare the block as available.
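The renamed `MinimumColumnCountToReconstruct` feeds the early exit the comment above describes: once enough columns are stored, the rest can be recomputed locally. Roughly, with names mirroring the diff and the surrounding wiring assumed:

```go
// Hedged sketch: with at least the minimum column count in storage, the
// missing columns are recoverable by reconstruction, so the block can be
// declared available without waiting for more sidecars.
if storedDataColumnsCount >= minimumColumnCountToReconstruct {
	return nil
}
```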
@@ -820,7 +845,7 @@ func (s *Service) areDataColumnsAvailable(
             missingIndices = uint64MapToSortedSlice(missingMap)
         }
 
-        return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing %v", block.Slot(), root, missingIndices)
+        return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing: %v", block.Slot(), root, missingIndices)
         }
     }
 }
@@ -30,6 +30,10 @@ import (
     "github.com/sirupsen/logrus"
 )
 
+// ErrInvalidCheckpointArgs may be returned when the finalized checkpoint has an epoch greater than the justified checkpoint epoch.
+// If you are seeing this error, make sure you haven't mixed up the order of the arguments in the method you are calling.
+var ErrInvalidCheckpointArgs = errors.New("finalized checkpoint cannot be greater than justified checkpoint")
+
 // CurrentSlot returns the current slot based on time.
 func (s *Service) CurrentSlot() primitives.Slot {
     return slots.CurrentSlot(s.genesisTime)
@@ -454,6 +458,9 @@ func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot primitives.
 // This is useful for block tree visualizer and additional vote accounting.
 func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock,
     fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
+    if fCheckpoint.Epoch > jCheckpoint.Epoch {
+        return ErrInvalidCheckpointArgs
+    }
     pendingNodes := make([]*forkchoicetypes.BlockAndCheckpoints, 0)
 
     // Fork choice only matters from last finalized slot.
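The new `ErrInvalidCheckpointArgs` guard exists precisely because of the swapped-argument bug fixed in `onBlockBatch` above. A hedged sketch of the invariant it enforces (checkpoint values are illustrative only, not taken from the diff):

```go
// Finalized epoch can never legitimately exceed justified epoch.
finalized := &ethpb.Checkpoint{Epoch: 2, Root: root[:]}
justified := &ethpb.Checkpoint{Epoch: 3, Root: root[:]}

// Correct order: finalized checkpoint first, justified second.
err := s.fillInForkChoiceMissingBlocks(ctx, block, finalized, justified)

// A call with the arguments swapped now fails fast instead of silently
// feeding forkchoice inconsistent checkpoints.
err = s.fillInForkChoiceMissingBlocks(ctx, block, justified, finalized)
// errors.Is(err, ErrInvalidCheckpointArgs) == true
```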
@@ -375,6 +375,81 @@ func TestFillForkChoiceMissingBlocks_FinalizedSibling(t *testing.T) {
     require.Equal(t, ErrNotDescendantOfFinalized.Error(), err.Error())
 }
 
+func TestFillForkChoiceMissingBlocks_ErrorCases(t *testing.T) {
+    tests := []struct {
+        name           string
+        finalizedEpoch primitives.Epoch
+        justifiedEpoch primitives.Epoch
+        expectedError  error
+    }{
+        {
+            name:           "finalized epoch greater than justified epoch",
+            finalizedEpoch: 5,
+            justifiedEpoch: 3,
+            expectedError:  ErrInvalidCheckpointArgs,
+        },
+        {
+            name:           "valid case - finalized equal to justified",
+            finalizedEpoch: 3,
+            justifiedEpoch: 3,
+            expectedError:  nil,
+        },
+        {
+            name:           "valid case - finalized less than justified",
+            finalizedEpoch: 2,
+            justifiedEpoch: 3,
+            expectedError:  nil,
+        },
+    }
+
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            service, tr := minimalTestService(t)
+            ctx, beaconDB := tr.ctx, tr.db
+
+            st, _ := util.DeterministicGenesisState(t, 64)
+            require.NoError(t, service.saveGenesisData(ctx, st))
+
+            // Create a simple block for testing
+            blk := util.NewBeaconBlock()
+            blk.Block.Slot = 10
+            blk.Block.ParentRoot = service.originBlockRoot[:]
+            wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
+            require.NoError(t, err)
+            util.SaveBlock(t, ctx, beaconDB, blk)
+
+            // Create checkpoints with test case epochs
+            finalizedCheckpoint := &ethpb.Checkpoint{
+                Epoch: tt.finalizedEpoch,
+                Root:  service.originBlockRoot[:],
+            }
+            justifiedCheckpoint := &ethpb.Checkpoint{
+                Epoch: tt.justifiedEpoch,
+                Root:  service.originBlockRoot[:],
+            }
+
+            // Set up forkchoice store to avoid other errors
+            fcp := &ethpb.Checkpoint{Epoch: 0, Root: service.originBlockRoot[:]}
+            state, blkRoot, err := prepareForkchoiceState(ctx, 0, service.originBlockRoot, service.originBlockRoot, [32]byte{}, fcp, fcp)
+            require.NoError(t, err)
+            require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
+
+            err = service.fillInForkChoiceMissingBlocks(
+                t.Context(), wsb, finalizedCheckpoint, justifiedCheckpoint)
+
+            if tt.expectedError != nil {
+                require.ErrorIs(t, err, tt.expectedError)
+            } else {
+                // For valid cases, we might get other errors (like block not being descendant of finalized)
+                // but we shouldn't get the checkpoint validation error
+                if err != nil && errors.Is(err, tt.expectedError) {
+                    t.Errorf("Unexpected checkpoint validation error: %v", err)
+                }
+            }
+        })
+    }
+}
+
 // blockTree1 constructs the following tree:
 //
 //    /- B1
@@ -2132,13 +2207,13 @@ func TestNoViableHead_Reboot(t *testing.T) {
 
     // Forkchoice has the genesisRoot loaded at startup
     require.Equal(t, genesisRoot, service.ensureRootNotZeros(service.cfg.ForkChoiceStore.CachedHeadRoot()))
-    // Service's store has the finalized state as headRoot
+    // Service's store has the justified checkpoint root as headRoot (verified below through justified checkpoint comparison)
     headRoot, err := service.HeadRoot(ctx)
     require.NoError(t, err)
-    require.Equal(t, genesisRoot, bytesutil.ToBytes32(headRoot))
+    require.NotEqual(t, bytesutil.ToBytes32(params.BeaconConfig().ZeroHash[:]), bytesutil.ToBytes32(headRoot)) // Ensure head is not zero
     optimistic, err := service.IsOptimistic(ctx)
     require.NoError(t, err)
-    require.Equal(t, false, optimistic)
+    require.Equal(t, true, optimistic) // Head is now optimistic when starting from justified checkpoint
 
     // Check that the node's justified checkpoint does not agree with the
     // last valid state's justified checkpoint
@@ -2889,7 +2964,7 @@ func TestIsDataAvailable(t *testing.T) {
     })
 
     t.Run("Fulu - more than half of the columns in custody", func(t *testing.T) {
-        minimumColumnsCountToReconstruct := peerdas.MinimumColumnsCountToReconstruct()
+        minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
         indices := make([]uint64, 0, minimumColumnsCountToReconstruct)
         for i := range minimumColumnsCountToReconstruct {
             indices = append(indices, i)
@@ -2974,7 +3049,7 @@ func TestIsDataAvailable(t *testing.T) {
 
         startWaiting := make(chan bool)
 
-        minimumColumnsCountToReconstruct := peerdas.MinimumColumnsCountToReconstruct()
+        minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
         indices := make([]uint64, 0, minimumColumnsCountToReconstruct-missingColumns)
 
         for i := range minimumColumnsCountToReconstruct - missingColumns {
@@ -17,7 +17,7 @@ func (s *Service) ReceiveDataColumns(dataColumnSidecars []blocks.VerifiedRODataC
 // ReceiveDataColumn receives a single data column.
 func (s *Service) ReceiveDataColumn(dataColumnSidecar blocks.VerifiedRODataColumn) error {
     if err := s.dataColumnStorage.Save([]blocks.VerifiedRODataColumn{dataColumnSidecar}); err != nil {
-        return errors.Wrap(err, "save data column sidecars")
+        return errors.Wrap(err, "save data column sidecar")
     }
 
     return nil
@@ -20,7 +20,7 @@ func (s *Service) setupForkchoice(st state.BeaconState) error {
         return errors.Wrap(err, "could not set up forkchoice checkpoints")
     }
     if err := s.setupForkchoiceTree(st); err != nil {
-        return errors.Wrap(err, "could not set up forkchoice root")
+        return errors.Wrap(err, "could not set up forkchoice tree")
     }
     if err := s.initializeHead(s.ctx, st); err != nil {
         return errors.Wrap(err, "could not initialize head from db")
@@ -30,24 +30,24 @@ func (s *Service) setupForkchoice(st state.BeaconState) error {
 
 func (s *Service) startupHeadRoot() [32]byte {
     headStr := features.Get().ForceHead
-    cp := s.FinalizedCheckpt()
-    fRoot := s.ensureRootNotZeros([32]byte(cp.Root))
+    jp := s.CurrentJustifiedCheckpt()
+    jRoot := s.ensureRootNotZeros([32]byte(jp.Root))
     if headStr == "" {
-        return fRoot
+        return jRoot
     }
     if headStr == "head" {
         root, err := s.cfg.BeaconDB.HeadBlockRoot()
         if err != nil {
-            log.WithError(err).Error("Could not get head block root, starting with finalized block as head")
-            return fRoot
+            log.WithError(err).Error("Could not get head block root, starting with justified block as head")
+            return jRoot
         }
         log.Infof("Using Head root of %#x", root)
         return root
     }
     root, err := bytesutil.DecodeHexWithLength(headStr, 32)
     if err != nil {
-        log.WithError(err).Error("Could not parse head root, starting with finalized block as head")
-        return fRoot
+        log.WithError(err).Error("Could not parse head root, starting with justified block as head")
+        return jRoot
     }
     return [32]byte(root)
 }
@@ -78,7 +78,10 @@ func (s *Service) setupForkchoiceTree(st state.BeaconState) error {
     }
     s.cfg.ForkChoiceStore.Lock()
     defer s.cfg.ForkChoiceStore.Unlock()
-    return s.cfg.ForkChoiceStore.InsertChain(s.ctx, chain)
+    if err := s.cfg.ForkChoiceStore.InsertChain(s.ctx, chain); err != nil {
+        return errors.Wrap(err, "could not insert forkchoice chain")
+    }
+    return s.cfg.ForkChoiceStore.InsertNode(s.ctx, st, chain[0].Block)
 }
 
 func (s *Service) buildForkchoiceChain(ctx context.Context, head interfaces.ReadOnlySignedBeaconBlock) ([]*forkchoicetypes.BlockAndCheckpoints, error) {
@@ -32,7 +32,7 @@ func Test_startupHeadRoot(t *testing.T) {
         })
         defer resetCfg()
         require.Equal(t, service.startupHeadRoot(), gr)
-        require.LogsContain(t, hook, "Could not get head block root, starting with finalized block as head")
+        require.LogsContain(t, hook, "Could not get head block root, starting with justified block as head")
     })
 
     st, _ := util.DeterministicGenesisState(t, 64)
@@ -124,5 +124,5 @@ func Test_setupForkchoiceTree_Head(t *testing.T) {
     require.NotEqual(t, fRoot, root)
     require.Equal(t, root, service.startupHeadRoot())
     require.NoError(t, service.setupForkchoiceTree(st))
-    require.Equal(t, 2, service.cfg.ForkChoiceStore.NodeCount())
+    require.Equal(t, 3, service.cfg.ForkChoiceStore.NodeCount())
 }
@@ -89,7 +89,7 @@ func (mb *mockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
     return nil
 }
 
-func (mb *mockBroadcaster) BroadcastDataColumn(_ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar) error {
+func (mb *mockBroadcaster) BroadcastDataColumnSidecar(_ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar) error {
     mb.broadcastCalled = true
     return nil
 }
60 beacon-chain/cache/sync_committee.go (vendored)
@@ -67,6 +67,30 @@ func (s *SyncCommitteeCache) Clear() {
     s.cache = cache.NewFIFO(keyFn)
 }
 
+// CurrentPeriodPositions returns current period positions of validator indices with respect with
+// sync committee. If any input validator index has no assignment, an empty list will be returned
+// for that validator. If the input root does not exist in cache, `ErrNonExistingSyncCommitteeKey` is returned.
+// Manual checking of state for index position in state is recommended when `ErrNonExistingSyncCommitteeKey` is returned.
+func (s *SyncCommitteeCache) CurrentPeriodPositions(root [32]byte, indices []primitives.ValidatorIndex) ([][]primitives.CommitteeIndex, error) {
+    s.lock.RLock()
+    defer s.lock.RUnlock()
+
+    pos, err := s.positionsInCommittee(root, indices)
+    if err != nil {
+        return nil, err
+    }
+    result := make([][]primitives.CommitteeIndex, len(pos))
+    for i, p := range pos {
+        if p == nil {
+            result[i] = []primitives.CommitteeIndex{}
+        } else {
+            result[i] = p.currentPeriod
+        }
+    }
+
+    return result, nil
+}
+
 // CurrentPeriodIndexPosition returns current period index position of a validator index with respect with
 // sync committee. If the input validator index has no assignment, an empty list will be returned.
 // If the input root does not exist in cache, `ErrNonExistingSyncCommitteeKey` is returned.
@@ -104,11 +128,7 @@ func (s *SyncCommitteeCache) NextPeriodIndexPosition(root [32]byte, valIdx primi
     return pos.nextPeriod, nil
 }
 
-// Helper function for `CurrentPeriodIndexPosition` and `NextPeriodIndexPosition` to return a mapping
-// of validator index to its index(s) position in the sync committee.
-func (s *SyncCommitteeCache) idxPositionInCommittee(
-    root [32]byte, valIdx primitives.ValidatorIndex,
-) (*positionInCommittee, error) {
+func (s *SyncCommitteeCache) positionsInCommittee(root [32]byte, indices []primitives.ValidatorIndex) ([]*positionInCommittee, error) {
     obj, exists, err := s.cache.GetByKey(key(root))
     if err != nil {
         return nil, err
@@ -121,13 +141,33 @@ func (s *SyncCommitteeCache) idxPositionInCommittee(
     if !ok {
         return nil, errNotSyncCommitteeIndexPosition
     }
-    idxInCommittee, ok := item.vIndexToPositionMap[valIdx]
-    if !ok {
-        SyncCommitteeCacheMiss.Inc()
+    result := make([]*positionInCommittee, len(indices))
+    for i, idx := range indices {
+        idxInCommittee, ok := item.vIndexToPositionMap[idx]
+        if ok {
+            SyncCommitteeCacheHit.Inc()
+            result[i] = idxInCommittee
+        } else {
+            SyncCommitteeCacheMiss.Inc()
+            result[i] = nil
+        }
+    }
+    return result, nil
+}
+
+// Helper function for `CurrentPeriodIndexPosition` and `NextPeriodIndexPosition` to return a mapping
+// of validator index to its index(s) position in the sync committee.
+func (s *SyncCommitteeCache) idxPositionInCommittee(
+    root [32]byte, valIdx primitives.ValidatorIndex,
+) (*positionInCommittee, error) {
+    positions, err := s.positionsInCommittee(root, []primitives.ValidatorIndex{valIdx})
+    if err != nil {
         return nil, err
     }
-    SyncCommitteeCacheHit.Inc()
-    return idxInCommittee, nil
+    if len(positions) == 0 {
+        return nil, nil
+    }
+    return positions[0], nil
 }
 
 // UpdatePositionsInCommittee updates caching of validators position in sync committee in respect to
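`CurrentPeriodPositions` gives callers a batch alternative to looping over `CurrentPeriodIndexPosition`, taking the read lock once for the whole slice. A hedged usage sketch, with `cache`, `root`, and `indices` assumed from the caller's context:

```go
positions, err := cache.CurrentPeriodPositions(root, indices)
if err != nil {
	// Per the doc comment, ErrNonExistingSyncCommitteeKey means the root is
	// not cached and positions should be read from the state instead.
	return err
}
for i, pos := range positions {
	if len(pos) == 0 {
		continue // indices[i] has no assignment in the current period
	}
	// pos holds the validator's committee index positions for this period.
	_ = i
}
```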
@@ -16,6 +16,11 @@ func NewSyncCommittee() *FakeSyncCommitteeCache {
     return &FakeSyncCommitteeCache{}
 }
 
+// CurrentPeriodPositions -- fake
+func (s *FakeSyncCommitteeCache) CurrentPeriodPositions(root [32]byte, indices []primitives.ValidatorIndex) ([][]primitives.CommitteeIndex, error) {
+    return nil, nil
+}
+
 // CurrentEpochIndexPosition -- fake.
 func (s *FakeSyncCommitteeCache) CurrentPeriodIndexPosition(root [32]byte, valIdx primitives.ValidatorIndex) ([]primitives.CommitteeIndex, error) {
     return nil, nil
@@ -5,6 +5,7 @@ import (
     "sort"
 
     "github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
+    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
     "github.com/OffchainLabs/prysm/v6/beacon-chain/state"
     "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
     "github.com/OffchainLabs/prysm/v6/container/slice"
@@ -39,11 +40,11 @@ func ProcessAttesterSlashings(
     ctx context.Context,
     beaconState state.BeaconState,
     slashings []ethpb.AttSlashing,
-    slashFunc slashValidatorFunc,
+    exitInfo *validators.ExitInfo,
 ) (state.BeaconState, error) {
     var err error
     for _, slashing := range slashings {
-        beaconState, err = ProcessAttesterSlashing(ctx, beaconState, slashing, slashFunc)
+        beaconState, err = ProcessAttesterSlashing(ctx, beaconState, slashing, exitInfo)
         if err != nil {
             return nil, err
         }
@@ -56,7 +57,7 @@ func ProcessAttesterSlashing(
     ctx context.Context,
     beaconState state.BeaconState,
     slashing ethpb.AttSlashing,
-    slashFunc slashValidatorFunc,
+    exitInfo *validators.ExitInfo,
 ) (state.BeaconState, error) {
     if err := VerifyAttesterSlashing(ctx, beaconState, slashing); err != nil {
         return nil, errors.Wrap(err, "could not verify attester slashing")
@@ -75,10 +76,9 @@ func ProcessAttesterSlashing(
             return nil, err
         }
         if helpers.IsSlashableValidator(val.ActivationEpoch(), val.WithdrawableEpoch(), val.Slashed(), currentEpoch) {
-            beaconState, err = slashFunc(ctx, beaconState, primitives.ValidatorIndex(validatorIndex))
+            beaconState, err = validators.SlashValidator(ctx, beaconState, primitives.ValidatorIndex(validatorIndex), exitInfo)
             if err != nil {
-                return nil, errors.Wrapf(err, "could not slash validator index %d",
-                    validatorIndex)
+                return nil, errors.Wrapf(err, "could not slash validator index %d", validatorIndex)
             }
             slashedAny = true
         }
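The signature change above replaces the injected `slashFunc` callback with a shared `*validators.ExitInfo`, so consecutive slashings in one block accumulate exit churn consistently. A minimal sketch of the new call pattern (package aliases as in the tests below; surrounding state handling assumed):

```go
// Build one ExitInfo per block body and thread it through all slashing
// processing so churn accounting is shared across operations.
exitInfo := validators.ExitInformation(beaconState)
beaconState, err := blocks.ProcessAttesterSlashings(ctx, beaconState, attSlashings, exitInfo)
if err != nil {
	return nil, err
}
// exitInfo.HighestExitEpoch now reflects the churn consumed above, as
// exercised by TestProcessAttesterSlashing_ExitEpochGetsUpdated further down.
```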
@@ -4,6 +4,7 @@ import (
     "testing"
 
     "github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
+    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
     "github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
     v "github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
     "github.com/OffchainLabs/prysm/v6/beacon-chain/state"
@@ -44,11 +45,10 @@ func TestProcessAttesterSlashings_DataNotSlashable(t *testing.T) {
                 Target: &ethpb.Checkpoint{Epoch: 1}},
         })}}
 
-    var registry []*ethpb.Validator
     currentSlot := primitives.Slot(0)
 
     beaconState, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
-        Validators: registry,
+        Validators: []*ethpb.Validator{{}},
         Slot:       currentSlot,
     })
     require.NoError(t, err)
@@ -62,16 +62,15 @@ func TestProcessAttesterSlashings_DataNotSlashable(t *testing.T) {
     for i, s := range b.Block.Body.AttesterSlashings {
         ss[i] = s
     }
-    _, err = blocks.ProcessAttesterSlashings(t.Context(), beaconState, ss, v.SlashValidator)
+    _, err = blocks.ProcessAttesterSlashings(t.Context(), beaconState, ss, v.ExitInformation(beaconState))
     assert.ErrorContains(t, "attestations are not slashable", err)
 }
 
 func TestProcessAttesterSlashings_IndexedAttestationFailedToVerify(t *testing.T) {
-    var registry []*ethpb.Validator
     currentSlot := primitives.Slot(0)
 
     beaconState, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
-        Validators: registry,
+        Validators: []*ethpb.Validator{{}},
         Slot:       currentSlot,
     })
     require.NoError(t, err)
@@ -101,7 +100,7 @@ func TestProcessAttesterSlashings_IndexedAttestationFailedToVerify(t *testing.T)
     for i, s := range b.Block.Body.AttesterSlashings {
         ss[i] = s
     }
-    _, err = blocks.ProcessAttesterSlashings(t.Context(), beaconState, ss, v.SlashValidator)
+    _, err = blocks.ProcessAttesterSlashings(t.Context(), beaconState, ss, v.ExitInformation(beaconState))
     assert.ErrorContains(t, "validator indices count exceeds MAX_VALIDATORS_PER_COMMITTEE", err)
 }
@@ -243,7 +242,7 @@ func TestProcessAttesterSlashings_AppliesCorrectStatus(t *testing.T) {
     currentSlot := 2 * params.BeaconConfig().SlotsPerEpoch
     require.NoError(t, tc.st.SetSlot(currentSlot))
 
-    newState, err := blocks.ProcessAttesterSlashings(t.Context(), tc.st, []ethpb.AttSlashing{tc.slashing}, v.SlashValidator)
+    newState, err := blocks.ProcessAttesterSlashings(t.Context(), tc.st, []ethpb.AttSlashing{tc.slashing}, v.ExitInformation(tc.st))
     require.NoError(t, err)
     newRegistry := newState.Validators()
@@ -265,3 +264,83 @@ func TestProcessAttesterSlashings_AppliesCorrectStatus(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessAttesterSlashing_ExitEpochGetsUpdated(t *testing.T) {
|
||||
st, keys := util.DeterministicGenesisStateElectra(t, 8)
|
||||
bal, err := helpers.TotalActiveBalance(st)
|
||||
require.NoError(t, err)
|
||||
perEpochChurn := helpers.ActivationExitChurnLimit(primitives.Gwei(bal))
|
||||
vals := st.Validators()
|
||||
|
||||
// We set the total effective balance of slashed validators
|
||||
// higher than the churn limit for a single epoch.
|
||||
vals[0].EffectiveBalance = uint64(perEpochChurn / 3)
|
||||
vals[1].EffectiveBalance = uint64(perEpochChurn / 3)
|
||||
vals[2].EffectiveBalance = uint64(perEpochChurn / 3)
|
||||
vals[3].EffectiveBalance = uint64(perEpochChurn / 3)
|
||||
require.NoError(t, st.SetValidators(vals))
|
||||
|
||||
sl1att1 := util.HydrateIndexedAttestationElectra(ðpb.IndexedAttestationElectra{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 1},
|
||||
},
|
||||
AttestingIndices: []uint64{0, 1},
|
||||
})
|
||||
sl1att2 := util.HydrateIndexedAttestationElectra(ðpb.IndexedAttestationElectra{
|
||||
AttestingIndices: []uint64{0, 1},
|
||||
})
|
||||
slashing1 := ðpb.AttesterSlashingElectra{
|
||||
Attestation_1: sl1att1,
|
||||
Attestation_2: sl1att2,
|
||||
}
|
||||
sl2att1 := util.HydrateIndexedAttestationElectra(ðpb.IndexedAttestationElectra{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 1},
|
||||
},
|
||||
AttestingIndices: []uint64{2, 3},
|
||||
})
|
||||
sl2att2 := util.HydrateIndexedAttestationElectra(ðpb.IndexedAttestationElectra{
|
||||
AttestingIndices: []uint64{2, 3},
|
||||
})
|
||||
slashing2 := ðpb.AttesterSlashingElectra{
|
||||
Attestation_1: sl2att1,
|
||||
Attestation_2: sl2att2,
|
||||
}
|
||||
|
||||
domain, err := signing.Domain(st.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, st.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
|
||||
signingRoot, err := signing.ComputeSigningRoot(sl1att1.GetData(), domain)
|
||||
assert.NoError(t, err, "Could not get signing root of beacon block header")
|
||||
sig0 := keys[0].Sign(signingRoot[:])
|
||||
sig1 := keys[1].Sign(signingRoot[:])
|
||||
aggregateSig := bls.AggregateSignatures([]bls.Signature{sig0, sig1})
|
||||
sl1att1.Signature = aggregateSig.Marshal()
|
||||
|
||||
signingRoot, err = signing.ComputeSigningRoot(sl1att2.GetData(), domain)
|
||||
assert.NoError(t, err, "Could not get signing root of beacon block header")
|
||||
sig0 = keys[0].Sign(signingRoot[:])
|
||||
sig1 = keys[1].Sign(signingRoot[:])
|
||||
aggregateSig = bls.AggregateSignatures([]bls.Signature{sig0, sig1})
|
||||
sl1att2.Signature = aggregateSig.Marshal()
|
||||
|
||||
signingRoot, err = signing.ComputeSigningRoot(sl2att1.GetData(), domain)
|
||||
assert.NoError(t, err, "Could not get signing root of beacon block header")
|
||||
sig0 = keys[2].Sign(signingRoot[:])
|
||||
sig1 = keys[3].Sign(signingRoot[:])
|
||||
aggregateSig = bls.AggregateSignatures([]bls.Signature{sig0, sig1})
|
||||
sl2att1.Signature = aggregateSig.Marshal()
|
||||
|
||||
signingRoot, err = signing.ComputeSigningRoot(sl2att2.GetData(), domain)
|
||||
assert.NoError(t, err, "Could not get signing root of beacon block header")
|
||||
sig0 = keys[2].Sign(signingRoot[:])
|
||||
sig1 = keys[3].Sign(signingRoot[:])
|
||||
aggregateSig = bls.AggregateSignatures([]bls.Signature{sig0, sig1})
|
||||
sl2att2.Signature = aggregateSig.Marshal()
|
||||
|
||||
exitInfo := v.ExitInformation(st)
|
||||
assert.Equal(t, primitives.Epoch(0), exitInfo.HighestExitEpoch)
|
||||
_, err = blocks.ProcessAttesterSlashings(t.Context(), st, []ethpb.AttSlashing{slashing1, slashing2}, exitInfo)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, primitives.Epoch(6), exitInfo.HighestExitEpoch)
|
||||
}
|
||||
|
||||
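The final assertion of 6 follows from the Electra exit-churn arithmetic. A minimal standalone sketch of the expected-value reasoning, assuming the mainnet MAX_SEED_LOOKAHEAD of 4 (this snippet is illustrative and not part of the test file):

package main

import "fmt"

func main() {
	const maxSeedLookahead = 4 // mainnet MAX_SEED_LOOKAHEAD (assumption)
	currentEpoch := uint64(0)
	earliest := currentEpoch + 1 + maxSeedLookahead // compute_activation_exit_epoch(0) = 5
	// The four slashed validators carry roughly 4/3 of one epoch's
	// activation-exit churn, so the balance churn overflows the earliest
	// exit epoch and the last exits land one epoch later.
	fmt.Println(earliest, earliest+1) // 5 6 -> HighestExitEpoch ends at 6
}
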
@@ -191,7 +191,7 @@ func TestFuzzProcessProposerSlashings_10000(t *testing.T) {
fuzzer.Fuzz(p)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
require.NoError(t, err)
r, err := ProcessProposerSlashings(ctx, s, []*ethpb.ProposerSlashing{p}, v.SlashValidator)
r, err := ProcessProposerSlashings(ctx, s, []*ethpb.ProposerSlashing{p}, v.ExitInformation(s))
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and slashing: %v", r, err, state, p)
}
@@ -224,7 +224,7 @@ func TestFuzzProcessAttesterSlashings_10000(t *testing.T) {
fuzzer.Fuzz(a)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
require.NoError(t, err)
r, err := ProcessAttesterSlashings(ctx, s, []ethpb.AttSlashing{a}, v.SlashValidator)
r, err := ProcessAttesterSlashings(ctx, s, []ethpb.AttSlashing{a}, v.ExitInformation(s))
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and slashing: %v", r, err, state, a)
}
@@ -334,7 +334,7 @@ func TestFuzzProcessVoluntaryExits_10000(t *testing.T) {
fuzzer.Fuzz(e)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
require.NoError(t, err)
r, err := ProcessVoluntaryExits(ctx, s, []*ethpb.SignedVoluntaryExit{e})
r, err := ProcessVoluntaryExits(ctx, s, []*ethpb.SignedVoluntaryExit{e}, v.ExitInformation(s))
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and exit: %v", r, err, state, e)
}
@@ -351,7 +351,7 @@ func TestFuzzProcessVoluntaryExitsNoVerify_10000(t *testing.T) {
fuzzer.Fuzz(e)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
require.NoError(t, err)
r, err := ProcessVoluntaryExits(t.Context(), s, []*ethpb.SignedVoluntaryExit{e})
r, err := ProcessVoluntaryExits(t.Context(), s, []*ethpb.SignedVoluntaryExit{e}, v.ExitInformation(s))
if err != nil && r != nil {
t.Fatalf("return value should be nil on err. found: %v on error: %v for state: %v and block: %v", r, err, state, e)
}

@@ -94,7 +94,7 @@ func TestProcessAttesterSlashings_RegressionSlashableIndices(t *testing.T) {
for i, s := range b.Block.Body.AttesterSlashings {
ss[i] = s
}
newState, err := blocks.ProcessAttesterSlashings(t.Context(), beaconState, ss, v.SlashValidator)
newState, err := blocks.ProcessAttesterSlashings(t.Context(), beaconState, ss, v.ExitInformation(beaconState))
require.NoError(t, err)
newRegistry := newState.Validators()
if !newRegistry[expectedSlashedVal].Slashed {

@@ -9,7 +9,6 @@ import (
v "github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
@@ -50,13 +49,12 @@ func ProcessVoluntaryExits(
ctx context.Context,
beaconState state.BeaconState,
exits []*ethpb.SignedVoluntaryExit,
exitInfo *v.ExitInfo,
) (state.BeaconState, error) {
// Avoid calculating the epoch churn if no exits exist.
if len(exits) == 0 {
return beaconState, nil
}
maxExitEpoch, churn := v.MaxExitEpochAndChurn(beaconState)
var exitEpoch primitives.Epoch
for idx, exit := range exits {
if exit == nil || exit.Exit == nil {
return nil, errors.New("nil voluntary exit in block body")
@@ -68,15 +66,8 @@ func ProcessVoluntaryExits(
if err := VerifyExitAndSignature(val, beaconState, exit); err != nil {
return nil, errors.Wrapf(err, "could not verify exit %d", idx)
}
beaconState, exitEpoch, err = v.InitiateValidatorExit(ctx, beaconState, exit.Exit.ValidatorIndex, maxExitEpoch, churn)
if err == nil {
if exitEpoch > maxExitEpoch {
maxExitEpoch = exitEpoch
churn = 1
} else if exitEpoch == maxExitEpoch {
churn++
}
} else if !errors.Is(err, v.ErrValidatorAlreadyExited) {
beaconState, err = v.InitiateValidatorExit(ctx, beaconState, exit.Exit.ValidatorIndex, exitInfo)
if err != nil && !errors.Is(err, v.ErrValidatorAlreadyExited) {
return nil, err
}
}

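The refactor above replaces the per-exit (maxExitEpoch, churn) bookkeeping with a shared *ExitInfo that InitiateValidatorExit mutates in place. A minimal sketch of the new calling convention, assuming the aliases used in this diff (v = core/validators, blocks = core/blocks) and their surrounding imports:

func processExitsSketch(ctx context.Context, st state.BeaconState, exits []*ethpb.SignedVoluntaryExit) (state.BeaconState, error) {
	exitInfo := v.ExitInformation(st) // one pass over the validator registry
	st, err := blocks.ProcessVoluntaryExits(ctx, st, exits, exitInfo)
	if err != nil {
		return nil, err
	}
	// exitInfo now reflects the exits just queued (HighestExitEpoch, Churn),
	// so the same pointer can be handed on to the slashing operations.
	return st, nil
}
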
@@ -7,6 +7,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v6/config/params"
@@ -46,7 +47,7 @@ func TestProcessVoluntaryExits_NotActiveLongEnoughToExit(t *testing.T) {
}

want := "validator has not been active long enough to exit"
_, err = blocks.ProcessVoluntaryExits(t.Context(), state, b.Block.Body.VoluntaryExits)
_, err = blocks.ProcessVoluntaryExits(t.Context(), state, b.Block.Body.VoluntaryExits, validators.ExitInformation(state))
assert.ErrorContains(t, want, err)
}

@@ -76,7 +77,7 @@ func TestProcessVoluntaryExits_ExitAlreadySubmitted(t *testing.T) {
}

want := "validator with index 0 has already submitted an exit, which will take place at epoch: 10"
_, err = blocks.ProcessVoluntaryExits(t.Context(), state, b.Block.Body.VoluntaryExits)
_, err = blocks.ProcessVoluntaryExits(t.Context(), state, b.Block.Body.VoluntaryExits, validators.ExitInformation(state))
assert.ErrorContains(t, want, err)
}

@@ -124,7 +125,7 @@ func TestProcessVoluntaryExits_AppliesCorrectStatus(t *testing.T) {
},
}

newState, err := blocks.ProcessVoluntaryExits(t.Context(), state, b.Block.Body.VoluntaryExits)
newState, err := blocks.ProcessVoluntaryExits(t.Context(), state, b.Block.Body.VoluntaryExits, validators.ExitInformation(state))
require.NoError(t, err, "Could not process exits")
newRegistry := newState.Validators()
if newRegistry[0].ExitEpoch != helpers.ActivationExitEpoch(primitives.Epoch(state.Slot()/params.BeaconConfig().SlotsPerEpoch)) {

@@ -7,9 +7,9 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/pkg/errors"
@@ -19,11 +19,6 @@ import (
// ErrCouldNotVerifyBlockHeader is returned when a block header's signature cannot be verified.
var ErrCouldNotVerifyBlockHeader = errors.New("could not verify beacon block header")

type slashValidatorFunc func(
ctx context.Context,
st state.BeaconState,
vid primitives.ValidatorIndex) (state.BeaconState, error)

// ProcessProposerSlashings is one of the operations performed
// on each processed beacon block to slash proposers based on
// slashing conditions if any slashable events occurred.
@@ -54,11 +49,11 @@ func ProcessProposerSlashings(
ctx context.Context,
beaconState state.BeaconState,
slashings []*ethpb.ProposerSlashing,
slashFunc slashValidatorFunc,
exitInfo *validators.ExitInfo,
) (state.BeaconState, error) {
var err error
for _, slashing := range slashings {
beaconState, err = ProcessProposerSlashing(ctx, beaconState, slashing, slashFunc)
beaconState, err = ProcessProposerSlashing(ctx, beaconState, slashing, exitInfo)
if err != nil {
return nil, err
}
@@ -71,7 +66,7 @@ func ProcessProposerSlashing(
ctx context.Context,
beaconState state.BeaconState,
slashing *ethpb.ProposerSlashing,
slashFunc slashValidatorFunc,
exitInfo *validators.ExitInfo,
) (state.BeaconState, error) {
var err error
if slashing == nil {
@@ -80,7 +75,7 @@ func ProcessProposerSlashing(
if err = VerifyProposerSlashing(beaconState, slashing); err != nil {
return nil, errors.Wrap(err, "could not verify proposer slashing")
}
beaconState, err = slashFunc(ctx, beaconState, slashing.Header_1.Header.ProposerIndex)
beaconState, err = validators.SlashValidator(ctx, beaconState, slashing.Header_1.Header.ProposerIndex, exitInfo)
if err != nil {
return nil, errors.Wrapf(err, "could not slash proposer index %d", slashing.Header_1.Header.ProposerIndex)
}

@@ -50,7 +50,7 @@ func TestProcessProposerSlashings_UnmatchedHeaderSlots(t *testing.T) {
},
}
want := "mismatched header slots"
_, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, b.Block.Body.ProposerSlashings, v.SlashValidator)
_, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, b.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
assert.ErrorContains(t, want, err)
}

@@ -83,7 +83,7 @@ func TestProcessProposerSlashings_SameHeaders(t *testing.T) {
},
}
want := "expected slashing headers to differ"
_, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, b.Block.Body.ProposerSlashings, v.SlashValidator)
_, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, b.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
assert.ErrorContains(t, want, err)
}

@@ -133,7 +133,7 @@ func TestProcessProposerSlashings_ValidatorNotSlashable(t *testing.T) {
"validator with key %#x is not slashable",
bytesutil.ToBytes48(beaconState.Validators()[0].PublicKey),
)
_, err = blocks.ProcessProposerSlashings(t.Context(), beaconState, b.Block.Body.ProposerSlashings, v.SlashValidator)
_, err = blocks.ProcessProposerSlashings(t.Context(), beaconState, b.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
assert.ErrorContains(t, want, err)
}

@@ -172,7 +172,7 @@ func TestProcessProposerSlashings_AppliesCorrectStatus(t *testing.T) {
block := util.NewBeaconBlock()
block.Block.Body.ProposerSlashings = slashings

newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.SlashValidator)
newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
require.NoError(t, err)

newStateVals := newState.Validators()
@@ -220,7 +220,7 @@ func TestProcessProposerSlashings_AppliesCorrectStatusAltair(t *testing.T) {
block := util.NewBeaconBlock()
block.Block.Body.ProposerSlashings = slashings

newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.SlashValidator)
newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
require.NoError(t, err)

newStateVals := newState.Validators()
@@ -268,7 +268,7 @@ func TestProcessProposerSlashings_AppliesCorrectStatusBellatrix(t *testing.T) {
block := util.NewBeaconBlock()
block.Block.Body.ProposerSlashings = slashings

newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.SlashValidator)
newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
require.NoError(t, err)

newStateVals := newState.Validators()
@@ -316,7 +316,7 @@ func TestProcessProposerSlashings_AppliesCorrectStatusCapella(t *testing.T) {
block := util.NewBeaconBlock()
block.Block.Body.ProposerSlashings = slashings

newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.SlashValidator)
newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
require.NoError(t, err)

newStateVals := newState.Validators()

@@ -84,8 +84,8 @@ func ProcessRegistryUpdates(ctx context.Context, st state.BeaconState) error {
// Handle validator ejections.
for _, idx := range eligibleForEjection {
var err error
// exitQueueEpoch and churn arguments are not used in electra.
st, _, err = validators.InitiateValidatorExit(ctx, st, idx, 0 /*exitQueueEpoch*/, 0 /*churn*/)
// exit info is not used in electra
st, err = validators.InitiateValidatorExit(ctx, st, idx, &validators.ExitInfo{})
if err != nil && !errors.Is(err, validators.ErrValidatorAlreadyExited) {
return fmt.Errorf("failed to initiate validator exit at index %d: %w", idx, err)
}

@@ -4,6 +4,7 @@ import (
"context"

"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
v "github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
@@ -46,18 +47,21 @@ var (
// # [New in Electra:EIP7251]
// for_ops(body.execution_payload.consolidation_requests, process_consolidation_request)

func ProcessOperations(
ctx context.Context,
st state.BeaconState,
block interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
func ProcessOperations(ctx context.Context, st state.BeaconState, block interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
var err error

// 6110 validations are in VerifyOperationLengths
bb := block.Body()
// Electra extends the altair operations.
st, err := ProcessProposerSlashings(ctx, st, bb.ProposerSlashings(), v.SlashValidator)
exitInfo := v.ExitInformation(st)
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
return nil, errors.Wrap(err, "could not update total active balance cache")
}
st, err = ProcessProposerSlashings(ctx, st, bb.ProposerSlashings(), exitInfo)
if err != nil {
return nil, errors.Wrap(err, "could not process altair proposer slashing")
}
st, err = ProcessAttesterSlashings(ctx, st, bb.AttesterSlashings(), v.SlashValidator)
st, err = ProcessAttesterSlashings(ctx, st, bb.AttesterSlashings(), exitInfo)
if err != nil {
return nil, errors.Wrap(err, "could not process altair attester slashing")
}
@@ -68,7 +72,7 @@ func ProcessOperations(
if _, err := ProcessDeposits(ctx, st, bb.Deposits()); err != nil { // new in electra
return nil, errors.Wrap(err, "could not process altair deposit")
}
st, err = ProcessVoluntaryExits(ctx, st, bb.VoluntaryExits())
st, err = ProcessVoluntaryExits(ctx, st, bb.VoluntaryExits(), exitInfo)
if err != nil {
return nil, errors.Wrap(err, "could not process voluntary exits")
}

@@ -147,9 +147,8 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
if isFullExitRequest {
// Only exit validator if it has no pending withdrawals in the queue
if pendingBalanceToWithdraw == 0 {
maxExitEpoch, churn := validators.MaxExitEpochAndChurn(st)
var err error
st, _, err = validators.InitiateValidatorExit(ctx, st, vIdx, maxExitEpoch, churn)
st, err = validators.InitiateValidatorExit(ctx, st, vIdx, validators.ExitInformation(st))
if err != nil {
return nil, err
}

@@ -99,8 +99,7 @@ func ProcessRegistryUpdates(ctx context.Context, st state.BeaconState) (state.Be
for _, idx := range eligibleForEjection {
// Here is fine to do a quadratic loop since this should
// barely happen
maxExitEpoch, churn := validators.MaxExitEpochAndChurn(st)
st, _, err = validators.InitiateValidatorExit(ctx, st, idx, maxExitEpoch, churn)
st, err = validators.InitiateValidatorExit(ctx, st, idx, validators.ExitInformation(st))
if err != nil && !errors.Is(err, validators.ErrValidatorAlreadyExited) {
return nil, errors.Wrapf(err, "could not initiate exit for validator %d", idx)
}

@@ -16,10 +16,10 @@ func ProcessEpoch(ctx context.Context, state state.BeaconState) error {
if err := electra.ProcessEpoch(ctx, state); err != nil {
return errors.Wrap(err, "could not process epoch in fulu transition")
}
return processProposerLookahead(ctx, state)
return ProcessProposerLookahead(ctx, state)
}

func processProposerLookahead(ctx context.Context, state state.BeaconState) error {
func ProcessProposerLookahead(ctx context.Context, state state.BeaconState) error {
_, span := trace.StartSpan(ctx, "fulu.processProposerLookahead")
defer span.End()

@@ -317,23 +317,15 @@ func ProposerAssignments(ctx context.Context, state state.BeaconState, epoch pri
}

proposerAssignments := make(map[primitives.ValidatorIndex][]primitives.Slot)

originalStateSlot := state.Slot()

for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ {
// Skip proposer assignment for genesis slot.
if slot == 0 {
continue
}
// Set the state's current slot.
if err := state.SetSlot(slot); err != nil {
return nil, err
}

// Determine the proposer index for the current slot.
i, err := BeaconProposerIndex(ctx, state)
i, err := BeaconProposerIndexAtSlot(ctx, state, slot)
if err != nil {
return nil, errors.Wrapf(err, "could not check proposer at slot %d", state.Slot())
return nil, errors.Wrapf(err, "could not check proposer at slot %d", slot)
}

// Append the slot to the proposer's assignments.
@@ -342,12 +334,6 @@ func ProposerAssignments(ctx context.Context, state state.BeaconState, epoch pri
}
proposerAssignments[i] = append(proposerAssignments[i], slot)
}

// Reset state back to its original slot.
if err := state.SetSlot(originalStateSlot); err != nil {
return nil, err
}

return proposerAssignments, nil
}

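With BeaconProposerIndexAtSlot taking the slot as an explicit argument, the SetSlot/restore round-trip above becomes unnecessary. A hedged sketch of a read-only enumeration built on the same helper (names assumed from this diff, not a definitive implementation):

func proposersForEpoch(ctx context.Context, st state.BeaconState, start primitives.Slot) ([]primitives.ValidatorIndex, error) {
	out := make([]primitives.ValidatorIndex, 0, params.BeaconConfig().SlotsPerEpoch)
	for slot := start; slot < start+params.BeaconConfig().SlotsPerEpoch; slot++ {
		idx, err := helpers.BeaconProposerIndexAtSlot(ctx, st, slot) // no state mutation
		if err != nil {
			return nil, err
		}
		out = append(out, idx)
	}
	return out, nil
}
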
@@ -87,6 +87,11 @@ func TotalActiveBalance(s state.ReadOnlyBeaconState) (uint64, error) {
return total, nil
}

// UpdateTotalActiveBalanceCache updates the cache with the given total active balance.
func UpdateTotalActiveBalanceCache(s state.BeaconState, total uint64) error {
return balanceCache.AddTotalEffectiveBalance(s, total)
}

// IncreaseBalance increases validator with the given 'index' balance by 'delta' in Gwei.
//
// Spec pseudocode definition:

@@ -297,3 +297,30 @@ func TestIncreaseBadBalance_NotOK(t *testing.T) {
require.ErrorContains(t, "addition overflows", helpers.IncreaseBalance(state, test.i, test.nb))
}
}

func TestUpdateTotalActiveBalanceCache(t *testing.T) {
helpers.ClearCache()

// Create a test state with some validators
validators := []*ethpb.Validator{
{EffectiveBalance: 32 * 1e9, ExitEpoch: params.BeaconConfig().FarFutureEpoch, ActivationEpoch: 0},
{EffectiveBalance: 32 * 1e9, ExitEpoch: params.BeaconConfig().FarFutureEpoch, ActivationEpoch: 0},
{EffectiveBalance: 31 * 1e9, ExitEpoch: params.BeaconConfig().FarFutureEpoch, ActivationEpoch: 0},
}
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Validators: validators,
Slot: 0,
})
require.NoError(t, err)

// Test updating cache with a specific total
testTotal := uint64(95 * 1e9) // 32 + 32 + 31 = 95
err = helpers.UpdateTotalActiveBalanceCache(state, testTotal)
require.NoError(t, err)

// Verify the cache was updated by retrieving the total active balance
// which should now return the cached value
cachedTotal, err := helpers.TotalActiveBalance(state)
require.NoError(t, err)
assert.Equal(t, testTotal, cachedTotal, "Cache should return the updated total")
}

@@ -21,6 +21,39 @@ var (
syncCommitteeCache = cache.NewSyncCommittee()
)

// CurrentPeriodPositions returns committee indices of the current period sync committee for input validators.
func CurrentPeriodPositions(st state.BeaconState, indices []primitives.ValidatorIndex) ([][]primitives.CommitteeIndex, error) {
root, err := SyncPeriodBoundaryRoot(st)
if err != nil {
return nil, err
}
pos, err := syncCommitteeCache.CurrentPeriodPositions(root, indices)
if errors.Is(err, cache.ErrNonExistingSyncCommitteeKey) {
committee, err := st.CurrentSyncCommittee()
if err != nil {
return nil, err
}

// Fill in the cache on miss.
go func() {
if err := syncCommitteeCache.UpdatePositionsInCommittee(root, st); err != nil {
log.WithError(err).Error("Could not fill sync committee cache on miss")
}
}()

pos = make([][]primitives.CommitteeIndex, len(indices))
for i, idx := range indices {
pubkey := st.PubkeyAtIndex(idx)
pos[i] = findSubCommitteeIndices(pubkey[:], committee.Pubkeys)
}
return pos, nil
}
if err != nil {
return nil, err
}
return pos, nil
}

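A hedged usage sketch of the new helper: positions[i] lists every sub-committee position held by indices[i] in the current period committee (a validator can appear more than once in a sync committee). Names and imports are assumed to follow this diff:

func printPositionsSketch(st state.BeaconState) error {
	positions, err := helpers.CurrentPeriodPositions(st, []primitives.ValidatorIndex{0, 1})
	if err != nil {
		return err
	}
	for i, p := range positions {
		fmt.Printf("validator %d holds sync committee positions %v\n", i, p)
	}
	return nil
}

On a cache miss the function falls back to a direct scan of the committee pubkeys and repopulates the cache in a background goroutine, so the first call after a period boundary is slower but never blocks on the cache fill.
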
// IsCurrentPeriodSyncCommittee returns true if the input validator index belongs in the current period sync committee
// along with the sync committee root.
// 1. Checks if the public key exists in the sync committee cache

@@ -17,6 +17,38 @@ import (
"github.com/OffchainLabs/prysm/v6/testing/require"
)

func TestCurrentPeriodPositions(t *testing.T) {
helpers.ClearCache()

validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
Pubkeys: make([][]byte, params.BeaconConfig().SyncCommitteeSize),
}
for i := 0; i < len(validators); i++ {
k := make([]byte, 48)
copy(k, strconv.Itoa(i))
validators[i] = &ethpb.Validator{
PublicKey: k,
}
syncCommittee.Pubkeys[i] = bytesutil.PadTo(k, 48)
}
state, err := state_native.InitializeFromProtoAltair(&ethpb.BeaconStateAltair{
Validators: validators,
})
require.NoError(t, err)
require.NoError(t, state.SetCurrentSyncCommittee(syncCommittee))
require.NoError(t, state.SetNextSyncCommittee(syncCommittee))
require.NoError(t, err, helpers.SyncCommitteeCache().UpdatePositionsInCommittee([32]byte{}, state))

positions, err := helpers.CurrentPeriodPositions(state, []primitives.ValidatorIndex{0, 1})
require.NoError(t, err)
require.Equal(t, 2, len(positions))
require.Equal(t, 1, len(positions[0]))
assert.Equal(t, primitives.CommitteeIndex(0), positions[0][0])
require.Equal(t, 1, len(positions[1]))
assert.Equal(t, primitives.CommitteeIndex(1), positions[1][0])
}

func TestIsCurrentEpochSyncCommittee_UsingCache(t *testing.T) {
helpers.ClearCache()

@@ -78,6 +110,7 @@ func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {

func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
helpers.ClearCache()
params.SetupTestConfigCleanup(t)

validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -264,6 +297,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
}

func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
params.SetupTestConfigCleanup(t)
helpers.ClearCache()

validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)

@@ -309,23 +309,29 @@ func beaconProposerIndexAtSlotFulu(state state.ReadOnlyBeaconState, slot primiti
if err != nil {
return 0, errors.Wrap(err, "could not get proposer lookahead")
}
spe := params.BeaconConfig().SlotsPerEpoch
if e == stateEpoch {
return lookAhead[slot%params.BeaconConfig().SlotsPerEpoch], nil
return lookAhead[slot%spe], nil
}
// The caller is requesting the proposer for the next epoch
return lookAhead[slot%params.BeaconConfig().SlotsPerEpoch+params.BeaconConfig().SlotsPerEpoch], nil
return lookAhead[spe+slot%spe], nil
}

// BeaconProposerIndexAtSlot returns proposer index at the given slot from the
// point of view of the given state as head state
func BeaconProposerIndexAtSlot(ctx context.Context, state state.ReadOnlyBeaconState, slot primitives.Slot) (primitives.ValidatorIndex, error) {
if state.Version() >= version.Fulu {
return beaconProposerIndexAtSlotFulu(state, slot)
}
e := slots.ToEpoch(slot)
stateEpoch := slots.ToEpoch(state.Slot())
// Even if the state is post Fulu, we may request a past proposer index.
if state.Version() >= version.Fulu && e >= params.BeaconConfig().FuluForkEpoch {
// We can use the cached lookahead only for the current and the next epoch.
if e == stateEpoch || e == stateEpoch+1 {
return beaconProposerIndexAtSlotFulu(state, slot)
}
}
// The cache uses the state root of the previous epoch - minimum_seed_lookahead last slot as key. (e.g. Starting epoch 1, slot 32, the key would be block root at slot 31)
// For simplicity, the node will skip caching of genesis epoch.
if e > params.BeaconConfig().GenesisEpoch+params.BeaconConfig().MinSeedLookahead {
// For simplicity, the node will skip caching of genesis epoch. If the passed state has not yet reached this slot then we do not check the cache.
if e <= stateEpoch && e > params.BeaconConfig().GenesisEpoch+params.BeaconConfig().MinSeedLookahead {
s, err := slots.EpochEnd(e - 1)
if err != nil {
return 0, err

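The indexing fixed above treats the lookahead as a flat vector of 2*SLOTS_PER_EPOCH entries: the current epoch fills the first half and the next epoch the second half. A standalone sketch (a SLOTS_PER_EPOCH of 32 is a mainnet assumption, and the values are made up):

package main

import "fmt"

func main() {
	const spe = 32                     // SLOTS_PER_EPOCH (mainnet assumption)
	lookAhead := make([]uint64, 2*spe) // current epoch first, next epoch second
	lookAhead[5] = 15                  // proposer at slot offset 5, current epoch
	lookAhead[spe+5] = 42              // proposer at slot offset 5, next epoch

	slot := uint64(101)                  // a slot in the next epoch; 101 % 32 == 5
	fmt.Println(lookAhead[spe+slot%spe]) // 42: read from the second half
}
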
@@ -1161,6 +1161,10 @@ func TestValidatorMaxEffectiveBalance(t *testing.T) {
}

func TestBeaconProposerIndexAtSlotFulu(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.FuluForkEpoch = 1
params.OverrideBeaconConfig(cfg)
lookahead := make([]uint64, 64)
lookahead[0] = 15
lookahead[1] = 16
@@ -1180,8 +1184,4 @@ func TestBeaconProposerIndexAtSlotFulu(t *testing.T) {
idx, err = helpers.BeaconProposerIndexAtSlot(t.Context(), st, 130)
require.NoError(t, err)
require.Equal(t, primitives.ValidatorIndex(42), idx)
_, err = helpers.BeaconProposerIndexAtSlot(t.Context(), st, 95)
require.ErrorContains(t, "slot 95 is not in the current epoch 3 or the next epoch", err)
_, err = helpers.BeaconProposerIndexAtSlot(t.Context(), st, 160)
require.ErrorContains(t, "slot 160 is not in the current epoch 3 or the next epoch", err)
}

@@ -223,6 +223,14 @@ func dataColumnsSidecars(
cellsForRow := cellsAndProofs[rowIndex].Cells
proofsForRow := cellsAndProofs[rowIndex].Proofs

// Validate that we have enough cells and proofs for this column index
if columnIndex >= uint64(len(cellsForRow)) {
return nil, errors.Errorf("column index %d exceeds cells length %d for blob %d", columnIndex, len(cellsForRow), rowIndex)
}
if columnIndex >= uint64(len(proofsForRow)) {
return nil, errors.Errorf("column index %d exceeds proofs length %d for blob %d", columnIndex, len(proofsForRow), rowIndex)
}

cell := cellsForRow[columnIndex]
column = append(column, cell)

@@ -67,6 +67,55 @@ func TestDataColumnSidecars(t *testing.T) {
_, err = peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs)
require.ErrorIs(t, err, peerdas.ErrSizeMismatch)
})

t.Run("cells array too short for column index", func(t *testing.T) {
// Create a Fulu block with a blob commitment.
signedBeaconBlockPb := util.NewBeaconBlockFulu()
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, 48)}

// Create a signed beacon block from the protobuf.
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
require.NoError(t, err)

// Create cells and proofs with insufficient cells for the number of columns.
// This simulates a scenario where cellsAndProofs has fewer cells than expected columns.
cellsAndProofs := []kzg.CellsAndProofs{
{
Cells: make([]kzg.Cell, 10), // Only 10 cells
Proofs: make([]kzg.Proof, 10), // Only 10 proofs
},
}

// This should fail because the function will try to access columns up to NumberOfColumns
// but we only have 10 cells/proofs.
_, err = peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs)
require.ErrorContains(t, "column index", err)
require.ErrorContains(t, "exceeds cells length", err)
})

t.Run("proofs array too short for column index", func(t *testing.T) {
// Create a Fulu block with a blob commitment.
signedBeaconBlockPb := util.NewBeaconBlockFulu()
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, 48)}

// Create a signed beacon block from the protobuf.
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
require.NoError(t, err)

// Create cells and proofs with sufficient cells but insufficient proofs.
numberOfColumns := params.BeaconConfig().NumberOfColumns
cellsAndProofs := []kzg.CellsAndProofs{
{
Cells: make([]kzg.Cell, numberOfColumns),
Proofs: make([]kzg.Proof, 5), // Only 5 proofs, less than columns
},
}

// This should fail when trying to access proof beyond index 4.
_, err = peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs)
require.ErrorContains(t, "column index", err)
require.ErrorContains(t, "exceeds proofs length", err)
})
}

func TestComputeCustodyGroupForColumn(t *testing.T) {

@@ -18,8 +18,8 @@ var (
ErrBlobsCellsProofsMismatch = errors.New("blobs and cells proofs mismatch")
)

// MinimumColumnsCountToReconstruct return the minimum number of columns needed to proceed to a reconstruction.
func MinimumColumnsCountToReconstruct() uint64 {
// MinimumColumnCountToReconstruct return the minimum number of columns needed to proceed to a reconstruction.
func MinimumColumnCountToReconstruct() uint64 {
// If the number of columns is odd, then we need total / 2 + 1 columns to reconstruct.
// If the number of columns is even, then we need total / 2 columns to reconstruct.
return (params.BeaconConfig().NumberOfColumns + 1) / 2
@@ -58,7 +58,7 @@ func ReconstructDataColumnSidecars(inVerifiedRoSidecars []blocks.VerifiedRODataC

// Check if there is enough sidecars to reconstruct the missing columns.
sidecarCount := len(sidecarByIndex)
if uint64(sidecarCount) < MinimumColumnsCountToReconstruct() {
if uint64(sidecarCount) < MinimumColumnCountToReconstruct() {
return nil, ErrNotEnoughDataColumnSidecars
}

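The (total + 1) / 2 expression is integer arithmetic for ceil(total/2), which matches the odd/even comment inside the function. A standalone check:

package main

import "fmt"

func main() {
	// even n: n/2 columns suffice; odd n: n/2 + 1 are needed — both are (n+1)/2.
	for _, n := range []uint64{128, 127} {
		fmt.Println(n, "->", (n+1)/2) // 128 -> 64, 127 -> 64
	}
}
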
@@ -48,7 +48,7 @@ func TestMinimumColumnsCountToReconstruct(t *testing.T) {
params.OverrideBeaconConfig(cfg)

// Compute the minimum number of columns needed to reconstruct.
actual := peerdas.MinimumColumnsCountToReconstruct()
actual := peerdas.MinimumColumnCountToReconstruct()
require.Equal(t, tc.expected, actual)
})
}
@@ -100,7 +100,7 @@ func TestReconstructDataColumnSidecars(t *testing.T) {
t.Run("not enough columns to enable reconstruction", func(t *testing.T) {
_, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, 3)

minimum := peerdas.MinimumColumnsCountToReconstruct()
minimum := peerdas.MinimumColumnCountToReconstruct()
_, err := peerdas.ReconstructDataColumnSidecars(verifiedRoSidecars[:minimum-1])
require.ErrorIs(t, err, peerdas.ErrNotEnoughDataColumnSidecars)
})

@@ -8,6 +8,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/altair"
b "github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/electra"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition/interop"
v "github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
@@ -374,15 +375,18 @@ func ProcessBlockForStateRoot(
}

// This calls altair block operations.
func altairOperations(
ctx context.Context,
st state.BeaconState,
beaconBlock interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
st, err := b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), v.SlashValidator)
func altairOperations(ctx context.Context, st state.BeaconState, beaconBlock interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
var err error

exitInfo := v.ExitInformation(st)
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
return nil, errors.Wrap(err, "could not update total active balance cache")
}
st, err = b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), exitInfo)
if err != nil {
return nil, errors.Wrap(err, "could not process altair proposer slashing")
}
st, err = b.ProcessAttesterSlashings(ctx, st, beaconBlock.Body().AttesterSlashings(), v.SlashValidator)
st, err = b.ProcessAttesterSlashings(ctx, st, beaconBlock.Body().AttesterSlashings(), exitInfo)
if err != nil {
return nil, errors.Wrap(err, "could not process altair attester slashing")
}
@@ -393,7 +397,7 @@ func altairOperations(
if _, err := altair.ProcessDeposits(ctx, st, beaconBlock.Body().Deposits()); err != nil {
return nil, errors.Wrap(err, "could not process altair deposit")
}
st, err = b.ProcessVoluntaryExits(ctx, st, beaconBlock.Body().VoluntaryExits())
st, err = b.ProcessVoluntaryExits(ctx, st, beaconBlock.Body().VoluntaryExits(), exitInfo)
if err != nil {
return nil, errors.Wrap(err, "could not process voluntary exits")
}
@@ -401,15 +405,18 @@ func altairOperations(
}

// This calls phase 0 block operations.
func phase0Operations(
ctx context.Context,
st state.BeaconState,
beaconBlock interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
st, err := b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), v.SlashValidator)
func phase0Operations(ctx context.Context, st state.BeaconState, beaconBlock interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
var err error

exitInfo := v.ExitInformation(st)
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
return nil, errors.Wrap(err, "could not update total active balance cache")
}
st, err = b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), exitInfo)
if err != nil {
return nil, errors.Wrap(err, "could not process block proposer slashings")
}
st, err = b.ProcessAttesterSlashings(ctx, st, beaconBlock.Body().AttesterSlashings(), v.SlashValidator)
st, err = b.ProcessAttesterSlashings(ctx, st, beaconBlock.Body().AttesterSlashings(), exitInfo)
if err != nil {
return nil, errors.Wrap(err, "could not process block attester slashings")
}
@@ -420,5 +427,9 @@ func phase0Operations(
if _, err := altair.ProcessDeposits(ctx, st, beaconBlock.Body().Deposits()); err != nil {
return nil, errors.Wrap(err, "could not process deposits")
}
return b.ProcessVoluntaryExits(ctx, st, beaconBlock.Body().VoluntaryExits())
st, err = b.ProcessVoluntaryExits(ctx, st, beaconBlock.Body().VoluntaryExits(), exitInfo)
if err != nil {
return nil, errors.Wrap(err, "could not process voluntary exits")
}
return st, nil
}

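Both operation paths warm the shared total-active-balance cache from the ExitInformation pass, so the penalty math in the slashing helpers and any later helpers.TotalActiveBalance calls reuse a single registry scan rather than repeating it per slashing. A hedged sketch of the pattern in isolation (names as in this diff):

func warmBalanceCacheSketch(st state.BeaconState) error {
	exitInfo := v.ExitInformation(st) // single walk over the registry
	if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
		return errors.Wrap(err, "could not update total active balance cache")
	}
	_, err := helpers.TotalActiveBalance(st) // now served from the cache
	return err
}
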
@@ -13,34 +13,55 @@ import (
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/math"
mathutil "github.com/OffchainLabs/prysm/v6/math"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/pkg/errors"
)

// ExitInfo provides information about validator exits in the state.
type ExitInfo struct {
HighestExitEpoch primitives.Epoch
Churn uint64
TotalActiveBalance uint64
}

// ErrValidatorAlreadyExited is an error raised when trying to process an exit of
// an already exited validator
var ErrValidatorAlreadyExited = errors.New("validator already exited")

// MaxExitEpochAndChurn returns the maximum non-FAR_FUTURE_EPOCH exit
// epoch and the number of them
func MaxExitEpochAndChurn(s state.BeaconState) (maxExitEpoch primitives.Epoch, churn uint64) {
// ExitInformation returns information about validator exits.
func ExitInformation(s state.BeaconState) *ExitInfo {
exitInfo := &ExitInfo{}

farFutureEpoch := params.BeaconConfig().FarFutureEpoch
currentEpoch := slots.ToEpoch(s.Slot())
totalActiveBalance := uint64(0)

err := s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
e := val.ExitEpoch()
if e != farFutureEpoch {
if e > maxExitEpoch {
maxExitEpoch = e
churn = 1
} else if e == maxExitEpoch {
churn++
if e > exitInfo.HighestExitEpoch {
exitInfo.HighestExitEpoch = e
exitInfo.Churn = 1
} else if e == exitInfo.HighestExitEpoch {
exitInfo.Churn++
}
}

// Calculate total active balance in the same loop
if helpers.IsActiveValidatorUsingTrie(val, currentEpoch) {
totalActiveBalance += val.EffectiveBalance()
}

return nil
})
_ = err
return

// Apply minimum balance as per spec
exitInfo.TotalActiveBalance = mathutil.Max(params.BeaconConfig().EffectiveBalanceIncrement, totalActiveBalance)
return exitInfo
}

// InitiateValidatorExit takes in validator index and updates
@@ -64,59 +85,117 @@ func MaxExitEpochAndChurn(s state.BeaconState, c
// # Set validator exit epoch and withdrawable epoch
// validator.exit_epoch = exit_queue_epoch
// validator.withdrawable_epoch = Epoch(validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY)
func InitiateValidatorExit(ctx context.Context, s state.BeaconState, idx primitives.ValidatorIndex, exitQueueEpoch primitives.Epoch, churn uint64) (state.BeaconState, primitives.Epoch, error) {
func InitiateValidatorExit(
ctx context.Context,
s state.BeaconState,
idx primitives.ValidatorIndex,
exitInfo *ExitInfo,
) (state.BeaconState, error) {
validator, err := s.ValidatorAtIndex(idx)
if err != nil {
return nil, 0, err
return nil, err
}
if validator.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
return s, validator.ExitEpoch, ErrValidatorAlreadyExited
return s, ErrValidatorAlreadyExited
}

// Compute exit queue epoch.
if s.Version() < version.Electra {
// Relevant spec code from phase0:
//
// exit_epochs = [v.exit_epoch for v in state.validators if v.exit_epoch != FAR_FUTURE_EPOCH]
// exit_queue_epoch = max(exit_epochs + [compute_activation_exit_epoch(get_current_epoch(state))])
// exit_queue_churn = len([v for v in state.validators if v.exit_epoch == exit_queue_epoch])
// if exit_queue_churn >= get_validator_churn_limit(state):
// exit_queue_epoch += Epoch(1)
exitableEpoch := helpers.ActivationExitEpoch(time.CurrentEpoch(s))
if exitableEpoch > exitQueueEpoch {
exitQueueEpoch = exitableEpoch
churn = 0
}
activeValidatorCount, err := helpers.ActiveValidatorCount(ctx, s, time.CurrentEpoch(s))
if err != nil {
return nil, 0, errors.Wrap(err, "could not get active validator count")
}
currentChurn := helpers.ValidatorExitChurnLimit(activeValidatorCount)

if churn >= currentChurn {
exitQueueEpoch, err = exitQueueEpoch.SafeAdd(1)
if err != nil {
return nil, 0, err
}
if err = initiateValidatorExitPreElectra(ctx, s, exitInfo); err != nil {
return nil, err
}
} else {
// [Modified in Electra:EIP7251]
// exit_queue_epoch = compute_exit_epoch_and_update_churn(state, validator.effective_balance)
var err error
exitQueueEpoch, err = s.ExitEpochAndUpdateChurn(primitives.Gwei(validator.EffectiveBalance))
exitInfo.HighestExitEpoch, err = s.ExitEpochAndUpdateChurn(primitives.Gwei(validator.EffectiveBalance))
if err != nil {
return nil, 0, err
return nil, err
}
}
validator.ExitEpoch = exitQueueEpoch
validator.WithdrawableEpoch, err = exitQueueEpoch.SafeAddEpoch(params.BeaconConfig().MinValidatorWithdrawabilityDelay)
validator.ExitEpoch = exitInfo.HighestExitEpoch
validator.WithdrawableEpoch, err = exitInfo.HighestExitEpoch.SafeAddEpoch(params.BeaconConfig().MinValidatorWithdrawabilityDelay)
if err != nil {
return nil, 0, err
return nil, err
}
if err := s.UpdateValidatorAtIndex(idx, validator); err != nil {
return nil, 0, err
return nil, err
}
return s, exitQueueEpoch, nil
return s, nil
}

// InitiateValidatorExitForTotalBal has the same functionality as InitiateValidatorExit,
// the only difference being how total active balance is obtained. In InitiateValidatorExit
// it is calculated inside the function and in InitiateValidatorExitForTotalBal it's a
// function argument.
func InitiateValidatorExitForTotalBal(
ctx context.Context,
s state.BeaconState,
idx primitives.ValidatorIndex,
exitInfo *ExitInfo,
totalActiveBalance primitives.Gwei,
) (state.BeaconState, error) {
validator, err := s.ValidatorAtIndex(idx)
if err != nil {
return nil, err
}
if validator.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
return s, ErrValidatorAlreadyExited
}

// Compute exit queue epoch.
if s.Version() < version.Electra {
if err = initiateValidatorExitPreElectra(ctx, s, exitInfo); err != nil {
return nil, err
}
} else {
// [Modified in Electra:EIP7251]
// exit_queue_epoch = compute_exit_epoch_and_update_churn(state, validator.effective_balance)
var err error
exitInfo.HighestExitEpoch, err = s.ExitEpochAndUpdateChurnForTotalBal(totalActiveBalance, primitives.Gwei(validator.EffectiveBalance))
if err != nil {
return nil, err
}
}
validator.ExitEpoch = exitInfo.HighestExitEpoch
validator.WithdrawableEpoch, err = exitInfo.HighestExitEpoch.SafeAddEpoch(params.BeaconConfig().MinValidatorWithdrawabilityDelay)
if err != nil {
return nil, err
}
if err := s.UpdateValidatorAtIndex(idx, validator); err != nil {
return nil, err
}
return s, nil
}

func initiateValidatorExitPreElectra(ctx context.Context, s state.BeaconState, exitInfo *ExitInfo) error {
// Relevant spec code from phase0:
//
// exit_epochs = [v.exit_epoch for v in state.validators if v.exit_epoch != FAR_FUTURE_EPOCH]
// exit_queue_epoch = max(exit_epochs + [compute_activation_exit_epoch(get_current_epoch(state))])
// exit_queue_churn = len([v for v in state.validators if v.exit_epoch == exit_queue_epoch])
// if exit_queue_churn >= get_validator_churn_limit(state):
// exit_queue_epoch += Epoch(1)
exitableEpoch := helpers.ActivationExitEpoch(time.CurrentEpoch(s))
if exitableEpoch > exitInfo.HighestExitEpoch {
exitInfo.HighestExitEpoch = exitableEpoch
exitInfo.Churn = 0
}
activeValidatorCount, err := helpers.ActiveValidatorCount(ctx, s, time.CurrentEpoch(s))
if err != nil {
return errors.Wrap(err, "could not get active validator count")
}
currentChurn := helpers.ValidatorExitChurnLimit(activeValidatorCount)
if exitInfo.Churn >= currentChurn {
exitInfo.HighestExitEpoch, err = exitInfo.HighestExitEpoch.SafeAdd(1)
if err != nil {
return err
}
exitInfo.Churn = 1
} else {
exitInfo.Churn = exitInfo.Churn + 1
}
return nil
}

// SlashValidator slashes the malicious validator's balance and awards
@@ -152,9 +231,12 @@ func InitiateValidatorExit(ctx context.Context, s state.BeaconState, idx primiti
func SlashValidator(
ctx context.Context,
s state.BeaconState,
slashedIdx primitives.ValidatorIndex) (state.BeaconState, error) {
maxExitEpoch, churn := MaxExitEpochAndChurn(s)
s, _, err := InitiateValidatorExit(ctx, s, slashedIdx, maxExitEpoch, churn)
slashedIdx primitives.ValidatorIndex,
exitInfo *ExitInfo,
) (state.BeaconState, error) {
var err error

s, err = InitiateValidatorExitForTotalBal(ctx, s, slashedIdx, exitInfo, primitives.Gwei(exitInfo.TotalActiveBalance))
if err != nil && !errors.Is(err, ErrValidatorAlreadyExited) {
return nil, errors.Wrapf(err, "could not initiate validator %d exit", slashedIdx)
}

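A standalone simulation of the pre-Electra queue rule now isolated in initiateValidatorExitPreElectra: each queued exit joins the highest exit epoch until the churn limit is hit, then the queue rolls over to the next epoch and the counter restarts at 1. The churn limit of 4 below is an arbitrary illustrative assumption:

package main

import "fmt"

func main() {
	const churnLimit = 4
	highest, churn := uint64(10), uint64(3)
	for i := 0; i < 3; i++ {
		if churn >= churnLimit {
			highest++ // exit queue epoch rolls over
			churn = 1
		} else {
			churn++ // exit joins the current highest epoch
		}
		fmt.Println(highest, churn) // 10 4; 11 1; 11 2
	}
}
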
@@ -49,9 +49,11 @@ func TestInitiateValidatorExit_AlreadyExited(t *testing.T) {
|
||||
}}
|
||||
state, err := state_native.InitializeFromProtoPhase0(base)
|
||||
require.NoError(t, err)
|
||||
newState, epoch, err := validators.InitiateValidatorExit(t.Context(), state, 0, 199, 1)
|
||||
exitInfo := &validators.ExitInfo{HighestExitEpoch: 199, Churn: 1}
|
||||
newState, err := validators.InitiateValidatorExit(t.Context(), state, 0, exitInfo)
|
||||
require.ErrorIs(t, err, validators.ErrValidatorAlreadyExited)
|
||||
require.Equal(t, exitEpoch, epoch)
|
||||
assert.Equal(t, primitives.Epoch(199), exitInfo.HighestExitEpoch)
|
||||
assert.Equal(t, uint64(1), exitInfo.Churn)
|
||||
v, err := newState.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, exitEpoch, v.ExitEpoch, "Already exited")
|
||||
@@ -68,9 +70,11 @@ func TestInitiateValidatorExit_ProperExit(t *testing.T) {
|
||||
}}
|
||||
state, err := state_native.InitializeFromProtoPhase0(base)
|
||||
require.NoError(t, err)
|
||||
newState, epoch, err := validators.InitiateValidatorExit(t.Context(), state, idx, exitedEpoch+2, 1)
|
||||
exitInfo := &validators.ExitInfo{HighestExitEpoch: exitedEpoch + 2, Churn: 1}
|
||||
newState, err := validators.InitiateValidatorExit(t.Context(), state, idx, exitInfo)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, exitedEpoch+2, epoch)
|
||||
assert.Equal(t, exitedEpoch+2, exitInfo.HighestExitEpoch)
|
||||
assert.Equal(t, uint64(2), exitInfo.Churn)
|
||||
v, err := newState.ValidatorAtIndex(idx)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, exitedEpoch+2, v.ExitEpoch, "Exit epoch was not the highest")
@@ -88,9 +92,11 @@ func TestInitiateValidatorExit_ChurnOverflow(t *testing.T) {
}}
state, err := state_native.InitializeFromProtoPhase0(base)
require.NoError(t, err)
newState, epoch, err := validators.InitiateValidatorExit(t.Context(), state, idx, exitedEpoch+2, 4)
exitInfo := &validators.ExitInfo{HighestExitEpoch: exitedEpoch + 2, Churn: 4}
newState, err := validators.InitiateValidatorExit(t.Context(), state, idx, exitInfo)
require.NoError(t, err)
require.Equal(t, exitedEpoch+3, epoch)
assert.Equal(t, exitedEpoch+3, exitInfo.HighestExitEpoch)
assert.Equal(t, uint64(1), exitInfo.Churn)

// Because of exit queue overflow,
// validator who init exited has to wait one more epoch.
@@ -110,7 +116,8 @@ func TestInitiateValidatorExit_WithdrawalOverflows(t *testing.T) {
}}
state, err := state_native.InitializeFromProtoPhase0(base)
require.NoError(t, err)
_, _, err = validators.InitiateValidatorExit(t.Context(), state, 1, params.BeaconConfig().FarFutureEpoch-1, 1)
exitInfo := &validators.ExitInfo{HighestExitEpoch: params.BeaconConfig().FarFutureEpoch - 1, Churn: 1}
_, err = validators.InitiateValidatorExit(t.Context(), state, 1, exitInfo)
require.ErrorContains(t, "addition overflows", err)
}

@@ -146,12 +153,11 @@ func TestInitiateValidatorExit_ProperExit_Electra(t *testing.T) {
require.NoError(t, err)
require.Equal(t, primitives.Gwei(0), ebtc)

newState, epoch, err := validators.InitiateValidatorExit(t.Context(), state, idx, 0, 0) // exitQueueEpoch and churn are not used in electra
newState, err := validators.InitiateValidatorExit(t.Context(), state, idx, &validators.ExitInfo{}) // exit info is not used in electra
require.NoError(t, err)

// Expect that the exit epoch is the next available epoch with max seed lookahead.
want := helpers.ActivationExitEpoch(exitedEpoch + 1)
require.Equal(t, want, epoch)
v, err := newState.ValidatorAtIndex(idx)
require.NoError(t, err)
assert.Equal(t, want, v.ExitEpoch, "Exit epoch was not the highest")
@@ -190,7 +196,7 @@ func TestSlashValidator_OK(t *testing.T) {
require.NoError(t, err, "Could not get proposer")
proposerBal, err := state.BalanceAtIndex(proposer)
require.NoError(t, err)
slashedState, err := validators.SlashValidator(t.Context(), state, slashedIdx)
slashedState, err := validators.SlashValidator(t.Context(), state, slashedIdx, validators.ExitInformation(state))
require.NoError(t, err, "Could not slash validator")
require.Equal(t, true, slashedState.Version() == version.Phase0)

@@ -244,7 +250,7 @@ func TestSlashValidator_Electra(t *testing.T) {
require.NoError(t, err, "Could not get proposer")
proposerBal, err := state.BalanceAtIndex(proposer)
require.NoError(t, err)
slashedState, err := validators.SlashValidator(t.Context(), state, slashedIdx)
slashedState, err := validators.SlashValidator(t.Context(), state, slashedIdx, validators.ExitInformation(state))
require.NoError(t, err, "Could not slash validator")
require.Equal(t, true, slashedState.Version() == version.Electra)

@@ -505,8 +511,8 @@ func TestValidatorMaxExitEpochAndChurn(t *testing.T) {
for _, tt := range tests {
s, err := state_native.InitializeFromProtoPhase0(tt.state)
require.NoError(t, err)
epoch, churn := validators.MaxExitEpochAndChurn(s)
require.Equal(t, tt.wantedEpoch, epoch)
require.Equal(t, tt.wantedChurn, churn)
exitInfo := validators.ExitInformation(s)
require.Equal(t, tt.wantedEpoch, exitInfo.HighestExitEpoch)
require.Equal(t, tt.wantedChurn, exitInfo.Churn)
}
}

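For readers tracking the exit-queue refactor above: the test changes suggest the per-call (exitQueueEpoch, churn) scalars were folded into a single validators.ExitInfo value that is computed once via ExitInformation, threaded through InitiateValidatorExit and SlashValidator, and updated in place. A minimal sketch of the new call flow, assuming only the signatures visible in this diff (the helper name exitThenSlash is illustrative, not part of the change):

package example

import (
	"context"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
)

// exitThenSlash is a hypothetical caller showing one ExitInfo value being
// computed once and reused, instead of recomputing MaxExitEpochAndChurn.
func exitThenSlash(ctx context.Context, st state.BeaconState, idx primitives.ValidatorIndex) (state.BeaconState, error) {
	exitInfo := validators.ExitInformation(st) // highest exit epoch + churn, per the tests above
	st, err := validators.InitiateValidatorExit(ctx, st, idx, exitInfo)
	if err != nil {
		return nil, err
	}
	// The tests assert that exitInfo.HighestExitEpoch and exitInfo.Churn are
	// mutated by the call, so a follow-up slash sees the updated queue state.
	return validators.SlashValidator(ctx, st, idx, exitInfo)
}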
@@ -4,7 +4,6 @@ go_library(
name = "go_default_library",
srcs = [
"availability_blobs.go",
"availability_columns.go",
"blob_cache.go",
"data_column_cache.go",
"iface.go",
@@ -13,7 +12,6 @@ go_library(
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/das",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/db/filesystem:go_default_library",
"//beacon-chain/verification:go_default_library",
"//config/fieldparams:go_default_library",
@@ -23,7 +21,6 @@ go_library(
"//runtime/logging:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
@@ -33,7 +30,6 @@ go_test(
name = "go_default_test",
srcs = [
"availability_blobs_test.go",
"availability_columns_test.go",
"blob_cache_test.go",
"data_column_cache_test.go",
],
@@ -49,7 +45,6 @@ go_test(
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

@@ -53,30 +53,25 @@ func NewLazilyPersistentStore(store *filesystem.BlobStorage, verifier BlobBatchV
// Persist adds blobs to the working blob cache. Blobs stored in this cache will be persisted
// for at least as long as the node is running. Once IsDataAvailable succeeds, all blobs referenced
// by the given block are guaranteed to be persisted for the remainder of the retention period.
func (s *LazilyPersistentStoreBlob) Persist(current primitives.Slot, sidecars ...blocks.ROSidecar) error {
func (s *LazilyPersistentStoreBlob) Persist(current primitives.Slot, sidecars ...blocks.ROBlob) error {
if len(sidecars) == 0 {
return nil
}

blobSidecars, err := blocks.BlobSidecarsFromSidecars(sidecars)
if err != nil {
return errors.Wrap(err, "blob sidecars from sidecars")
}

if len(blobSidecars) > 1 {
firstRoot := blobSidecars[0].BlockRoot()
for _, sidecar := range blobSidecars[1:] {
if len(sidecars) > 1 {
firstRoot := sidecars[0].BlockRoot()
for _, sidecar := range sidecars[1:] {
if sidecar.BlockRoot() != firstRoot {
return errMixedRoots
}
}
}
if !params.WithinDAPeriod(slots.ToEpoch(blobSidecars[0].Slot()), slots.ToEpoch(current)) {
if !params.WithinDAPeriod(slots.ToEpoch(sidecars[0].Slot()), slots.ToEpoch(current)) {
return nil
}
key := keyFromSidecar(blobSidecars[0])
key := keyFromSidecar(sidecars[0])
entry := s.cache.ensure(key)
for _, blobSidecar := range blobSidecars {
for _, blobSidecar := range sidecars {
if err := entry.stash(&blobSidecar); err != nil {
return err
}

@@ -118,23 +118,21 @@ func TestLazilyPersistent_Missing(t *testing.T) {

blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)

scs := blocks.NewSidecarsFromBlobSidecars(blobSidecars)

mbv := &mockBlobBatchVerifier{t: t, scs: blobSidecars}
as := NewLazilyPersistentStore(store, mbv)

// Only one commitment persisted, should return error with other indices
require.NoError(t, as.Persist(1, scs[2]))
require.NoError(t, as.Persist(1, blobSidecars[2]))
err := as.IsDataAvailable(ctx, 1, blk)
require.ErrorIs(t, err, errMissingSidecar)

// All but one persisted, return missing idx
require.NoError(t, as.Persist(1, scs[0]))
require.NoError(t, as.Persist(1, blobSidecars[0]))
err = as.IsDataAvailable(ctx, 1, blk)
require.ErrorIs(t, err, errMissingSidecar)

// All persisted, return nil
require.NoError(t, as.Persist(1, scs...))
require.NoError(t, as.Persist(1, blobSidecars...))

require.NoError(t, as.IsDataAvailable(ctx, 1, blk))
}
@@ -149,10 +147,8 @@ func TestLazilyPersistent_Mismatch(t *testing.T) {
blobSidecars[0].KzgCommitment = bytesutil.PadTo([]byte("nope"), 48)
as := NewLazilyPersistentStore(store, mbv)

scs := blocks.NewSidecarsFromBlobSidecars(blobSidecars)

// Only one commitment persisted, should return error with other indices
require.NoError(t, as.Persist(1, scs[0]))
require.NoError(t, as.Persist(1, blobSidecars[0]))
err := as.IsDataAvailable(ctx, 1, blk)
require.NotNil(t, err)
require.ErrorIs(t, err, errCommitmentMismatch)
@@ -161,29 +157,25 @@
func TestLazyPersistOnceCommitted(t *testing.T) {
_, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 6)

scs := blocks.NewSidecarsFromBlobSidecars(blobSidecars)

as := NewLazilyPersistentStore(filesystem.NewEphemeralBlobStorage(t), &mockBlobBatchVerifier{})
// stashes as expected
require.NoError(t, as.Persist(1, scs...))
require.NoError(t, as.Persist(1, blobSidecars...))
// ignores duplicates
require.ErrorIs(t, as.Persist(1, scs...), ErrDuplicateSidecar)
require.ErrorIs(t, as.Persist(1, blobSidecars...), ErrDuplicateSidecar)

// ignores index out of bound
blobSidecars[0].Index = 6
require.ErrorIs(t, as.Persist(1, blocks.NewSidecarFromBlobSidecar(blobSidecars[0])), errIndexOutOfBounds)
require.ErrorIs(t, as.Persist(1, blobSidecars[0]), errIndexOutOfBounds)

_, moreBlobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 4)

more := blocks.NewSidecarsFromBlobSidecars(moreBlobSidecars)

// ignores sidecars before the retention period
slotOOB, err := slots.EpochStart(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
require.NoError(t, err)
require.NoError(t, as.Persist(32+slotOOB, more[0]))
require.NoError(t, as.Persist(32+slotOOB, moreBlobSidecars[0]))

// doesn't ignore new sidecars with a different block root
require.NoError(t, as.Persist(1, more...))
require.NoError(t, as.Persist(1, moreBlobSidecars...))
}

type mockBlobBatchVerifier struct {

@@ -1,213 +0,0 @@
package das

import (
"context"

"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/ethereum/go-ethereum/p2p/enode"
errors "github.com/pkg/errors"
)

// LazilyPersistentStoreColumn is an implementation of AvailabilityStore to be used when batch syncing data columns.
// This implementation will hold any data columns passed to Persist until the IsDataAvailable is called for their
// block, at which time they will undergo full verification and be saved to the disk.
type LazilyPersistentStoreColumn struct {
store *filesystem.DataColumnStorage
nodeID enode.ID
cache *dataColumnCache
newDataColumnsVerifier verification.NewDataColumnsVerifier
custodyGroupCount uint64
}

var _ AvailabilityStore = &LazilyPersistentStoreColumn{}

// DataColumnsVerifier enables LazilyPersistentStoreColumn to manage the verification process
// going from RODataColumn->VerifiedRODataColumn, while avoiding the decision of which individual verifications
// to run and in what order. Since LazilyPersistentStoreColumn always tries to verify and save data columns only when
// they are all available, the interface takes a slice of data column sidecars.
type DataColumnsVerifier interface {
VerifiedRODataColumns(ctx context.Context, blk blocks.ROBlock, scs []blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error)
}

// NewLazilyPersistentStoreColumn creates a new LazilyPersistentStoreColumn.
// WARNING: The resulting LazilyPersistentStoreColumn is NOT thread-safe.
func NewLazilyPersistentStoreColumn(
store *filesystem.DataColumnStorage,
nodeID enode.ID,
newDataColumnsVerifier verification.NewDataColumnsVerifier,
custodyGroupCount uint64,
) *LazilyPersistentStoreColumn {
return &LazilyPersistentStoreColumn{
store: store,
nodeID: nodeID,
cache: newDataColumnCache(),
newDataColumnsVerifier: newDataColumnsVerifier,
custodyGroupCount: custodyGroupCount,
}
}

// PersistColumns adds columns to the working column cache. Columns stored in this cache will be persisted
// for at least as long as the node is running. Once IsDataAvailable succeeds, all columns referenced
// by the given block are guaranteed to be persisted for the remainder of the retention period.
func (s *LazilyPersistentStoreColumn) Persist(current primitives.Slot, sidecars ...blocks.ROSidecar) error {
if len(sidecars) == 0 {
return nil
}

dataColumnSidecars, err := blocks.DataColumnSidecarsFromSidecars(sidecars)
if err != nil {
return errors.Wrap(err, "blob sidecars from sidecars")
}

// It is safe to retrieve the first sidecar.
firstSidecar := dataColumnSidecars[0]

if len(sidecars) > 1 {
firstRoot := firstSidecar.BlockRoot()
for _, sidecar := range dataColumnSidecars[1:] {
if sidecar.BlockRoot() != firstRoot {
return errMixedRoots
}
}
}

firstSidecarEpoch, currentEpoch := slots.ToEpoch(firstSidecar.Slot()), slots.ToEpoch(current)
if !params.WithinDAPeriod(firstSidecarEpoch, currentEpoch) {
return nil
}

key := cacheKey{slot: firstSidecar.Slot(), root: firstSidecar.BlockRoot()}
entry := s.cache.ensure(key)

for _, sidecar := range dataColumnSidecars {
if err := entry.stash(&sidecar); err != nil {
return errors.Wrap(err, "stash DataColumnSidecar")
}
}

return nil
}

// IsDataAvailable returns nil if all the commitments in the given block are persisted to the db and have been verified.
// DataColumnsSidecars already in the db are assumed to have been previously verified against the block.
func (s *LazilyPersistentStoreColumn) IsDataAvailable(ctx context.Context, currentSlot primitives.Slot, block blocks.ROBlock) error {
blockCommitments, err := s.fullCommitmentsToCheck(s.nodeID, block, currentSlot)
if err != nil {
return errors.Wrapf(err, "full commitments to check with block root `%#x` and current slot `%d`", block.Root(), currentSlot)
}

// Return early for blocks that do not have any commitments.
if blockCommitments.count() == 0 {
return nil
}

// Get the root of the block.
blockRoot := block.Root()

// Build the cache key for the block.
key := cacheKey{slot: block.Block().Slot(), root: blockRoot}

// Retrieve the cache entry for the block, or create an empty one if it doesn't exist.
entry := s.cache.ensure(key)

// Delete the cache entry for the block at the end.
defer s.cache.delete(key)

// Set the disk summary for the block in the cache entry.
entry.setDiskSummary(s.store.Summary(blockRoot))

// Verify we have all the expected sidecars, and fail fast if any are missing or inconsistent.
// We don't try to salvage problematic batches because this indicates a misbehaving peer and we'd rather
// ignore their response and decrease their peer score.
roDataColumns, err := entry.filter(blockRoot, blockCommitments)
if err != nil {
return errors.Wrap(err, "entry filter")
}

// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#datacolumnsidecarsbyrange-v1
verifier := s.newDataColumnsVerifier(roDataColumns, verification.ByRangeRequestDataColumnSidecarRequirements)

if err := verifier.ValidFields(); err != nil {
return errors.Wrap(err, "valid fields")
}

if err := verifier.SidecarInclusionProven(); err != nil {
return errors.Wrap(err, "sidecar inclusion proven")
}

if err := verifier.SidecarKzgProofVerified(); err != nil {
return errors.Wrap(err, "sidecar KZG proof verified")
}

verifiedRoDataColumns, err := verifier.VerifiedRODataColumns()
if err != nil {
return errors.Wrap(err, "verified RO data columns - should never happen")
}

if err := s.store.Save(verifiedRoDataColumns); err != nil {
return errors.Wrap(err, "save data column sidecars")
}

return nil
}

// fullCommitmentsToCheck returns the commitments to check for a given block.
func (s *LazilyPersistentStoreColumn) fullCommitmentsToCheck(nodeID enode.ID, block blocks.ROBlock, currentSlot primitives.Slot) (*safeCommitmentsArray, error) {
samplesPerSlot := params.BeaconConfig().SamplesPerSlot

// Return early for blocks that are pre-Fulu.
if block.Version() < version.Fulu {
return &safeCommitmentsArray{}, nil
}

// Compute the block epoch.
blockSlot := block.Block().Slot()
blockEpoch := slots.ToEpoch(blockSlot)

// Compute the current epoch.
currentEpoch := slots.ToEpoch(currentSlot)

// Return early if the request is out of the MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS window.
if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
return &safeCommitmentsArray{}, nil
}

// Retrieve the KZG commitments for the block.
kzgCommitments, err := block.Block().Body().BlobKzgCommitments()
if err != nil {
return nil, errors.Wrap(err, "blob KZG commitments")
}

// Return early if there are no commitments in the block.
if len(kzgCommitments) == 0 {
return &safeCommitmentsArray{}, nil
}

// Retrieve peer info.
samplingSize := max(s.custodyGroupCount, samplesPerSlot)
peerInfo, _, err := peerdas.Info(nodeID, samplingSize)
if err != nil {
return nil, errors.Wrap(err, "peer info")
}

// Create a safe commitments array for the custody columns.
commitmentsArray := &safeCommitmentsArray{}
commitmentsArraySize := uint64(len(commitmentsArray))

for column := range peerInfo.CustodyColumns {
if column >= commitmentsArraySize {
return nil, errors.Errorf("custody column index %d too high (max allowed %d) - should never happen", column, commitmentsArraySize)
}

commitmentsArray[column] = kzgCommitments
}

return commitmentsArray, nil
}
@@ -1,313 +0,0 @@
package das

import (
"context"
"testing"

"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/ethereum/go-ethereum/p2p/enode"
)

var commitments = [][]byte{
bytesutil.PadTo([]byte("a"), 48),
bytesutil.PadTo([]byte("b"), 48),
bytesutil.PadTo([]byte("c"), 48),
bytesutil.PadTo([]byte("d"), 48),
}

func TestPersist(t *testing.T) {
t.Run("no sidecars", func(t *testing.T) {
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
err := lazilyPersistentStoreColumns.Persist(0)
require.NoError(t, err)
require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
})

t.Run("mixed roots", func(t *testing.T) {
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)

dataColumnParamsByBlockRoot := []util.DataColumnParam{
{Slot: 1, Index: 1},
{Slot: 2, Index: 2},
}

roSidecars, _ := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)

err := lazilyPersistentStoreColumns.Persist(0, roSidecars...)
require.ErrorIs(t, err, errMixedRoots)
require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
})

t.Run("outside DA period", func(t *testing.T) {
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)

dataColumnParamsByBlockRoot := []util.DataColumnParam{
{Slot: 1, Index: 1},
}

roSidecars, _ := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)

err := lazilyPersistentStoreColumns.Persist(1_000_000, roSidecars...)
require.NoError(t, err)
require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
})

t.Run("nominal", func(t *testing.T) {
const slot = 42
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)

dataColumnParamsByBlockRoot := []util.DataColumnParam{
{Slot: slot, Index: 1},
{Slot: slot, Index: 5},
}

roSidecars, roDataColumns := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)

err := lazilyPersistentStoreColumns.Persist(slot, roSidecars...)
require.NoError(t, err)
require.Equal(t, 1, len(lazilyPersistentStoreColumns.cache.entries))

key := cacheKey{slot: slot, root: roDataColumns[0].BlockRoot()}
entry, ok := lazilyPersistentStoreColumns.cache.entries[key]
require.Equal(t, true, ok)

// A call to Persist does NOT save the sidecars to disk.
require.Equal(t, uint64(0), entry.diskSummary.Count())

require.DeepSSZEqual(t, roDataColumns[0], *entry.scs[1])
require.DeepSSZEqual(t, roDataColumns[1], *entry.scs[5])

for i, roDataColumn := range entry.scs {
if map[int]bool{1: true, 5: true}[i] {
continue
}

require.IsNil(t, roDataColumn)
}
})
}

func TestIsDataAvailable(t *testing.T) {
newDataColumnsVerifier := func(dataColumnSidecars []blocks.RODataColumn, _ []verification.Requirement) verification.DataColumnsVerifier {
return &mockDataColumnsVerifier{t: t, dataColumnSidecars: dataColumnSidecars}
}

ctx := t.Context()

t.Run("without commitments", func(t *testing.T) {
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)

dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, newDataColumnsVerifier, 0)

err := lazilyPersistentStoreColumns.IsDataAvailable(ctx, 0 /*current slot*/, signedRoBlock)
require.NoError(t, err)
})

t.Run("with commitments", func(t *testing.T) {
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
signedBeaconBlockFulu.Block.Body.BlobKzgCommitments = commitments
signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)
block := signedRoBlock.Block()
slot := block.Slot()
proposerIndex := block.ProposerIndex()
parentRoot := block.ParentRoot()
stateRoot := block.StateRoot()
bodyRoot, err := block.Body().HashTreeRoot()
require.NoError(t, err)

root := signedRoBlock.Root()

dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, newDataColumnsVerifier, 0)

indices := [...]uint64{1, 17, 19, 42, 75, 87, 102, 117}
dataColumnsParams := make([]util.DataColumnParam, 0, len(indices))
for _, index := range indices {
dataColumnParams := util.DataColumnParam{
Index: index,
KzgCommitments: commitments,

Slot: slot,
ProposerIndex: proposerIndex,
ParentRoot: parentRoot[:],
StateRoot: stateRoot[:],
BodyRoot: bodyRoot[:],
}

dataColumnsParams = append(dataColumnsParams, dataColumnParams)
}

_, verifiedRoDataColumns := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnsParams)

key := cacheKey{root: root}
entry := lazilyPersistentStoreColumns.cache.ensure(key)
defer lazilyPersistentStoreColumns.cache.delete(key)

for _, verifiedRoDataColumn := range verifiedRoDataColumns {
err := entry.stash(&verifiedRoDataColumn.RODataColumn)
require.NoError(t, err)
}

err = lazilyPersistentStoreColumns.IsDataAvailable(ctx, slot, signedRoBlock)
require.NoError(t, err)

actual, err := dataColumnStorage.Get(root, indices[:])
require.NoError(t, err)

summary := dataColumnStorage.Summary(root)
require.Equal(t, uint64(len(indices)), summary.Count())
require.DeepSSZEqual(t, verifiedRoDataColumns, actual)
})
}

func TestFullCommitmentsToCheck(t *testing.T) {
windowSlots, err := slots.EpochEnd(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
require.NoError(t, err)

testCases := []struct {
name string
commitments [][]byte
block func(*testing.T) blocks.ROBlock
slot primitives.Slot
}{
{
name: "Pre-Fulu block",
block: func(t *testing.T) blocks.ROBlock {
return newSignedRoBlock(t, util.NewBeaconBlockElectra())
},
},
{
name: "Commitments outside data availability window",
block: func(t *testing.T) blocks.ROBlock {
beaconBlockElectra := util.NewBeaconBlockElectra()

// Block is from slot 0, "current slot" is window size +1 (so outside the window)
beaconBlockElectra.Block.Body.BlobKzgCommitments = commitments

return newSignedRoBlock(t, beaconBlockElectra)
},
slot: windowSlots + 1,
},
{
name: "Commitments within data availability window",
block: func(t *testing.T) blocks.ROBlock {
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
signedBeaconBlockFulu.Block.Body.BlobKzgCommitments = commitments
signedBeaconBlockFulu.Block.Slot = 100

return newSignedRoBlock(t, signedBeaconBlockFulu)
},
commitments: commitments,
slot: 100,
},
}

for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
numberOfColumns := params.BeaconConfig().NumberOfColumns

b := tc.block(t)
s := NewLazilyPersistentStoreColumn(nil, enode.ID{}, nil, numberOfColumns)

commitmentsArray, err := s.fullCommitmentsToCheck(enode.ID{}, b, tc.slot)
require.NoError(t, err)

for _, commitments := range commitmentsArray {
require.DeepEqual(t, tc.commitments, commitments)
}
})
}
}

func roSidecarsFromDataColumnParamsByBlockRoot(t *testing.T, parameters []util.DataColumnParam) ([]blocks.ROSidecar, []blocks.RODataColumn) {
roDataColumns, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, parameters)

roSidecars := make([]blocks.ROSidecar, 0, len(roDataColumns))
for _, roDataColumn := range roDataColumns {
roSidecars = append(roSidecars, blocks.NewSidecarFromDataColumnSidecar(roDataColumn))
}

return roSidecars, roDataColumns
}

func newSignedRoBlock(t *testing.T, signedBeaconBlock interface{}) blocks.ROBlock {
sb, err := blocks.NewSignedBeaconBlock(signedBeaconBlock)
require.NoError(t, err)

rb, err := blocks.NewROBlock(sb)
require.NoError(t, err)

return rb
}

type mockDataColumnsVerifier struct {
t *testing.T
dataColumnSidecars []blocks.RODataColumn
validCalled, SidecarInclusionProvenCalled, SidecarKzgProofVerifiedCalled bool
}

var _ verification.DataColumnsVerifier = &mockDataColumnsVerifier{}

func (m *mockDataColumnsVerifier) VerifiedRODataColumns() ([]blocks.VerifiedRODataColumn, error) {
require.Equal(m.t, true, m.validCalled && m.SidecarInclusionProvenCalled && m.SidecarKzgProofVerifiedCalled)

verifiedDataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, len(m.dataColumnSidecars))
for _, dataColumnSidecar := range m.dataColumnSidecars {
verifiedDataColumnSidecar := blocks.NewVerifiedRODataColumn(dataColumnSidecar)
verifiedDataColumnSidecars = append(verifiedDataColumnSidecars, verifiedDataColumnSidecar)
}

return verifiedDataColumnSidecars, nil
}

func (m *mockDataColumnsVerifier) SatisfyRequirement(verification.Requirement) {}

func (m *mockDataColumnsVerifier) ValidFields() error {
m.validCalled = true
return nil
}

func (m *mockDataColumnsVerifier) CorrectSubnet(dataColumnSidecarSubTopic string, expectedTopics []string) error {
return nil
}
func (m *mockDataColumnsVerifier) NotFromFutureSlot() error { return nil }
func (m *mockDataColumnsVerifier) SlotAboveFinalized() error { return nil }
func (m *mockDataColumnsVerifier) ValidProposerSignature(ctx context.Context) error { return nil }

func (m *mockDataColumnsVerifier) SidecarParentSeen(parentSeen func([fieldparams.RootLength]byte) bool) error {
return nil
}

func (m *mockDataColumnsVerifier) SidecarParentValid(badParent func([fieldparams.RootLength]byte) bool) error {
return nil
}

func (m *mockDataColumnsVerifier) SidecarParentSlotLower() error { return nil }
func (m *mockDataColumnsVerifier) SidecarDescendsFromFinalized() error { return nil }

func (m *mockDataColumnsVerifier) SidecarInclusionProven() error {
m.SidecarInclusionProvenCalled = true
return nil
}

func (m *mockDataColumnsVerifier) SidecarKzgProofVerified() error {
m.SidecarKzgProofVerifiedCalled = true
return nil
}

func (m *mockDataColumnsVerifier) SidecarProposerExpected(ctx context.Context) error { return nil }
@@ -15,5 +15,5 @@ import (
// durably persisted before returning a non-error value.
type AvailabilityStore interface {
IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
Persist(current primitives.Slot, sc ...blocks.ROSidecar) error
Persist(current primitives.Slot, blobSidecar ...blocks.ROBlob) error
}

@@ -5,13 +5,12 @@ import (

"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
errors "github.com/pkg/errors"
)

// MockAvailabilityStore is an implementation of AvailabilityStore that can be used by other packages in tests.
type MockAvailabilityStore struct {
VerifyAvailabilityCallback func(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
PersistBlobsCallback func(current primitives.Slot, sc ...blocks.ROBlob) error
PersistBlobsCallback func(current primitives.Slot, blobSidecar ...blocks.ROBlob) error
}

var _ AvailabilityStore = &MockAvailabilityStore{}
@@ -25,13 +24,9 @@ func (m *MockAvailabilityStore) IsDataAvailable(ctx context.Context, current pri
}

// Persist satisfies the corresponding method of the AvailabilityStore interface in a way that is useful for tests.
func (m *MockAvailabilityStore) Persist(current primitives.Slot, sc ...blocks.ROSidecar) error {
blobSidecars, err := blocks.BlobSidecarsFromSidecars(sc)
if err != nil {
return errors.Wrap(err, "blob sidecars from sidecars")
}
func (m *MockAvailabilityStore) Persist(current primitives.Slot, blobSidecar ...blocks.ROBlob) error {
if m.PersistBlobsCallback != nil {
return m.PersistBlobsCallback(current, blobSidecars...)
return m.PersistBlobsCallback(current, blobSidecar...)
}
return nil
}

@@ -100,6 +100,14 @@ type (
}
)

// DataColumnStorageReader is an interface to read data column sidecars from the filesystem.
type DataColumnStorageReader interface {
Summary(root [fieldparams.RootLength]byte) DataColumnStorageSummary
Get(root [fieldparams.RootLength]byte, indices []uint64) ([]blocks.VerifiedRODataColumn, error)
}

var _ DataColumnStorageReader = &DataColumnStorage{}

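The new read-only interface lets consumers depend on just the Summary/Get surface instead of the concrete *DataColumnStorage. A hedged sketch of such a consumer (hasAllColumns is hypothetical; Summary and Get are the two methods added above, and Stored() on the summary type appears later in this diff):

package example

import (
	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
)

// hasAllColumns reports whether every requested column index is already on disk.
func hasAllColumns(r filesystem.DataColumnStorageReader, root [fieldparams.RootLength]byte, indices []uint64) bool {
	stored := r.Summary(root).Stored() // map of index -> present
	for _, idx := range indices {
		if !stored[idx] {
			return false
		}
	}
	return true
}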
// WithDataColumnBasePath is a required option that sets the base path of data column storage.
func WithDataColumnBasePath(base string) DataColumnStorageOption {
return func(b *DataColumnStorage) error {

@@ -84,12 +84,6 @@ func (s DataColumnStorageSummary) Stored() map[uint64]bool {
return stored
}

// DataColumnStorageSummarizer can be used to receive a summary of metadata about data columns on disk for a given root.
// The DataColumnStorageSummary can be used to check which indices (if any) are available for a given block by root.
type DataColumnStorageSummarizer interface {
Summary(root [fieldparams.RootLength]byte) DataColumnStorageSummary
}

type dataColumnStorageSummaryCache struct {
mu sync.RWMutex
dataColumnCount float64
@@ -98,8 +92,6 @@ type dataColumnStorageSummaryCache struct {
cache map[[fieldparams.RootLength]byte]DataColumnStorageSummary
}

var _ DataColumnStorageSummarizer = &dataColumnStorageSummaryCache{}

func newDataColumnStorageSummaryCache() *dataColumnStorageSummaryCache {
return &dataColumnStorageSummaryCache{
cache: make(map[[fieldparams.RootLength]byte]DataColumnStorageSummary),

@@ -116,19 +116,43 @@ func (l *periodicEpochLayout) pruneBefore(before primitives.Epoch) (*pruneSummar
}
// Roll up summaries and clean up per-epoch directories.
rollup := &pruneSummary{}

// Track which period directories might be empty after epoch removal
periodsToCheck := make(map[string]struct{})

for epoch, sum := range sums {
rollup.blobsPruned += sum.blobsPruned
rollup.failedRemovals = append(rollup.failedRemovals, sum.failedRemovals...)
rmdir := l.epochDir(epoch)
periodDir := l.periodDir(epoch)

if len(sum.failedRemovals) == 0 {
if err := l.fs.Remove(rmdir); err != nil {
log.WithField("dir", rmdir).WithError(err).Error("Failed to remove epoch directory while pruning")
} else {
periodsToCheck[periodDir] = struct{}{}
}
} else {
log.WithField("dir", rmdir).WithField("numFailed", len(sum.failedRemovals)).WithError(err).Error("Unable to remove epoch directory due to pruning failures")
}
}

// Clean up empty period directories
for periodDir := range periodsToCheck {
entries, err := afero.ReadDir(l.fs, periodDir)
if err != nil {
log.WithField("dir", periodDir).WithError(err).Debug("Failed to read period directory contents")
continue
}

// Only attempt to remove if directory is empty
if len(entries) == 0 {
if err := l.fs.Remove(periodDir); err != nil {
log.WithField("dir", periodDir).WithError(err).Error("Failed to remove empty period directory")
}
}
}

return rollup, nil
}


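The hunk above follows a common two-pass pattern: delete leaf (epoch) directories first, remember their parents, then remove a parent (period) directory only once it is observed empty. A generic sketch of that pattern with afero, under the assumption that a concurrent writer may repopulate a directory between the read and the remove (in which case Remove simply fails and the directory survives):

package example

import "github.com/spf13/afero"

// removeEmptyDirs removes each candidate directory only if it has no entries.
// Read errors are tolerated: the directory may already be gone.
func removeEmptyDirs(fs afero.Fs, candidates map[string]struct{}) {
	for dir := range candidates {
		entries, err := afero.ReadDir(fs, dir)
		if err != nil || len(entries) > 0 {
			continue
		}
		_ = fs.Remove(dir) // best effort; a late write makes Remove fail safely
	}
}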
@@ -144,14 +144,3 @@ func NewEphemeralDataColumnStorageWithMocker(t testing.TB) (*DataColumnMocker, *
fs, dcs := NewEphemeralDataColumnStorageAndFs(t)
return &DataColumnMocker{fs: fs, dcs: dcs}, dcs
}

func NewMockDataColumnStorageSummarizer(t *testing.T, set map[[fieldparams.RootLength]byte][]uint64) DataColumnStorageSummarizer {
c := newDataColumnStorageSummaryCache()
for root, indices := range set {
if err := c.set(DataColumnsIdent{Root: root, Epoch: 0, Indices: indices}); err != nil {
t.Fatal(err)
}
}

return c
}

@@ -4,6 +4,7 @@ import (
"encoding/binary"
"os"
"testing"
"time"

"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v6/config/params"
@@ -195,3 +196,48 @@ func TestLayoutPruneBefore(t *testing.T) {
})
}
}

func TestLayoutByEpochPruneBefore(t *testing.T) {
roots := testRoots(10)
cases := []struct {
name string
pruned []testIdent
remain []testIdent
err error
sum pruneSummary
}{
{
name: "single epoch period cleanup",
pruned: []testIdent{
{offset: 0, blobIdent: blobIdent{root: roots[0], epoch: 367076, index: 0}},
},
remain: []testIdent{
{offset: 0, blobIdent: blobIdent{root: roots[1], epoch: 371176, index: 0}}, // Different period
},
sum: pruneSummary{blobsPruned: 1},
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
fs, bs := NewEphemeralBlobStorageAndFs(t, WithLayout(LayoutNameByEpoch))
pruned := testSetupBlobIdentPaths(t, fs, bs, c.pruned)
remain := testSetupBlobIdentPaths(t, fs, bs, c.remain)

time.Sleep(1 * time.Second)
for _, id := range pruned {
_, err := fs.Stat(bs.layout.sszPath(id))
require.Equal(t, true, os.IsNotExist(err))

dirs := bs.layout.blockParentDirs(id)
for i := len(dirs) - 1; i > 0; i-- {
_, err = fs.Stat(dirs[i])
require.Equal(t, true, os.IsNotExist(err))
}
}
for _, id := range remain {
_, err := fs.Stat(bs.layout.sszPath(id))
require.NoError(t, err)
}
})
}
}

@@ -115,6 +115,17 @@ type NoHeadAccessDatabase interface {
CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error
DeleteHistoricalDataBeforeSlot(ctx context.Context, slot primitives.Slot, batchSize int) (int, error)

// Genesis operations.
LoadGenesis(ctx context.Context, stateBytes []byte) error
SaveGenesisData(ctx context.Context, state state.BeaconState) error
EnsureEmbeddedGenesis(ctx context.Context) error

// Support for checkpoint sync and backfill.
SaveOriginCheckpointBlockRoot(ctx context.Context, blockRoot [32]byte) error
SaveOrigin(ctx context.Context, serState, serBlock []byte) error
SaveBackfillStatus(context.Context, *dbval.BackfillStatus) error
BackfillFinalizedIndex(ctx context.Context, blocks []blocks.ROBlock, finalizedChildRoot [32]byte) error

// Custody operations.
UpdateSubscribedToAllDataSubnets(ctx context.Context, subscribed bool) (bool, error)
UpdateCustodyInfo(ctx context.Context, earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error)
@@ -131,16 +142,6 @@ type HeadAccessDatabase interface {
HeadBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, error)
HeadBlockRoot() ([32]byte, error)
SaveHeadBlockRoot(ctx context.Context, blockRoot [32]byte) error

// Genesis operations.
LoadGenesis(ctx context.Context, stateBytes []byte) error
SaveGenesisData(ctx context.Context, state state.BeaconState) error
EnsureEmbeddedGenesis(ctx context.Context) error

// Support for checkpoint sync and backfill.
SaveOrigin(ctx context.Context, serState, serBlock []byte) error
SaveBackfillStatus(context.Context, *dbval.BackfillStatus) error
BackfillFinalizedIndex(ctx context.Context, blocks []blocks.ROBlock, finalizedChildRoot [32]byte) error
}

// SlasherDatabase interface for persisting data related to detecting slashable offenses on Ethereum.

@@ -318,6 +318,7 @@ func startBaseServices(cliCtx *cli.Context, beacon *BeaconNode, depositAddress s
}

beacon.BlobStorage.WarmCache()
beacon.DataColumnStorage.WarmCache()

log.Debugln("Starting Slashing DB")
if err := beacon.startSlasherDB(cliCtx, clearer); err != nil {
@@ -620,35 +621,55 @@ func (b *BeaconNode) startStateGen(ctx context.Context, bfs coverage.AvailableBl
return nil
}

func parseIPNetStrings(ipWhitelist []string) ([]*net.IPNet, error) {
ipNets := make([]*net.IPNet, 0, len(ipWhitelist))
for _, cidr := range ipWhitelist {
_, ipNet, err := net.ParseCIDR(cidr)
if err != nil {
log.WithError(err).WithField("cidr", cidr).Error("Invalid CIDR in IP colocation whitelist")
return nil, err
}
ipNets = append(ipNets, ipNet)
log.WithField("cidr", cidr).Info("Added IP to colocation whitelist")
}
return ipNets, nil
}

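parseIPNetStrings leans entirely on the standard library's net.ParseCIDR, so the whitelist granularity is whatever the mask says: a single host must be listed as /32 (or /128 for IPv6), while a wider mask admits the whole subnet. A standalone illustration (the addresses are arbitrary):

package main

import (
	"fmt"
	"net"
)

func main() {
	_, single, _ := net.ParseCIDR("34.42.19.170/32") // exactly one host
	_, subnet, _ := net.ParseCIDR("192.168.1.0/24")  // 256 addresses

	fmt.Println(single.Contains(net.ParseIP("34.42.19.170"))) // true
	fmt.Println(subnet.Contains(net.ParseIP("192.168.1.77"))) // true
	fmt.Println(subnet.Contains(net.ParseIP("192.168.2.1")))  // false: outside the /24
}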
func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
bootstrapNodeAddrs, dataDir, err := registration.P2PPreregistration(cliCtx)
if err != nil {
return errors.Wrapf(err, "could not register p2p service")
}

colocationWhitelist, err := parseIPNetStrings(slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.P2PColocationWhitelist.Name)))
if err != nil {
return fmt.Errorf("failed to register p2p service: %w", err)
}

svc, err := p2p.NewService(b.ctx, &p2p.Config{
NoDiscovery: cliCtx.Bool(cmd.NoDiscovery.Name),
StaticPeers: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.StaticPeers.Name)),
Discv5BootStrapAddrs: p2p.ParseBootStrapAddrs(bootstrapNodeAddrs),
RelayNodeAddr: cliCtx.String(cmd.RelayNode.Name),
DataDir: dataDir,
DiscoveryDir: filepath.Join(dataDir, "discovery"),
LocalIP: cliCtx.String(cmd.P2PIP.Name),
HostAddress: cliCtx.String(cmd.P2PHost.Name),
HostDNS: cliCtx.String(cmd.P2PHostDNS.Name),
PrivateKey: cliCtx.String(cmd.P2PPrivKey.Name),
StaticPeerID: cliCtx.Bool(cmd.P2PStaticID.Name),
QUICPort: cliCtx.Uint(cmd.P2PQUICPort.Name),
TCPPort: cliCtx.Uint(cmd.P2PTCPPort.Name),
UDPPort: cliCtx.Uint(cmd.P2PUDPPort.Name),
MaxPeers: cliCtx.Uint(cmd.P2PMaxPeers.Name),
QueueSize: cliCtx.Uint(cmd.PubsubQueueSize.Name),
AllowListCIDR: cliCtx.String(cmd.P2PAllowList.Name),
DenyListCIDR: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.P2PDenyList.Name)),
EnableUPnP: cliCtx.Bool(cmd.EnableUPnPFlag.Name),
StateNotifier: b,
DB: b.db,
ClockWaiter: b.clockWaiter,
NoDiscovery: cliCtx.Bool(cmd.NoDiscovery.Name),
StaticPeers: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.StaticPeers.Name)),
Discv5BootStrapAddrs: p2p.ParseBootStrapAddrs(bootstrapNodeAddrs),
RelayNodeAddr: cliCtx.String(cmd.RelayNode.Name),
DataDir: dataDir,
DiscoveryDir: filepath.Join(dataDir, "discovery"),
LocalIP: cliCtx.String(cmd.P2PIP.Name),
HostAddress: cliCtx.String(cmd.P2PHost.Name),
HostDNS: cliCtx.String(cmd.P2PHostDNS.Name),
PrivateKey: cliCtx.String(cmd.P2PPrivKey.Name),
StaticPeerID: cliCtx.Bool(cmd.P2PStaticID.Name),
QUICPort: cliCtx.Uint(cmd.P2PQUICPort.Name),
TCPPort: cliCtx.Uint(cmd.P2PTCPPort.Name),
UDPPort: cliCtx.Uint(cmd.P2PUDPPort.Name),
MaxPeers: cliCtx.Uint(cmd.P2PMaxPeers.Name),
QueueSize: cliCtx.Uint(cmd.PubsubQueueSize.Name),
AllowListCIDR: cliCtx.String(cmd.P2PAllowList.Name),
DenyListCIDR: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.P2PDenyList.Name)),
IPColocationWhitelist: colocationWhitelist,
EnableUPnP: cliCtx.Bool(cmd.EnableUPnPFlag.Name),
StateNotifier: b,
DB: b.db,
ClockWaiter: b.clockWaiter,
})
if err != nil {
return err
@@ -845,6 +866,7 @@ func (b *BeaconNode) registerInitialSyncService(complete chan struct{}) error {
ClockWaiter: b.clockWaiter,
InitialSyncComplete: complete,
BlobStorage: b.BlobStorage,
DataColumnStorage: b.DataColumnStorage,
}, opts...)
return b.services.RegisterService(is)
}
@@ -939,6 +961,7 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
FinalizationFetcher: chainService,
BlockReceiver: chainService,
BlobReceiver: chainService,
DataColumnReceiver: chainService,
AttestationReceiver: chainService,
GenesisTimeFetcher: chainService,
GenesisFetcher: chainService,
@@ -966,6 +989,7 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
Router: router,
ClockWaiter: b.clockWaiter,
BlobStorage: b.BlobStorage,
DataColumnStorage: b.DataColumnStorage,
TrackedValidatorsCache: b.trackedValidatorsCache,
PayloadIDCache: b.payloadIDCache,
LCStore: b.lcStore,

@@ -262,3 +262,46 @@ func TestCORS(t *testing.T) {
})
}
}

func TestParseIPNetStrings(t *testing.T) {
tests := []struct {
name string
whitelist []string
wantCount int
wantError string
}{
{
name: "empty whitelist",
whitelist: []string{},
wantCount: 0,
},
{
name: "single IP whitelist",
whitelist: []string{"192.168.1.1/32"},
wantCount: 1,
},
{
name: "multiple IPs whitelist",
whitelist: []string{"192.168.1.0/24", "10.0.0.0/8", "34.42.19.170/32"},
wantCount: 3,
},
{
name: "invalid CIDR returns error",
whitelist: []string{"192.168.1.0/24", "invalid-cidr", "10.0.0.0/8"},
wantCount: 0,
wantError: "invalid CIDR address",
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result, err := parseIPNetStrings(tt.whitelist)
assert.Equal(t, tt.wantCount, len(result))
if len(tt.wantError) == 0 {
assert.Equal(t, nil, err)
} else {
assert.ErrorContains(t, tt.wantError, err)
}
})
}
}

@@ -95,6 +95,7 @@ go_library(
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
"@com_github_libp2p_go_libp2p//core/peerstore:go_default_library",
"@com_github_libp2p_go_libp2p//core/protocol:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/net/connmgr:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/security/noise:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/transport/quic:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/transport/tcp:go_default_library",
@@ -184,6 +185,7 @@ go_test(
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
"@com_github_golang_snappy//:go_default_library",
"@com_github_libp2p_go_libp2p//:go_default_library",
"@com_github_libp2p_go_libp2p//core/connmgr:go_default_library",
"@com_github_libp2p_go_libp2p//core/crypto:go_default_library",
"@com_github_libp2p_go_libp2p//core/host:go_default_library",
"@com_github_libp2p_go_libp2p//core/network:go_default_library",
@@ -195,7 +197,6 @@ go_test(
"@com_github_multiformats_go_multiaddr//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],

@@ -123,9 +123,6 @@ func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint6
if !hasPeer {
attestationBroadcastAttempts.Inc()
if err := func() error {
s.subnetLocker(subnet).Lock()
defer s.subnetLocker(subnet).Unlock()

if err := s.FindAndDialPeersWithSubnets(ctx, AttestationSubnetTopicFormat, forkDigest, minimumPeersPerSubnetForBroadcast, map[uint64]bool{subnet: true}); err != nil {
return errors.Wrap(err, "find peers with subnets")
}
@@ -305,15 +302,15 @@ func (s *Service) BroadcastLightClientFinalityUpdate(ctx context.Context, update
return nil
}

// BroadcastDataColumn broadcasts a data column to the p2p network, the message is assumed to be
// BroadcastDataColumnSidecar broadcasts a data column to the p2p network, the message is assumed to be
// broadcasted to the current fork and to the input column subnet.
func (s *Service) BroadcastDataColumn(
func (s *Service) BroadcastDataColumnSidecar(
root [fieldparams.RootLength]byte,
dataColumnSubnet uint64,
dataColumnSidecar *ethpb.DataColumnSidecar,
) error {
// Add tracing to the function.
ctx, span := trace.StartSpan(s.ctx, "p2p.BroadcastDataColumn")
ctx, span := trace.StartSpan(s.ctx, "p2p.BroadcastDataColumnSidecar")
defer span.End()

// Ensure the data column sidecar is not nil.
@@ -330,12 +327,12 @@ func (s *Service) BroadcastDataColumn(
}

// Non-blocking broadcast, with attempts to discover a column subnet peer if none available.
go s.internalBroadcastDataColumn(ctx, root, dataColumnSubnet, dataColumnSidecar, forkDigest)
go s.internalBroadcastDataColumnSidecar(ctx, root, dataColumnSubnet, dataColumnSidecar, forkDigest)

return nil
}

func (s *Service) internalBroadcastDataColumn(
func (s *Service) internalBroadcastDataColumnSidecar(
ctx context.Context,
root [fieldparams.RootLength]byte,
columnSubnet uint64,
@@ -343,7 +340,7 @@ func (s *Service) internalBroadcastDataColumn(
forkDigest [fieldparams.VersionLength]byte,
) {
// Add tracing to the function.
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastDataColumn")
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastDataColumnSidecar")
defer span.End()

// Increase the number of broadcast attempts.

@@ -716,7 +716,7 @@ func TestService_BroadcastDataColumn(t *testing.T) {

// Attempt to broadcast nil object should fail.
var emptyRoot [fieldparams.RootLength]byte
err = service.BroadcastDataColumn(emptyRoot, subnet, nil)
err = service.BroadcastDataColumnSidecar(emptyRoot, subnet, nil)
require.ErrorContains(t, "attempted to broadcast nil", err)

// Subscribe to the topic.
@@ -727,7 +727,7 @@ func TestService_BroadcastDataColumn(t *testing.T) {
time.Sleep(50 * time.Millisecond)

// Broadcast to peers and wait.
err = service.BroadcastDataColumn(emptyRoot, subnet, sidecar)
err = service.BroadcastDataColumnSidecar(emptyRoot, subnet, sidecar)
require.NoError(t, err)

// Receive the message.

@@ -1,6 +1,7 @@
package p2p

import (
"net"
"time"

statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
@@ -10,34 +11,56 @@ import (

// This is the default queue size used if we have specified an invalid one.
const defaultPubsubQueueSize = 600
const (
// defaultConnManagerPruneAbove sets the number of peers where ConnectionManager
// will begin to internally prune peers. This value is set based on the internal
// value of the libp2p DefaultConnectionManager "high water mark". The "low water mark"
// is the number of peers where ConnManager will stop pruning. This value is computed
// by subtracting connManagerPruneAmount from the high water mark.
defaultConnManagerPruneAbove = 192
connManagerPruneAmount = 32
)

// Config for the p2p service. These parameters are set from application level flags
// to initialize the p2p service.
type Config struct {
NoDiscovery bool
EnableUPnP bool
StaticPeerID bool
DisableLivenessCheck bool
StaticPeers []string
Discv5BootStrapAddrs []string
RelayNodeAddr string
LocalIP string
HostAddress string
HostDNS string
PrivateKey string
DataDir string
DiscoveryDir string
QUICPort uint
TCPPort uint
UDPPort uint
PingInterval time.Duration
MaxPeers uint
QueueSize uint
AllowListCIDR string
DenyListCIDR []string
StateNotifier statefeed.Notifier
DB db.ReadOnlyDatabaseWithSeqNum
ClockWaiter startup.ClockWaiter
NoDiscovery bool
EnableUPnP bool
StaticPeerID bool
DisableLivenessCheck bool
StaticPeers []string
Discv5BootStrapAddrs []string
RelayNodeAddr string
LocalIP string
HostAddress string
HostDNS string
PrivateKey string
DataDir string
DiscoveryDir string
QUICPort uint
TCPPort uint
UDPPort uint
PingInterval time.Duration
MaxPeers uint
QueueSize uint
AllowListCIDR string
DenyListCIDR []string
IPColocationWhitelist []*net.IPNet
StateNotifier statefeed.Notifier
DB db.ReadOnlyDatabaseWithSeqNum
ClockWaiter startup.ClockWaiter
}

// connManagerLowHigh picks the low and high water marks for the connection manager based
// on the MaxPeers setting. The high water mark will be at least the default high water mark
// (192), or MaxPeers + 32, whichever is higher. The low water mark is set to be 32 less than
// the high water mark. This is done to ensure the ConnManager never prunes peers that the
// node has connected to based on the MaxPeers setting.
func (cfg *Config) connManagerLowHigh() (int, int) {
maxPeersPlusMargin := int(cfg.MaxPeers) + connManagerPruneAmount
high := max(maxPeersPlusMargin, defaultConnManagerPruneAbove)
low := high - connManagerPruneAmount
return low, high
}

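To make the watermark math concrete, here is a small self-contained check of connManagerLowHigh's behavior in the two interesting regimes (the constants mirror the ones defined above; the function is re-derived here for illustration only):

package main

import "fmt"

const (
	defaultConnManagerPruneAbove = 192
	connManagerPruneAmount       = 32
)

// lowHigh reproduces the logic of connManagerLowHigh.
func lowHigh(maxPeers int) (low, high int) {
	high = max(maxPeers+connManagerPruneAmount, defaultConnManagerPruneAbove)
	return high - connManagerPruneAmount, high
}

func main() {
	fmt.Println(lowHigh(70))  // 160 192: small nodes keep the libp2p default window
	fmt.Println(lowHigh(500)) // 500 532: low never dips below MaxPeers, so wanted peers are not pruned
}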
// validateConfig validates whether the values provided are accurate and will set
@@ -155,6 +155,7 @@ func (s *Service) custodyGroupCountFromPeerENR(pid peer.ID) uint64 {
log := log.WithFields(logrus.Fields{
"peerID": pid,
"defaultValue": custodyRequirement,
"agent": agentString(pid, s.Host()),
})

// Retrieve the ENR of the peer.

@@ -8,6 +8,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
testp2p "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/consensus-types/wrapper"
@@ -269,6 +270,7 @@ func TestCustodyGroupCountFromPeer(t *testing.T) {
service := &Service{
peers: peers,
metaData: tc.metadata,
host: testp2p.NewTestP2P(t).Host(),
}

// Retrieve the custody count from the remote peer.
@@ -329,6 +331,7 @@ func TestCustodyGroupCountFromPeerENR(t *testing.T) {

service := &Service{
peers: peers,
host: testp2p.NewTestP2P(t).Host(),
}

actual := service.custodyGroupCountFromPeerENR(pid)

@@ -443,20 +443,27 @@ func (s *Service) findPeers(ctx context.Context, missingPeerCount uint) ([]*enod
return peersToDial, ctx.Err()
}

// Skip peer not matching the filter.
node := iterator.Node()
if !s.filterPeer(node) {
continue
}

// Remove duplicates, keeping the node with higher seq.
existing, ok := nodeByNodeID[node.ID()]
if ok && existing.Seq() > node.Seq() {
if ok && existing.Seq() >= node.Seq() {
continue // keep existing and skip.
}

// Treat nodes that exist in nodeByNodeID with higher seq numbers as new peers
// Skip peer not matching the filter.
if !s.filterPeer(node) {
if ok {
// this means the existing peer with the lower sequence number is no longer valid
delete(nodeByNodeID, existing.ID())
missingPeerCount++
}
continue
}
nodeByNodeID[node.ID()] = node

// We found a new peer. Decrease the missing peer count.
nodeByNodeID[node.ID()] = node
missingPeerCount--
}

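The reordered loop above fixes a subtle interaction between filtering and deduplication: records are now deduplicated by ENR sequence number first, and a newer record that fails the filter also evicts the stale entry it superseded, restoring the missing-peer budget. A condensed sketch of that rule, with the discv5 iterator flattened into a slice for clarity (dedupeBySeq is illustrative, not the production function):

package example

import "github.com/ethereum/go-ethereum/p2p/enode"

// dedupeBySeq keeps, per node ID, the highest-seq record that passes the filter.
func dedupeBySeq(found []*enode.Node, valid func(*enode.Node) bool) map[enode.ID]*enode.Node {
	best := make(map[enode.ID]*enode.Node)
	for _, n := range found {
		prev, ok := best[n.ID()]
		if ok && prev.Seq() >= n.Seq() {
			continue // existing record is at least as fresh; keep it
		}
		if !valid(n) {
			if ok {
				// The fresher record invalidates the stale one kept earlier.
				delete(best, n.ID())
			}
			continue
		}
		best[n.ID()] = n
	}
	return best
}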
@@ -677,7 +684,7 @@ func (s *Service) filterPeer(node *enode.Node) bool {

peerData, multiAddrs, err := convertToAddrInfo(node)
if err != nil {
log.WithError(err).Debug("Could not convert to peer data")
log.WithError(err).WithField("node", node.String()).Debug("Could not convert to peer data")
return false
}

@@ -844,7 +851,7 @@ func convertToMultiAddr(nodes []*enode.Node) []ma.Multiaddr {
func convertToAddrInfo(node *enode.Node) (*peer.AddrInfo, []ma.Multiaddr, error) {
multiAddrs, err := retrieveMultiAddrsFromNode(node)
if err != nil {
return nil, nil, err
return nil, nil, errors.Wrap(err, "retrieve multiaddrs from node")
}

if len(multiAddrs) == 0 {

@@ -1,9 +1,11 @@
|
||||
package p2p
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
mathRand "math/rand"
|
||||
"net"
|
||||
@@ -58,6 +60,81 @@ func createAddrAndPrivKey(t *testing.T) (net.IP, *ecdsa.PrivateKey) {
|
||||
return ipAddr, pkey
|
||||
}
|
||||
|
// createTestNodeWithID creates a LocalNode for testing with a deterministic private key.
// This is needed for deduplication tests where we need the same node ID across different sequence numbers.
func createTestNodeWithID(t *testing.T, id string) *enode.LocalNode {
// Create a deterministic seed based on the ID for consistent key generation.
h := sha256.New()
h.Write([]byte(id))
seedBytes := h.Sum(nil)

// Create a deterministic reader using the seed.
deterministicReader := bytes.NewReader(seedBytes)

// Generate the private key using the same approach as the production code.
privKey, _, err := crypto.GenerateSecp256k1Key(deterministicReader)
require.NoError(t, err)

// Convert to an ECDSA private key for enode usage.
ecdsaPrivKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(privKey)
require.NoError(t, err)

db, err := enode.OpenDB("")
require.NoError(t, err)
t.Cleanup(func() { db.Close() })

localNode := enode.NewLocalNode(db, ecdsaPrivKey)

// Set basic properties.
localNode.SetStaticIP(net.ParseIP("127.0.0.1"))
localNode.Set(enr.TCP(3000))
localNode.Set(enr.UDP(3000))
localNode.Set(enr.WithEntry(eth2EnrKey, make([]byte, 16)))

return localNode
}

// createTestNodeRandom creates a LocalNode for testing using the existing createAddrAndPrivKey function.
func createTestNodeRandom(t *testing.T) *enode.LocalNode {
_, privKey := createAddrAndPrivKey(t)

db, err := enode.OpenDB("")
require.NoError(t, err)
t.Cleanup(func() { db.Close() })

localNode := enode.NewLocalNode(db, privKey)

// Set basic properties.
localNode.SetStaticIP(net.ParseIP("127.0.0.1"))
localNode.Set(enr.TCP(3000))
localNode.Set(enr.UDP(3000))
localNode.Set(enr.WithEntry(eth2EnrKey, make([]byte, 16)))

return localNode
}

// setNodeSeq updates a LocalNode to have the specified sequence number.
func setNodeSeq(localNode *enode.LocalNode, seq uint64) {
// Force-set the sequence number: a new record starts at seq 1, and each
// update increments it, so we keep updating until the target is reached.
currentSeq := localNode.Node().Seq()
for currentSeq < seq {
localNode.Set(enr.WithEntry("dummy", currentSeq))
currentSeq++
}
}

// setNodeSubnets sets the attestation subnets for a LocalNode.
func setNodeSubnets(localNode *enode.LocalNode, attSubnets []uint64) {
if len(attSubnets) > 0 {
bitV := bitfield.NewBitvector64()
for _, subnet := range attSubnets {
bitV.SetBitAt(subnet, true)
}
localNode.Set(enr.WithEntry(attSubnetEnrKey, &bitV))
}
}

func TestCreateListener(t *testing.T) {
port := 1024
ipAddr, pkey := createAddrAndPrivKey(t)
@@ -241,7 +318,7 @@ func TestCreateLocalNode(t *testing.T) {

// Check fork is set.
fork := new([]byte)
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(eth2ENRKey, fork)))
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(eth2EnrKey, fork)))
require.NotEmpty(t, *fork)

// Check att subnets.
@@ -492,7 +569,7 @@ func TestMultipleDiscoveryAddresses(t *testing.T) {
node := enode.NewLocalNode(db, key)
node.Set(enr.IPv4{127, 0, 0, 1})
node.Set(enr.IPv6{0x20, 0x01, 0x48, 0x60, 0, 0, 0x20, 0x01, 0, 0, 0, 0, 0, 0, 0x00, 0x68})
s := &Service{dv5Listener: mockListener{localNode: node}}
s := &Service{dv5Listener: testp2p.NewMockListener(node, nil)}

multiAddresses, err := s.DiscoveryAddresses()
require.NoError(t, err)
@@ -517,7 +594,7 @@ func TestDiscoveryV5_SeqNumber(t *testing.T) {
node := enode.NewLocalNode(db, key)
node.Set(enr.IPv4{127, 0, 0, 1})
currentSeq := node.Seq()
s := &Service{dv5Listener: mockListener{localNode: node}}
s := &Service{dv5Listener: testp2p.NewMockListener(node, nil)}
_, err = s.DiscoveryAddresses()
require.NoError(t, err)
newSeq := node.Seq()
@@ -529,7 +606,7 @@ func TestDiscoveryV5_SeqNumber(t *testing.T) {
nodeTwo.Set(enr.IPv6{0x20, 0x01, 0x48, 0x60, 0, 0, 0x20, 0x01, 0, 0, 0, 0, 0, 0, 0x00, 0x68})
seqTwo := nodeTwo.Seq()
assert.NotEqual(t, seqTwo, newSeq)
sTwo := &Service{dv5Listener: mockListener{localNode: nodeTwo}}
sTwo := &Service{dv5Listener: testp2p.NewMockListener(nodeTwo, nil)}
_, err = sTwo.DiscoveryAddresses()
require.NoError(t, err)
assert.Equal(t, seqTwo+1, nodeTwo.Seq())
@@ -886,3 +963,289 @@ func TestRefreshPersistentSubnets(t *testing.T) {
// Reset the config.
params.OverrideBeaconConfig(defaultCfg)
}

func TestFindPeers_NodeDeduplication(t *testing.T) {
params.SetupTestConfigCleanup(t)
cache.SubnetIDs.EmptyAllCaches()
defer cache.SubnetIDs.EmptyAllCaches()

ctx := t.Context()

// Create LocalNodes and manipulate sequence numbers
localNode1 := createTestNodeWithID(t, "node1")
localNode2 := createTestNodeWithID(t, "node2")
localNode3 := createTestNodeWithID(t, "node3")

// Create different sequence versions of node1
setNodeSeq(localNode1, 1)
node1_seq1 := localNode1.Node()
setNodeSeq(localNode1, 2)
node1_seq2 := localNode1.Node() // Same ID, higher seq
setNodeSeq(localNode1, 3)
node1_seq3 := localNode1.Node() // Same ID, even higher seq

// Other nodes with seq 1
node2_seq1 := localNode2.Node()
node3_seq1 := localNode3.Node()

tests := []struct {
name string
nodes []*enode.Node
missingPeers uint
expectedCount int
description string
eval func(t *testing.T, result []*enode.Node)
}{
{
name: "No duplicates - all unique nodes",
nodes: []*enode.Node{
node2_seq1,
node3_seq1,
},
missingPeers: 2,
expectedCount: 2,
description: "Should return all unique nodes without deduplication",
eval: nil, // No special validation needed
},
{
name: "Duplicate with lower seq comes first - should replace",
nodes: []*enode.Node{
node1_seq1,
node1_seq2, // Higher seq, should replace
node2_seq1, // Different node added after duplicates are processed
},
missingPeers: 2, // Need 2 peers so we process all nodes
expectedCount: 2, // Should get node1 (with higher seq) and node2
description: "Should keep node with higher sequence number when duplicate found",
eval: func(t *testing.T, result []*enode.Node) {
// Should have node2 and node1 with higher seq (node1_seq2)
foundNode1WithHigherSeq := false
for _, node := range result {
if node.ID() == node1_seq2.ID() {
require.Equal(t, node1_seq2.Seq(), node.Seq(), "Node1 should have higher seq")
foundNode1WithHigherSeq = true
}
}
require.Equal(t, true, foundNode1WithHigherSeq, "Should have node1 with higher seq")
},
},
{
name: "Duplicate with higher seq comes first - should keep existing",
nodes: []*enode.Node{
node1_seq3, // Higher seq
node1_seq2, // Lower seq, should be skipped (continue branch)
node1_seq1, // Even lower seq, should also be skipped (continue branch)
node2_seq1, // Different node added after duplicates are processed
},
missingPeers: 2,
expectedCount: 2,
description: "Should keep existing node when it has higher sequence number and skip all lower seq duplicates",
eval: func(t *testing.T, result []*enode.Node) {
// Should have kept the node with highest seq (node1_seq3)
foundNode1WithHigherSeq := false
for _, node := range result {
if node.ID() == node1_seq3.ID() {
require.Equal(t, node1_seq3.Seq(), node.Seq(), "Node1 should have highest seq")
foundNode1WithHigherSeq = true
}
}
require.Equal(t, true, foundNode1WithHigherSeq, "Should have node1 with highest seq")
},
},
{
name: "Multiple duplicates with increasing seq",
nodes: []*enode.Node{
node1_seq1,
node1_seq2, // Should replace seq1
node1_seq3, // Should replace seq2
node2_seq1, // Different node added after duplicates are processed
},
missingPeers: 2,
expectedCount: 2,
description: "Should keep updating to highest sequence number",
eval: func(t *testing.T, result []*enode.Node) {
// Should have the node with highest seq (node1_seq3)
foundNode1WithHigherSeq := false
for _, node := range result {
if node.ID() == node1_seq3.ID() {
require.Equal(t, node1_seq3.Seq(), node.Seq(), "Node1 should have highest seq")
foundNode1WithHigherSeq = true
}
}
require.Equal(t, true, foundNode1WithHigherSeq, "Should have node1 with highest seq")
},
},
{
name: "Duplicate with equal seq comes after - should skip",
nodes: []*enode.Node{
node1_seq2, // First occurrence
node1_seq2, // Same exact node instance, should be skipped (continue branch for >= case)
node2_seq1, // Different node
},
missingPeers: 2,
expectedCount: 2,
description: "Should skip duplicate with equal sequence number",
eval: func(t *testing.T, result []*enode.Node) {
// Should have exactly one instance of node1_seq2 and one instance of node2_seq1
foundNode1 := false
foundNode2 := false
for _, node := range result {
if node.ID() == node1_seq2.ID() {
require.Equal(t, node1_seq2.Seq(), node.Seq(), "Node1 should have the expected seq")
require.Equal(t, false, foundNode1, "Should have only one instance of node1") // Ensure no duplicates
foundNode1 = true
}
if node.ID() == node2_seq1.ID() {
foundNode2 = true
}
}
require.Equal(t, true, foundNode1, "Should have node1")
require.Equal(t, true, foundNode2, "Should have node2")
},
},
{
name: "Mix of unique and duplicate nodes",
nodes: []*enode.Node{
node1_seq1,
node2_seq1,
node1_seq2, // Should replace node1_seq1
node3_seq1,
node1_seq3, // Should replace node1_seq2
},
missingPeers: 3,
expectedCount: 3,
description: "Should handle mix of unique nodes and duplicates correctly",
eval: nil, // Basic count validation is sufficient
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
fakePeer := testp2p.NewTestP2P(t)

s := &Service{
cfg: &Config{
MaxPeers: 30,
},
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
peers: peers.NewStatus(ctx, &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{},
}),
host: fakePeer.BHost,
}

localNode := createTestNodeRandom(t)
mockIter := testp2p.NewMockIterator(tt.nodes)
s.dv5Listener = testp2p.NewMockListener(localNode, mockIter)

ctxWithTimeout, cancel := context.WithTimeout(ctx, 1*time.Second)
defer cancel()

result, err := s.findPeers(ctxWithTimeout, tt.missingPeers)

require.NoError(t, err, tt.description)
require.Equal(t, tt.expectedCount, len(result), tt.description)

if tt.eval != nil {
tt.eval(t, result)
}
})
}
}

// callbackIterator allows us to execute callbacks at specific points during iteration
type callbackIterator struct {
nodes []*enode.Node
index int
callbacks map[int]func() // map from index to callback function
}

func (c *callbackIterator) Next() bool {
// Execute callback before checking if we can continue (if one exists)
if callback, exists := c.callbacks[c.index]; exists {
callback()
}

return c.index < len(c.nodes)
}

func (c *callbackIterator) Node() *enode.Node {
if c.index >= len(c.nodes) {
return nil
}

node := c.nodes[c.index]
c.index++
return node
}

func (c *callbackIterator) Close() {
// Nothing to clean up for this simple implementation
}

func TestFindPeers_received_bad_existing_node(t *testing.T) {
// This test successfully triggers delete(nodeByNodeID, node.ID()) in subnets.go by:
// 1. Processing node1_seq1 first (passes filterPeer, gets added to the map).
// 2. Having the callback mark the peer as bad before node1_seq2 is processed.
// 3. Processing node1_seq2 (fails filterPeer, triggering the delete since ok=true).
params.SetupTestConfigCleanup(t)
cache.SubnetIDs.EmptyAllCaches()
defer cache.SubnetIDs.EmptyAllCaches()

// Create a LocalNode with the same ID but different sequences.
localNode1 := createTestNodeWithID(t, "testnode")
node1_seq1 := localNode1.Node() // Get current node
currentSeq := node1_seq1.Seq()
setNodeSeq(localNode1, currentSeq+1) // Increment sequence by 1
node1_seq2 := localNode1.Node() // This should have higher seq

// Additional node to ensure we have enough peers to process.
localNode2 := createTestNodeWithID(t, "othernode")
node2 := localNode2.Node()

fakePeer := testp2p.NewTestP2P(t)

service := &Service{
cfg: &Config{
MaxPeers: 30,
},
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
peers: peers.NewStatus(t.Context(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{},
}),
host: fakePeer.BHost,
}

// Create an iterator with a callback that marks the peer as bad before processing node1_seq2.
iter := &callbackIterator{
nodes: []*enode.Node{node1_seq1, node1_seq2, node2},
index: 0,
callbacks: map[int]func(){
1: func() { // Before processing node1_seq2 (index 1)
// Mark the peer as bad before processing node1_seq2.
peerData, _, _ := convertToAddrInfo(node1_seq2)
if peerData != nil {
service.peers.Add(node1_seq2.Record(), peerData.ID, nil, network.DirUnknown)
// Mark as a bad peer - need enough increments to exceed the threshold (6).
for i := 0; i < 10; i++ {
service.peers.Scorers().BadResponsesScorer().Increment(peerData.ID)
}
}
},
},
}

localNode := createTestNodeRandom(t)
service.dv5Listener = testp2p.NewMockListener(localNode, iter)

// Run findPeers - node1_seq1 gets processed first, then the callback marks the peer bad, then node1_seq2 fails.
ctxWithTimeout, cancel := context.WithTimeout(t.Context(), 1*time.Second)
defer cancel()

result, err := service.findPeers(ctxWithTimeout, 3)

require.NoError(t, err)
require.Equal(t, 1, len(result))
}

@@ -13,10 +13,17 @@ import (
"github.com/sirupsen/logrus"
)

var errEth2ENRDigestMismatch = errors.New("fork digest of peer does not match local value")
var (
errForkScheduleMismatch = errors.New("peer fork schedule incompatible")
errCurrentDigestMismatch = errors.Wrap(errForkScheduleMismatch, "current_fork_digest mismatch")
errNextVersionMismatch = errors.Wrap(errForkScheduleMismatch, "next_fork_version mismatch")
errNextDigestMismatch = errors.Wrap(errForkScheduleMismatch, "nfd (next fork digest) mismatch")
)

// ENR key used for Ethereum consensus-related fork data.
var eth2ENRKey = params.BeaconNetworkConfig().ETH2Key
const (
eth2EnrKey = "eth2" // The `eth2` ENR entry advertises the node's view of the fork schedule with an ssz-encoded ENRForkID value.
nfdEnrKey = "nfd" // The `nfd` ENR entry separately advertises the "next fork digest" aspect of the fork schedule.
)

// ForkDigest returns the current fork digest of
// the node according to the local clock.
@@ -33,44 +40,86 @@ func (s *Service) currentForkDigest() ([4]byte, error) {
// Compares fork ENRs between an incoming peer's record and our node's
// local record values for current and next fork version/epoch.
func compareForkENR(self, peer *enr.Record) error {
peerForkENR, err := forkEntry(peer)
peerEntry, err := forkEntry(peer)
if err != nil {
return err
}
currentForkENR, err := forkEntry(self)
selfEntry, err := forkEntry(self)
if err != nil {
return err
}
enrString, err := SerializeENR(peer)
peerString, err := SerializeENR(peer)
if err != nil {
return err
}
// Clients SHOULD connect to peers with current_fork_digest, next_fork_version,
// and next_fork_epoch that match local values.
if !bytes.Equal(peerForkENR.CurrentForkDigest, currentForkENR.CurrentForkDigest) {
return errors.Wrapf(errEth2ENRDigestMismatch,
if !bytes.Equal(peerEntry.CurrentForkDigest, selfEntry.CurrentForkDigest) {
return errors.Wrapf(errCurrentDigestMismatch,
"fork digest of peer with ENR %s: %v, does not match local value: %v",
enrString,
peerForkENR.CurrentForkDigest,
currentForkENR.CurrentForkDigest,
peerString,
peerEntry.CurrentForkDigest,
selfEntry.CurrentForkDigest,
)
}

// Clients MAY connect to peers with the same current_fork_version but a
// different next_fork_version/next_fork_epoch. Unless ENRForkID is manually
// updated to match prior to the earlier next_fork_epoch of the two clients,
// these types of connecting clients will be unable to successfully interact
// starting at the earlier next_fork_epoch.
if peerForkENR.NextForkEpoch != currentForkENR.NextForkEpoch {
if peerEntry.NextForkEpoch != selfEntry.NextForkEpoch {
log.WithFields(logrus.Fields{
"peerNextForkEpoch": peerForkENR.NextForkEpoch,
"peerENR": enrString,
"peerNextForkEpoch": peerEntry.NextForkEpoch,
"peerNextForkVersion": peerEntry.NextForkVersion,
"peerENR": peerString,
}).Trace("Peer matches fork digest but has different next fork epoch")
// We allow the connection because we have a different view of the next fork epoch. This
// could be due to peers that have not upgraded ahead of a fork or BPO schedule change, so
// we allow the connection to continue until the fork boundary.
return nil
}
if !bytes.Equal(peerForkENR.NextForkVersion, currentForkENR.NextForkVersion) {
log.WithFields(logrus.Fields{
"peerNextForkVersion": peerForkENR.NextForkVersion,
"peerENR": enrString,
}).Trace("Peer matches fork digest but has different next fork version")

// Since we agree on the next fork epoch, we require the next fork version to also be in agreement.
if !bytes.Equal(peerEntry.NextForkVersion, selfEntry.NextForkVersion) {
return errors.Wrapf(errNextVersionMismatch,
"next fork version of peer with ENR %s: %#x, does not match local value: %#x",
peerString, peerEntry.NextForkVersion, selfEntry.NextForkVersion)
}

// Fulu adds the following to the spec:
// ---
// A new entry is added to the ENR under the key nfd, short for next fork digest. This entry
// communicates the digest of the next scheduled fork, regardless of whether it is a regular
// or a Blob-Parameters-Only fork. This new entry MUST be added once FULU_FORK_EPOCH is assigned
// any value other than FAR_FUTURE_EPOCH. Adding this entry prior to the Fulu fork will not
// impact peering as nodes will ignore unknown ENR entries and nfd mismatches do not cause
// disconnects.
// When discovering and interfacing with peers, nodes MUST evaluate nfd alongside their existing
// consideration of the ENRForkID::next_* fields under the eth2 key, to form a more accurate
// view of the peer's intended next fork for the purposes of sustained peering. If there is a
// mismatch, the node MUST NOT disconnect before the fork boundary, but it MAY disconnect
// at/after the fork boundary.

// Nodes unprepared to follow the Fulu fork will be unaware of nfd entries. However, their
// existing comparison of eth2 entries (concretely next_fork_epoch) is sufficient to detect
// upcoming divergence.
// ---

// Because this is a new inbound connection, we lean into the pre-Fulu point that clients
// MAY connect to peers with the same current_fork_version but a different
// next_fork_version/next_fork_epoch, which implies we can choose not to connect to them when these
// don't match.
//
// Given that the next_fork_epoch matches, we will require the next_fork_digest to match.
if !params.FuluEnabled() {
return nil
}
peerNFD, selfNFD := nfd(peer), nfd(self)
if peerNFD != selfNFD {
return errors.Wrapf(errNextDigestMismatch,
"next fork digest of peer with ENR %s: %v, does not match local value: %v",
peerString, peerNFD, selfNFD)
}
return nil
}
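Taken together, the reworked compareForkENR applies its checks in a fixed order. A condensed, self-contained sketch of that decision order follows; the `forkID` struct and error values here are illustrative stand-ins, not the Prysm definitions:

```go
package main

import (
	"bytes"
	"errors"
	"fmt"
)

var (
	errCurrentDigest = errors.New("current_fork_digest mismatch")
	errNextVersion   = errors.New("next_fork_version mismatch")
	errNextDigest    = errors.New("nfd mismatch")
)

type forkID struct {
	currentForkDigest []byte
	nextForkVersion   []byte
	nextForkEpoch     uint64
}

// accept mirrors the decision order above: hard-reject on current-digest
// mismatch, tolerate a next-epoch mismatch until the fork boundary, then
// require version agreement and (once Fulu is enabled) nfd agreement.
func accept(peer, self forkID, peerNFD, selfNFD [4]byte, fulu bool) error {
	if !bytes.Equal(peer.currentForkDigest, self.currentForkDigest) {
		return errCurrentDigest
	}
	if peer.nextForkEpoch != self.nextForkEpoch {
		return nil // allowed to stay connected until the fork boundary
	}
	if !bytes.Equal(peer.nextForkVersion, self.nextForkVersion) {
		return errNextVersion
	}
	if fulu && peerNFD != selfNFD {
		return errNextDigest
	}
	return nil
}

func main() {
	self := forkID{currentForkDigest: []byte{1}, nextForkVersion: []byte{2}, nextForkEpoch: 100}
	peer := self
	fmt.Println(accept(peer, self, [4]byte{9}, [4]byte{8}, true)) // nfd mismatch
}
```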
@@ -102,7 +151,7 @@ func updateENR(node *enode.LocalNode, entry, next params.NetworkScheduleEntry) e
if err != nil {
return err
}
forkEntry := enr.WithEntry(eth2ENRKey, enc)
forkEntry := enr.WithEntry(eth2EnrKey, enc)
node.Set(forkEntry)
return nil
}
@@ -111,7 +160,7 @@ func updateENR(node *enode.LocalNode, entry, next params.NetworkScheduleEntry) e
// under the Ethereum consensus EnrKey
func forkEntry(record *enr.Record) (*pb.ENRForkID, error) {
sszEncodedForkEntry := make([]byte, 16)
entry := enr.WithEntry(eth2ENRKey, &sszEncodedForkEntry)
entry := enr.WithEntry(eth2EnrKey, &sszEncodedForkEntry)
err := record.Load(entry)
if err != nil {
return nil, err
@@ -122,3 +171,15 @@ func forkEntry(record *enr.Record) (*pb.ENRForkID, error) {
}
return forkEntry, nil
}

// nfd retrieves the value of the `nfd` ("next fork digest") key from an ENR record.
func nfd(record *enr.Record) [4]byte {
digest := [4]byte{}
entry := enr.WithEntry(nfdEnrKey, &digest)
if err := record.Load(entry); err != nil {
// Treat a missing nfd entry as an empty digest.
// We do this to avoid errors when checking peers that have not upgraded for Fulu.
return [4]byte{}
}
return digest
}

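A minimal round-trip of an `nfd`-style entry on a fresh record, using go-ethereum's enode/enr packages directly (a sketch; writing the value as a byte slice mirrors the commented-out test line later in this diff, and whether slice or 4-byte array encoding is used in Prysm proper is an assumption here):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/enr"
)

func main() {
	db, err := enode.OpenDB("") // in-memory node database
	if err != nil {
		panic(err)
	}
	defer db.Close()

	key, err := crypto.GenerateKey()
	if err != nil {
		panic(err)
	}
	ln := enode.NewLocalNode(db, key)

	// Write the next-fork-digest entry under the "nfd" key.
	digest := [4]byte{0xDE, 0xAD, 0xBE, 0xEF}
	ln.Set(enr.WithEntry("nfd", digest[:]))

	// Read it back; a missing entry would leave `got` zeroed, matching
	// the lenient fallback behavior of the nfd() helper above.
	var got [4]byte
	if err := ln.Node().Record().Load(enr.WithEntry("nfd", &got)); err == nil {
		fmt.Printf("nfd = %#x\n", got)
	}
}
```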
@@ -16,14 +16,12 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/sirupsen/logrus"
logTest "github.com/sirupsen/logrus/hooks/test"
)

func TestCompareForkENR(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096
params.BeaconConfig().InitializeForkSchedule()
logrus.SetLevel(logrus.TraceLevel)

db, err := enode.OpenDB("")
assert.NoError(t, err)
@@ -61,10 +59,10 @@ func TestCompareForkENR(t *testing.T) {
require.NoError(t, updateENR(peer, currentCopy, next))
return peer.Node()
},
expectErr: errEth2ENRDigestMismatch,
expectErr: errCurrentDigestMismatch,
},
{
name: "next fork version mismatch",
name: "next_fork_epoch match, next_fork_version mismatch",
node: func(t *testing.T) *enode.Node {
// Create a peer with the same current fork digest and next fork version/epoch.
peer := enode.NewLocalNode(db, k)
@@ -75,25 +73,44 @@ func TestCompareForkENR(t *testing.T) {
require.NoError(t, updateENR(peer, current, nextCopy))
return peer.Node()
},
expectLog: "Peer matches fork digest but has different next fork version",
expectErr: errNextVersionMismatch,
},
{
name: "next fork epoch mismatch",
name: "next fork epoch mismatch, next fork digest mismatch",
node: func(t *testing.T) *enode.Node {
// Create a peer with the same current fork digest and next fork version/epoch.
peer := enode.NewLocalNode(db, k)
nextCopy := next
// next epoch does not match, and neither does the next fork digest.
nextCopy.Epoch = nextCopy.Epoch + 1
nfd := [4]byte{0xFF, 0xFF, 0xFF, 0xFF}
require.NotEqual(t, next.ForkDigest, nfd)
//peer.Set(enr.WithEntry(nfdEnrKey, nfd[:]))
nextCopy.ForkDigest = nfd
require.NoError(t, updateENR(peer, current, nextCopy))
return peer.Node()
},
expectLog: "Peer matches fork digest but has different next fork epoch",
// no error because we allow a different next fork version / digest if the next fork epoch does not match
},
{
name: "next fork epoch -match-, next fork digest mismatch",
node: func(t *testing.T) *enode.Node {
peer := enode.NewLocalNode(db, k)
nextCopy := next
nfd := [4]byte{0xFF, 0xFF, 0xFF, 0xFF}
// next epoch *does match*, but the next fork digest doesn't - so we should get an error.
require.NotEqual(t, next.ForkDigest, nfd)
nextCopy.ForkDigest = nfd
//peer.Set(enr.WithEntry(nfdEnrKey, nfd[:]))
require.NoError(t, updateENR(peer, current, nextCopy))
return peer.Node()
},
expectErr: errNextDigestMismatch,
},
}

for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
hook := logTest.NewGlobal()
peer := c.node(t)
err := compareForkENR(self.Node().Record(), peer.Record())
if c.expectErr != nil {
@@ -101,13 +118,27 @@ func TestCompareForkENR(t *testing.T) {
} else {
require.NoError(t, err, "Expected no error comparing fork ENRs")
}
if c.expectLog != "" {
require.LogsContain(t, hook, c.expectLog, "Expected log message not found")
}
})
}
}

func TestNfdSetAndLoad(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096
params.BeaconConfig().InitializeForkSchedule()
db, err := enode.OpenDB("")
assert.NoError(t, err)
_, k := createAddrAndPrivKey(t)
clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot)
current := params.GetNetworkScheduleEntry(clock.CurrentEpoch())
next := params.NextNetworkScheduleEntry(clock.CurrentEpoch())
next.ForkDigest = [4]byte{0xFF, 0xFF, 0xFF, 0xFF} // Ensure a unique digest for testing.
self := enode.NewLocalNode(db, k)
require.NoError(t, updateENR(self, current, next))
n := nfd(self.Node().Record())
assert.Equal(t, next.ForkDigest, n, "Expected nfd to match next fork digest")
}

func TestDiscv5_AddRetrieveForkEntryENR(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.BeaconConfig().InitializeForkSchedule()
@@ -122,7 +153,7 @@ func TestDiscv5_AddRetrieveForkEntryENR(t *testing.T) {
}
enc, err := enrForkID.MarshalSSZ()
require.NoError(t, err)
entry := enr.WithEntry(eth2ENRKey, enc)
entry := enr.WithEntry(eth2EnrKey, enc)
temp := t.TempDir()
randNum := rand.Int()
tempPath := path.Join(temp, strconv.Itoa(randNum))

@@ -3,6 +3,7 @@ package p2p
import (
"context"
"math"
"net"
"reflect"
"strings"
"time"
@@ -75,7 +76,7 @@ var (
tenEpochs = 10 * oneEpochDuration()
)

func peerScoringParams() (*pubsub.PeerScoreParams, *pubsub.PeerScoreThresholds) {
func peerScoringParams(colocationWhitelist []*net.IPNet) (*pubsub.PeerScoreParams, *pubsub.PeerScoreThresholds) {
thresholds := &pubsub.PeerScoreThresholds{
GossipThreshold: -4000,
PublishThreshold: -8000,
@@ -83,6 +84,7 @@ func peerScoringParams() (*pubsub.PeerScoreParams, *pubsub.PeerScoreThresholds)
AcceptPXThreshold: 100,
OpportunisticGraftThreshold: 5,
}

scoreParams := &pubsub.PeerScoreParams{
Topics: make(map[string]*pubsub.TopicScoreParams),
TopicScoreCap: 32.72,
@@ -92,7 +94,7 @@ func peerScoringParams() (*pubsub.PeerScoreParams, *pubsub.PeerScoreThresholds)
AppSpecificWeight: 1,
IPColocationFactorWeight: -35.11,
IPColocationFactorThreshold: 10,
IPColocationFactorWhitelist: nil,
IPColocationFactorWhitelist: colocationWhitelist,
BehaviourPenaltyWeight: -15.92,
BehaviourPenaltyThreshold: 6,
BehaviourPenaltyDecay: scoreDecay(tenEpochs),

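The whitelist threaded through peerScoringParams is a plain `[]*net.IPNet`. Building one from CIDR strings with the standard library looks like this (a sketch; `parseWhitelist` is a hypothetical helper, not part of this diff):

```go
package main

import (
	"fmt"
	"net"
)

// parseWhitelist converts CIDR strings into the []*net.IPNet slice expected
// by the colocation-whitelist parameters introduced in this diff.
func parseWhitelist(cidrs []string) ([]*net.IPNet, error) {
	out := make([]*net.IPNet, 0, len(cidrs))
	for _, c := range cidrs {
		_, ipNet, err := net.ParseCIDR(c)
		if err != nil {
			return nil, fmt.Errorf("invalid CIDR %q: %w", c, err)
		}
		out = append(out, ipNet)
	}
	return out, nil
}

func main() {
	wl, err := parseWhitelist([]string{"10.0.0.0/8", "192.168.1.0/24"})
	if err != nil {
		panic(err)
	}
	fmt.Println(wl[0].Contains(net.ParseIP("10.1.2.3"))) // true
}
```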
@@ -51,7 +51,7 @@ type (
|
||||
BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.BlobSidecar) error
|
||||
BroadcastLightClientOptimisticUpdate(ctx context.Context, update interfaces.LightClientOptimisticUpdate) error
|
||||
BroadcastLightClientFinalityUpdate(ctx context.Context, update interfaces.LightClientFinalityUpdate) error
|
||||
BroadcastDataColumn(root [fieldparams.RootLength]byte, columnSubnet uint64, dataColumnSidecar *ethpb.DataColumnSidecar) error
|
||||
BroadcastDataColumnSidecar(root [fieldparams.RootLength]byte, columnSubnet uint64, dataColumnSidecar *ethpb.DataColumnSidecar) error
|
||||
}
|
||||
|
||||
// SetStreamHandler configures p2p to handle streams of a certain topic ID.
|
||||
|
@@ -18,6 +18,7 @@ var (
"lodestar",
"js-libp2p",
"rust-libp2p",
"erigon/caplin",
}
p2pPeerCount = promauto.NewGaugeVec(prometheus.GaugeOpts{
Name: "p2p_peer_count",

@@ -13,6 +13,7 @@ import (
mplex "github.com/libp2p/go-libp2p-mplex"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/p2p/net/connmgr"
"github.com/libp2p/go-libp2p/p2p/security/noise"
libp2pquic "github.com/libp2p/go-libp2p/p2p/transport/quic"
libp2ptcp "github.com/libp2p/go-libp2p/p2p/transport/tcp"
@@ -58,6 +59,22 @@ func MultiAddressBuilder(ip net.IP, tcpPort, quicPort uint) ([]ma.Multiaddr, err
return multiaddrs, nil
}

// setConnManagerOption sets the connection manager option for libp2p based on the
// MaxPeers setting in the p2p config. If MaxPeers is set to a value higher than the
// default high water mark, we create a new connection manager with a high water mark
// that is higher than MaxPeers. Otherwise, we do not set a connection manager option
// and allow the libp2p fallback defaults to be applied. Rationale below:
// see: https://github.com/OffchainLabs/prysm/issues/15607
func setConnManagerOption(cfg *Config, opts []libp2p.Option) ([]libp2p.Option, error) {
low, high := cfg.connManagerLowHigh()
cm, err := connmgr.NewConnManager(low, high)
if err != nil {
return nil, errors.Wrap(err, "new ConnManager")
}
opts = append(opts, libp2p.ConnectionManager(cm))
return opts, nil
}

// buildOptions for the libp2p host.
func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) ([]libp2p.Option, error) {
cfg := s.cfg
@@ -84,7 +101,6 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) ([]libp2p.Op
if err != nil {
return nil, errors.Wrapf(err, "cannot get ID from public key: %s", ifaceKey.GetPublic().Type().String())
}

log.Infof("Running node with peer id of %s ", id.String())

options := []libp2p.Option{
@@ -98,6 +114,10 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) ([]libp2p.Op
libp2p.Security(noise.ID, noise.New),
libp2p.Ping(false), // Disable Ping Service.
}
options, err = setConnManagerOption(s.cfg, options)
if err != nil {
return nil, errors.Wrap(err, "set connection manager option")
}

if features.Get().EnableQUIC {
options = append(options, libp2p.Transport(libp2pquic.NewTransport))

@@ -18,6 +18,7 @@ import (
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/libp2p/go-libp2p"
"github.com/libp2p/go-libp2p/core/connmgr"
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
@@ -134,6 +135,59 @@ func TestDefaultMultiplexers(t *testing.T) {
assert.Equal(t, protocol.ID("/mplex/6.7.0"), cfg.Muxers[1].ID)
}

func TestSetConnManagerOption(t *testing.T) {
cases := []struct {
name string
maxPeers uint
highWater int
}{
{
name: "MaxPeers lower than default high water mark",
maxPeers: defaultConnManagerPruneAbove - 1,
highWater: defaultConnManagerPruneAbove,
},
{
name: "MaxPeers equal to default high water mark",
maxPeers: defaultConnManagerPruneAbove,
highWater: defaultConnManagerPruneAbove,
},
{
name: "MaxPeers higher than default high water mark",
maxPeers: defaultConnManagerPruneAbove + 1,
highWater: defaultConnManagerPruneAbove + 1 + connManagerPruneAmount,
},
}
for _, tt := range cases {
t.Run(tt.name, func(t *testing.T) {
cfg := &Config{MaxPeers: tt.maxPeers}
opts, err := setConnManagerOption(cfg, []libp2p.Option{})
assert.NoError(t, err)
_, high := cfg.connManagerLowHigh()
require.Equal(t, true, high > int(cfg.MaxPeers))

var libCfg libp2p.Config
require.NoError(t, libCfg.Apply(append(opts, libp2p.FallbackDefaults)...))
checkLimit(t, libCfg.ConnManager, high)
})
}
}

type connLimitGetter int

func (m connLimitGetter) GetConnLimit() int {
return int(m)
}

// CheckLimit will return an error if the result of calling lg.GetConnLimit is greater than
// the high water mark. So by checking the result of calling it with a value equal to and lower
// than the expected value, we can determine the value it holds internally.
func checkLimit(t *testing.T, cm connmgr.ConnManager, expected int) {
require.NoError(t, cm.CheckLimit(connLimitGetter(expected)), "Connection manager limit check failed")
if err := cm.CheckLimit(connLimitGetter(expected - 1)); err == nil {
t.Errorf("connection manager limit is below the expected value of %d", expected)
}
}

func TestMultiAddressBuilderWithID(t *testing.T) {
testCases := []struct {
name string

@@ -42,7 +42,7 @@ func TestScorers_Gossip_Score(t *testing.T) {
},
check: func(scorer *scorers.GossipScorer) {
assert.Equal(t, 10.0, scorer.Score("peer1"), "Unexpected score")
assert.Equal(t, nil, scorer.IsBadPeer("peer1"), "Unexpected bad peer")
assert.NoError(t, scorer.IsBadPeer("peer1"), "Unexpected bad peer")
_, _, topicMap, err := scorer.GossipData("peer1")
assert.NoError(t, err)
assert.Equal(t, uint64(100), topicMap["a"].TimeInMesh, "incorrect time in mesh")

@@ -25,6 +25,7 @@ package peers
import (
"context"
"math"
"net"
"sort"
"strings"
"time"
@@ -87,11 +88,12 @@ const (

// Status is the structure holding the peer status information.
type Status struct {
ctx context.Context
scorers *scorers.Service
store *peerdata.Store
ipTracker map[string]uint64
rand *rand.Rand
ctx context.Context
scorers *scorers.Service
store *peerdata.Store
ipTracker map[string]uint64
rand *rand.Rand
ipColocationWhitelist []*net.IPNet
}

// StatusConfig represents peer status service params.
@@ -100,6 +102,8 @@ type StatusConfig struct {
PeerLimit int
// ScorerParams holds peer scorer configuration params.
ScorerParams *scorers.Config
// IPColocationWhitelist contains CIDR ranges that are exempt from IP colocation limits.
IPColocationWhitelist []*net.IPNet
}

// NewStatus creates a new status entity.
@@ -107,11 +111,13 @@ func NewStatus(ctx context.Context, config *StatusConfig) *Status {
store := peerdata.NewStore(ctx, &peerdata.StoreConfig{
MaxPeers: maxLimitBuffer + config.PeerLimit,
})

return &Status{
ctx: ctx,
store: store,
scorers: scorers.NewService(ctx, store, config.ScorerParams),
ipTracker: map[string]uint64{},
ctx: ctx,
store: store,
scorers: scorers.NewService(ctx, store, config.ScorerParams),
ipTracker: map[string]uint64{},
ipColocationWhitelist: config.IPColocationWhitelist,
// Random generator used to calculate dial backoff period.
// It is ok to use deterministic generator, no need for true entropy.
rand: rand.NewDeterministicGenerator(),
@@ -1046,6 +1052,13 @@ func (p *Status) isfromBadIP(pid peer.ID) error {

if val, ok := p.ipTracker[ip.String()]; ok {
if val > CollocationLimit {
// Check if IP is in the whitelist
for _, ipNet := range p.ipColocationWhitelist {
if ipNet.Contains(ip) {
// IP is whitelisted, skip colocation limit check
return nil
}
}
return errors.Errorf(
"colocation limit exceeded: got %d - limit %d for peer %v with IP %v",
val, CollocationLimit, pid, ip.String(),

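Wiring the new StatusConfig field end-to-end could look like the following sketch (illustrative values; it assumes only the peers and scorers packages shown in this diff):

```go
package main

import (
	"context"
	"net"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
)

func main() {
	_, ipNet, err := net.ParseCIDR("10.0.0.0/8")
	if err != nil {
		panic(err)
	}
	// Peers from 10.0.0.0/8 are exempt from the colocation limit check in
	// isfromBadIP (and, via the service config, from gossipsub colocation penalties).
	st := peers.NewStatus(context.Background(), &peers.StatusConfig{
		PeerLimit:             30,
		ScorerParams:          &scorers.Config{},
		IPColocationWhitelist: []*net.IPNet{ipNet},
	})
	_ = st
}
```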
@@ -145,7 +145,7 @@ func (s *Service) pubsubOptions() []pubsub.Option {
pubsub.WithPeerOutboundQueueSize(int(s.cfg.QueueSize)),
pubsub.WithMaxMessageSize(int(MaxMessageSize())), // lint:ignore uintcast -- Max Message Size is a config value and is naturally bounded by networking limitations.
pubsub.WithValidateQueueSize(int(s.cfg.QueueSize)),
pubsub.WithPeerScore(peerScoringParams()),
pubsub.WithPeerScore(peerScoringParams(s.cfg.IPColocationWhitelist)),
pubsub.WithPeerScoreInspect(s.peerInspector, time.Minute),
pubsub.WithGossipSubParams(pubsubGossipParam()),
pubsub.WithRawTracer(gossipTracer{host: s.host}),

@@ -10,6 +10,7 @@ import (
pubsub "github.com/libp2p/go-libp2p-pubsub"
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@@ -40,49 +41,68 @@ func (s *Service) setAllForkDigests() {
}
}

var (
errNotReadyToSubscribe = fmt.Errorf("not ready to subscribe, service is not initialized")
errMissingLeadingSlash = fmt.Errorf("topic is missing leading slash")
errTopicMissingProtocolVersion = fmt.Errorf("topic is missing protocol version (eth2)")
errTopicPathWrongPartCount = fmt.Errorf("topic path has wrong part count")
errDigestInvalid = fmt.Errorf("digest is invalid")
errDigestUnexpected = fmt.Errorf("digest is unexpected")
errSnappySuffixMissing = fmt.Errorf("snappy suffix is missing")
errTopicNotFound = fmt.Errorf("topic not found in gossip topic mappings")
)

// CanSubscribe returns true if the topic is of interest and we could subscribe to it.
func (s *Service) CanSubscribe(topic string) bool {
if !s.isInitialized() {
if err := s.checkSubscribable(topic); err != nil {
if !errors.Is(err, errNotReadyToSubscribe) {
logrus.WithError(err).WithField("topic", topic).Debug("CanSubscribe failed")
}
return false
}
return true
}

func (s *Service) checkSubscribable(topic string) error {
if !s.isInitialized() {
return errNotReadyToSubscribe
}
parts := strings.Split(topic, "/")
if len(parts) != 5 {
return false
return errTopicPathWrongPartCount
}
// The topic must start with a slash, which means the first part will be empty.
if parts[0] != "" {
return false
return errMissingLeadingSlash
}
if parts[1] != "eth2" {
return false
protocol, rawDigest, suffix := parts[1], parts[2], parts[4]
if protocol != "eth2" {
return errTopicMissingProtocolVersion
}
if suffix != encoder.ProtocolSuffixSSZSnappy {
return errSnappySuffixMissing
}

var digest [4]byte
dl, err := hex.Decode(digest[:], []byte(parts[2]))
if err == nil && dl != 4 {
err = fmt.Errorf("expected 4 bytes, got %d", dl)
}
dl, err := hex.Decode(digest[:], []byte(rawDigest))
if err != nil {
log.WithError(err).WithField("topic", topic).WithField("digest", parts[2]).Error("CanSubscribe failed to parse message")
return false
return errors.Wrapf(errDigestInvalid, "%v", err)
}
if dl != 4 {
return errors.Wrapf(errDigestInvalid, "wrong byte length")
}
if _, ok := s.allForkDigests[digest]; !ok {
log.WithField("topic", topic).WithField("digest", fmt.Sprintf("%#x", digest)).Error("CanSubscribe failed to find digest in allForkDigests")
return false
}

if parts[4] != encoder.ProtocolSuffixSSZSnappy {
return false
return errDigestUnexpected
}

// Check the incoming topic matches any topic mapping. This includes a check for part[3].
for gt := range gossipTopicMappings {
if _, err := scanfcheck(strings.Join(parts[0:4], "/"), gt); err == nil {
return true
return nil
}
}

return false
return errTopicNotFound
}

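A topic that passes these checks has the shape `/eth2/<8-hex-char digest>/<message name>/ssz_snappy`. The standalone sketch below mirrors the same validation order on a plain string (the fork-digest-set and topic-mapping lookups are intentionally omitted because they need Service state):

```go
package main

import (
	"encoding/hex"
	"errors"
	"fmt"
	"strings"
)

// checkTopic mirrors the validation order of checkSubscribable above:
// part count, leading slash, "eth2" protocol, ssz_snappy suffix,
// then a 4-byte hex fork digest.
func checkTopic(topic string) error {
	parts := strings.Split(topic, "/")
	if len(parts) != 5 {
		return errors.New("wrong part count")
	}
	if parts[0] != "" {
		return errors.New("missing leading slash")
	}
	if parts[1] != "eth2" {
		return errors.New("missing protocol version (eth2)")
	}
	if parts[4] != "ssz_snappy" {
		return errors.New("missing snappy suffix")
	}
	var digest [4]byte
	n, err := hex.Decode(digest[:], []byte(parts[2]))
	if err != nil || n != 4 {
		return errors.New("invalid digest")
	}
	return nil
}

func main() {
	fmt.Println(checkTopic("/eth2/6a95a1a9/beacon_block/ssz_snappy")) // <nil>
	fmt.Println(checkTopic("/eth2/xyz/beacon_block/ssz_snappy"))      // invalid digest
}
```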
// FilterIncomingSubscriptions is invoked for all RPCs containing subscription notifications.
@@ -100,7 +120,22 @@ func (s *Service) FilterIncomingSubscriptions(peerID peer.ID, subs []*pubsubpb.R
return nil, pubsub.ErrTooManySubscriptions
}

return pubsub.FilterSubscriptions(subs, s.CanSubscribe), nil
return pubsub.FilterSubscriptions(subs, s.logCheckSubscribableError(peerID)), nil
}

func (s *Service) logCheckSubscribableError(pid peer.ID) func(string) bool {
return func(topic string) bool {
if err := s.checkSubscribable(topic); err != nil {
if !errors.Is(err, errNotReadyToSubscribe) {
log.WithError(err).WithFields(logrus.Fields{
"peerID": pid,
"topic": topic,
}).Debug("Peer subscription rejected")
}
return false
}
return true
}
}

// scanfcheck uses fmt.Sscanf to check that a given string matches expected format. This method

@@ -169,7 +169,7 @@ var (
RPCDataColumnSidecarsByRangeTopicV1: new(pb.DataColumnSidecarsByRangeRequest),

// DataColumnSidecarsByRoot v1 Message
RPCDataColumnSidecarsByRootTopicV1: new(p2ptypes.DataColumnsByRootIdentifiers),
RPCDataColumnSidecarsByRootTopicV1: p2ptypes.DataColumnsByRootIdentifiers{},
}

// Maps all registered protocol prefixes.

@@ -178,7 +178,8 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
s.pubsub = gs

s.peers = peers.NewStatus(ctx, &peers.StatusConfig{
PeerLimit: int(s.cfg.MaxPeers),
PeerLimit: int(s.cfg.MaxPeers),
IPColocationWhitelist: s.cfg.IPColocationWhitelist,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: maxBadResponses,

@@ -13,13 +13,13 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
testp2p "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
prysmTime "github.com/OffchainLabs/prysm/v6/time"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/libp2p/go-libp2p"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/peer"
@@ -30,48 +30,6 @@ import (

const testPingInterval = 100 * time.Millisecond

type mockListener struct {
localNode *enode.LocalNode
}

func (m mockListener) Self() *enode.Node {
return m.localNode.Node()
}

func (mockListener) Close() {
// no-op
}

func (mockListener) Lookup(enode.ID) []*enode.Node {
panic("implement me")
}

func (mockListener) ReadRandomNodes(_ []*enode.Node) int {
panic("implement me")
}

func (mockListener) Resolve(*enode.Node) *enode.Node {
panic("implement me")
}

func (mockListener) Ping(*enode.Node) error {
panic("implement me")
}

func (mockListener) RequestENR(*enode.Node) (*enode.Node, error) {
panic("implement me")
}

func (mockListener) LocalNode() *enode.LocalNode {
panic("implement me")
}

func (mockListener) RandomNodes() enode.Iterator {
panic("implement me")
}

func (mockListener) RebootListener() error { panic("implement me") }

func createHost(t *testing.T, port uint) (host.Host, *ecdsa.PrivateKey, net.IP) {
_, pkey := createAddrAndPrivKey(t)
ipAddr := net.ParseIP("127.0.0.1")
@@ -87,7 +45,7 @@ func TestService_Stop_SetsStartedToFalse(t *testing.T) {
s, err := NewService(t.Context(), &Config{StateNotifier: &mock.MockStateNotifier{}, DB: testDB.SetupDB(t)})
require.NoError(t, err)
s.started = true
s.dv5Listener = &mockListener{}
s.dv5Listener = testp2p.NewMockListener(nil, nil)
assert.NoError(t, s.Stop())
assert.Equal(t, false, s.started)
}
@@ -113,7 +71,7 @@ func TestService_Start_OnlyStartsOnce(t *testing.T) {
}
s, err := NewService(t.Context(), cfg)
require.NoError(t, err)
s.dv5Listener = &mockListener{}
s.dv5Listener = testp2p.NewMockListener(nil, nil)
s.custodyInfo = &custodyInfo{}
exitRoutine := make(chan bool)
go func() {
@@ -133,14 +91,14 @@ func TestService_Start_OnlyStartsOnce(t *testing.T) {
func TestService_Status_NotRunning(t *testing.T) {
params.SetupTestConfigCleanup(t)
s := &Service{started: false}
s.dv5Listener = &mockListener{}
s.dv5Listener = testp2p.NewMockListener(nil, nil)
assert.ErrorContains(t, "not running", s.Status(), "Status returned wrong error")
}

func TestService_Status_NoGenesisTimeSet(t *testing.T) {
params.SetupTestConfigCleanup(t)
s := &Service{started: true}
s.dv5Listener = &mockListener{}
s.dv5Listener = testp2p.NewMockListener(nil, nil)
assert.ErrorContains(t, "no genesis time set", s.Status(), "Status returned wrong error")

s.genesisTime = time.Now()

@@ -27,8 +27,6 @@ import (
"github.com/prysmaticlabs/go-bitfield"
)

const nfdEnrKey = "nfd" // The ENR record key for "nfd" (Next Fork Digest).

var (
attestationSubnetCount = params.BeaconConfig().AttestationSubnetCount
syncCommsSubnetCount = params.BeaconConfig().SyncCommitteeSubnetCount
@@ -112,6 +110,11 @@ func (s *Service) FindAndDialPeersWithSubnets(
}

peersToDial, err := func() ([]*enode.Node, error) {
for subnet := range defectiveSubnets {
s.subnetLocker(subnet).Lock()
defer s.subnetLocker(subnet).Unlock()
}

ctx, cancel := context.WithTimeout(ctx, batchPeriod)
defer cancel()

@@ -136,6 +139,24 @@ func (s *Service) FindAndDialPeersWithSubnets(
return nil
}

// updateDefectiveSubnets updates the defective subnets map when a node with matching subnets is found.
// It decrements the defective count for each subnet the node satisfies and removes subnets
// that are fully satisfied (count reaches 0).
func updateDefectiveSubnets(
nodeSubnets map[uint64]bool,
defectiveSubnets map[uint64]int,
) {
for subnet := range defectiveSubnets {
if !nodeSubnets[subnet] {
continue
}
defectiveSubnets[subnet]--
if defectiveSubnets[subnet] == 0 {
delete(defectiveSubnets, subnet)
}
}
}

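A quick worked example of the extracted helper (values are illustrative; the function body is copied from the hunk above):

```go
package main

import "fmt"

// updateDefectiveSubnets decrements the defective count for each subnet the
// node satisfies and deletes subnets whose count reaches zero.
func updateDefectiveSubnets(nodeSubnets map[uint64]bool, defectiveSubnets map[uint64]int) {
	for subnet := range defectiveSubnets {
		if !nodeSubnets[subnet] {
			continue
		}
		defectiveSubnets[subnet]--
		if defectiveSubnets[subnet] == 0 {
			delete(defectiveSubnets, subnet)
		}
	}
}

func main() {
	defective := map[uint64]int{1: 2, 2: 1} // still need 2 peers on subnet 1, 1 on subnet 2
	node := map[uint64]bool{1: true, 2: true}

	updateDefectiveSubnets(node, defective)
	fmt.Println(defective) // map[1:1] - subnet 2 is fully satisfied and removed
}
```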
// findPeersWithSubnets finds peers subscribed to defective subnets in batches
// until enough peers are found or the context is canceled.
// It returns new peers found during the search.
@@ -171,6 +192,7 @@ func (s *Service) findPeersWithSubnets(

// Crawl the network for peers subscribed to the defective subnets.
nodeByNodeID := make(map[enode.ID]*enode.Node)

for len(defectiveSubnets) > 0 && iterator.Next() {
if err := ctx.Err(); err != nil {
// Convert the map to a slice.
@@ -182,14 +204,28 @@ func (s *Service) findPeersWithSubnets(
return peersToDial, err
}

// Get all needed subnets that the node is subscribed to.
// Skip nodes that are not subscribed to any of the defective subnets.
node := iterator.Node()

// Remove duplicates, keeping the node with higher seq.
existing, ok := nodeByNodeID[node.ID()]
if ok && existing.Seq() >= node.Seq() {
continue // keep existing and skip.
}

// Treat nodes that exist in nodeByNodeID with higher seq numbers as new peers.
// Skip peers not matching the filter.
if !s.filterPeer(node) {
if ok {
// This means the existing peer with the lower sequence number is no longer valid.
delete(nodeByNodeID, existing.ID())
// Note: We choose not to roll back changes to the defective subnets map, in favor of calling s.defectiveSubnets once again after dialing peers.
// This case should rarely happen and is handled through a second iteration in FindAndDialPeersWithSubnets.
}
continue
}

// Get all needed subnets that the node is subscribed to.
// Skip nodes that are not subscribed to any of the defective subnets.
nodeSubnets, err := filter(node)
if err != nil {
return nil, errors.Wrap(err, "filter node")
@@ -198,30 +234,14 @@ func (s *Service) findPeersWithSubnets(
continue
}

// Remove duplicates, keeping the node with higher seq.
existing, ok := nodeByNodeID[node.ID()]
if ok && existing.Seq() > node.Seq() {
continue
}
nodeByNodeID[node.ID()] = node

// We found a new peer. Modify the defective subnets map
// and the filter accordingly.
for subnet := range defectiveSubnets {
if !nodeSubnets[subnet] {
continue
}
nodeByNodeID[node.ID()] = node

defectiveSubnets[subnet]--

if defectiveSubnets[subnet] == 0 {
delete(defectiveSubnets, subnet)
}

filter, err = s.nodeFilter(topicFormat, defectiveSubnets)
if err != nil {
return nil, errors.Wrap(err, "node filter")
}
updateDefectiveSubnets(nodeSubnets, defectiveSubnets)
filter, err = s.nodeFilter(topicFormat, defectiveSubnets)
if err != nil {
return nil, errors.Wrap(err, "node filter")
}
}

@@ -10,14 +10,19 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
testp2p "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v6/config/params"
ecdsaprysm "github.com/OffchainLabs/prysm/v6/crypto/ecdsa"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/testing/assert"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/network"
"github.com/prysmaticlabs/go-bitfield"
)

@@ -541,3 +546,552 @@ func TestInitializePersistentSubnets(t *testing.T) {
assert.Equal(t, 2, len(subs))
assert.Equal(t, true, expTime.After(time.Now()))
}

||||
func TestFindPeersWithSubnets_NodeDeduplication(t *testing.T) {
    params.SetupTestConfigCleanup(t)
    cache.SubnetIDs.EmptyAllCaches()
    defer cache.SubnetIDs.EmptyAllCaches()

    ctx := context.Background()
    db := testDB.SetupDB(t)

    localNode1 := createTestNodeWithID(t, "node1")
    localNode2 := createTestNodeWithID(t, "node2")
    localNode3 := createTestNodeWithID(t, "node3")

    // Create different sequence versions of node1 with subnet 1
    setNodeSubnets(localNode1, []uint64{1})
    setNodeSeq(localNode1, 1)
    node1_seq1_subnet1 := localNode1.Node()
    setNodeSeq(localNode1, 2)
    node1_seq2_subnet1 := localNode1.Node() // Same ID, higher seq
    setNodeSeq(localNode1, 3)
    node1_seq3_subnet1 := localNode1.Node() // Same ID, even higher seq

    // Node2 with different sequences and subnets
    setNodeSubnets(localNode2, []uint64{1})
    node2_seq1_subnet1 := localNode2.Node()
    setNodeSubnets(localNode2, []uint64{2}) // Different subnet
    setNodeSeq(localNode2, 2)
    node2_seq2_subnet2 := localNode2.Node()

    // Node3 with multiple subnets
    setNodeSubnets(localNode3, []uint64{1, 2})
    node3_seq1_subnet1_2 := localNode3.Node()

    tests := []struct {
        name             string
        nodes            []*enode.Node
        defectiveSubnets map[uint64]int
        expectedCount    int
        description      string
        eval             func(t *testing.T, result []*enode.Node) // Custom validation function
    }{
        {
            name: "No duplicates - unique nodes with same subnet",
            nodes: []*enode.Node{
                node2_seq1_subnet1,
                node3_seq1_subnet1_2,
            },
            defectiveSubnets: map[uint64]int{1: 2},
            expectedCount:    2,
            description:      "Should return all unique nodes subscribed to subnet",
            eval:             nil, // No special validation needed
        },
        {
            name: "Duplicate with lower seq first - should replace",
            nodes: []*enode.Node{
                node1_seq1_subnet1,
                node1_seq2_subnet1, // Higher seq, should replace
                node2_seq1_subnet1, // Different node to ensure we process enough nodes
            },
            defectiveSubnets: map[uint64]int{1: 2}, // Need 2 peers for subnet 1
            expectedCount:    2,
            description:      "Should replace with higher seq node for same subnet",
            eval: func(t *testing.T, result []*enode.Node) {
                found := false
                for _, node := range result {
                    if node.ID() == node1_seq2_subnet1.ID() && node.Seq() == node1_seq2_subnet1.Seq() {
                        found = true
                        break
                    }
                }
                require.Equal(t, true, found, "Should have node with higher seq")
            },
        },
        {
            name: "Duplicate with higher seq first - should keep existing",
            nodes: []*enode.Node{
                node1_seq3_subnet1, // Higher seq
                node1_seq2_subnet1, // Lower seq, should be skipped (continue branch)
                node1_seq1_subnet1, // Even lower seq, should also be skipped (continue branch)
                node2_seq1_subnet1, // Different node
            },
            defectiveSubnets: map[uint64]int{1: 2},
            expectedCount:    2,
            description:      "Should keep existing node with higher seq and skip lower seq duplicates",
            eval: func(t *testing.T, result []*enode.Node) {
                found := false
                for _, node := range result {
                    if node.ID() == node1_seq3_subnet1.ID() && node.Seq() == node1_seq3_subnet1.Seq() {
                        found = true
                        break
                    }
                }
                require.Equal(t, true, found, "Should have node with highest seq")
            },
        },
        {
            name: "Multiple updates for same node",
            nodes: []*enode.Node{
                node1_seq1_subnet1,
                node1_seq2_subnet1, // Should replace seq1
                node1_seq3_subnet1, // Should replace seq2
                node2_seq1_subnet1, // Different node
            },
            defectiveSubnets: map[uint64]int{1: 2},
            expectedCount:    2,
            description:      "Should keep updating to highest seq",
            eval: func(t *testing.T, result []*enode.Node) {
                found := false
                for _, node := range result {
                    if node.ID() == node1_seq3_subnet1.ID() && node.Seq() == node1_seq3_subnet1.Seq() {
                        found = true
                        break
                    }
                }
                require.Equal(t, true, found, "Should have node with highest seq")
            },
        },
        {
            name: "Duplicate with equal seq in subnets - should skip",
            nodes: []*enode.Node{
                node1_seq2_subnet1, // First occurrence
                node1_seq2_subnet1, // Same exact node instance, should be skipped (continue branch)
                node2_seq1_subnet1, // Different node
            },
            defectiveSubnets: map[uint64]int{1: 2},
            expectedCount:    2,
            description:      "Should skip duplicate with equal sequence number in subnet search",
            eval: func(t *testing.T, result []*enode.Node) {
                foundNode1 := false
                foundNode2 := false
                node1Count := 0
                for _, node := range result {
                    if node.ID() == node1_seq2_subnet1.ID() {
                        require.Equal(t, node1_seq2_subnet1.Seq(), node.Seq(), "Node1 should have expected seq")
                        foundNode1 = true
                        node1Count++
                    }
                    if node.ID() == node2_seq1_subnet1.ID() {
                        foundNode2 = true
                    }
                }
                require.Equal(t, true, foundNode1, "Should have node1")
                require.Equal(t, true, foundNode2, "Should have node2")
                require.Equal(t, 1, node1Count, "Should have exactly one instance of node1")
            },
        },
        {
            name: "Mix with different subnets",
            nodes: []*enode.Node{
                node2_seq1_subnet1,
                node2_seq2_subnet2, // Higher seq but different subnet
                node3_seq1_subnet1_2,
            },
            defectiveSubnets: map[uint64]int{1: 2, 2: 1},
            expectedCount:    2, // node2 (latest) and node3
            description:      "Should handle nodes with different subnet subscriptions",
            eval:             nil, // Basic count validation is sufficient
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            gFlags := new(flags.GlobalFlags)
            gFlags.MinimumPeersPerSubnet = 1
            flags.Init(gFlags)
            defer flags.Init(new(flags.GlobalFlags))

            fakePeer := testp2p.NewTestP2P(t)

            s := &Service{
                cfg: &Config{
                    MaxPeers: 30,
                    DB:       db,
                },
                genesisTime:           time.Now(),
                genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
                peers: peers.NewStatus(ctx, &peers.StatusConfig{
                    PeerLimit:    30,
                    ScorerParams: &scorers.Config{},
                }),
                host: fakePeer.BHost,
            }

            localNode := createTestNodeRandom(t)

            mockIter := testp2p.NewMockIterator(tt.nodes)
            s.dv5Listener = testp2p.NewMockListener(localNode, mockIter)

            digest, err := s.currentForkDigest()
            require.NoError(t, err)

            ctxWithTimeout, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
            defer cancel()

            result, err := s.findPeersWithSubnets(
                ctxWithTimeout,
                AttestationSubnetTopicFormat,
                digest,
                1,
                tt.defectiveSubnets,
            )

            require.NoError(t, err, tt.description)
            require.Equal(t, tt.expectedCount, len(result), tt.description)

            if tt.eval != nil {
                tt.eval(t, result)
            }
        })
    }
}

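// Illustrative sketch, not part of this change: the deduplication rule the
// cases above exercise, assuming discovered nodes are kept in a map keyed by
// enode ID. A candidate only replaces an existing entry when it carries a
// strictly higher ENR sequence number; equal or lower sequences are skipped.
// The helper name is hypothetical.
func dedupNodeBySeq(found map[enode.ID]*enode.Node, candidate *enode.Node) {
    if existing, ok := found[candidate.ID()]; ok && candidate.Seq() <= existing.Seq() {
        return // equal or stale ENR: keep the record we already hold
    }
    found[candidate.ID()] = candidate // first sighting or fresher ENR
}
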
func TestFindPeersWithSubnets_FilterPeerRemoval(t *testing.T) {
    params.SetupTestConfigCleanup(t)
    cache.SubnetIDs.EmptyAllCaches()
    defer cache.SubnetIDs.EmptyAllCaches()

    ctx := context.Background()
    db := testDB.SetupDB(t)

    localNode1 := createTestNodeWithID(t, "node1")
    localNode2 := createTestNodeWithID(t, "node2")
    localNode3 := createTestNodeWithID(t, "node3")

    // Create versions of node1 with subnet 1
    setNodeSubnets(localNode1, []uint64{1})
    setNodeSeq(localNode1, 1)
    node1_seq1_valid_subnet1 := localNode1.Node()

    // Create bad version (higher seq)
    setNodeSeq(localNode1, 2)
    node1_seq2_bad_subnet1 := localNode1.Node()

    // Create another valid version
    setNodeSeq(localNode1, 3)
    node1_seq3_valid_subnet1 := localNode1.Node()

    // Node2 with subnet 1
    setNodeSubnets(localNode2, []uint64{1})
    node2_seq1_valid_subnet1 := localNode2.Node()

    // Node3 with subnets 1 and 2
    setNodeSubnets(localNode3, []uint64{1, 2})
    node3_seq1_valid_subnet1_2 := localNode3.Node()

    tests := []struct {
        name             string
        nodes            []*enode.Node
        defectiveSubnets map[uint64]int
        expectedCount    int
        description      string
        eval             func(t *testing.T, result []*enode.Node)
    }{
        {
            name: "Valid node in subnet followed by bad version - should remove",
            nodes: []*enode.Node{
                node1_seq1_valid_subnet1, // First add valid node with subnet 1
                node1_seq2_bad_subnet1,   // Invalid version with higher seq - should delete
                node2_seq1_valid_subnet1, // Different valid node with subnet 1
            },
            defectiveSubnets: map[uint64]int{1: 2}, // Need 2 peers for subnet 1
            expectedCount:    1,                    // Only node2 should remain
            description:      "Should remove node from map when bad version arrives, even if it has required subnet",
            eval: func(t *testing.T, result []*enode.Node) {
                foundNode1 := false
                foundNode2 := false
                for _, node := range result {
                    if node.ID() == node1_seq1_valid_subnet1.ID() {
                        foundNode1 = true
                    }
                    if node.ID() == node2_seq1_valid_subnet1.ID() {
                        foundNode2 = true
                    }
                }
                require.Equal(t, false, foundNode1, "Node1 should have been removed despite having subnet")
                require.Equal(t, true, foundNode2, "Node2 should be present")
            },
        },
        {
            name: "Bad node with subnet stays bad even with higher seq",
            nodes: []*enode.Node{
                node1_seq2_bad_subnet1,   // First bad node - not added
                node1_seq3_valid_subnet1, // Higher seq but same bad peer ID
                node2_seq1_valid_subnet1, // Different valid node
            },
            defectiveSubnets: map[uint64]int{1: 2},
            expectedCount:    1, // Only node2 (node1 remains bad)
            description:      "Bad peer with subnet remains bad even with higher seq",
            eval: func(t *testing.T, result []*enode.Node) {
                foundNode1 := false
                foundNode2 := false
                for _, node := range result {
                    if node.ID() == node1_seq3_valid_subnet1.ID() {
                        foundNode1 = true
                    }
                    if node.ID() == node2_seq1_valid_subnet1.ID() {
                        foundNode2 = true
                    }
                }
                require.Equal(t, false, foundNode1, "Node1 should remain bad despite having subnet")
                require.Equal(t, true, foundNode2, "Node2 should be present")
            },
        },
        {
            name: "Mixed valid and bad nodes with subnets",
            nodes: []*enode.Node{
                node1_seq1_valid_subnet1,   // Add valid node1 with subnet
                node2_seq1_valid_subnet1,   // Add valid node2 with subnet
                node1_seq2_bad_subnet1,     // Invalid update for node1 - should remove
                node3_seq1_valid_subnet1_2, // Add valid node3 with multiple subnets
            },
            defectiveSubnets: map[uint64]int{1: 3}, // Need 3 peers for subnet 1
            expectedCount:    2,                    // Only node2 and node3 should remain
            description:      "Should handle removal of nodes with subnets when they become bad",
            eval: func(t *testing.T, result []*enode.Node) {
                foundNode1 := false
                foundNode2 := false
                foundNode3 := false
                for _, node := range result {
                    if node.ID() == node1_seq1_valid_subnet1.ID() {
                        foundNode1 = true
                    }
                    if node.ID() == node2_seq1_valid_subnet1.ID() {
                        foundNode2 = true
                    }
                    if node.ID() == node3_seq1_valid_subnet1_2.ID() {
                        foundNode3 = true
                    }
                }
                require.Equal(t, false, foundNode1, "Node1 should have been removed")
                require.Equal(t, true, foundNode2, "Node2 should be present")
                require.Equal(t, true, foundNode3, "Node3 should be present")
            },
        },
        {
            name: "Node with subnet marked bad stays bad for all sequences",
            nodes: []*enode.Node{
                node1_seq1_valid_subnet1, // Add valid node1 with subnet
                node1_seq2_bad_subnet1,   // Bad update - should remove and mark bad
                node1_seq3_valid_subnet1, // Higher seq but still same bad peer ID
                node2_seq1_valid_subnet1, // Different valid node
            },
            defectiveSubnets: map[uint64]int{1: 2},
            expectedCount:    1, // Only node2 (node1 stays bad)
            description:      "Once marked bad, subnet peer stays bad for all sequences",
            eval: func(t *testing.T, result []*enode.Node) {
                foundNode1 := false
                foundNode2 := false
                for _, node := range result {
                    if node.ID() == node1_seq3_valid_subnet1.ID() {
                        foundNode1 = true
                    }
                    if node.ID() == node2_seq1_valid_subnet1.ID() {
                        foundNode2 = true
                    }
                }
                require.Equal(t, false, foundNode1, "Node1 should stay bad")
                require.Equal(t, true, foundNode2, "Node2 should be present")
            },
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            // Initialize flags for subnet operations
            gFlags := new(flags.GlobalFlags)
            gFlags.MinimumPeersPerSubnet = 1
            flags.Init(gFlags)
            defer flags.Init(new(flags.GlobalFlags))

            // Create test P2P instance
            fakePeer := testp2p.NewTestP2P(t)

            // Create mock service
            s := &Service{
                cfg: &Config{
                    MaxPeers: 30,
                    DB:       db,
                },
                genesisTime:           time.Now(),
                genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
                peers: peers.NewStatus(ctx, &peers.StatusConfig{
                    PeerLimit:    30,
                    ScorerParams: &scorers.Config{},
                }),
                host: fakePeer.BHost,
            }

            // Mark specific node versions as "bad" to simulate filterPeer failures
            for _, node := range tt.nodes {
                if node == node1_seq2_bad_subnet1 {
                    // Get peer ID from the node to mark it as bad
                    peerData, _, _ := convertToAddrInfo(node)
                    if peerData != nil {
                        s.peers.Add(node.Record(), peerData.ID, nil, network.DirUnknown)
                        // Mark as bad peer - this will make filterPeer return false
                        s.peers.Scorers().BadResponsesScorer().Increment(peerData.ID)
                        s.peers.Scorers().BadResponsesScorer().Increment(peerData.ID)
                        s.peers.Scorers().BadResponsesScorer().Increment(peerData.ID)
                    }
                }
            }

            localNode := createTestNodeRandom(t)

            mockIter := testp2p.NewMockIterator(tt.nodes)
            s.dv5Listener = testp2p.NewMockListener(localNode, mockIter)

            digest, err := s.currentForkDigest()
            require.NoError(t, err)

            ctxWithTimeout, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
            defer cancel()

            result, err := s.findPeersWithSubnets(
                ctxWithTimeout,
                AttestationSubnetTopicFormat,
                digest,
                1,
                tt.defectiveSubnets,
            )

            require.NoError(t, err, tt.description)
            require.Equal(t, tt.expectedCount, len(result), tt.description)

            if tt.eval != nil {
                tt.eval(t, result)
            }
        })
    }
}

// callbackIteratorForSubnets allows us to execute callbacks at specific points during iteration.
type callbackIteratorForSubnets struct {
    nodes     []*enode.Node
    index     int
    callbacks map[int]func() // map from index to callback function
}

func (c *callbackIteratorForSubnets) Next() bool {
    // Execute callback before checking if we can continue (if one exists)
    if callback, exists := c.callbacks[c.index]; exists {
        callback()
    }

    return c.index < len(c.nodes)
}

func (c *callbackIteratorForSubnets) Node() *enode.Node {
    if c.index >= len(c.nodes) {
        return nil
    }

    node := c.nodes[c.index]
    c.index++
    return node
}

func (c *callbackIteratorForSubnets) Close() {
    // Nothing to clean up for this simple implementation
}

func TestFindPeersWithSubnets_received_bad_existing_node(t *testing.T) {
    // This test successfully triggers delete(nodeByNodeID, node.ID()) in subnets.go by:
    // 1. Processing node1_seq1 first (passes filterPeer, gets added to the map).
    // 2. The callback marks the peer as bad before node1_seq2 is processed.
    // 3. Processing node1_seq2 (fails filterPeer, triggers the delete since ok=true).
    params.SetupTestConfigCleanup(t)
    cache.SubnetIDs.EmptyAllCaches()
    defer cache.SubnetIDs.EmptyAllCaches()

    ctx := context.Background()
    db := testDB.SetupDB(t)

    // Create LocalNode with same ID but different sequences
    localNode1 := createTestNodeWithID(t, "testnode")
    setNodeSubnets(localNode1, []uint64{1})
    node1_seq1 := localNode1.Node() // Get current node
    currentSeq := node1_seq1.Seq()
    setNodeSeq(localNode1, currentSeq+1) // Increment sequence by 1
    node1_seq2 := localNode1.Node()      // This should have higher seq

    // Additional node to ensure we have enough peers to process
    localNode2 := createTestNodeWithID(t, "othernode")
    setNodeSubnets(localNode2, []uint64{1})
    node2 := localNode2.Node()

    gFlags := new(flags.GlobalFlags)
    gFlags.MinimumPeersPerSubnet = 1
    flags.Init(gFlags)
    defer flags.Init(new(flags.GlobalFlags))

    fakePeer := testp2p.NewTestP2P(t)

    service := &Service{
        cfg: &Config{
            MaxPeers: 30,
            DB:       db,
        },
        genesisTime:           time.Now(),
        genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
        peers: peers.NewStatus(ctx, &peers.StatusConfig{
            PeerLimit:    30,
            ScorerParams: &scorers.Config{},
        }),
        host: fakePeer.BHost,
    }

    // Create iterator with callback that marks peer as bad before processing node1_seq2
    iter := &callbackIteratorForSubnets{
        nodes: []*enode.Node{node1_seq1, node1_seq2, node2},
        index: 0,
        callbacks: map[int]func(){
            1: func() { // Before processing node1_seq2 (index 1)
                // Mark peer as bad before processing node1_seq2
                peerData, _, _ := convertToAddrInfo(node1_seq2)
                if peerData != nil {
                    service.peers.Add(node1_seq2.Record(), peerData.ID, nil, network.DirUnknown)
                    // Mark as bad peer - need enough increments to exceed threshold (6)
                    for i := 0; i < 10; i++ {
                        service.peers.Scorers().BadResponsesScorer().Increment(peerData.ID)
                    }
                }
            },
        },
    }

    localNode := createTestNodeRandom(t)
    service.dv5Listener = testp2p.NewMockListener(localNode, iter)

    digest, err := service.currentForkDigest()
    require.NoError(t, err)

    // Run findPeersWithSubnets - node1_seq1 gets processed first, then the callback marks the peer bad, then node1_seq2 fails
    ctxWithTimeout, cancel := context.WithTimeout(ctx, 1*time.Second)
    defer cancel()

    result, err := service.findPeersWithSubnets(
        ctxWithTimeout,
        AttestationSubnetTopicFormat,
        digest,
        1,
        map[uint64]int{1: 2}, // Need 2 peers for subnet 1
    )

    require.NoError(t, err)
    require.Equal(t, 1, len(result))
    require.Equal(t, localNode2.Node().ID(), result[0].ID()) // only node2 should remain
}

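// Illustrative sketch, not part of this change: the bookkeeping step the test
// above drives in subnets.go. When a node already tracked in the candidate map
// re-appears and now fails filterPeer, its entry is dropped so a bad peer
// cannot linger in the candidate set. Names here are hypothetical except for
// the delete-on-failure behavior described in the comments above.
func pruneOnFilterFailure(nodeByNodeID map[enode.ID]*enode.Node, node *enode.Node, passesFilter bool) {
    if passesFilter {
        nodeByNodeID[node.ID()] = node // keep or refresh the candidate
        return
    }
    if _, ok := nodeByNodeID[node.ID()]; ok {
        delete(nodeByNodeID, node.ID()) // known node turned bad: remove it
    }
}
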
@@ -7,6 +7,7 @@ go_library(
        "fuzz_p2p.go",
        "mock_broadcaster.go",
        "mock_host.go",
        "mock_listener.go",
        "mock_metadataprovider.go",
        "mock_peermanager.go",
        "mock_peersprovider.go",

@@ -167,8 +167,8 @@ func (*FakeP2P) BroadcastLightClientFinalityUpdate(_ context.Context, _ interfac
    return nil
}

// BroadcastDataColumn -- fake.
func (*FakeP2P) BroadcastDataColumn(_ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar) error {
// BroadcastDataColumnSidecar -- fake.
func (*FakeP2P) BroadcastDataColumnSidecar(_ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar) error {
    return nil
}

@@ -62,8 +62,8 @@ func (m *MockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
    return nil
}

// BroadcastDataColumn broadcasts a data column for mock.
func (m *MockBroadcaster) BroadcastDataColumn([fieldparams.RootLength]byte, uint64, *ethpb.DataColumnSidecar) error {
// BroadcastDataColumnSidecar broadcasts a data column for mock.
func (m *MockBroadcaster) BroadcastDataColumnSidecar([fieldparams.RootLength]byte, uint64, *ethpb.DataColumnSidecar) error {
    m.BroadcastCalled.Store(true)
    return nil
}

128 beacon-chain/p2p/testing/mock_listener.go Normal file
@@ -0,0 +1,128 @@
package testing

import (
    "github.com/ethereum/go-ethereum/p2p/enode"
)

// MockListener is a mock implementation of the Listener and ListenerRebooter interfaces
// that can be used in tests. It provides configurable behavior for all methods.
type MockListener struct {
    LocalNodeFunc   func() *enode.LocalNode
    SelfFunc        func() *enode.Node
    RandomNodesFunc func() enode.Iterator
    LookupFunc      func(enode.ID) []*enode.Node
    ResolveFunc     func(*enode.Node) *enode.Node
    PingFunc        func(*enode.Node) error
    RequestENRFunc  func(*enode.Node) (*enode.Node, error)
    RebootFunc      func() error
    CloseFunc       func()

    // Default implementations
    localNode *enode.LocalNode
    iterator  enode.Iterator
}

// NewMockListener creates a new MockListener with default implementations.
func NewMockListener(localNode *enode.LocalNode, iterator enode.Iterator) *MockListener {
    return &MockListener{
        localNode: localNode,
        iterator:  iterator,
    }
}

func (m *MockListener) LocalNode() *enode.LocalNode {
    if m.LocalNodeFunc != nil {
        return m.LocalNodeFunc()
    }
    return m.localNode
}

func (m *MockListener) Self() *enode.Node {
    if m.SelfFunc != nil {
        return m.SelfFunc()
    }
    if m.localNode != nil {
        return m.localNode.Node()
    }
    return nil
}

func (m *MockListener) RandomNodes() enode.Iterator {
    if m.RandomNodesFunc != nil {
        return m.RandomNodesFunc()
    }
    return m.iterator
}

func (m *MockListener) Lookup(id enode.ID) []*enode.Node {
    if m.LookupFunc != nil {
        return m.LookupFunc(id)
    }
    return nil
}

func (m *MockListener) Resolve(node *enode.Node) *enode.Node {
    if m.ResolveFunc != nil {
        return m.ResolveFunc(node)
    }
    return nil
}

func (m *MockListener) Ping(node *enode.Node) error {
    if m.PingFunc != nil {
        return m.PingFunc(node)
    }
    return nil
}

func (m *MockListener) RequestENR(node *enode.Node) (*enode.Node, error) {
    if m.RequestENRFunc != nil {
        return m.RequestENRFunc(node)
    }
    return nil, nil
}

func (m *MockListener) RebootListener() error {
    if m.RebootFunc != nil {
        return m.RebootFunc()
    }
    return nil
}

func (m *MockListener) Close() {
    if m.CloseFunc != nil {
        m.CloseFunc()
    }
}

// MockIterator is a mock implementation of enode.Iterator for testing.
type MockIterator struct {
    Nodes    []*enode.Node
    Position int
    Closed   bool
}

func NewMockIterator(nodes []*enode.Node) *MockIterator {
    return &MockIterator{
        Nodes: nodes,
    }
}

func (m *MockIterator) Next() bool {
    if m.Closed || m.Position >= len(m.Nodes) {
        return false
    }
    m.Position++
    return true
}

func (m *MockIterator) Node() *enode.Node {
    if m.Position == 0 || m.Position > len(m.Nodes) {
        return nil
    }
    return m.Nodes[m.Position-1]
}

func (m *MockIterator) Close() {
    m.Closed = true
}

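// Illustrative sketch, not part of this change: how a test can wire the mocks
// above into a discovery consumer. The iterator yields its fixed node list
// once, and the listener hands it out via RandomNodes. Assumes a
// *enode.LocalNode is available (the subnet tests build one with
// createTestNodeRandom). The helper name is hypothetical.
func drainMockListener(localNode *enode.LocalNode, nodes []*enode.Node) []*enode.Node {
    listener := NewMockListener(localNode, NewMockIterator(nodes))

    var seen []*enode.Node
    it := listener.RandomNodes()
    for it.Next() {
        seen = append(seen, it.Node()) // drains the fixed list in order
    }
    it.Close()
    return seen
}
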
@@ -50,6 +50,7 @@ const (

// TestP2P represents a p2p implementation that can be used for testing.
type TestP2P struct {
    mu      sync.Mutex
    t       *testing.T
    BHost   host.Host
    EnodeID enode.ID
@@ -63,6 +64,7 @@ type TestP2P struct {
    custodyInfoMut        sync.RWMutex // protects custodyGroupCount and earliestAvailableSlot
    earliestAvailableSlot primitives.Slot
    custodyGroupCount     uint64
    enr                   *enr.Record
}

// NewTestP2P initializes a new p2p test service.
@@ -103,6 +105,7 @@ func NewTestP2P(t *testing.T, userOptions ...config.Option) *TestP2P {
        pubsub:       ps,
        joinedTopics: map[string]*pubsub.Topic{},
        peers:        peerStatuses,
        enr:          new(enr.Record),
    }
}

@@ -228,8 +231,8 @@ func (p *TestP2P) BroadcastLightClientFinalityUpdate(_ context.Context, _ interf
    return nil
}

// BroadcastDataColumn broadcasts a data column for mock.
func (p *TestP2P) BroadcastDataColumn([fieldparams.RootLength]byte, uint64, *ethpb.DataColumnSidecar) error {
// BroadcastDataColumnSidecar broadcasts a data column for mock.
func (p *TestP2P) BroadcastDataColumnSidecar([fieldparams.RootLength]byte, uint64, *ethpb.DataColumnSidecar) error {
    p.BroadcastCalled.Store(true)
    return nil
}
@@ -241,6 +244,8 @@ func (p *TestP2P) SetStreamHandler(topic string, handler network.StreamHandler)

// JoinTopic will join PubSub topic, if not already joined.
func (p *TestP2P) JoinTopic(topic string, opts ...pubsub.TopicOpt) (*pubsub.Topic, error) {
    p.mu.Lock()
    defer p.mu.Unlock()
    if _, ok := p.joinedTopics[topic]; !ok {
        joinedTopic, err := p.pubsub.Join(topic, opts...)
        if err != nil {
@@ -310,8 +315,8 @@ func (p *TestP2P) Host() host.Host {
}

// ENR returns the enr of the local peer.
func (*TestP2P) ENR() *enr.Record {
    return new(enr.Record)
func (p *TestP2P) ENR() *enr.Record {
    return p.enr
}

// NodeID returns the node id of the local peer.

@@ -1230,6 +1230,7 @@ func (s *Service) prysmBeaconEndpoints(
        methods: []string{http.MethodGet},
    },
    {
        // Warning: no longer supported post Fulu fork
        template: "/prysm/v1/beacon/blobs",
        name:     namespace + ".PublishBlobs",
        middleware: []middleware.Middleware{

@@ -18,6 +18,7 @@ go_library(
        "//api/server:go_default_library",
        "//api/server/structs:go_default_library",
        "//beacon-chain/blockchain:go_default_library",
        "//beacon-chain/blockchain/kzg:go_default_library",
        "//beacon-chain/cache:go_default_library",
        "//beacon-chain/cache/depositsnapshot:go_default_library",
        "//beacon-chain/core/altair:go_default_library",
@@ -60,7 +61,6 @@ go_library(
        "//runtime/version:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_ethereum_go_ethereum//crypto/kzg4844:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
@@ -72,6 +72,7 @@ go_library(
go_test(
    name = "go_default_test",
    srcs = [
        "handlers_equivocation_test.go",
        "handlers_pool_test.go",
        "handlers_state_test.go",
        "handlers_test.go",
@@ -83,6 +84,7 @@ go_test(
        "//api:go_default_library",
        "//api/server:go_default_library",
        "//api/server/structs:go_default_library",
        "//beacon-chain/blockchain/kzg:go_default_library",
        "//beacon-chain/blockchain/testing:go_default_library",
        "//beacon-chain/cache/depositsnapshot:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
@@ -123,7 +125,6 @@ go_test(
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_crate_crypto_go_kzg_4844//:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prysmaticlabs_fastssz//:go_default_library",

@@ -13,6 +13,7 @@ import (

    "github.com/OffchainLabs/prysm/v6/api"
    "github.com/OffchainLabs/prysm/v6/api/server/structs"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/cache/depositsnapshot"
    corehelpers "github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
@@ -32,7 +33,6 @@ import (
    "github.com/OffchainLabs/prysm/v6/runtime/version"
    "github.com/OffchainLabs/prysm/v6/time/slots"
    "github.com/ethereum/go-ethereum/common/hexutil"
    "github.com/ethereum/go-ethereum/crypto/kzg4844"
    "github.com/pkg/errors"
    ssz "github.com/prysmaticlabs/fastssz"
    "github.com/sirupsen/logrus"
@@ -334,26 +334,26 @@ func (s *Server) GetBlockAttestationsV2(w http.ResponseWriter, r *http.Request)
    consensusAtts := blk.Block().Body().Attestations()

    v := blk.Block().Version()
    var attStructs []interface{}
    attStructs := make([]interface{}, len(consensusAtts))
    if v >= version.Electra {
        for _, att := range consensusAtts {
        for index, att := range consensusAtts {
            a, ok := att.(*eth.AttestationElectra)
            if !ok {
                httputil.HandleError(w, fmt.Sprintf("unable to convert consensus attestations electra of type %T", att), http.StatusInternalServerError)
                return
            }
            attStruct := structs.AttElectraFromConsensus(a)
            attStructs = append(attStructs, attStruct)
            attStructs[index] = attStruct
        }
    } else {
        for _, att := range consensusAtts {
        for index, att := range consensusAtts {
            a, ok := att.(*eth.Attestation)
            if !ok {
                httputil.HandleError(w, fmt.Sprintf("unable to convert consensus attestation of type %T", att), http.StatusInternalServerError)
                return
            }
            attStruct := structs.AttFromConsensus(a)
            attStructs = append(attStructs, attStruct)
            attStructs[index] = attStruct
        }
    }

@@ -701,7 +701,7 @@ func (s *Server) publishBlockSSZ(ctx context.Context, w http.ResponseWriter, r *
    // Validate and optionally broadcast sidecars on equivocation.
    if err := s.validateBroadcast(ctx, r, genericBlock); err != nil {
        if errors.Is(err, errEquivocatedBlock) {
            b, err := blocks.NewSignedBeaconBlock(genericBlock)
            b, err := blocks.NewSignedBeaconBlock(genericBlock.Block)
            if err != nil {
                httputil.HandleError(w, err.Error(), http.StatusBadRequest)
                return
@@ -855,7 +855,7 @@ func (s *Server) publishBlock(ctx context.Context, w http.ResponseWriter, r *htt
    // Validate and optionally broadcast sidecars on equivocation.
    if err := s.validateBroadcast(ctx, r, genericBlock); err != nil {
        if errors.Is(err, errEquivocatedBlock) {
            b, err := blocks.NewSignedBeaconBlock(genericBlock)
            b, err := blocks.NewSignedBeaconBlock(genericBlock.Block)
            if err != nil {
                httputil.HandleError(w, err.Error(), http.StatusBadRequest)
                return
@@ -942,14 +942,13 @@ func decodePhase0JSON(body []byte) (*eth.GenericSignedBeaconBlock, error) {
// broadcastSidecarsIfSupported broadcasts blob sidecars when an equivocated block occurs.
func broadcastSidecarsIfSupported(ctx context.Context, s *Server, b interfaces.SignedBeaconBlock, gb *eth.GenericSignedBeaconBlock, versionHeader string) error {
    switch versionHeader {
    case version.String(version.Fulu):
        return s.broadcastSeenBlockSidecars(ctx, b, gb.GetFulu().Blobs, gb.GetFulu().KzgProofs)
    case version.String(version.Electra):
        return s.broadcastSeenBlockSidecars(ctx, b, gb.GetElectra().Blobs, gb.GetElectra().KzgProofs)
    case version.String(version.Deneb):
        return s.broadcastSeenBlockSidecars(ctx, b, gb.GetDeneb().Blobs, gb.GetDeneb().KzgProofs)
    default:
        // other forks before Deneb do not support blob sidecars
        // forks after Fulu do not support blob sidecars; they use data columns instead, so there is no need to rebroadcast
        return nil
    }
}
@@ -1053,7 +1052,7 @@ func (s *Server) validateConsensus(ctx context.Context, b *eth.GenericSignedBeac
        return nil
    }

    if err := s.validateBlobSidecars(blk, blobs, proofs); err != nil {
    if err := s.validateBlobs(blk, blobs, proofs); err != nil {
        return err
    }

@@ -1067,23 +1066,41 @@ func (s *Server) validateEquivocation(blk interfaces.ReadOnlyBeaconBlock) error
    return nil
}

func (s *Server) validateBlobSidecars(blk interfaces.SignedBeaconBlock, blobs [][]byte, proofs [][]byte) error {
func (s *Server) validateBlobs(blk interfaces.SignedBeaconBlock, blobs [][]byte, proofs [][]byte) error {
    if blk.Version() < version.Deneb {
        return nil
    }
    kzgs, err := blk.Block().Body().BlobKzgCommitments()
    numberOfColumns := params.BeaconConfig().NumberOfColumns
    commitments, err := blk.Block().Body().BlobKzgCommitments()
    if err != nil {
        return errors.Wrap(err, "could not get blob kzg commitments")
    }
    if len(blobs) != len(proofs) || len(blobs) != len(kzgs) {
        return errors.New("number of blobs, proofs, and commitments do not match")
    maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(blk.Block().Slot())
    if len(blobs) > maxBlobsPerBlock {
        return fmt.Errorf("number of blobs over max, %d > %d", len(blobs), maxBlobsPerBlock)
    }
    for i, blob := range blobs {
        b := kzg4844.Blob(blob)
        if err := kzg4844.VerifyBlobProof(&b, kzg4844.Commitment(kzgs[i]), kzg4844.Proof(proofs[i])); err != nil {
            return errors.Wrap(err, "could not verify blob proof")
    if blk.Version() >= version.Fulu {
        // For Fulu blocks, proofs are cell proofs (blobs * numberOfColumns)
        expectedProofsCount := uint64(len(blobs)) * numberOfColumns
        if uint64(len(proofs)) != expectedProofsCount || len(blobs) != len(commitments) {
            return fmt.Errorf("number of blobs (%d), cell proofs (%d), and commitments (%d) do not match (expected %d cell proofs)", len(blobs), len(proofs), len(commitments), expectedProofsCount)
        }
        // For Fulu blocks, proofs are cell proofs from the execution client's BlobsBundleV2.
        // Verify cell proofs directly without reconstructing data column sidecars.
        if err := kzg.VerifyCellKZGProofBatchFromBlobData(blobs, commitments, proofs, numberOfColumns); err != nil {
            return errors.Wrap(err, "could not verify cell proofs")
        }
    } else {
        // For pre-Fulu blocks, proofs are blob proofs (1:1 with blobs)
        if len(blobs) != len(proofs) || len(blobs) != len(commitments) {
            return errors.Errorf("number of blobs (%d), proofs (%d), and commitments (%d) do not match", len(blobs), len(proofs), len(commitments))
        }
        // Use batch verification for better performance
        if err := kzg.VerifyBlobKZGProofBatch(blobs, commitments, proofs); err != nil {
            return errors.Wrap(err, "could not verify blob proofs")
        }
    }

    return nil
}

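// Illustrative sketch, not part of this change: the proof-count arithmetic
// validateBlobs applies. Pre-Fulu, proofs pair 1:1 with blobs; from Fulu on,
// each blob carries one cell proof per data column, so a block with B blobs
// must ship B*NumberOfColumns proofs (e.g. 2 blobs * 128 columns = 256).
// The helper name is hypothetical.
func expectedProofCount(postFulu bool, blobCount int, numberOfColumns uint64) uint64 {
    if postFulu {
        return uint64(blobCount) * numberOfColumns // flattened cell proofs
    }
    return uint64(blobCount) // one blob proof per blob
}
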
@@ -1220,7 +1237,7 @@ func (s *Server) GetStateFork(w http.ResponseWriter, r *http.Request) {
    fork := st.Fork()
    isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
    if err != nil {
        httputil.HandleError(w, "Could not check optimistic status"+err.Error(), http.StatusInternalServerError)
        helpers.HandleIsOptimisticError(w, err)
        return
    }
    blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
@@ -1331,7 +1348,7 @@ func (s *Server) GetCommittees(w http.ResponseWriter, r *http.Request) {

    isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
    if err != nil {
        httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
        helpers.HandleIsOptimisticError(w, err)
        return
    }

@@ -1512,7 +1529,7 @@ func (s *Server) GetFinalityCheckpoints(w http.ResponseWriter, r *http.Request)
    }
    isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
    if err != nil {
        httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
        helpers.HandleIsOptimisticError(w, err)
        return
    }
    blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
@@ -1627,6 +1644,8 @@ func (s *Server) broadcastSeenBlockSidecars(
    if err != nil {
        return err
    }

    // Broadcast blob sidecars with forkchoice checking
    for _, sc := range scs {
        r, err := sc.SignedBlockHeader.Header.HashTreeRoot()
        if err != nil {
@@ -1686,7 +1705,7 @@ func (s *Server) GetPendingConsolidations(w http.ResponseWriter, r *http.Request
    } else {
        isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
        if err != nil {
            httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
            helpers.HandleIsOptimisticError(w, err)
            return
        }
        blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
@@ -1742,7 +1761,7 @@ func (s *Server) GetPendingDeposits(w http.ResponseWriter, r *http.Request) {
    } else {
        isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
        if err != nil {
            httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
            helpers.HandleIsOptimisticError(w, err)
            return
        }
        blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
@@ -1798,7 +1817,7 @@ func (s *Server) GetPendingPartialWithdrawals(w http.ResponseWriter, r *http.Req
    } else {
        isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
        if err != nil {
            httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
            helpers.HandleIsOptimisticError(w, err)
            return
        }
        blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
@@ -1851,7 +1870,7 @@ func (s *Server) GetProposerLookahead(w http.ResponseWriter, r *http.Request) {
    } else {
        isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
        if err != nil {
            httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
            helpers.HandleIsOptimisticError(w, err)
            return
        }
        blockRoot, err := st.LatestBlockHeader().HashTreeRoot()

35 beacon-chain/rpc/eth/beacon/handlers_equivocation_test.go Normal file
@@ -0,0 +1,35 @@
package beacon

import (
    "encoding/json"
    "testing"

    "github.com/OffchainLabs/prysm/v6/api/server/structs"
    rpctesting "github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/eth/shared/testing"
    "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v6/testing/require"
)

// TestBlocks_NewSignedBeaconBlock_EquivocationFix tests that blocks.NewSignedBeaconBlock
// correctly handles the fixed case where genericBlock.Block is passed instead of genericBlock.
func TestBlocks_NewSignedBeaconBlock_EquivocationFix(t *testing.T) {
    // Parse the Phase0 JSON block
    var block structs.SignedBeaconBlock
    err := json.Unmarshal([]byte(rpctesting.Phase0Block), &block)
    require.NoError(t, err)

    // Convert to generic format
    genericBlock, err := block.ToGeneric()
    require.NoError(t, err)

    // Test the FIX: pass genericBlock.Block instead of genericBlock
    // This is what our fix changed in handlers.go line 704 and 858
    _, err = blocks.NewSignedBeaconBlock(genericBlock.Block)
    require.NoError(t, err, "NewSignedBeaconBlock should work with genericBlock.Block")

    // Test the BROKEN version: pass genericBlock directly (this should fail)
    _, err = blocks.NewSignedBeaconBlock(genericBlock)
    if err == nil {
        t.Errorf("NewSignedBeaconBlock should fail with whole genericBlock but succeeded")
    }
}
@@ -56,7 +56,7 @@ func (s *Server) GetStateRoot(w http.ResponseWriter, r *http.Request) {
    }
    isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
    if err != nil {
        httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
        helpers.HandleIsOptimisticError(w, err)
        return
    }
    blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
@@ -125,7 +125,7 @@ func (s *Server) GetRandao(w http.ResponseWriter, r *http.Request) {

    isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
    if err != nil {
        httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
        helpers.HandleIsOptimisticError(w, err)
        return
    }

@@ -227,7 +227,7 @@ func (s *Server) GetSyncCommittees(w http.ResponseWriter, r *http.Request) {

    isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
    if err != nil {
        httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
        helpers.HandleIsOptimisticError(w, err)
        return
    }

@@ -14,6 +14,7 @@ import (

    "github.com/OffchainLabs/prysm/v6/api"
    "github.com/OffchainLabs/prysm/v6/api/server/structs"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
    chainMock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/cache/depositsnapshot"
    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
@@ -40,7 +41,6 @@ import (
    "github.com/OffchainLabs/prysm/v6/testing/require"
    "github.com/OffchainLabs/prysm/v6/testing/util"
    "github.com/OffchainLabs/prysm/v6/time/slots"
    GoKZG "github.com/crate-crypto/go-kzg-4844"
    "github.com/ethereum/go-ethereum/common/hexutil"
    "github.com/pkg/errors"
    ssz "github.com/prysmaticlabs/fastssz"
@@ -910,6 +910,100 @@ func TestGetBlockAttestations(t *testing.T) {
            })
        })
    })

    t.Run("empty-attestations", func(t *testing.T) {
        t.Run("v1", func(t *testing.T) {
            b := util.NewBeaconBlock()
            b.Block.Body.Attestations = []*eth.Attestation{} // Explicitly set empty attestations
            sb, err := blocks.NewSignedBeaconBlock(b)
            require.NoError(t, err)

            mockChainService := &chainMock.ChainService{
                FinalizedRoots: map[[32]byte]bool{},
            }

            s := &Server{
                OptimisticModeFetcher: mockChainService,
                FinalizationFetcher:   mockChainService,
                Blocker:               &testutil.MockBlocker{BlockToReturn: sb},
            }

            request := httptest.NewRequest(http.MethodGet, "http://foo.example/eth/v1/beacon/blocks/{block_id}/attestations", nil)
            request.SetPathValue("block_id", "head")
            writer := httptest.NewRecorder()
            writer.Body = &bytes.Buffer{}

            s.GetBlockAttestations(writer, request)
            require.Equal(t, http.StatusOK, writer.Code)
            resp := &structs.GetBlockAttestationsResponse{}
            require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))

            // Ensure data is empty array, not null
            require.NotNil(t, resp.Data)
            assert.Equal(t, 0, len(resp.Data))
        })

        t.Run("v2-pre-electra", func(t *testing.T) {
            b := util.NewBeaconBlock()
            b.Block.Body.Attestations = []*eth.Attestation{} // Explicitly set empty attestations
            sb, err := blocks.NewSignedBeaconBlock(b)
            require.NoError(t, err)
            mockChainService := &chainMock.ChainService{
                FinalizedRoots: map[[32]byte]bool{},
            }

            s := &Server{
                OptimisticModeFetcher: mockChainService,
                FinalizationFetcher:   mockChainService,
                Blocker:               &testutil.MockBlocker{BlockToReturn: sb},
            }

            request := httptest.NewRequest(http.MethodGet, "http://foo.example/eth/v2/beacon/blocks/{block_id}/attestations", nil)
            request.SetPathValue("block_id", "head")
            writer := httptest.NewRecorder()
            writer.Body = &bytes.Buffer{}

            s.GetBlockAttestationsV2(writer, request)
            require.Equal(t, http.StatusOK, writer.Code)
            resp := &structs.GetBlockAttestationsV2Response{}
            require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
            // Ensure data is "[]", not null
            require.NotNil(t, resp.Data)
            assert.Equal(t, string(json.RawMessage("[]")), string(resp.Data))
        })

        t.Run("v2-electra", func(t *testing.T) {
            eb := util.NewBeaconBlockFulu()
            eb.Block.Body.Attestations = []*eth.AttestationElectra{} // Explicitly set empty attestations
            esb, err := blocks.NewSignedBeaconBlock(eb)
            require.NoError(t, err)

            mockChainService := &chainMock.ChainService{
                FinalizedRoots: map[[32]byte]bool{},
            }

            s := &Server{
                OptimisticModeFetcher: mockChainService,
                FinalizationFetcher:   mockChainService,
                Blocker:               &testutil.MockBlocker{BlockToReturn: esb},
            }

            request := httptest.NewRequest(http.MethodGet, "http://foo.example/eth/v2/beacon/blocks/{block_id}/attestations", nil)
            request.SetPathValue("block_id", "head")
            writer := httptest.NewRecorder()
            writer.Body = &bytes.Buffer{}

            s.GetBlockAttestationsV2(writer, request)
            require.Equal(t, http.StatusOK, writer.Code)
            resp := &structs.GetBlockAttestationsV2Response{}
            require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))

            // Ensure data is "[]", not null
            require.NotNil(t, resp.Data)
            assert.Equal(t, string(json.RawMessage("[]")), string(resp.Data))
            assert.Equal(t, "fulu", resp.Version)
        })
    })
}

func TestGetBlindedBlock(t *testing.T) {
|
||||
@@ -4781,25 +4875,329 @@ func TestServer_broadcastBlobSidecars(t *testing.T) {
|
||||
require.LogsContain(t, hook, "Broadcasted blob sidecar for already seen block")
|
||||
}
|
||||
|
||||
func Test_validateBlobSidecars(t *testing.T) {
|
||||
func Test_validateBlobs(t *testing.T) {
|
||||
require.NoError(t, kzg.Start())
|
||||
|
||||
blob := util.GetRandBlob(123)
|
||||
commitment := GoKZG.KZGCommitment{180, 218, 156, 194, 59, 20, 10, 189, 186, 254, 132, 93, 7, 127, 104, 172, 238, 240, 237, 70, 83, 89, 1, 152, 99, 0, 165, 65, 143, 62, 20, 215, 230, 14, 205, 95, 28, 245, 54, 25, 160, 16, 178, 31, 232, 207, 38, 85}
|
||||
proof := GoKZG.KZGProof{128, 110, 116, 170, 56, 111, 126, 87, 229, 234, 211, 42, 110, 150, 129, 206, 73, 142, 167, 243, 90, 149, 240, 240, 236, 204, 143, 182, 229, 249, 81, 27, 153, 171, 83, 70, 144, 250, 42, 1, 188, 215, 71, 235, 30, 7, 175, 86}
|
||||
// Generate proper commitment and proof for the blob
|
||||
var kzgBlob kzg.Blob
|
||||
copy(kzgBlob[:], blob[:])
|
||||
commitment, err := kzg.BlobToKZGCommitment(&kzgBlob)
|
||||
require.NoError(t, err)
|
||||
proof, err := kzg.ComputeBlobKZGProof(&kzgBlob, commitment)
|
||||
require.NoError(t, err)
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Body.BlobKzgCommitments = [][]byte{commitment[:]}
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
require.NoError(t, s.validateBlobSidecars(b, [][]byte{blob[:]}, [][]byte{proof[:]}))
|
||||
require.NoError(t, s.validateBlobs(b, [][]byte{blob[:]}, [][]byte{proof[:]}))
|
||||
|
||||
require.ErrorContains(t, "number of blobs, proofs, and commitments do not match", s.validateBlobSidecars(b, [][]byte{blob[:]}, [][]byte{}))
|
||||
require.ErrorContains(t, "number of blobs (1), proofs (0), and commitments (1) do not match", s.validateBlobs(b, [][]byte{blob[:]}, [][]byte{}))
|
||||
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
blk.Block.Body.BlobKzgCommitments = [][]byte{sk.PublicKey().Marshal()}
|
||||
b, err = blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.ErrorContains(t, "could not verify blob proof: can't verify opening proof", s.validateBlobSidecars(b, [][]byte{blob[:]}, [][]byte{proof[:]}))
|
||||
require.ErrorContains(t, "could not verify blob proofs", s.validateBlobs(b, [][]byte{blob[:]}, [][]byte{proof[:]}))
|
||||
|
||||
blobs := [][]byte{}
|
||||
commitments := [][]byte{}
|
||||
proofs := [][]byte{}
|
||||
for i := 0; i < 10; i++ {
|
||||
blobs = append(blobs, blob[:])
|
||||
commitments = append(commitments, commitment[:])
|
||||
proofs = append(proofs, proof[:])
|
||||
}
|
||||
t.Run("pre-Deneb block should return early", func(t *testing.T) {
|
||||
// Create a pre-Deneb block (e.g., Capella)
|
||||
blk := util.NewBeaconBlockCapella()
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
// Should return nil for pre-Deneb blocks regardless of blobs
|
||||
require.NoError(t, s.validateBlobs(b, [][]byte{}, [][]byte{}))
|
||||
require.NoError(t, s.validateBlobs(b, blobs[:1], proofs[:1]))
|
||||
})
|
||||
|
||||
t.Run("Deneb block with valid single blob", func(t *testing.T) {
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Body.BlobKzgCommitments = [][]byte{commitment[:]}
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
require.NoError(t, s.validateBlobs(b, [][]byte{blob[:]}, [][]byte{proof[:]}))
|
||||
})
|
||||
|
||||
t.Run("Deneb block with max blobs (6)", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 100
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Slot = 10 // Deneb slot
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:6]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
// Should pass with exactly 6 blobs
|
||||
require.NoError(t, s.validateBlobs(b, blobs[:6], proofs[:6]))
|
||||
})
|
||||
|
||||
t.Run("Deneb block exceeding max blobs", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 100
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Slot = 10 // Deneb slot
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:7]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
// Should fail with 7 blobs when max is 6
|
||||
err = s.validateBlobs(b, blobs[:7], proofs[:7])
|
||||
require.ErrorContains(t, "number of blobs over max, 7 > 6", err)
|
||||
})
|
||||
|
||||
t.Run("Electra block with valid blobs", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Set up Electra config with max 9 blobs
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 5
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
blk := util.NewBeaconBlockElectra()
|
||||
blk.Block.Slot = 160 // Electra slot (epoch 5+)
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:9]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
// Should pass with 9 blobs in Electra
|
||||
require.NoError(t, s.validateBlobs(b, blobs[:9], proofs[:9]))
|
||||
})
|
||||
|
||||
t.Run("Electra block exceeding max blobs", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Set up Electra config with max 9 blobs
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 5
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
blk := util.NewBeaconBlockElectra()
|
||||
blk.Block.Slot = 160 // Electra slot
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:10]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
// Should fail with 10 blobs when max is 9
|
||||
err = s.validateBlobs(b, blobs[:10], proofs[:10])
|
||||
require.ErrorContains(t, "number of blobs over max, 10 > 9", err)
|
||||
})
|
||||
|
||||
t.Run("Fulu block with valid cell proofs", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 5
|
||||
testCfg.FuluForkEpoch = 10
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
|
||||
testCfg.NumberOfColumns = 128 // Standard PeerDAS configuration
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
// Create Fulu block with proper cell proofs
|
||||
blk := util.NewBeaconBlockFulu()
|
||||
blk.Block.Slot = 320 // Epoch 10 (Fulu fork)
|
||||
|
||||
// Generate valid commitments and cell proofs for testing
|
||||
blobCount := 2
|
||||
commitments := make([][]byte, blobCount)
|
||||
fuluBlobs := make([][]byte, blobCount)
|
||||
var kzgBlobs []kzg.Blob
|
||||
|
||||
for i := 0; i < blobCount; i++ {
|
||||
blob := util.GetRandBlob(int64(i))
|
||||
fuluBlobs[i] = blob[:]
|
||||
var kzgBlob kzg.Blob
|
||||
copy(kzgBlob[:], blob[:])
|
||||
kzgBlobs = append(kzgBlobs, kzgBlob)
|
||||
|
||||
// Generate commitment
|
||||
commitment, err := kzg.BlobToKZGCommitment(&kzgBlob)
|
||||
require.NoError(t, err)
|
||||
commitments[i] = commitment[:]
|
||||
}
|
||||
|
||||
blk.Block.Body.BlobKzgCommitments = commitments
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Generate cell proofs for the blobs (flattened format like execution client)
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
cellProofs := make([][]byte, uint64(blobCount)*numberOfColumns)
|
||||
for blobIdx := 0; blobIdx < blobCount; blobIdx++ {
|
||||
cellsAndProofs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlobs[blobIdx])
|
||||
require.NoError(t, err)
|
||||
|
||||
for colIdx := uint64(0); colIdx < numberOfColumns; colIdx++ {
|
||||
cellProofIdx := uint64(blobIdx)*numberOfColumns + colIdx
|
||||
cellProofs[cellProofIdx] = cellsAndProofs.Proofs[colIdx][:]
|
||||
}
|
||||
}
|
||||
|
||||
s := &Server{}
|
||||
// Should use cell batch verification for Fulu blocks
|
||||
require.NoError(t, s.validateBlobs(b, fuluBlobs, cellProofs))
|
||||
})
|
||||
|
||||
t.Run("Fulu block with invalid cell proof count", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 5
|
||||
testCfg.FuluForkEpoch = 10
|
||||
testCfg.NumberOfColumns = 128
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
blk := util.NewBeaconBlockFulu()
|
||||
blk.Block.Slot = 320 // Epoch 10 (Fulu fork)
|
||||
|
||||
// Create valid commitments but wrong number of cell proofs
|
||||
blobCount := 2
|
||||
commitments := make([][]byte, blobCount)
|
||||
fuluBlobs := make([][]byte, blobCount)
|
||||
for i := 0; i < blobCount; i++ {
|
||||
blob := util.GetRandBlob(int64(i))
|
||||
fuluBlobs[i] = blob[:]
|
||||
|
||||
var kzgBlob kzg.Blob
|
||||
copy(kzgBlob[:], blob[:])
|
||||
commitment, err := kzg.BlobToKZGCommitment(&kzgBlob)
|
||||
require.NoError(t, err)
|
||||
commitments[i] = commitment[:]
|
||||
}
|
||||
|
||||
blk.Block.Body.BlobKzgCommitments = commitments
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Wrong number of cell proofs (should be blobCount * numberOfColumns)
|
||||
wrongCellProofs := make([][]byte, 10) // Too few proofs
|
||||
|
||||
s := &Server{}
|
||||
err = s.validateBlobs(b, fuluBlobs, wrongCellProofs)
|
||||
require.ErrorContains(t, "do not match", err)
|
||||
})

	t.Run("Deneb block with invalid blob proof", func(t *testing.T) {
		blob := util.GetRandBlob(123)
		invalidProof := make([]byte, 48) // All zeros - invalid proof

		sk, err := bls.RandKey()
		require.NoError(t, err)

		blk := util.NewBeaconBlockDeneb()
		// A BLS public key is 48 bytes, the same length as a KZG commitment,
		// so it parses as a commitment but cannot verify against the blob.
		blk.Block.Body.BlobKzgCommitments = [][]byte{sk.PublicKey().Marshal()}
		b, err := blocks.NewSignedBeaconBlock(blk)
		require.NoError(t, err)

		s := &Server{}
		err = s.validateBlobs(b, [][]byte{blob[:]}, [][]byte{invalidProof})
		require.ErrorContains(t, "could not verify blob proofs", err)
	})

	t.Run("empty blobs and proofs should pass", func(t *testing.T) {
		blk := util.NewBeaconBlockDeneb()
		blk.Block.Body.BlobKzgCommitments = [][]byte{}
		b, err := blocks.NewSignedBeaconBlock(blk)
		require.NoError(t, err)

		s := &Server{}
		require.NoError(t, s.validateBlobs(b, [][]byte{}, [][]byte{}))
	})

	t.Run("BlobSchedule with progressive increases (BPO)", func(t *testing.T) {
		cfg := params.BeaconConfig().Copy()
		defer params.OverrideBeaconConfig(cfg)

		// Set up config with a BlobSchedule (BPO: blob-parameter-only forks)
		testCfg := params.BeaconConfig().Copy()
		testCfg.DenebForkEpoch = 0
		testCfg.ElectraForkEpoch = 100
		testCfg.FuluForkEpoch = 200
		testCfg.DeprecatedMaxBlobsPerBlock = 6
		testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
		// Define a blob schedule with progressive increases
		testCfg.BlobSchedule = []params.BlobScheduleEntry{
			{Epoch: 0, MaxBlobsPerBlock: 3},  // Start with 3 blobs
			{Epoch: 10, MaxBlobsPerBlock: 5}, // Increase to 5 at epoch 10
			{Epoch: 20, MaxBlobsPerBlock: 7}, // Increase to 7 at epoch 20
			{Epoch: 30, MaxBlobsPerBlock: 9}, // Increase to 9 at epoch 30
		}
		params.OverrideBeaconConfig(testCfg)

		s := &Server{}

		// Test epochs 0-9: max 3 blobs
		t.Run("epoch 0-9: max 3 blobs", func(t *testing.T) {
			blk := util.NewBeaconBlockDeneb()
			blk.Block.Slot = 5 // Epoch 0
			blk.Block.Body.BlobKzgCommitments = commitments[:3]
			b, err := blocks.NewSignedBeaconBlock(blk)
			require.NoError(t, err)
			require.NoError(t, s.validateBlobs(b, blobs[:3], proofs[:3]))

			// Should fail with 4 blobs
			blk.Block.Body.BlobKzgCommitments = commitments[:4]
			b, err = blocks.NewSignedBeaconBlock(blk)
			require.NoError(t, err)
			err = s.validateBlobs(b, blobs[:4], proofs[:4])
			require.ErrorContains(t, "number of blobs over max, 4 > 3", err)
		})

		// Test epoch 30+: max 9 blobs
		t.Run("epoch 30+: max 9 blobs", func(t *testing.T) {
			blk := util.NewBeaconBlockDeneb()
			blk.Block.Slot = 960 // Epoch 30
			blk.Block.Body.BlobKzgCommitments = commitments[:9]
			b, err := blocks.NewSignedBeaconBlock(blk)
			require.NoError(t, err)
			require.NoError(t, s.validateBlobs(b, blobs[:9], proofs[:9]))

			// Should fail with 10 blobs
			blk.Block.Body.BlobKzgCommitments = commitments[:10]
			b, err = blocks.NewSignedBeaconBlock(blk)
			require.NoError(t, err)
			err = s.validateBlobs(b, blobs[:10], proofs[:10])
			require.ErrorContains(t, "number of blobs over max, 10 > 9", err)
		})
	})
}
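
The cell-proof loop in the first subtest fills a flat slice of length blobCount*numberOfColumns, the flattened, blob-major layout the test's comment attributes to execution clients: all column proofs for blob 0 first, then blob 1, and so on. A self-contained sketch of that index arithmetic (the helper names are illustrative, not Prysm APIs):

package main

import "fmt"

// cellProofIndex returns the flat index of the proof for (blobIdx, colIdx)
// in the blob-major layout used above.
func cellProofIndex(blobIdx, colIdx, numberOfColumns uint64) uint64 {
	return blobIdx*numberOfColumns + colIdx
}

// blobAndColumn is the inverse mapping from a flat index back to the pair.
func blobAndColumn(flatIdx, numberOfColumns uint64) (uint64, uint64) {
	return flatIdx / numberOfColumns, flatIdx % numberOfColumns
}

func main() {
	const columns = 128
	fmt.Println(cellProofIndex(1, 2, columns)) // 130
	fmt.Println(blobAndColumn(130, columns))   // 1 2
}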
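
The "invalid cell proof count" subtest hands validateBlobs 10 proofs where 2 blobs * 128 columns = 256 are required. A sketch of the shape of that count check, under the assumption that the function and error wording are illustrative (the test only asserts the substring "do not match"):

package main

import "fmt"

// checkCellProofCount mirrors the validation the subtest exercises: Fulu
// blocks must carry exactly one cell proof per (blob, column) pair.
// Illustrative only; not Prysm's actual implementation.
func checkCellProofCount(commitmentCount int, numberOfColumns uint64, proofCount int) error {
	expected := uint64(commitmentCount) * numberOfColumns
	if uint64(proofCount) != expected {
		return fmt.Errorf("cell proofs (%d) do not match expected count (%d)", proofCount, expected)
	}
	return nil
}

func main() {
	// 2 commitments * 128 columns = 256 expected proofs; 10 is rejected.
	fmt.Println(checkCellProofCount(2, 128, 10))
}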
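
The BPO subtests rely on the schedule being resolved by epoch: the active limit is the MaxBlobsPerBlock of the latest entry whose Epoch is at or below the block's epoch, which is why epoch 0 caps at 3 and epoch 30 caps at 9. A minimal, self-contained sketch of that lookup (the type and helper are stand-ins for the params.BlobSchedule machinery, not its actual API):

package main

import "fmt"

type blobScheduleEntry struct {
	Epoch            uint64
	MaxBlobsPerBlock int
}

// maxBlobsAt returns the limit of the latest entry whose epoch is <= epoch,
// assuming entries are sorted ascending by epoch.
func maxBlobsAt(schedule []blobScheduleEntry, epoch uint64) int {
	limit := 0
	for _, e := range schedule {
		if e.Epoch <= epoch {
			limit = e.MaxBlobsPerBlock
		}
	}
	return limit
}

func main() {
	schedule := []blobScheduleEntry{{0, 3}, {10, 5}, {20, 7}, {30, 9}}
	fmt.Println(maxBlobsAt(schedule, 0))  // 3
	fmt.Println(maxBlobsAt(schedule, 30)) // 9
}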

func TestGetPendingConsolidations(t *testing.T) {

@@ -44,7 +44,7 @@ func (s *Server) GetValidators(w http.ResponseWriter, r *http.Request) {

	isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
	if err != nil {
		httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
		helpers.HandleIsOptimisticError(w, err)
		return
	}
	blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
@@ -222,7 +222,7 @@ func (s *Server) GetValidator(w http.ResponseWriter, r *http.Request) {

	isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
	if err != nil {
		httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
		helpers.HandleIsOptimisticError(w, err)
		return
	}
	blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
@@ -258,7 +258,7 @@ func (s *Server) GetValidatorBalances(w http.ResponseWriter, r *http.Request) {

	isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
	if err != nil {
		httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
		helpers.HandleIsOptimisticError(w, err)
		return
	}
	blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
@@ -419,7 +419,7 @@ func (s *Server) getValidatorIdentitiesJSON(
) {
	isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
	if err != nil {
		httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
		helpers.HandleIsOptimisticError(w, err)
		return
	}
	blockRoot, err := st.LatestBlockHeader().HashTreeRoot()

@@ -46,7 +46,7 @@ func (s *Server) getBeaconStateV2(ctx context.Context, w http.ResponseWriter, id

	isOptimistic, err := helpers.IsOptimistic(ctx, id, s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
	if err != nil {
		httputil.HandleError(w, "Could not check if state is optimistic: "+err.Error(), http.StatusInternalServerError)
		helpers.HandleIsOptimisticError(w, err)
		return
	}
	blockRoot, err := st.LatestBlockHeader().HashTreeRoot()

@@ -12,6 +12,7 @@ go_library(
    deps = [
        "//beacon-chain/blockchain:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/rpc/eth/shared:go_default_library",
        "//beacon-chain/rpc/lookup:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/stategen:go_default_library",
@@ -21,6 +22,7 @@ go_library(
        "//consensus-types/primitives:go_default_library",
        "//consensus-types/validator:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//network/httputil:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
@@ -40,6 +42,7 @@ go_test(
        "//beacon-chain/blockchain/testing:go_default_library",
        "//beacon-chain/db/testing:go_default_library",
        "//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
        "//beacon-chain/rpc/lookup:go_default_library",
        "//beacon-chain/rpc/testutil:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/state-native:go_default_library",
@@ -57,5 +60,6 @@ go_test(
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
    ],
)

@@ -2,11 +2,14 @@ package helpers

import (
	"errors"
	"net/http"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/eth/shared"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/lookup"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
	"github.com/OffchainLabs/prysm/v6/network/httputil"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)
@@ -28,6 +31,22 @@ func PrepareStateFetchGRPCError(err error) error {
	return status.Errorf(codes.Internal, "Invalid state ID: %v", err)
}

// HandleIsOptimisticError handles errors from IsOptimistic function calls and writes appropriate HTTP responses.
func HandleIsOptimisticError(w http.ResponseWriter, err error) {
	var fetchErr *lookup.FetchStateError
	if errors.As(err, &fetchErr) {
		shared.WriteStateFetchError(w, err)
		return
	}

	var blockRootsNotFoundErr *lookup.BlockRootsNotFoundError
	if errors.As(err, &blockRootsNotFoundErr) {
		httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusNotFound)
		return
	}
	httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
}
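
Because the helper dispatches with errors.As rather than direct comparison, callers may wrap the lookup errors and still get the intended status code. A self-contained sketch of that unwrapping behavior (notFoundError is a stand-in type for this illustration, not the real lookup.BlockRootsNotFoundError):

package main

import (
	"errors"
	"fmt"
)

// notFoundError stands in for lookup.BlockRootsNotFoundError in this sketch.
type notFoundError struct{ msg string }

func (e *notFoundError) Error() string { return e.msg }

func main() {
	// errors.As unwraps, so a wrapped error still selects the 404 branch
	// in a dispatcher like HandleIsOptimisticError.
	err := fmt.Errorf("could not check optimistic status: %w", &notFoundError{"block roots not found"})
	var nf *notFoundError
	fmt.Println(errors.As(err, &nf)) // true
}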

// IndexedVerificationFailure represents a collection of verification failures.
type IndexedVerificationFailure struct {
	Failures []*SingleIndexedVerificationFailure `json:"failures"`