Compare commits: refactor-f...bpo-api-va (22 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 48e561ae9b | |
| | ee03c7cce2 | |
| | c5135f6995 | |
| | 29aedac113 | |
| | 08fb3812b7 | |
| | 07738dd9a4 | |
| | 53df29b07f | |
| | 00cf1f2507 | |
| | 6528fb9cea | |
| | 5021131811 | |
| | 26cec9d9c7 | |
| | 4ed90a02ef | |
| | 7d528c75bb | |
| | e7b2953d5a | |
| | acf35e849e | |
| | c826d334a1 | |
| | eace128ee9 | |
| | 9161b80a32 | |
| | 009a1507a1 | |
| | fa71a6e117 | |
| | 3da40ecd9c | |
| | f7f992c256 | |
.github/workflows/check-specrefs.yml (new file, vendored, 43 lines)

@@ -0,0 +1,43 @@
name: Check Spec References
on: [push, pull_request]

jobs:
  check-specrefs:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Check version consistency
        run: |
          WORKSPACE_VERSION=$(grep 'consensus_spec_version = ' WORKSPACE | sed 's/.*"\(.*\)"/\1/')
          ETHSPECIFY_VERSION=$(grep '^version:' specrefs/.ethspecify.yml | sed 's/version: //')
          if [ "$WORKSPACE_VERSION" != "$ETHSPECIFY_VERSION" ]; then
            echo "Version mismatch between WORKSPACE and ethspecify"
            echo "  WORKSPACE: $WORKSPACE_VERSION"
            echo "  specrefs/.ethspecify.yml: $ETHSPECIFY_VERSION"
            exit 1
          else
            echo "Versions match: $WORKSPACE_VERSION"
          fi

      - name: Install ethspecify
        run: python3 -mpip install ethspecify

      - name: Update spec references
        run: ethspecify process --path=specrefs

      - name: Check for differences
        run: |
          if ! git diff --exit-code specrefs >/dev/null; then
            echo "Spec references are out-of-date!"
            echo ""
            git --no-pager diff specrefs
            exit 1
          else
            echo "Spec references are up-to-date!"
          fi

      - name: Check spec references
        run: ethspecify check --path=specrefs
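The version-consistency step above is plain grep/sed over two files. As an aside, the same check can be reproduced as a small standalone Go helper for local use; this is only a sketch under the workflow's assumptions about file layout (`WORKSPACE` and `specrefs/.ethspecify.yml` at the repo root), and the helper names are illustrative, not part of this change set.

```go
package main

import (
	"fmt"
	"os"
	"regexp"
)

// extract returns the first capture group of pattern found in the named file.
func extract(path, pattern string) (string, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return "", err
	}
	m := regexp.MustCompile(pattern).FindSubmatch(data)
	if m == nil {
		return "", fmt.Errorf("no match for %q in %s", pattern, path)
	}
	return string(m[1]), nil
}

func main() {
	// Same two sources the workflow compares.
	workspaceVersion, err := extract("WORKSPACE", `consensus_spec_version = "([^"]+)"`)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	ethspecifyVersion, err := extract("specrefs/.ethspecify.yml", `(?m)^version:\s*(\S+)`)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	if workspaceVersion != ethspecifyVersion {
		fmt.Printf("Version mismatch between WORKSPACE (%s) and specrefs/.ethspecify.yml (%s)\n", workspaceVersion, ethspecifyVersion)
		os.Exit(1)
	}
	fmt.Println("Versions match:", workspaceVersion)
}
```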
WORKSPACE (10 changed lines)

@@ -253,16 +253,16 @@ filegroup(
    url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)

consensus_spec_version = "v1.6.0-alpha.1"
consensus_spec_version = "v1.6.0-alpha.4"

load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests")

consensus_spec_tests(
    name = "consensus_spec_tests",
    flavors = {
        "general": "sha256-o4t9p3R+fQHF4KOykGmwlG3zDw5wUdVWprkzId8aIsk=",
        "minimal": "sha256-sU7ToI8t3MR8x0vVjC8ERmAHZDWpEmnAC9FWIpHi5x4=",
        "mainnet": "sha256-YKS4wngg0LgI9Upp4MYJ77aG+8+e/G4YeqEIlp06LZw=",
        "general": "sha256-MaN4zu3o0vWZypUHS5r4D8WzJF4wANoadM8qm6iyDs4=",
        "minimal": "sha256-aZGNPp/bBvJgq3Wf6vyR0H6G3DOkbSuggEmOL4jEmtg=",
        "mainnet": "sha256-C7jjosvpzUgw3GPajlsWBV02ZbkZ5Uv4ikmOqfDGajI=",
    },
    version = consensus_spec_version,
)

@@ -278,7 +278,7 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    integrity = "sha256-Nv4TEuEJPQIM4E6T9J0FOITsmappmXZjGtlhe1HEXnU=",
    integrity = "sha256-qreawRS77l8CebiNww8z727qUItw7KlHY1Xqj7IrPdk=",
    strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
    url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
@@ -16,7 +16,6 @@ go_library(
        "//api/server/structs:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//network/forks:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
@@ -9,7 +9,6 @@ import (
    "net/url"
    "path"
    "regexp"
    "sort"
    "strconv"

    "github.com/OffchainLabs/prysm/v6/api/client"
@@ -17,7 +16,6 @@ import (
    "github.com/OffchainLabs/prysm/v6/api/server/structs"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
    "github.com/OffchainLabs/prysm/v6/network/forks"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/ethereum/go-ethereum/common/hexutil"
    "github.com/pkg/errors"
@@ -137,24 +135,6 @@ func (c *Client) GetFork(ctx context.Context, stateId StateOrBlockId) (*ethpb.Fo
    return fr.ToConsensus()
}

// GetForkSchedule retrieve all forks, past present and future, of which this node is aware.
func (c *Client) GetForkSchedule(ctx context.Context) (forks.OrderedSchedule, error) {
    body, err := c.Get(ctx, getForkSchedulePath)
    if err != nil {
        return nil, errors.Wrap(err, "error requesting fork schedule")
    }
    fsr := &forkScheduleResponse{}
    err = json.Unmarshal(body, fsr)
    if err != nil {
        return nil, err
    }
    ofs, err := fsr.OrderedForkSchedule()
    if err != nil {
        return nil, errors.Wrap(err, fmt.Sprintf("problem unmarshaling %s response", getForkSchedulePath))
    }
    return ofs, nil
}

// GetConfigSpec retrieve the current configs of the network used by the beacon node.
func (c *Client) GetConfigSpec(ctx context.Context) (*structs.GetSpecResponse, error) {
    body, err := c.Get(ctx, getConfigSpecPath)
@@ -334,31 +314,3 @@ func (c *Client) GetBLStoExecutionChanges(ctx context.Context) (*structs.BLSToEx
    }
    return poolResponse, nil
}

type forkScheduleResponse struct {
    Data []structs.Fork
}

func (fsr *forkScheduleResponse) OrderedForkSchedule() (forks.OrderedSchedule, error) {
    ofs := make(forks.OrderedSchedule, 0)
    for _, d := range fsr.Data {
        epoch, err := strconv.ParseUint(d.Epoch, 10, 64)
        if err != nil {
            return nil, errors.Wrapf(err, "error parsing epoch %s", d.Epoch)
        }
        vSlice, err := hexutil.Decode(d.CurrentVersion)
        if err != nil {
            return nil, err
        }
        if len(vSlice) != 4 {
            return nil, fmt.Errorf("got %d byte version, expected 4 bytes. version hex=%s", len(vSlice), d.CurrentVersion)
        }
        version := bytesutil.ToBytes4(vSlice)
        ofs = append(ofs, forks.ForkScheduleEntry{
            Version: version,
            Epoch:   primitives.Epoch(epoch),
        })
    }
    sort.Sort(ofs)
    return ofs, nil
}
@@ -102,6 +102,7 @@ type BuilderClient interface {
    GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubkey [48]byte) (SignedBid, error)
    RegisterValidator(ctx context.Context, svr []*ethpb.SignedValidatorRegistrationV1) error
    SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error)
    SubmitBlindedBlockPostFulu(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) error
    Status(ctx context.Context) error
}
@@ -152,7 +153,8 @@ func (c *Client) NodeURL() string {
type reqOption func(*http.Request)

// do is a generic, opinionated request function to reduce boilerplate amongst the methods in this package api/client/builder.
func (c *Client) do(ctx context.Context, method string, path string, body io.Reader, opts ...reqOption) (res []byte, header http.Header, err error) {
// It validates that the HTTP response status matches the expectedStatus parameter.
func (c *Client) do(ctx context.Context, method string, path string, body io.Reader, expectedStatus int, opts ...reqOption) (res []byte, header http.Header, err error) {
    ctx, span := trace.StartSpan(ctx, "builder.client.do")
    defer func() {
        tracing.AnnotateError(span, err)
@@ -187,8 +189,8 @@ func (c *Client) do(ctx context.Context, method string, path string, body io.Rea
            log.WithError(closeErr).Error("Failed to close response body")
        }
    }()
    if r.StatusCode != http.StatusOK {
        err = non200Err(r)
    if r.StatusCode != expectedStatus {
        err = unexpectedStatusErr(r, expectedStatus)
        return
    }
    res, err = io.ReadAll(io.LimitReader(r.Body, client.MaxBodySize))
@@ -236,7 +238,7 @@ func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash
            r.Header.Set("Accept", api.JsonMediaType)
        }
    }
    data, header, err := c.do(ctx, http.MethodGet, path, nil, getOpts)
    data, header, err := c.do(ctx, http.MethodGet, path, nil, http.StatusOK, getOpts)
    if err != nil {
        return nil, errors.Wrap(err, "error getting header from builder server")
    }
@@ -409,7 +411,7 @@ func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValid
        }
    }

    if _, _, err = c.do(ctx, http.MethodPost, postRegisterValidatorPath, bytes.NewBuffer(body), postOpts); err != nil {
    if _, _, err = c.do(ctx, http.MethodPost, postRegisterValidatorPath, bytes.NewBuffer(body), http.StatusOK, postOpts); err != nil {
        return errors.Wrap(err, "do")
    }
    log.WithField("registrationCount", len(svr)).Debug("Successfully registered validator(s) on builder")
@@ -471,7 +473,7 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS

    // post the blinded block - the execution payload response should contain the unblinded payload, along with the
    // blobs bundle if it is post deneb.
    data, header, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), postOpts)
    data, header, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), http.StatusOK, postOpts)
    if err != nil {
        return nil, nil, errors.Wrap(err, "error posting the blinded block to the builder api")
    }
@@ -501,6 +503,24 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
    return ed, blobs, nil
}

// SubmitBlindedBlockPostFulu calls the builder API endpoint post-Fulu where relays only return status codes.
// This method is used after the Fulu fork when MEV-boost relays no longer return execution payloads.
func (c *Client) SubmitBlindedBlockPostFulu(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) error {
    body, postOpts, err := c.buildBlindedBlockRequest(sb)
    if err != nil {
        return err
    }

    // Post the blinded block - the response should only contain a status code (no payload)
    _, _, err = c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), http.StatusAccepted, postOpts)
    if err != nil {
        return errors.Wrap(err, "error posting the blinded block to the builder api post-Fulu")
    }

    // Success is indicated by no error (status 202)
    return nil
}

func (c *Client) checkBlockVersion(respBytes []byte, header http.Header) (int, error) {
    var versionHeader string
    if c.sszEnabled {
@@ -657,11 +677,11 @@ func (c *Client) Status(ctx context.Context) error {
    getOpts := func(r *http.Request) {
        r.Header.Set("Accept", api.JsonMediaType)
    }
    _, _, err := c.do(ctx, http.MethodGet, getStatus, nil, getOpts)
    _, _, err := c.do(ctx, http.MethodGet, getStatus, nil, http.StatusOK, getOpts)
    return err
}

func non200Err(response *http.Response) error {
func unexpectedStatusErr(response *http.Response, expected int) error {
    bodyBytes, err := io.ReadAll(io.LimitReader(response.Body, client.MaxErrBodySize))
    var errMessage ErrorMessage
    var body string
@@ -670,7 +690,7 @@ func non200Err(response *http.Response) error {
    } else {
        body = "response body:\n" + string(bodyBytes)
    }
    msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
    msg := fmt.Sprintf("expected=%d, got=%d, url=%s, body=%s", expected, response.StatusCode, response.Request.URL, body)
    switch response.StatusCode {
    case http.StatusUnsupportedMediaType:
        log.WithError(ErrUnsupportedMediaType).Debug(msg)
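The reworked `do` helper takes the expected status as a parameter instead of hard-coding 200, and `unexpectedStatusErr` reports both the expected and the actual code. As an illustration only, here is a minimal, self-contained sketch of that pattern outside Prysm; the `doRequest` and `unexpectedStatus` names and the placeholder URL are assumptions for the example, not identifiers from this change set.

```go
package main

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"strings"
)

// unexpectedStatus mirrors the idea behind unexpectedStatusErr: the error
// message carries the expected code, the actual code, and the URL.
func unexpectedStatus(resp *http.Response, expected int) error {
	body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
	return fmt.Errorf("expected=%d, got=%d, url=%s, body=%s",
		expected, resp.StatusCode, resp.Request.URL, strings.TrimSpace(string(body)))
}

// doRequest issues a request and validates the response status against
// expectedStatus, the same contract the builder client's do method now has.
func doRequest(ctx context.Context, method, url string, expectedStatus int) ([]byte, error) {
	req, err := http.NewRequestWithContext(ctx, method, url, nil)
	if err != nil {
		return nil, err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != expectedStatus {
		return nil, unexpectedStatus(resp, expectedStatus)
	}
	return io.ReadAll(resp.Body)
}

func main() {
	// GET endpoints still expect 200, while the post-Fulu blinded-block
	// submission expects 202 Accepted; that is why the code is a parameter.
	// The URL below is a placeholder for the example.
	if _, err := doRequest(context.Background(), http.MethodGet, "http://localhost:3500/status", http.StatusOK); err != nil {
		fmt.Println("status check failed:", err)
	}
}
```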
@@ -1555,6 +1555,89 @@ func testSignedBlindedBeaconBlockElectra(t *testing.T) *eth.SignedBlindedBeaconB
    }
}

func TestSubmitBlindedBlockPostFulu(t *testing.T) {
    ctx := t.Context()

    t.Run("success", func(t *testing.T) {
        hc := &http.Client{
            Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
                require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
                require.Equal(t, "bellatrix", r.Header.Get("Eth-Consensus-Version"))
                require.Equal(t, api.JsonMediaType, r.Header.Get("Content-Type"))
                require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
                // Post-Fulu: only return status code, no payload
                return &http.Response{
                    StatusCode: http.StatusAccepted,
                    Body:       io.NopCloser(bytes.NewBufferString("")),
                    Request:    r.Clone(ctx),
                }, nil
            }),
        }
        c := &Client{
            hc:      hc,
            baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
        }
        sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
        require.NoError(t, err)
        err = c.SubmitBlindedBlockPostFulu(ctx, sbbb)
        require.NoError(t, err)
    })

    t.Run("success_ssz", func(t *testing.T) {
        hc := &http.Client{
            Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
                require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
                require.Equal(t, "bellatrix", r.Header.Get(api.VersionHeader))
                require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Content-Type"))
                require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Accept"))
                // Post-Fulu: only return status code, no payload
                return &http.Response{
                    StatusCode: http.StatusAccepted,
                    Body:       io.NopCloser(bytes.NewBufferString("")),
                    Request:    r.Clone(ctx),
                }, nil
            }),
        }
        c := &Client{
            hc:         hc,
            baseURL:    &url.URL{Host: "localhost:3500", Scheme: "http"},
            sszEnabled: true,
        }
        sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
        require.NoError(t, err)
        err = c.SubmitBlindedBlockPostFulu(ctx, sbbb)
        require.NoError(t, err)
    })

    t.Run("error_response", func(t *testing.T) {
        hc := &http.Client{
            Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
                require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
                require.Equal(t, "bellatrix", r.Header.Get("Eth-Consensus-Version"))
                message := ErrorMessage{
                    Code:    400,
                    Message: "Bad Request",
                }
                resp, err := json.Marshal(message)
                require.NoError(t, err)
                return &http.Response{
                    StatusCode: http.StatusBadRequest,
                    Body:       io.NopCloser(bytes.NewBuffer(resp)),
                    Request:    r.Clone(ctx),
                }, nil
            }),
        }
        c := &Client{
            hc:      hc,
            baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
        }
        sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
        require.NoError(t, err)
        err = c.SubmitBlindedBlockPostFulu(ctx, sbbb)
        require.ErrorIs(t, err, ErrNotOK)
    })
}

func TestRequestLogger(t *testing.T) {
    wo := WithObserver(&requestLogger{})
    c, err := NewClient("localhost:3500", wo)
@@ -1727,7 +1810,7 @@ func TestSubmitBlindedBlock_BlobsBundlerInterface(t *testing.T) {
    t.Run("Interface signature verification", func(t *testing.T) {
        // This test verifies that the SubmitBlindedBlock method signature
        // has been updated to return BlobsBundler interface

        client := &Client{}

        // Verify the method exists with the correct signature
@@ -45,6 +45,11 @@ func (MockClient) SubmitBlindedBlock(_ context.Context, _ interfaces.ReadOnlySig
    return nil, nil, nil
}

// SubmitBlindedBlockPostFulu --
func (MockClient) SubmitBlindedBlockPostFulu(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock) error {
    return nil
}

// Status --
func (MockClient) Status(_ context.Context) error {
    return nil
@@ -1699,7 +1699,7 @@ func TestExecutionPayloadHeaderCapellaRoundtrip(t *testing.T) {
    require.DeepEqual(t, string(expected[0:len(expected)-1]), string(m))
}

func TestErrorMessage_non200Err(t *testing.T) {
func TestErrorMessage_unexpectedStatusErr(t *testing.T) {
    mockRequest := &http.Request{
        URL: &url.URL{Path: "example.com"},
    }
@@ -1779,7 +1779,7 @@ func TestErrorMessage_non200Err(t *testing.T) {
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            err := non200Err(tt.args)
            err := unexpectedStatusErr(tt.args, http.StatusOK)
            if err != nil && tt.wantMessage != "" {
                require.ErrorContains(t, tt.wantMessage, err)
            }
@@ -6,20 +6,20 @@ import (
)

// Verify performs single or batch verification of commitments depending on the number of given BlobSidecars.
func Verify(sidecars ...blocks.ROBlob) error {
    if len(sidecars) == 0 {
func Verify(blobSidecars ...blocks.ROBlob) error {
    if len(blobSidecars) == 0 {
        return nil
    }
    if len(sidecars) == 1 {
    if len(blobSidecars) == 1 {
        return kzgContext.VerifyBlobKZGProof(
            bytesToBlob(sidecars[0].Blob),
            bytesToCommitment(sidecars[0].KzgCommitment),
            bytesToKZGProof(sidecars[0].KzgProof))
            bytesToBlob(blobSidecars[0].Blob),
            bytesToCommitment(blobSidecars[0].KzgCommitment),
            bytesToKZGProof(blobSidecars[0].KzgProof))
    }
    blobs := make([]GoKZG.Blob, len(sidecars))
    cmts := make([]GoKZG.KZGCommitment, len(sidecars))
    proofs := make([]GoKZG.KZGProof, len(sidecars))
    for i, sidecar := range sidecars {
    blobs := make([]GoKZG.Blob, len(blobSidecars))
    cmts := make([]GoKZG.KZGCommitment, len(blobSidecars))
    proofs := make([]GoKZG.KZGProof, len(blobSidecars))
    for i, sidecar := range blobSidecars {
        blobs[i] = *bytesToBlob(sidecar.Blob)
        cmts[i] = bytesToCommitment(sidecar.KzgCommitment)
        proofs[i] = bytesToKZGProof(sidecar.KzgProof)
@@ -22,8 +22,8 @@ func GenerateCommitmentAndProof(blob GoKZG.Blob) (GoKZG.KZGCommitment, GoKZG.KZG
}

func TestVerify(t *testing.T) {
    sidecars := make([]blocks.ROBlob, 0)
    require.NoError(t, Verify(sidecars...))
    blobSidecars := make([]blocks.ROBlob, 0)
    require.NoError(t, Verify(blobSidecars...))
}

func TestBytesToAny(t *testing.T) {
@@ -240,9 +240,10 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
        }
    }

    if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), b); err != nil {
        return errors.Wrapf(err, "could not validate sidecar availability at slot %d", b.Block().Slot())
    if err := s.areSidecarsAvailable(ctx, avs, b); err != nil {
        return errors.Wrapf(err, "could not validate sidecar availability for block %#x at slot %d", b.Root(), b.Block().Slot())
    }

    args := &forkchoicetypes.BlockAndCheckpoints{Block: b,
        JustifiedCheckpoint: jCheckpoints[i],
        FinalizedCheckpoint: fCheckpoints[i]}
@@ -308,6 +309,30 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
    return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
}

func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.AvailabilityStore, roBlock consensusblocks.ROBlock) error {
    blockVersion := roBlock.Version()
    block := roBlock.Block()
    slot := block.Slot()

    if blockVersion >= version.Fulu {
        if err := s.areDataColumnsAvailable(ctx, roBlock.Root(), block); err != nil {
            return errors.Wrapf(err, "are data columns available for block %#x with slot %d", roBlock.Root(), slot)
        }

        return nil
    }

    if blockVersion >= version.Deneb {
        if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), roBlock); err != nil {
            return errors.Wrapf(err, "could not validate sidecar availability at slot %d", slot)
        }

        return nil
    }

    return nil
}

func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.BeaconState) error {
    e := coreTime.CurrentEpoch(st)
    if err := helpers.UpdateCommitteeCache(ctx, st, e); err != nil {
@@ -584,7 +609,7 @@ func (s *Service) runLateBlockTasks() {
// It returns a map where each key represents a missing BlobSidecar index.
// An empty map means we have all indices; a non-empty map can be used to compare incoming
// BlobSidecars against the set of known missing sidecars.
func missingBlobIndices(bs *filesystem.BlobStorage, root [fieldparams.RootLength]byte, expected [][]byte, slot primitives.Slot) (map[uint64]bool, error) {
func missingBlobIndices(store *filesystem.BlobStorage, root [fieldparams.RootLength]byte, expected [][]byte, slot primitives.Slot) (map[uint64]bool, error) {
    maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
    if len(expected) == 0 {
        return nil, nil
@@ -592,7 +617,7 @@ func missingBlobIndices(bs *filesystem.BlobStorage, root [fieldparams.RootLength
    if len(expected) > maxBlobsPerBlock {
        return nil, errMaxBlobsExceeded
    }
    indices := bs.Summary(root)
    indices := store.Summary(root)
    missing := make(map[uint64]bool, len(expected))
    for i := range expected {
        if len(expected[i]) > 0 && !indices.HasIndex(uint64(i)) {
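The doc comment above describes the returned map as the set of indices still missing for a block, which incoming sidecars can then be checked against. As an aside, a tiny standalone illustration of that comparison follows; the `isWanted` helper is purely illustrative and not part of Prysm.

```go
package main

import "fmt"

// isWanted reports whether an incoming sidecar index is one we are still missing,
// which is how a map produced by a missingBlobIndices-style helper gets used.
func isWanted(missing map[uint64]bool, index uint64) bool {
	return missing[index]
}

func main() {
	// Suppose indices 1 and 3 are still missing for a block.
	missing := map[uint64]bool{1: true, 3: true}

	for _, incoming := range []uint64{0, 1, 2, 3} {
		fmt.Printf("index %d wanted: %v\n", incoming, isWanted(missing, incoming))
	}
}
```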
@@ -607,7 +632,7 @@ func missingBlobIndices(bs *filesystem.BlobStorage, root [fieldparams.RootLength
// It returns a map where each key represents a missing DataColumnSidecar index.
// An empty map means we have all indices; a non-empty map can be used to compare incoming
// DataColumns against the set of known missing sidecars.
func missingDataColumnIndices(bs *filesystem.DataColumnStorage, root [fieldparams.RootLength]byte, expected map[uint64]bool) (map[uint64]bool, error) {
func missingDataColumnIndices(store *filesystem.DataColumnStorage, root [fieldparams.RootLength]byte, expected map[uint64]bool) (map[uint64]bool, error) {
    if len(expected) == 0 {
        return nil, nil
    }
@@ -619,7 +644,7 @@ func missingDataColumnIndices(bs *filesystem.DataColumnStorage, root [fieldparam
    }

    // Get a summary of the data columns stored in the database.
    summary := bs.Summary(root)
    summary := store.Summary(root)

    // Check all expected data columns against the summary.
    missing := make(map[uint64]bool)
@@ -717,7 +742,7 @@ func (s *Service) areDataColumnsAvailable(
    summary := s.dataColumnStorage.Summary(root)
    storedDataColumnsCount := summary.Count()

    minimumColumnCountToReconstruct := peerdas.MinimumColumnsCountToReconstruct()
    minimumColumnCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()

    // As soon as we have enough data column sidecars, we can reconstruct the missing ones.
    // We don't need to wait for the rest of the data columns to declare the block as available.
@@ -820,7 +845,7 @@ func (s *Service) areDataColumnsAvailable(
                missingIndices = uint64MapToSortedSlice(missingMap)
            }

            return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing %v", block.Slot(), root, missingIndices)
            return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing: %v", block.Slot(), root, missingIndices)
        }
    }
}
@@ -2889,7 +2889,7 @@ func TestIsDataAvailable(t *testing.T) {
    })

    t.Run("Fulu - more than half of the columns in custody", func(t *testing.T) {
        minimumColumnsCountToReconstruct := peerdas.MinimumColumnsCountToReconstruct()
        minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
        indices := make([]uint64, 0, minimumColumnsCountToReconstruct)
        for i := range minimumColumnsCountToReconstruct {
            indices = append(indices, i)
@@ -2974,7 +2974,7 @@ func TestIsDataAvailable(t *testing.T) {

        startWaiting := make(chan bool)

        minimumColumnsCountToReconstruct := peerdas.MinimumColumnsCountToReconstruct()
        minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
        indices := make([]uint64, 0, minimumColumnsCountToReconstruct-missingColumns)

        for i := range minimumColumnsCountToReconstruct - missingColumns {
@@ -17,7 +17,7 @@ func (s *Service) ReceiveDataColumns(dataColumnSidecars []blocks.VerifiedRODataC
// ReceiveDataColumn receives a single data column.
func (s *Service) ReceiveDataColumn(dataColumnSidecar blocks.VerifiedRODataColumn) error {
    if err := s.dataColumnStorage.Save([]blocks.VerifiedRODataColumn{dataColumnSidecar}); err != nil {
        return errors.Wrap(err, "save data column sidecars")
        return errors.Wrap(err, "save data column sidecar")
    }

    return nil
@@ -89,7 +89,7 @@ func (mb *mockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
    return nil
}

func (mb *mockBroadcaster) BroadcastDataColumn(_ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar) error {
func (mb *mockBroadcaster) BroadcastDataColumnSidecar(_ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar) error {
    mb.broadcastCalled = true
    return nil
}
@@ -25,6 +25,7 @@ var ErrNoBuilder = errors.New("builder endpoint not configured")
// BlockBuilder defines the interface for interacting with the block builder
type BlockBuilder interface {
    SubmitBlindedBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error)
    SubmitBlindedBlockPostFulu(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) error
    GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubKey [48]byte) (builder.SignedBid, error)
    RegisterValidator(ctx context.Context, reg []*ethpb.SignedValidatorRegistrationV1) error
    RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
@@ -101,6 +102,22 @@ func (s *Service) SubmitBlindedBlock(ctx context.Context, b interfaces.ReadOnlyS
    return s.c.SubmitBlindedBlock(ctx, b)
}

// SubmitBlindedBlockPostFulu submits a blinded block to the builder relay network post-Fulu.
// After Fulu, relays only return status codes (no payload).
func (s *Service) SubmitBlindedBlockPostFulu(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock) error {
    ctx, span := trace.StartSpan(ctx, "builder.SubmitBlindedBlockPostFulu")
    defer span.End()
    start := time.Now()
    defer func() {
        submitBlindedBlockLatency.Observe(float64(time.Since(start).Milliseconds()))
    }()
    if s.c == nil {
        return ErrNoBuilder
    }

    return s.c.SubmitBlindedBlockPostFulu(ctx, b)
}

// GetHeader retrieves the header for a given slot and parent hash from the builder relay network.
func (s *Service) GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubKey [48]byte) (builder.SignedBid, error) {
    ctx, span := trace.StartSpan(ctx, "builder.GetHeader")
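The service wrapper now keeps both submission paths side by side: `SubmitBlindedBlock` for relays that return the unblinded payload, and `SubmitBlindedBlockPostFulu` for relays that only acknowledge with a status code. The diff does not show the call sites, so the following is only a hedged, standalone sketch of how a caller could dispatch between the two by fork version; the `relayClient` interface, `submitBlinded` function, and version constants are illustrative assumptions, not Prysm identifiers.

```go
package main

import (
	"context"
	"fmt"
)

// Minimal stand-ins for fork versions; only their ordering matters here.
const (
	versionDeneb = iota
	versionElectra
	versionFulu
)

// relayClient captures just the two submission paths this change keeps side by side.
type relayClient interface {
	SubmitBlindedBlock(ctx context.Context) error         // pre-Fulu: relay returns the unblinded payload
	SubmitBlindedBlockPostFulu(ctx context.Context) error // post-Fulu: relay only returns a status code
}

// submitBlinded picks the post-Fulu path once the block's version reaches Fulu.
func submitBlinded(ctx context.Context, c relayClient, blockVersion int) error {
	if blockVersion >= versionFulu {
		return c.SubmitBlindedBlockPostFulu(ctx)
	}
	return c.SubmitBlindedBlock(ctx)
}

// fakeRelay records which path was taken, standing in for a real client.
type fakeRelay struct{ lastPath string }

func (f *fakeRelay) SubmitBlindedBlock(context.Context) error         { f.lastPath = "pre-fulu"; return nil }
func (f *fakeRelay) SubmitBlindedBlockPostFulu(context.Context) error { f.lastPath = "post-fulu"; return nil }

func main() {
	r := &fakeRelay{}
	_ = submitBlinded(context.Background(), r, versionFulu)
	fmt.Println(r.lastPath) // post-fulu
}
```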
@@ -24,21 +24,22 @@ type Config struct {

// MockBuilderService to mock builder.
type MockBuilderService struct {
    HasConfigured bool
    Payload *v1.ExecutionPayload
    PayloadCapella *v1.ExecutionPayloadCapella
    PayloadDeneb *v1.ExecutionPayloadDeneb
    BlobBundle *v1.BlobsBundle
    BlobBundleV2 *v1.BlobsBundleV2
    ErrSubmitBlindedBlock error
    Bid *ethpb.SignedBuilderBid
    BidCapella *ethpb.SignedBuilderBidCapella
    BidDeneb *ethpb.SignedBuilderBidDeneb
    BidElectra *ethpb.SignedBuilderBidElectra
    RegistrationCache *cache.RegistrationCache
    ErrGetHeader error
    ErrRegisterValidator error
    Cfg *Config
    HasConfigured bool
    Payload *v1.ExecutionPayload
    PayloadCapella *v1.ExecutionPayloadCapella
    PayloadDeneb *v1.ExecutionPayloadDeneb
    BlobBundle *v1.BlobsBundle
    BlobBundleV2 *v1.BlobsBundleV2
    ErrSubmitBlindedBlock error
    ErrSubmitBlindedBlockPostFulu error
    Bid *ethpb.SignedBuilderBid
    BidCapella *ethpb.SignedBuilderBidCapella
    BidDeneb *ethpb.SignedBuilderBidDeneb
    BidElectra *ethpb.SignedBuilderBidElectra
    RegistrationCache *cache.RegistrationCache
    ErrGetHeader error
    ErrRegisterValidator error
    Cfg *Config
}

// Configured for mocking.
@@ -115,3 +116,8 @@ func (s *MockBuilderService) RegistrationByValidatorID(ctx context.Context, id p
func (s *MockBuilderService) RegisterValidator(context.Context, []*ethpb.SignedValidatorRegistrationV1) error {
    return s.ErrRegisterValidator
}

// SubmitBlindedBlockPostFulu for mocking.
func (s *MockBuilderService) SubmitBlindedBlockPostFulu(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock) error {
    return s.ErrSubmitBlindedBlockPostFulu
}
@@ -41,7 +41,6 @@ go_library(
        "//encoding/ssz:go_default_library",
        "//math:go_default_library",
        "//monitoring/tracing/trace:go_default_library",
        "//network/forks:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//proto/prysm/v1alpha1/attestation:go_default_library",
@@ -11,7 +11,6 @@ import (
    "github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
    "github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v6/crypto/bls"
    "github.com/OffchainLabs/prysm/v6/network/forks"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1/attestation"
    "github.com/OffchainLabs/prysm/v6/time/slots"
@@ -101,7 +100,7 @@ func VerifyBlockHeaderSignature(beaconState state.BeaconState, header *ethpb.Sig
// via the respective epoch.
func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState, blk interfaces.ReadOnlySignedBeaconBlock, blkRoot [32]byte) error {
    currentEpoch := slots.ToEpoch(blk.Block().Slot())
    fork, err := forks.Fork(currentEpoch)
    fork, err := params.Fork(currentEpoch)
    if err != nil {
        return err
    }
@@ -78,6 +78,7 @@ func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {

func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
    helpers.ClearCache()
    params.SetupTestConfigCleanup(t)

    validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
    syncCommittee := &ethpb.SyncCommittee{
@@ -264,6 +265,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
}

func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
    params.SetupTestConfigCleanup(t)
    helpers.ClearCache()

    validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
@@ -223,6 +223,14 @@ func dataColumnsSidecars(
        cellsForRow := cellsAndProofs[rowIndex].Cells
        proofsForRow := cellsAndProofs[rowIndex].Proofs

        // Validate that we have enough cells and proofs for this column index
        if columnIndex >= uint64(len(cellsForRow)) {
            return nil, errors.Errorf("column index %d exceeds cells length %d for blob %d", columnIndex, len(cellsForRow), rowIndex)
        }
        if columnIndex >= uint64(len(proofsForRow)) {
            return nil, errors.Errorf("column index %d exceeds proofs length %d for blob %d", columnIndex, len(proofsForRow), rowIndex)
        }

        cell := cellsForRow[columnIndex]
        column = append(column, cell)
@@ -67,6 +67,55 @@ func TestDataColumnSidecars(t *testing.T) {
        _, err = peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs)
        require.ErrorIs(t, err, peerdas.ErrSizeMismatch)
    })

    t.Run("cells array too short for column index", func(t *testing.T) {
        // Create a Fulu block with a blob commitment.
        signedBeaconBlockPb := util.NewBeaconBlockFulu()
        signedBeaconBlockPb.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, 48)}

        // Create a signed beacon block from the protobuf.
        signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
        require.NoError(t, err)

        // Create cells and proofs with insufficient cells for the number of columns.
        // This simulates a scenario where cellsAndProofs has fewer cells than expected columns.
        cellsAndProofs := []kzg.CellsAndProofs{
            {
                Cells:  make([]kzg.Cell, 10),  // Only 10 cells
                Proofs: make([]kzg.Proof, 10), // Only 10 proofs
            },
        }

        // This should fail because the function will try to access columns up to NumberOfColumns
        // but we only have 10 cells/proofs.
        _, err = peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs)
        require.ErrorContains(t, "column index", err)
        require.ErrorContains(t, "exceeds cells length", err)
    })

    t.Run("proofs array too short for column index", func(t *testing.T) {
        // Create a Fulu block with a blob commitment.
        signedBeaconBlockPb := util.NewBeaconBlockFulu()
        signedBeaconBlockPb.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, 48)}

        // Create a signed beacon block from the protobuf.
        signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
        require.NoError(t, err)

        // Create cells and proofs with sufficient cells but insufficient proofs.
        numberOfColumns := params.BeaconConfig().NumberOfColumns
        cellsAndProofs := []kzg.CellsAndProofs{
            {
                Cells:  make([]kzg.Cell, numberOfColumns),
                Proofs: make([]kzg.Proof, 5), // Only 5 proofs, less than columns
            },
        }

        // This should fail when trying to access proof beyond index 4.
        _, err = peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs)
        require.ErrorContains(t, "column index", err)
        require.ErrorContains(t, "exceeds proofs length", err)
    })
}

func TestComputeCustodyGroupForColumn(t *testing.T) {
@@ -18,8 +18,8 @@ var (
    ErrBlobsCellsProofsMismatch = errors.New("blobs and cells proofs mismatch")
)

// MinimumColumnsCountToReconstruct return the minimum number of columns needed to proceed to a reconstruction.
func MinimumColumnsCountToReconstruct() uint64 {
// MinimumColumnCountToReconstruct return the minimum number of columns needed to proceed to a reconstruction.
func MinimumColumnCountToReconstruct() uint64 {
    // If the number of columns is odd, then we need total / 2 + 1 columns to reconstruct.
    // If the number of columns is even, then we need total / 2 columns to reconstruct.
    return (params.BeaconConfig().NumberOfColumns + 1) / 2
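The formula `(NumberOfColumns + 1) / 2` implements the odd/even rule from the comment via integer division. As a quick worked check, here is a small self-contained sketch that recomputes the formula for example column counts (it only mirrors the arithmetic; it is not the peerdas package itself).

```go
package main

import "fmt"

// minimumColumnCount mirrors the formula used by MinimumColumnCountToReconstruct:
// integer division makes (n+1)/2 equal to n/2 for even n and n/2+1 for odd n.
func minimumColumnCount(numberOfColumns uint64) uint64 {
	return (numberOfColumns + 1) / 2
}

func main() {
	fmt.Println(minimumColumnCount(128)) // 64: even count, exactly half is enough
	fmt.Println(minimumColumnCount(127)) // 64: odd count, half rounded up (63 + 1)
}
```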
@@ -58,7 +58,7 @@ func ReconstructDataColumnSidecars(inVerifiedRoSidecars []blocks.VerifiedRODataC

    // Check if there is enough sidecars to reconstruct the missing columns.
    sidecarCount := len(sidecarByIndex)
    if uint64(sidecarCount) < MinimumColumnsCountToReconstruct() {
    if uint64(sidecarCount) < MinimumColumnCountToReconstruct() {
        return nil, ErrNotEnoughDataColumnSidecars
    }
@@ -48,7 +48,7 @@ func TestMinimumColumnsCountToReconstruct(t *testing.T) {
            params.OverrideBeaconConfig(cfg)

            // Compute the minimum number of columns needed to reconstruct.
            actual := peerdas.MinimumColumnsCountToReconstruct()
            actual := peerdas.MinimumColumnCountToReconstruct()
            require.Equal(t, tc.expected, actual)
        })
    }
@@ -100,7 +100,7 @@ func TestReconstructDataColumnSidecars(t *testing.T) {
    t.Run("not enough columns to enable reconstruction", func(t *testing.T) {
        _, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, 3)

        minimum := peerdas.MinimumColumnsCountToReconstruct()
        minimum := peerdas.MinimumColumnCountToReconstruct()
        _, err := peerdas.ReconstructDataColumnSidecars(verifiedRoSidecars[:minimum-1])
        require.ErrorIs(t, err, peerdas.ErrNotEnoughDataColumnSidecars)
    })
@@ -4,7 +4,6 @@ go_library(
    name = "go_default_library",
    srcs = [
        "domain.go",
        "signature.go",
        "signing_root.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing",
@@ -25,7 +24,6 @@ go_test(
    name = "go_default_test",
    srcs = [
        "domain_test.go",
        "signature_test.go",
        "signing_root_test.go",
    ],
    embed = [":go_default_library"],
@@ -1,34 +0,0 @@
package signing

import (
    "github.com/OffchainLabs/prysm/v6/config/params"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/pkg/errors"
)

var ErrNilRegistration = errors.New("nil signed registration")

// VerifyRegistrationSignature verifies the signature of a validator's registration.
func VerifyRegistrationSignature(
    sr *ethpb.SignedValidatorRegistrationV1,
) error {
    if sr == nil || sr.Message == nil {
        return ErrNilRegistration
    }

    d := params.BeaconConfig().DomainApplicationBuilder
    // Per spec, we want the fork version and genesis validator to be nil.
    // Which is genesis value and zero by default.
    sd, err := ComputeDomain(
        d,
        nil, /* fork version */
        nil /* genesis val root */)
    if err != nil {
        return err
    }

    if err := VerifySigningRoot(sr.Message, sr.Message.Pubkey, sr.Signature, sd); err != nil {
        return ErrSigFailedToVerify
    }
    return nil
}
@@ -1,42 +0,0 @@
package signing_test

import (
    "testing"
    "time"

    "github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
    "github.com/OffchainLabs/prysm/v6/config/params"
    "github.com/OffchainLabs/prysm/v6/crypto/bls"
    "github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
    ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v6/testing/require"
)

func TestVerifyRegistrationSignature(t *testing.T) {
    sk, err := bls.RandKey()
    require.NoError(t, err)
    reg := &ethpb.ValidatorRegistrationV1{
        FeeRecipient: bytesutil.PadTo([]byte("fee"), 20),
        GasLimit:     123456,
        Timestamp:    uint64(time.Now().Unix()),
        Pubkey:       sk.PublicKey().Marshal(),
    }
    d := params.BeaconConfig().DomainApplicationBuilder
    domain, err := signing.ComputeDomain(d, nil, nil)
    require.NoError(t, err)
    sr, err := signing.ComputeSigningRoot(reg, domain)
    require.NoError(t, err)
    sk.Sign(sr[:]).Marshal()

    sReg := &ethpb.SignedValidatorRegistrationV1{
        Message:   reg,
        Signature: sk.Sign(sr[:]).Marshal(),
    }
    require.NoError(t, signing.VerifyRegistrationSignature(sReg))

    sReg.Signature = []byte("bad")
    require.ErrorIs(t, signing.VerifyRegistrationSignature(sReg), signing.ErrSigFailedToVerify)

    sReg.Message = nil
    require.ErrorIs(t, signing.VerifyRegistrationSignature(sReg), signing.ErrNilRegistration)
}
@@ -4,7 +4,6 @@ go_library(
    name = "go_default_library",
    srcs = [
        "availability_blobs.go",
        "availability_columns.go",
        "blob_cache.go",
        "data_column_cache.go",
        "iface.go",
@@ -13,7 +12,6 @@ go_library(
    importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/das",
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/core/peerdas:go_default_library",
        "//beacon-chain/db/filesystem:go_default_library",
        "//beacon-chain/verification:go_default_library",
        "//config/fieldparams:go_default_library",
@@ -23,7 +21,6 @@ go_library(
        "//runtime/logging:go_default_library",
        "//runtime/version:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
    ],
@@ -33,7 +30,6 @@ go_test(
    name = "go_default_test",
    srcs = [
        "availability_blobs_test.go",
        "availability_columns_test.go",
        "blob_cache_test.go",
        "data_column_cache_test.go",
    ],
@@ -49,7 +45,6 @@ go_test(
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
    ],
)
@@ -53,30 +53,25 @@ func NewLazilyPersistentStore(store *filesystem.BlobStorage, verifier BlobBatchV
// Persist adds blobs to the working blob cache. Blobs stored in this cache will be persisted
// for at least as long as the node is running. Once IsDataAvailable succeeds, all blobs referenced
// by the given block are guaranteed to be persisted for the remainder of the retention period.
func (s *LazilyPersistentStoreBlob) Persist(current primitives.Slot, sidecars ...blocks.ROSidecar) error {
func (s *LazilyPersistentStoreBlob) Persist(current primitives.Slot, sidecars ...blocks.ROBlob) error {
    if len(sidecars) == 0 {
        return nil
    }

    blobSidecars, err := blocks.BlobSidecarsFromSidecars(sidecars)
    if err != nil {
        return errors.Wrap(err, "blob sidecars from sidecars")
    }

    if len(blobSidecars) > 1 {
        firstRoot := blobSidecars[0].BlockRoot()
        for _, sidecar := range blobSidecars[1:] {
    if len(sidecars) > 1 {
        firstRoot := sidecars[0].BlockRoot()
        for _, sidecar := range sidecars[1:] {
            if sidecar.BlockRoot() != firstRoot {
                return errMixedRoots
            }
        }
    }
    if !params.WithinDAPeriod(slots.ToEpoch(blobSidecars[0].Slot()), slots.ToEpoch(current)) {
    if !params.WithinDAPeriod(slots.ToEpoch(sidecars[0].Slot()), slots.ToEpoch(current)) {
        return nil
    }
    key := keyFromSidecar(blobSidecars[0])
    key := keyFromSidecar(sidecars[0])
    entry := s.cache.ensure(key)
    for _, blobSidecar := range blobSidecars {
    for _, blobSidecar := range sidecars {
        if err := entry.stash(&blobSidecar); err != nil {
            return err
        }
@@ -118,23 +118,21 @@ func TestLazilyPersistent_Missing(t *testing.T) {

    blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)

    scs := blocks.NewSidecarsFromBlobSidecars(blobSidecars)

    mbv := &mockBlobBatchVerifier{t: t, scs: blobSidecars}
    as := NewLazilyPersistentStore(store, mbv)

    // Only one commitment persisted, should return error with other indices
    require.NoError(t, as.Persist(1, scs[2]))
    require.NoError(t, as.Persist(1, blobSidecars[2]))
    err := as.IsDataAvailable(ctx, 1, blk)
    require.ErrorIs(t, err, errMissingSidecar)

    // All but one persisted, return missing idx
    require.NoError(t, as.Persist(1, scs[0]))
    require.NoError(t, as.Persist(1, blobSidecars[0]))
    err = as.IsDataAvailable(ctx, 1, blk)
    require.ErrorIs(t, err, errMissingSidecar)

    // All persisted, return nil
    require.NoError(t, as.Persist(1, scs...))
    require.NoError(t, as.Persist(1, blobSidecars...))

    require.NoError(t, as.IsDataAvailable(ctx, 1, blk))
}
@@ -149,10 +147,8 @@ func TestLazilyPersistent_Mismatch(t *testing.T) {
    blobSidecars[0].KzgCommitment = bytesutil.PadTo([]byte("nope"), 48)
    as := NewLazilyPersistentStore(store, mbv)

    scs := blocks.NewSidecarsFromBlobSidecars(blobSidecars)

    // Only one commitment persisted, should return error with other indices
    require.NoError(t, as.Persist(1, scs[0]))
    require.NoError(t, as.Persist(1, blobSidecars[0]))
    err := as.IsDataAvailable(ctx, 1, blk)
    require.NotNil(t, err)
    require.ErrorIs(t, err, errCommitmentMismatch)
@@ -161,29 +157,25 @@
func TestLazyPersistOnceCommitted(t *testing.T) {
    _, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 6)

    scs := blocks.NewSidecarsFromBlobSidecars(blobSidecars)

    as := NewLazilyPersistentStore(filesystem.NewEphemeralBlobStorage(t), &mockBlobBatchVerifier{})
    // stashes as expected
    require.NoError(t, as.Persist(1, scs...))
    require.NoError(t, as.Persist(1, blobSidecars...))
    // ignores duplicates
    require.ErrorIs(t, as.Persist(1, scs...), ErrDuplicateSidecar)
    require.ErrorIs(t, as.Persist(1, blobSidecars...), ErrDuplicateSidecar)

    // ignores index out of bound
    blobSidecars[0].Index = 6
    require.ErrorIs(t, as.Persist(1, blocks.NewSidecarFromBlobSidecar(blobSidecars[0])), errIndexOutOfBounds)
    require.ErrorIs(t, as.Persist(1, blobSidecars[0]), errIndexOutOfBounds)

    _, moreBlobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 4)

    more := blocks.NewSidecarsFromBlobSidecars(moreBlobSidecars)

    // ignores sidecars before the retention period
    slotOOB, err := slots.EpochStart(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
    require.NoError(t, err)
    require.NoError(t, as.Persist(32+slotOOB, more[0]))
    require.NoError(t, as.Persist(32+slotOOB, moreBlobSidecars[0]))

    // doesn't ignore new sidecars with a different block root
    require.NoError(t, as.Persist(1, more...))
    require.NoError(t, as.Persist(1, moreBlobSidecars...))
}

type mockBlobBatchVerifier struct {
@@ -1,213 +0,0 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
errors "github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// LazilyPersistentStoreColumn is an implementation of AvailabilityStore to be used when batch syncing data columns.
|
||||
// This implementation will hold any data columns passed to Persist until the IsDataAvailable is called for their
|
||||
// block, at which time they will undergo full verification and be saved to the disk.
|
||||
type LazilyPersistentStoreColumn struct {
|
||||
store *filesystem.DataColumnStorage
|
||||
nodeID enode.ID
|
||||
cache *dataColumnCache
|
||||
newDataColumnsVerifier verification.NewDataColumnsVerifier
|
||||
custodyGroupCount uint64
|
||||
}
|
||||
|
||||
var _ AvailabilityStore = &LazilyPersistentStoreColumn{}
|
||||
|
||||
// DataColumnsVerifier enables LazilyPersistentStoreColumn to manage the verification process
|
||||
// going from RODataColumn->VerifiedRODataColumn, while avoiding the decision of which individual verifications
|
||||
// to run and in what order. Since LazilyPersistentStoreColumn always tries to verify and save data columns only when
|
||||
// they are all available, the interface takes a slice of data column sidecars.
|
||||
type DataColumnsVerifier interface {
|
||||
VerifiedRODataColumns(ctx context.Context, blk blocks.ROBlock, scs []blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error)
|
||||
}
|
||||
|
||||
// NewLazilyPersistentStoreColumn creates a new LazilyPersistentStoreColumn.
|
||||
// WARNING: The resulting LazilyPersistentStoreColumn is NOT thread-safe.
|
||||
func NewLazilyPersistentStoreColumn(
|
||||
store *filesystem.DataColumnStorage,
|
||||
nodeID enode.ID,
|
||||
newDataColumnsVerifier verification.NewDataColumnsVerifier,
|
||||
custodyGroupCount uint64,
|
||||
) *LazilyPersistentStoreColumn {
|
||||
return &LazilyPersistentStoreColumn{
|
||||
store: store,
|
||||
nodeID: nodeID,
|
||||
cache: newDataColumnCache(),
|
||||
newDataColumnsVerifier: newDataColumnsVerifier,
|
||||
custodyGroupCount: custodyGroupCount,
|
||||
}
|
||||
}
|
||||
|
||||
// PersistColumns adds columns to the working column cache. Columns stored in this cache will be persisted
|
||||
// for at least as long as the node is running. Once IsDataAvailable succeeds, all columns referenced
|
||||
// by the given block are guaranteed to be persisted for the remainder of the retention period.
|
||||
func (s *LazilyPersistentStoreColumn) Persist(current primitives.Slot, sidecars ...blocks.ROSidecar) error {
|
||||
if len(sidecars) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
dataColumnSidecars, err := blocks.DataColumnSidecarsFromSidecars(sidecars)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "blob sidecars from sidecars")
|
||||
}
|
||||
|
||||
// It is safe to retrieve the first sidecar.
|
||||
firstSidecar := dataColumnSidecars[0]
|
||||
|
||||
if len(sidecars) > 1 {
|
||||
firstRoot := firstSidecar.BlockRoot()
|
||||
for _, sidecar := range dataColumnSidecars[1:] {
|
||||
if sidecar.BlockRoot() != firstRoot {
|
||||
return errMixedRoots
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
firstSidecarEpoch, currentEpoch := slots.ToEpoch(firstSidecar.Slot()), slots.ToEpoch(current)
|
||||
if !params.WithinDAPeriod(firstSidecarEpoch, currentEpoch) {
|
||||
return nil
|
||||
}
|
||||
|
||||
key := cacheKey{slot: firstSidecar.Slot(), root: firstSidecar.BlockRoot()}
|
||||
entry := s.cache.ensure(key)
|
||||
|
||||
for _, sidecar := range dataColumnSidecars {
|
||||
if err := entry.stash(&sidecar); err != nil {
|
||||
return errors.Wrap(err, "stash DataColumnSidecar")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsDataAvailable returns nil if all the commitments in the given block are persisted to the db and have been verified.
|
||||
// DataColumnsSidecars already in the db are assumed to have been previously verified against the block.
|
||||
func (s *LazilyPersistentStoreColumn) IsDataAvailable(ctx context.Context, currentSlot primitives.Slot, block blocks.ROBlock) error {
|
||||
blockCommitments, err := s.fullCommitmentsToCheck(s.nodeID, block, currentSlot)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "full commitments to check with block root `%#x` and current slot `%d`", block.Root(), currentSlot)
|
||||
}
|
||||
|
||||
// Return early for blocks that do not have any commitments.
|
||||
if blockCommitments.count() == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get the root of the block.
|
||||
blockRoot := block.Root()
|
||||
|
||||
// Build the cache key for the block.
|
||||
key := cacheKey{slot: block.Block().Slot(), root: blockRoot}
|
||||
|
||||
// Retrieve the cache entry for the block, or create an empty one if it doesn't exist.
|
||||
entry := s.cache.ensure(key)
|
||||
|
||||
// Delete the cache entry for the block at the end.
|
||||
defer s.cache.delete(key)
|
||||
|
||||
// Set the disk summary for the block in the cache entry.
|
||||
entry.setDiskSummary(s.store.Summary(blockRoot))
|
||||
|
||||
// Verify we have all the expected sidecars, and fail fast if any are missing or inconsistent.
|
||||
// We don't try to salvage problematic batches because this indicates a misbehaving peer and we'd rather
|
||||
// ignore their response and decrease their peer score.
|
||||
roDataColumns, err := entry.filter(blockRoot, blockCommitments)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "entry filter")
|
||||
}
|
||||
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#datacolumnsidecarsbyrange-v1
|
||||
verifier := s.newDataColumnsVerifier(roDataColumns, verification.ByRangeRequestDataColumnSidecarRequirements)
|
||||
|
||||
if err := verifier.ValidFields(); err != nil {
|
||||
return errors.Wrap(err, "valid fields")
|
||||
}
|
||||
|
||||
if err := verifier.SidecarInclusionProven(); err != nil {
|
||||
return errors.Wrap(err, "sidecar inclusion proven")
|
||||
}
|
||||
|
||||
if err := verifier.SidecarKzgProofVerified(); err != nil {
|
||||
return errors.Wrap(err, "sidecar KZG proof verified")
|
||||
}
|
||||
|
||||
verifiedRoDataColumns, err := verifier.VerifiedRODataColumns()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "verified RO data columns - should never happen")
|
||||
}
|
||||
|
||||
if err := s.store.Save(verifiedRoDataColumns); err != nil {
|
||||
return errors.Wrap(err, "save data column sidecars")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// fullCommitmentsToCheck returns the commitments to check for a given block.
|
||||
func (s *LazilyPersistentStoreColumn) fullCommitmentsToCheck(nodeID enode.ID, block blocks.ROBlock, currentSlot primitives.Slot) (*safeCommitmentsArray, error) {
|
||||
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
|
||||
|
||||
// Return early for blocks that are pre-Fulu.
|
||||
if block.Version() < version.Fulu {
|
||||
return &safeCommitmentsArray{}, nil
|
||||
}
|
||||
|
||||
// Compute the block epoch.
|
||||
blockSlot := block.Block().Slot()
|
||||
blockEpoch := slots.ToEpoch(blockSlot)
|
||||
|
||||
// Compute the current epoch.
|
||||
currentEpoch := slots.ToEpoch(currentSlot)
|
||||
|
||||
// Return early if the request is out of the MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS window.
|
||||
if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
|
||||
return &safeCommitmentsArray{}, nil
|
||||
}
|
||||
|
||||
// Retrieve the KZG commitments for the block.
|
||||
kzgCommitments, err := block.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "blob KZG commitments")
|
||||
}
|
||||
|
||||
// Return early if there are no commitments in the block.
|
||||
if len(kzgCommitments) == 0 {
|
||||
return &safeCommitmentsArray{}, nil
|
||||
}
|
||||
|
||||
// Retrieve peer info.
|
||||
samplingSize := max(s.custodyGroupCount, samplesPerSlot)
|
||||
peerInfo, _, err := peerdas.Info(nodeID, samplingSize)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "peer info")
|
||||
}
|
||||
|
||||
// Create a safe commitments array for the custody columns.
|
||||
commitmentsArray := &safeCommitmentsArray{}
|
||||
commitmentsArraySize := uint64(len(commitmentsArray))
|
||||
|
||||
for column := range peerInfo.CustodyColumns {
|
||||
if column >= commitmentsArraySize {
|
||||
return nil, errors.Errorf("custody column index %d too high (max allowed %d) - should never happen", column, commitmentsArraySize)
|
||||
}
|
||||
|
||||
commitmentsArray[column] = kzgCommitments
|
||||
}
|
||||
|
||||
return commitmentsArray, nil
|
||||
}
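The early return above encodes the data-availability window: commitments are only enforced while the block is recent enough that its data columns must still be custodied. A self-contained illustration of that rule (a hypothetical helper, not the params.WithinDAPeriod implementation):

func withinDAWindow(blockEpoch, currentEpoch, minEpochsForRequests primitives.Epoch) bool {
    if currentEpoch < minEpochsForRequests {
        // The chain is younger than the retention window, so every epoch is in range.
        return true
    }
    return blockEpoch >= currentEpoch-minEpochsForRequests
}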
|
||||
@@ -1,313 +0,0 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
)
|
||||
|
||||
var commitments = [][]byte{
|
||||
bytesutil.PadTo([]byte("a"), 48),
|
||||
bytesutil.PadTo([]byte("b"), 48),
|
||||
bytesutil.PadTo([]byte("c"), 48),
|
||||
bytesutil.PadTo([]byte("d"), 48),
|
||||
}
|
||||
|
||||
func TestPersist(t *testing.T) {
|
||||
t.Run("no sidecars", func(t *testing.T) {
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
|
||||
err := lazilyPersistentStoreColumns.Persist(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
|
||||
})
|
||||
|
||||
t.Run("mixed roots", func(t *testing.T) {
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
|
||||
dataColumnParamsByBlockRoot := []util.DataColumnParam{
|
||||
{Slot: 1, Index: 1},
|
||||
{Slot: 2, Index: 2},
|
||||
}
|
||||
|
||||
roSidecars, _ := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
|
||||
|
||||
err := lazilyPersistentStoreColumns.Persist(0, roSidecars...)
|
||||
require.ErrorIs(t, err, errMixedRoots)
|
||||
require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
|
||||
})
|
||||
|
||||
t.Run("outside DA period", func(t *testing.T) {
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
|
||||
dataColumnParamsByBlockRoot := []util.DataColumnParam{
|
||||
{Slot: 1, Index: 1},
|
||||
}
|
||||
|
||||
roSidecars, _ := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
|
||||
|
||||
err := lazilyPersistentStoreColumns.Persist(1_000_000, roSidecars...)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
|
||||
})
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
const slot = 42
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
|
||||
dataColumnParamsByBlockRoot := []util.DataColumnParam{
|
||||
{Slot: slot, Index: 1},
|
||||
{Slot: slot, Index: 5},
|
||||
}
|
||||
|
||||
roSidecars, roDataColumns := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
|
||||
|
||||
err := lazilyPersistentStoreColumns.Persist(slot, roSidecars...)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(lazilyPersistentStoreColumns.cache.entries))
|
||||
|
||||
key := cacheKey{slot: slot, root: roDataColumns[0].BlockRoot()}
|
||||
entry, ok := lazilyPersistentStoreColumns.cache.entries[key]
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
// A call to Persist does NOT save the sidecars to disk.
|
||||
require.Equal(t, uint64(0), entry.diskSummary.Count())
|
||||
|
||||
require.DeepSSZEqual(t, roDataColumns[0], *entry.scs[1])
|
||||
require.DeepSSZEqual(t, roDataColumns[1], *entry.scs[5])
|
||||
|
||||
for i, roDataColumn := range entry.scs {
|
||||
if map[int]bool{1: true, 5: true}[i] {
|
||||
continue
|
||||
}
|
||||
|
||||
require.IsNil(t, roDataColumn)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestIsDataAvailable(t *testing.T) {
|
||||
newDataColumnsVerifier := func(dataColumnSidecars []blocks.RODataColumn, _ []verification.Requirement) verification.DataColumnsVerifier {
|
||||
return &mockDataColumnsVerifier{t: t, dataColumnSidecars: dataColumnSidecars}
|
||||
}
|
||||
|
||||
ctx := t.Context()
|
||||
|
||||
t.Run("without commitments", func(t *testing.T) {
|
||||
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
|
||||
signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)
|
||||
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, newDataColumnsVerifier, 0)
|
||||
|
||||
err := lazilyPersistentStoreColumns.IsDataAvailable(ctx, 0 /*current slot*/, signedRoBlock)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("with commitments", func(t *testing.T) {
|
||||
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
|
||||
signedBeaconBlockFulu.Block.Body.BlobKzgCommitments = commitments
|
||||
signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)
|
||||
block := signedRoBlock.Block()
|
||||
slot := block.Slot()
|
||||
proposerIndex := block.ProposerIndex()
|
||||
parentRoot := block.ParentRoot()
|
||||
stateRoot := block.StateRoot()
|
||||
bodyRoot, err := block.Body().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
root := signedRoBlock.Root()
|
||||
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, newDataColumnsVerifier, 0)
|
||||
|
||||
indices := [...]uint64{1, 17, 19, 42, 75, 87, 102, 117}
|
||||
dataColumnsParams := make([]util.DataColumnParam, 0, len(indices))
|
||||
for _, index := range indices {
|
||||
dataColumnParams := util.DataColumnParam{
|
||||
Index: index,
|
||||
KzgCommitments: commitments,
|
||||
|
||||
Slot: slot,
|
||||
ProposerIndex: proposerIndex,
|
||||
ParentRoot: parentRoot[:],
|
||||
StateRoot: stateRoot[:],
|
||||
BodyRoot: bodyRoot[:],
|
||||
}
|
||||
|
||||
dataColumnsParams = append(dataColumnsParams, dataColumnParams)
|
||||
}
|
||||
|
||||
_, verifiedRoDataColumns := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnsParams)
|
||||
|
||||
key := cacheKey{root: root}
|
||||
entry := lazilyPersistentStoreColumns.cache.ensure(key)
|
||||
defer lazilyPersistentStoreColumns.cache.delete(key)
|
||||
|
||||
for _, verifiedRoDataColumn := range verifiedRoDataColumns {
|
||||
err := entry.stash(&verifiedRoDataColumn.RODataColumn)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
err = lazilyPersistentStoreColumns.IsDataAvailable(ctx, slot, signedRoBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
actual, err := dataColumnStorage.Get(root, indices[:])
|
||||
require.NoError(t, err)
|
||||
|
||||
summary := dataColumnStorage.Summary(root)
|
||||
require.Equal(t, uint64(len(indices)), summary.Count())
|
||||
require.DeepSSZEqual(t, verifiedRoDataColumns, actual)
|
||||
})
|
||||
}
|
||||
|
||||
func TestFullCommitmentsToCheck(t *testing.T) {
|
||||
windowSlots, err := slots.EpochEnd(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
|
||||
require.NoError(t, err)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
commitments [][]byte
|
||||
block func(*testing.T) blocks.ROBlock
|
||||
slot primitives.Slot
|
||||
}{
|
||||
{
|
||||
name: "Pre-Fulu block",
|
||||
block: func(t *testing.T) blocks.ROBlock {
|
||||
return newSignedRoBlock(t, util.NewBeaconBlockElectra())
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Commitments outside data availability window",
|
||||
block: func(t *testing.T) blocks.ROBlock {
|
||||
beaconBlockElectra := util.NewBeaconBlockElectra()
|
||||
|
||||
// Block is from slot 0, "current slot" is window size +1 (so outside the window)
|
||||
beaconBlockElectra.Block.Body.BlobKzgCommitments = commitments
|
||||
|
||||
return newSignedRoBlock(t, beaconBlockElectra)
|
||||
},
|
||||
slot: windowSlots + 1,
|
||||
},
|
||||
{
|
||||
name: "Commitments within data availability window",
|
||||
block: func(t *testing.T) blocks.ROBlock {
|
||||
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
|
||||
signedBeaconBlockFulu.Block.Body.BlobKzgCommitments = commitments
|
||||
signedBeaconBlockFulu.Block.Slot = 100
|
||||
|
||||
return newSignedRoBlock(t, signedBeaconBlockFulu)
|
||||
},
|
||||
commitments: commitments,
|
||||
slot: 100,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
b := tc.block(t)
|
||||
s := NewLazilyPersistentStoreColumn(nil, enode.ID{}, nil, numberOfColumns)
|
||||
|
||||
commitmentsArray, err := s.fullCommitmentsToCheck(enode.ID{}, b, tc.slot)
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, commitments := range commitmentsArray {
|
||||
require.DeepEqual(t, tc.commitments, commitments)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func roSidecarsFromDataColumnParamsByBlockRoot(t *testing.T, parameters []util.DataColumnParam) ([]blocks.ROSidecar, []blocks.RODataColumn) {
|
||||
roDataColumns, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, parameters)
|
||||
|
||||
roSidecars := make([]blocks.ROSidecar, 0, len(roDataColumns))
|
||||
for _, roDataColumn := range roDataColumns {
|
||||
roSidecars = append(roSidecars, blocks.NewSidecarFromDataColumnSidecar(roDataColumn))
|
||||
}
|
||||
|
||||
return roSidecars, roDataColumns
|
||||
}
|
||||
|
||||
func newSignedRoBlock(t *testing.T, signedBeaconBlock interface{}) blocks.ROBlock {
|
||||
sb, err := blocks.NewSignedBeaconBlock(signedBeaconBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
rb, err := blocks.NewROBlock(sb)
|
||||
require.NoError(t, err)
|
||||
|
||||
return rb
|
||||
}
|
||||
|
||||
type mockDataColumnsVerifier struct {
|
||||
t *testing.T
|
||||
dataColumnSidecars []blocks.RODataColumn
|
||||
validCalled, SidecarInclusionProvenCalled, SidecarKzgProofVerifiedCalled bool
|
||||
}
|
||||
|
||||
var _ verification.DataColumnsVerifier = &mockDataColumnsVerifier{}
|
||||
|
||||
func (m *mockDataColumnsVerifier) VerifiedRODataColumns() ([]blocks.VerifiedRODataColumn, error) {
|
||||
require.Equal(m.t, true, m.validCalled && m.SidecarInclusionProvenCalled && m.SidecarKzgProofVerifiedCalled)
|
||||
|
||||
verifiedDataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, len(m.dataColumnSidecars))
|
||||
for _, dataColumnSidecar := range m.dataColumnSidecars {
|
||||
verifiedDataColumnSidecar := blocks.NewVerifiedRODataColumn(dataColumnSidecar)
|
||||
verifiedDataColumnSidecars = append(verifiedDataColumnSidecars, verifiedDataColumnSidecar)
|
||||
}
|
||||
|
||||
return verifiedDataColumnSidecars, nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) SatisfyRequirement(verification.Requirement) {}
|
||||
|
||||
func (m *mockDataColumnsVerifier) ValidFields() error {
|
||||
m.validCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) CorrectSubnet(dataColumnSidecarSubTopic string, expectedTopics []string) error {
|
||||
return nil
|
||||
}
|
||||
func (m *mockDataColumnsVerifier) NotFromFutureSlot() error { return nil }
|
||||
func (m *mockDataColumnsVerifier) SlotAboveFinalized() error { return nil }
|
||||
func (m *mockDataColumnsVerifier) ValidProposerSignature(ctx context.Context) error { return nil }
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarParentSeen(parentSeen func([fieldparams.RootLength]byte) bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarParentValid(badParent func([fieldparams.RootLength]byte) bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarParentSlotLower() error { return nil }
|
||||
func (m *mockDataColumnsVerifier) SidecarDescendsFromFinalized() error { return nil }
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarInclusionProven() error {
|
||||
m.SidecarInclusionProvenCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarKzgProofVerified() error {
|
||||
m.SidecarKzgProofVerifiedCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarProposerExpected(ctx context.Context) error { return nil }
|
||||
@@ -99,20 +99,26 @@ func (e *blobCacheEntry) filter(root [32]byte, kc [][]byte, slot primitives.Slot
|
||||
if e.diskSummary.HasIndex(i) {
|
||||
continue
|
||||
}
|
||||
// Check if e.scs has this index before accessing
|
||||
var sidecar *blocks.ROBlob
|
||||
if i < uint64(len(e.scs)) {
|
||||
sidecar = e.scs[i]
|
||||
}
|
||||
|
||||
if kc[i] == nil {
|
||||
if e.scs[i] != nil {
|
||||
return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, no block commitment", root, i, e.scs[i].KzgCommitment)
|
||||
if sidecar != nil {
|
||||
return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, no block commitment", root, i, sidecar.KzgCommitment)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if e.scs[i] == nil {
|
||||
if sidecar == nil {
|
||||
return nil, errors.Wrapf(errMissingSidecar, "root=%#x, index=%#x", root, i)
|
||||
}
|
||||
if !bytes.Equal(kc[i], e.scs[i].KzgCommitment) {
|
||||
return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, block commitment=%#x", root, i, e.scs[i].KzgCommitment, kc[i])
|
||||
if !bytes.Equal(kc[i], sidecar.KzgCommitment) {
|
||||
return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, block commitment=%#x", root, i, sidecar.KzgCommitment, kc[i])
|
||||
}
|
||||
scs = append(scs, *e.scs[i])
|
||||
scs = append(scs, *sidecar)
|
||||
}
|
||||
|
||||
return scs, nil
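The fix above guards every access to e.scs with a length check instead of indexing blindly. A hypothetical helper expressing the same pattern (the real code inlines the check):

// sidecarAt returns the cached sidecar at index i, or nil when the backing slice is
// shorter than i+1 - the exact situation that used to panic.
func sidecarAt(scs []*blocks.ROBlob, i uint64) *blocks.ROBlob {
    if i < uint64(len(scs)) {
        return scs[i]
    }
    return nil
}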
|
||||
|
||||
@@ -155,6 +155,41 @@ func TestFilter(t *testing.T) {
|
||||
},
|
||||
err: errCommitmentMismatch,
|
||||
},
|
||||
{
|
||||
name: "empty scs array with commitments",
|
||||
setup: func(t *testing.T) (*blobCacheEntry, [][]byte, []blocks.ROBlob) {
|
||||
// This reproduces the panic condition where entry.scs is empty or nil
|
||||
// but we have commitments to check
|
||||
entry := &blobCacheEntry{
|
||||
scs: nil, // Empty/nil array that caused the panic
|
||||
}
|
||||
// Create a commitment that would trigger the check at index 0
|
||||
commits := [][]byte{
|
||||
bytesutil.PadTo([]byte("commitment1"), 48),
|
||||
}
|
||||
return entry, commits, nil
|
||||
},
|
||||
err: errMissingSidecar,
|
||||
},
|
||||
{
|
||||
name: "scs array shorter than commitments",
|
||||
setup: func(t *testing.T) (*blobCacheEntry, [][]byte, []blocks.ROBlob) {
|
||||
// This reproduces the condition where entry.scs exists but is shorter
|
||||
// than the number of commitments we're checking
|
||||
entry := &blobCacheEntry{
|
||||
scs: make([]*blocks.ROBlob, 2), // Only 2 slots
|
||||
}
|
||||
// Create 4 commitments; accessing indices 2 and 3 would have panicked without the bounds check
|
||||
commits := [][]byte{
|
||||
nil,
|
||||
nil,
|
||||
bytesutil.PadTo([]byte("commitment3"), 48),
|
||||
bytesutil.PadTo([]byte("commitment4"), 48),
|
||||
}
|
||||
return entry, commits, nil
|
||||
},
|
||||
err: errMissingSidecar,
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
|
||||
@@ -15,5 +15,5 @@ import (
|
||||
// durably persisted before returning a non-error value.
|
||||
type AvailabilityStore interface {
|
||||
IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
|
||||
Persist(current primitives.Slot, sc ...blocks.ROSidecar) error
|
||||
Persist(current primitives.Slot, blobSidecar ...blocks.ROBlob) error
|
||||
}
|
||||
|
||||
@@ -5,13 +5,12 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
errors "github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// MockAvailabilityStore is an implementation of AvailabilityStore that can be used by other packages in tests.
|
||||
type MockAvailabilityStore struct {
|
||||
VerifyAvailabilityCallback func(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
|
||||
PersistBlobsCallback func(current primitives.Slot, sc ...blocks.ROBlob) error
|
||||
PersistBlobsCallback func(current primitives.Slot, blobSidecar ...blocks.ROBlob) error
|
||||
}
|
||||
|
||||
var _ AvailabilityStore = &MockAvailabilityStore{}
|
||||
@@ -25,13 +24,9 @@ func (m *MockAvailabilityStore) IsDataAvailable(ctx context.Context, current pri
|
||||
}
|
||||
|
||||
// Persist satisfies the corresponding method of the AvailabilityStore interface in a way that is useful for tests.
|
||||
func (m *MockAvailabilityStore) Persist(current primitives.Slot, sc ...blocks.ROSidecar) error {
|
||||
blobSidecars, err := blocks.BlobSidecarsFromSidecars(sc)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "blob sidecars from sidecars")
|
||||
}
|
||||
func (m *MockAvailabilityStore) Persist(current primitives.Slot, blobSidecar ...blocks.ROBlob) error {
|
||||
if m.PersistBlobsCallback != nil {
|
||||
return m.PersistBlobsCallback(current, blobSidecars...)
|
||||
return m.PersistBlobsCallback(current, blobSidecar...)
|
||||
}
|
||||
return nil
|
||||
}
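Tests that need to observe what callers hand to Persist can wire the callback as below. This is a sketch assuming the test lives in the same package; newRecordingStore is a hypothetical name, not part of this change.

func newRecordingStore(received *[]blocks.ROBlob) *MockAvailabilityStore {
    return &MockAvailabilityStore{
        PersistBlobsCallback: func(current primitives.Slot, blobSidecar ...blocks.ROBlob) error {
            // Capture every sidecar the caller attempts to persist.
            *received = append(*received, blobSidecar...)
            return nil
        },
    }
}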
|
||||
|
||||
@@ -100,6 +100,14 @@ type (
|
||||
}
|
||||
)
|
||||
|
||||
// DataColumnStorageReader is an interface to read data column sidecars from the filesystem.
|
||||
type DataColumnStorageReader interface {
|
||||
Summary(root [fieldparams.RootLength]byte) DataColumnStorageSummary
|
||||
Get(root [fieldparams.RootLength]byte, indices []uint64) ([]blocks.VerifiedRODataColumn, error)
|
||||
}
|
||||
|
||||
var _ DataColumnStorageReader = &DataColumnStorage{}
|
||||
|
||||
// WithDataColumnBasePath is a required option that sets the base path of data column storage.
|
||||
func WithDataColumnBasePath(base string) DataColumnStorageOption {
|
||||
return func(b *DataColumnStorage) error {
|
||||
|
||||
@@ -84,12 +84,6 @@ func (s DataColumnStorageSummary) Stored() map[uint64]bool {
|
||||
return stored
|
||||
}
|
||||
|
||||
// DataColumnStorageSummarizer can be used to receive a summary of metadata about data columns on disk for a given root.
|
||||
// The DataColumnStorageSummary can be used to check which indices (if any) are available for a given block by root.
|
||||
type DataColumnStorageSummarizer interface {
|
||||
Summary(root [fieldparams.RootLength]byte) DataColumnStorageSummary
|
||||
}
|
||||
|
||||
type dataColumnStorageSummaryCache struct {
|
||||
mu sync.RWMutex
|
||||
dataColumnCount float64
|
||||
@@ -98,8 +92,6 @@ type dataColumnStorageSummaryCache struct {
|
||||
cache map[[fieldparams.RootLength]byte]DataColumnStorageSummary
|
||||
}
|
||||
|
||||
var _ DataColumnStorageSummarizer = &dataColumnStorageSummaryCache{}
|
||||
|
||||
func newDataColumnStorageSummaryCache() *dataColumnStorageSummaryCache {
|
||||
return &dataColumnStorageSummaryCache{
|
||||
cache: make(map[[fieldparams.RootLength]byte]DataColumnStorageSummary),
|
||||
|
||||
@@ -144,14 +144,3 @@ func NewEphemeralDataColumnStorageWithMocker(t testing.TB) (*DataColumnMocker, *
|
||||
fs, dcs := NewEphemeralDataColumnStorageAndFs(t)
|
||||
return &DataColumnMocker{fs: fs, dcs: dcs}, dcs
|
||||
}
|
||||
|
||||
func NewMockDataColumnStorageSummarizer(t *testing.T, set map[[fieldparams.RootLength]byte][]uint64) DataColumnStorageSummarizer {
|
||||
c := newDataColumnStorageSummaryCache()
|
||||
for root, indices := range set {
|
||||
if err := c.set(DataColumnsIdent{Root: root, Epoch: 0, Indices: indices}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
@@ -115,6 +115,17 @@ type NoHeadAccessDatabase interface {
|
||||
CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error
|
||||
DeleteHistoricalDataBeforeSlot(ctx context.Context, slot primitives.Slot, batchSize int) (int, error)
|
||||
|
||||
// Genesis operations.
|
||||
LoadGenesis(ctx context.Context, stateBytes []byte) error
|
||||
SaveGenesisData(ctx context.Context, state state.BeaconState) error
|
||||
EnsureEmbeddedGenesis(ctx context.Context) error
|
||||
|
||||
// Support for checkpoint sync and backfill.
|
||||
SaveOriginCheckpointBlockRoot(ctx context.Context, blockRoot [32]byte) error
|
||||
SaveOrigin(ctx context.Context, serState, serBlock []byte) error
|
||||
SaveBackfillStatus(context.Context, *dbval.BackfillStatus) error
|
||||
BackfillFinalizedIndex(ctx context.Context, blocks []blocks.ROBlock, finalizedChildRoot [32]byte) error
|
||||
|
||||
// Custody operations.
|
||||
UpdateSubscribedToAllDataSubnets(ctx context.Context, subscribed bool) (bool, error)
|
||||
UpdateCustodyInfo(ctx context.Context, earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error)
|
||||
@@ -131,16 +142,6 @@ type HeadAccessDatabase interface {
|
||||
HeadBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, error)
|
||||
HeadBlockRoot() ([32]byte, error)
|
||||
SaveHeadBlockRoot(ctx context.Context, blockRoot [32]byte) error
|
||||
|
||||
// Genesis operations.
|
||||
LoadGenesis(ctx context.Context, stateBytes []byte) error
|
||||
SaveGenesisData(ctx context.Context, state state.BeaconState) error
|
||||
EnsureEmbeddedGenesis(ctx context.Context) error
|
||||
|
||||
// Support for checkpoint sync and backfill.
|
||||
SaveOrigin(ctx context.Context, serState, serBlock []byte) error
|
||||
SaveBackfillStatus(context.Context, *dbval.BackfillStatus) error
|
||||
BackfillFinalizedIndex(ctx context.Context, blocks []blocks.ROBlock, finalizedChildRoot [32]byte) error
|
||||
}
|
||||
|
||||
// SlasherDatabase interface for persisting data related to detecting slashable offenses on Ethereum.
|
||||
|
||||
@@ -20,7 +20,7 @@ func TestSaveOrigin(t *testing.T) {
|
||||
|
||||
// Initialize genesis with mainnet config - this will load the embedded mainnet state
|
||||
require.NoError(t, genesis.Initialize(ctx, t.TempDir()))
|
||||
|
||||
|
||||
// Get the initialized genesis state
|
||||
st, err := genesis.State()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -186,6 +186,7 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
|
||||
beacon.ConfigOptions = append([]params.Option{params.WithGenesisValidatorsRoot(genesis.ValidatorsRoot())}, beacon.ConfigOptions...)
|
||||
params.BeaconConfig().ApplyOptions(beacon.ConfigOptions...)
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
params.LogDigests(params.BeaconConfig())
|
||||
|
||||
synchronizer := startup.NewClockSynchronizer()
|
||||
beacon.clockWaiter = synchronizer
|
||||
@@ -844,6 +845,7 @@ func (b *BeaconNode) registerInitialSyncService(complete chan struct{}) error {
|
||||
ClockWaiter: b.clockWaiter,
|
||||
InitialSyncComplete: complete,
|
||||
BlobStorage: b.BlobStorage,
|
||||
DataColumnStorage: b.DataColumnStorage,
|
||||
}, opts...)
|
||||
return b.services.RegisterService(is)
|
||||
}
|
||||
@@ -965,6 +967,7 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
|
||||
Router: router,
|
||||
ClockWaiter: b.clockWaiter,
|
||||
BlobStorage: b.BlobStorage,
|
||||
DataColumnStorage: b.DataColumnStorage,
|
||||
TrackedValidatorsCache: b.trackedValidatorsCache,
|
||||
PayloadIDCache: b.payloadIDCache,
|
||||
LCStore: b.lcStore,
|
||||
|
||||
@@ -72,7 +72,6 @@ go_library(
|
||||
"//monitoring/tracing:go_default_library",
|
||||
"//monitoring/tracing/trace:go_default_library",
|
||||
"//network:go_default_library",
|
||||
"//network/forks:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/metadata:go_default_library",
|
||||
"//runtime:go_default_library",
|
||||
@@ -169,7 +168,6 @@ go_test(
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//network:go_default_library",
|
||||
"//network/forks:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/metadata:go_default_library",
|
||||
@@ -179,6 +177,7 @@ go_test(
|
||||
"//testing/util:go_default_library",
|
||||
"//time:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//crypto:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/discover:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
@@ -196,7 +195,6 @@ go_test(
|
||||
"@com_github_multiformats_go_multiaddr//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
],
|
||||
|
||||
@@ -15,7 +15,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/crypto/hash"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/tracing"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
|
||||
"github.com/OffchainLabs/prysm/v6/network/forks"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
@@ -274,14 +273,8 @@ func (s *Service) BroadcastLightClientOptimisticUpdate(ctx context.Context, upda
|
||||
return errors.New("attempted to broadcast nil light client optimistic update")
|
||||
}
|
||||
|
||||
forkDigest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(update.AttestedHeader().Beacon().Slot), s.genesisValidatorsRoot)
|
||||
if err != nil {
|
||||
err := errors.Wrap(err, "could not retrieve fork digest")
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.broadcastObject(ctx, update, lcOptimisticToTopic(forkDigest)); err != nil {
|
||||
digest := params.ForkDigest(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))
|
||||
if err := s.broadcastObject(ctx, update, lcOptimisticToTopic(digest)); err != nil {
|
||||
log.WithError(err).Debug("Failed to broadcast light client optimistic update")
|
||||
err := errors.Wrap(err, "could not publish message")
|
||||
tracing.AnnotateError(span, err)
|
||||
@@ -300,13 +293,7 @@ func (s *Service) BroadcastLightClientFinalityUpdate(ctx context.Context, update
|
||||
return errors.New("attempted to broadcast nil light client finality update")
|
||||
}
|
||||
|
||||
forkDigest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(update.AttestedHeader().Beacon().Slot), s.genesisValidatorsRoot)
|
||||
if err != nil {
|
||||
err := errors.Wrap(err, "could not retrieve fork digest")
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
|
||||
forkDigest := params.ForkDigest(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))
|
||||
if err := s.broadcastObject(ctx, update, lcFinalityToTopic(forkDigest)); err != nil {
|
||||
log.WithError(err).Debug("Failed to broadcast light client finality update")
|
||||
err := errors.Wrap(err, "could not publish message")
|
||||
@@ -318,15 +305,15 @@ func (s *Service) BroadcastLightClientFinalityUpdate(ctx context.Context, update
|
||||
return nil
|
||||
}
|
||||
|
||||
// BroadcastDataColumn broadcasts a data column to the p2p network, the message is assumed to be
|
||||
// BroadcastDataColumnSidecar broadcasts a data column to the p2p network, the message is assumed to be
|
||||
// broadcast to the current fork and to the input column subnet.
|
||||
func (s *Service) BroadcastDataColumn(
|
||||
func (s *Service) BroadcastDataColumnSidecar(
|
||||
root [fieldparams.RootLength]byte,
|
||||
dataColumnSubnet uint64,
|
||||
dataColumnSidecar *ethpb.DataColumnSidecar,
|
||||
) error {
|
||||
// Add tracing to the function.
|
||||
ctx, span := trace.StartSpan(s.ctx, "p2p.BroadcastDataColumn")
|
||||
ctx, span := trace.StartSpan(s.ctx, "p2p.BroadcastDataColumnSidecar")
|
||||
defer span.End()
|
||||
|
||||
// Ensure the data column sidecar is not nil.
|
||||
@@ -343,12 +330,12 @@ func (s *Service) BroadcastDataColumn(
|
||||
}
|
||||
|
||||
// Non-blocking broadcast, with attempts to discover a column subnet peer if none available.
|
||||
go s.internalBroadcastDataColumn(ctx, root, dataColumnSubnet, dataColumnSidecar, forkDigest)
|
||||
go s.internalBroadcastDataColumnSidecar(ctx, root, dataColumnSubnet, dataColumnSidecar, forkDigest)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) internalBroadcastDataColumn(
|
||||
func (s *Service) internalBroadcastDataColumnSidecar(
|
||||
ctx context.Context,
|
||||
root [fieldparams.RootLength]byte,
|
||||
columnSubnet uint64,
|
||||
@@ -356,7 +343,7 @@ func (s *Service) internalBroadcastDataColumn(
|
||||
forkDigest [fieldparams.VersionLength]byte,
|
||||
) {
|
||||
// Add tracing to the function.
|
||||
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastDataColumn")
|
||||
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastDataColumnSidecar")
|
||||
defer span.End()
|
||||
|
||||
// Increase the number of broadcast attempts.
|
||||
|
||||
@@ -15,12 +15,13 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
|
||||
p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/wrapper"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/network/forks"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
testpb "github.com/OffchainLabs/prysm/v6/proto/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
@@ -59,6 +60,7 @@ func TestService_Broadcast(t *testing.T) {
|
||||
topic := "/eth2/%x/testing"
|
||||
// Set a test gossip mapping for testpb.TestSimpleMessage.
|
||||
GossipTypeMapping[reflect.TypeOf(msg)] = topic
|
||||
p.clock = startup.NewClock(p.genesisTime, bytesutil.ToBytes32(p.genesisValidatorsRoot))
|
||||
digest, err := p.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
topic = fmt.Sprintf(topic, digest)
|
||||
@@ -551,9 +553,7 @@ func TestService_BroadcastLightClientOptimisticUpdate(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
GossipTypeMapping[reflect.TypeOf(msg)] = LightClientOptimisticUpdateTopicFormat
|
||||
digest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot), p.genesisValidatorsRoot)
|
||||
require.NoError(t, err)
|
||||
topic := fmt.Sprintf(LightClientOptimisticUpdateTopicFormat, digest)
|
||||
topic := fmt.Sprintf(LightClientOptimisticUpdateTopicFormat, params.ForkDigest(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot)))
|
||||
|
||||
// External peer subscribes to the topic.
|
||||
topic += p.Encoding().ProtocolSuffix()
|
||||
@@ -617,9 +617,7 @@ func TestService_BroadcastLightClientFinalityUpdate(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
GossipTypeMapping[reflect.TypeOf(msg)] = LightClientFinalityUpdateTopicFormat
|
||||
digest, err := forks.ForkDigestFromEpoch(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot), p.genesisValidatorsRoot)
|
||||
require.NoError(t, err)
|
||||
topic := fmt.Sprintf(LightClientFinalityUpdateTopicFormat, digest)
|
||||
topic := fmt.Sprintf(LightClientFinalityUpdateTopicFormat, params.ForkDigest(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot)))
|
||||
|
||||
// External peer subscribes to the topic.
|
||||
topic += p.Encoding().ProtocolSuffix()
|
||||
@@ -718,7 +716,7 @@ func TestService_BroadcastDataColumn(t *testing.T) {
|
||||
|
||||
// Attempt to broadcast nil object should fail.
|
||||
var emptyRoot [fieldparams.RootLength]byte
|
||||
err = service.BroadcastDataColumn(emptyRoot, subnet, nil)
|
||||
err = service.BroadcastDataColumnSidecar(emptyRoot, subnet, nil)
|
||||
require.ErrorContains(t, "attempted to broadcast nil", err)
|
||||
|
||||
// Subscribe to the topic.
|
||||
@@ -729,7 +727,7 @@ func TestService_BroadcastDataColumn(t *testing.T) {
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
|
||||
// Broadcast to peers and wait.
|
||||
err = service.BroadcastDataColumn(emptyRoot, subnet, sidecar)
|
||||
err = service.BroadcastDataColumnSidecar(emptyRoot, subnet, sidecar)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Receive the message.
|
||||
|
||||
@@ -443,20 +443,27 @@ func (s *Service) findPeers(ctx context.Context, missingPeerCount uint) ([]*enod
|
||||
return peersToDial, ctx.Err()
|
||||
}
|
||||
|
||||
// Skip peer not matching the filter.
|
||||
node := iterator.Node()
|
||||
if !s.filterPeer(node) {
|
||||
continue
|
||||
}
|
||||
|
||||
// Remove duplicates, keeping the node with higher seq.
|
||||
existing, ok := nodeByNodeID[node.ID()]
|
||||
if ok && existing.Seq() > node.Seq() {
|
||||
if ok && existing.Seq() >= node.Seq() {
|
||||
continue // keep existing and skip.
|
||||
}
|
||||
|
||||
// A record that already exists in nodeByNodeID but carries a higher seq number is treated as a new peer.
|
||||
// Skip peer not matching the filter.
|
||||
if !s.filterPeer(node) {
|
||||
if ok {
|
||||
// this means the existing peer with the lower sequence number is no longer valid
|
||||
delete(nodeByNodeID, existing.ID())
|
||||
missingPeerCount++
|
||||
}
|
||||
continue
|
||||
}
|
||||
nodeByNodeID[node.ID()] = node
|
||||
|
||||
// We found a new peer. Decrease the missing peer count.
|
||||
nodeByNodeID[node.ID()] = node
|
||||
missingPeerCount--
|
||||
}
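The loop above keeps, per node ID, only the record with the highest ENR sequence number, and re-filters any replacement record. The deduplication rule in isolation (a hypothetical helper, not the service code):

func dedupeBySeq(nodes []*enode.Node) map[enode.ID]*enode.Node {
    byID := make(map[enode.ID]*enode.Node, len(nodes))
    for _, node := range nodes {
        if existing, ok := byID[node.ID()]; ok && existing.Seq() >= node.Seq() {
            // An equal-or-newer record is already known; skip this one.
            continue
        }
        byID[node.ID()] = node
    }
    return byID
}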
|
||||
|
||||
@@ -585,8 +592,11 @@ func (s *Service) createLocalNode(
|
||||
localNode.SetFallbackIP(ipAddr)
|
||||
localNode.SetFallbackUDP(udpPort)
|
||||
|
||||
localNode, err = addForkEntry(localNode, s.genesisTime, s.genesisValidatorsRoot)
|
||||
if err != nil {
|
||||
currentSlot := slots.CurrentSlot(s.genesisTime)
|
||||
currentEpoch := slots.ToEpoch(currentSlot)
|
||||
current := params.GetNetworkScheduleEntry(currentEpoch)
|
||||
next := params.NextNetworkScheduleEntry(currentEpoch)
|
||||
if err := updateENR(localNode, current, next); err != nil {
|
||||
return nil, errors.Wrap(err, "could not add eth2 fork version entry to enr")
|
||||
}
|
||||
|
||||
@@ -707,7 +717,7 @@ func (s *Service) filterPeer(node *enode.Node) bool {
|
||||
// Ignore nodes that don't match our fork digest.
|
||||
nodeENR := node.Record()
|
||||
if s.genesisValidatorsRoot != nil {
|
||||
if err := s.compareForkENR(nodeENR); err != nil {
|
||||
if err := compareForkENR(s.dv5Listener.LocalNode().Node().Record(), nodeENR); err != nil {
|
||||
log.WithError(err).Trace("Fork ENR mismatches between peer and local node")
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -1,9 +1,11 @@
|
||||
package p2p
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
mathRand "math/rand"
|
||||
"net"
|
||||
@@ -58,6 +60,81 @@ func createAddrAndPrivKey(t *testing.T) (net.IP, *ecdsa.PrivateKey) {
|
||||
return ipAddr, pkey
|
||||
}
|
||||
|
||||
// createTestNodeWithID creates a LocalNode for testing with a deterministic private key.
// This is needed for deduplication tests, where the same node ID must be reused across different sequence numbers.
|
||||
func createTestNodeWithID(t *testing.T, id string) *enode.LocalNode {
|
||||
// Create a deterministic reader based on the ID for consistent key generation
|
||||
h := sha256.New()
|
||||
h.Write([]byte(id))
|
||||
seedBytes := h.Sum(nil)
|
||||
|
||||
// Create a deterministic reader using the seed
|
||||
deterministicReader := bytes.NewReader(seedBytes)
|
||||
|
||||
// Generate the private key using the same approach as the production code
|
||||
privKey, _, err := crypto.GenerateSecp256k1Key(deterministicReader)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Convert to ECDSA private key for enode usage
|
||||
ecdsaPrivKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(privKey)
|
||||
require.NoError(t, err)
|
||||
|
||||
db, err := enode.OpenDB("")
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() { db.Close() })
|
||||
|
||||
localNode := enode.NewLocalNode(db, ecdsaPrivKey)
|
||||
|
||||
// Set basic properties
|
||||
localNode.SetStaticIP(net.ParseIP("127.0.0.1"))
|
||||
localNode.Set(enr.TCP(3000))
|
||||
localNode.Set(enr.UDP(3000))
|
||||
localNode.Set(enr.WithEntry(eth2EnrKey, make([]byte, 16)))
|
||||
|
||||
return localNode
|
||||
}
|
||||
|
||||
// createTestNodeRandom creates a LocalNode for testing using the existing createAddrAndPrivKey function
|
||||
func createTestNodeRandom(t *testing.T) *enode.LocalNode {
|
||||
_, privKey := createAddrAndPrivKey(t)
|
||||
|
||||
db, err := enode.OpenDB("")
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() { db.Close() })
|
||||
|
||||
localNode := enode.NewLocalNode(db, privKey)
|
||||
|
||||
// Set basic properties
|
||||
localNode.SetStaticIP(net.ParseIP("127.0.0.1"))
|
||||
localNode.Set(enr.TCP(3000))
|
||||
localNode.Set(enr.UDP(3000))
|
||||
localNode.Set(enr.WithEntry(eth2EnrKey, make([]byte, 16)))
|
||||
|
||||
return localNode
|
||||
}
|
||||
|
||||
// setNodeSeq updates a LocalNode to have the specified sequence number
|
||||
func setNodeSeq(localNode *enode.LocalNode, seq uint64) {
|
||||
// Force the sequence number up to the target value: a fresh record starts at seq 1,
// and each Set call below bumps it by one.
|
||||
currentSeq := localNode.Node().Seq()
|
||||
for currentSeq < seq {
|
||||
localNode.Set(enr.WithEntry("dummy", currentSeq))
|
||||
currentSeq++
|
||||
}
|
||||
}
|
||||
|
||||
// setNodeSubnets sets the attestation subnets for a LocalNode
|
||||
func setNodeSubnets(localNode *enode.LocalNode, attSubnets []uint64) {
|
||||
if len(attSubnets) > 0 {
|
||||
bitV := bitfield.NewBitvector64()
|
||||
for _, subnet := range attSubnets {
|
||||
bitV.SetBitAt(subnet, true)
|
||||
}
|
||||
localNode.Set(enr.WithEntry(attSubnetEnrKey, &bitV))
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateListener(t *testing.T) {
|
||||
port := 1024
|
||||
ipAddr, pkey := createAddrAndPrivKey(t)
|
||||
@@ -241,7 +318,7 @@ func TestCreateLocalNode(t *testing.T) {
|
||||
|
||||
// Check fork is set.
|
||||
fork := new([]byte)
|
||||
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(eth2ENRKey, fork)))
|
||||
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(eth2EnrKey, fork)))
|
||||
require.NotEmpty(t, *fork)
|
||||
|
||||
// Check att subnets.
|
||||
@@ -492,7 +569,7 @@ func TestMultipleDiscoveryAddresses(t *testing.T) {
|
||||
node := enode.NewLocalNode(db, key)
|
||||
node.Set(enr.IPv4{127, 0, 0, 1})
|
||||
node.Set(enr.IPv6{0x20, 0x01, 0x48, 0x60, 0, 0, 0x20, 0x01, 0, 0, 0, 0, 0, 0, 0x00, 0x68})
|
||||
s := &Service{dv5Listener: mockListener{localNode: node}}
|
||||
s := &Service{dv5Listener: testp2p.NewMockListener(node, nil)}
|
||||
|
||||
multiAddresses, err := s.DiscoveryAddresses()
|
||||
require.NoError(t, err)
|
||||
@@ -517,7 +594,7 @@ func TestDiscoveryV5_SeqNumber(t *testing.T) {
|
||||
node := enode.NewLocalNode(db, key)
|
||||
node.Set(enr.IPv4{127, 0, 0, 1})
|
||||
currentSeq := node.Seq()
|
||||
s := &Service{dv5Listener: mockListener{localNode: node}}
|
||||
s := &Service{dv5Listener: testp2p.NewMockListener(node, nil)}
|
||||
_, err = s.DiscoveryAddresses()
|
||||
require.NoError(t, err)
|
||||
newSeq := node.Seq()
|
||||
@@ -529,7 +606,7 @@ func TestDiscoveryV5_SeqNumber(t *testing.T) {
|
||||
nodeTwo.Set(enr.IPv6{0x20, 0x01, 0x48, 0x60, 0, 0, 0x20, 0x01, 0, 0, 0, 0, 0, 0, 0x00, 0x68})
|
||||
seqTwo := nodeTwo.Seq()
|
||||
assert.NotEqual(t, seqTwo, newSeq)
|
||||
sTwo := &Service{dv5Listener: mockListener{localNode: nodeTwo}}
|
||||
sTwo := &Service{dv5Listener: testp2p.NewMockListener(nodeTwo, nil)}
|
||||
_, err = sTwo.DiscoveryAddresses()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, seqTwo+1, nodeTwo.Seq())
|
||||
@@ -886,3 +963,291 @@ func TestRefreshPersistentSubnets(t *testing.T) {
|
||||
// Reset the config.
|
||||
params.OverrideBeaconConfig(defaultCfg)
|
||||
}
|
||||
|
||||
func TestFindPeers_NodeDeduplication(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cache.SubnetIDs.EmptyAllCaches()
|
||||
defer cache.SubnetIDs.EmptyAllCaches()
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Create LocalNodes and manipulate sequence numbers
|
||||
localNode1 := createTestNodeWithID(t, "node1")
|
||||
localNode2 := createTestNodeWithID(t, "node2")
|
||||
localNode3 := createTestNodeWithID(t, "node3")
|
||||
|
||||
// Create different sequence versions of node1
|
||||
setNodeSeq(localNode1, 1)
|
||||
node1_seq1 := localNode1.Node()
|
||||
setNodeSeq(localNode1, 2)
|
||||
node1_seq2 := localNode1.Node() // Same ID, higher seq
|
||||
setNodeSeq(localNode1, 3)
|
||||
node1_seq3 := localNode1.Node() // Same ID, even higher seq
|
||||
|
||||
// Other nodes with seq 1
|
||||
node2_seq1 := localNode2.Node()
|
||||
node3_seq1 := localNode3.Node()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
nodes []*enode.Node
|
||||
missingPeers uint
|
||||
expectedCount int
|
||||
description string
|
||||
eval func(t *testing.T, result []*enode.Node)
|
||||
}{
|
||||
{
|
||||
name: "No duplicates - all unique nodes",
|
||||
nodes: []*enode.Node{
|
||||
node2_seq1,
|
||||
node3_seq1,
|
||||
},
|
||||
missingPeers: 2,
|
||||
expectedCount: 2,
|
||||
description: "Should return all unique nodes without deduplication",
|
||||
eval: nil, // No special validation needed
|
||||
},
|
||||
{
|
||||
name: "Duplicate with lower seq comes first - should replace",
|
||||
nodes: []*enode.Node{
|
||||
node1_seq1,
|
||||
node1_seq2, // Higher seq, should replace
|
||||
node2_seq1, // Different node added after duplicates are processed
|
||||
},
|
||||
missingPeers: 2, // Need 2 peers so we process all nodes
|
||||
expectedCount: 2, // Should get node1 (with higher seq) and node2
|
||||
description: "Should keep node with higher sequence number when duplicate found",
|
||||
eval: func(t *testing.T, result []*enode.Node) {
|
||||
// Should have node2 and node1 with higher seq (node1_seq2)
|
||||
foundNode1WithHigherSeq := false
|
||||
for _, node := range result {
|
||||
if node.ID() == node1_seq2.ID() {
|
||||
require.Equal(t, node1_seq2.Seq(), node.Seq(), "Node1 should have higher seq")
|
||||
foundNode1WithHigherSeq = true
|
||||
}
|
||||
}
|
||||
require.Equal(t, true, foundNode1WithHigherSeq, "Should have node1 with higher seq")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Duplicate with higher seq comes first - should keep existing",
|
||||
nodes: []*enode.Node{
|
||||
node1_seq3, // Higher seq
|
||||
node1_seq2, // Lower seq, should be skipped (continue branch)
|
||||
node1_seq1, // Even lower seq, should also be skipped (continue branch)
|
||||
node2_seq1, // Different node added after duplicates are processed
|
||||
},
|
||||
missingPeers: 2,
|
||||
expectedCount: 2,
|
||||
description: "Should keep existing node when it has higher sequence number and skip all lower seq duplicates",
|
||||
eval: func(t *testing.T, result []*enode.Node) {
|
||||
// Should have kept the node with highest seq (node1_seq3)
|
||||
foundNode1WithHigherSeq := false
|
||||
for _, node := range result {
|
||||
if node.ID() == node1_seq3.ID() {
|
||||
require.Equal(t, node1_seq3.Seq(), node.Seq(), "Node1 should have highest seq")
|
||||
foundNode1WithHigherSeq = true
|
||||
}
|
||||
}
|
||||
require.Equal(t, true, foundNode1WithHigherSeq, "Should have node1 with highest seq")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Multiple duplicates with increasing seq",
|
||||
nodes: []*enode.Node{
|
||||
node1_seq1,
|
||||
node1_seq2, // Should replace seq1
|
||||
node1_seq3, // Should replace seq2
|
||||
node2_seq1, // Different node added after duplicates are processed
|
||||
},
|
||||
missingPeers: 2,
|
||||
expectedCount: 2,
|
||||
description: "Should keep updating to highest sequence number",
|
||||
eval: func(t *testing.T, result []*enode.Node) {
|
||||
// Should have the node with highest seq (node1_seq3)
|
||||
foundNode1WithHigherSeq := false
|
||||
for _, node := range result {
|
||||
if node.ID() == node1_seq3.ID() {
|
||||
require.Equal(t, node1_seq3.Seq(), node.Seq(), "Node1 should have highest seq")
|
||||
foundNode1WithHigherSeq = true
|
||||
}
|
||||
}
|
||||
require.Equal(t, true, foundNode1WithHigherSeq, "Should have node1 with highest seq")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Duplicate with equal seq comes after - should skip",
|
||||
nodes: []*enode.Node{
|
||||
node1_seq2, // First occurrence
|
||||
node1_seq2, // Same exact node instance, should be skipped (continue branch for >= case)
|
||||
node2_seq1, // Different node
|
||||
},
|
||||
missingPeers: 2,
|
||||
expectedCount: 2,
|
||||
description: "Should skip duplicate with equal sequence number",
|
||||
eval: func(t *testing.T, result []*enode.Node) {
|
||||
// Should have exactly one instance of node1_seq2 and one instance of node2_seq1
|
||||
foundNode1 := false
|
||||
foundNode2 := false
|
||||
for _, node := range result {
|
||||
if node.ID() == node1_seq2.ID() {
|
||||
require.Equal(t, node1_seq2.Seq(), node.Seq(), "Node1 should have the expected seq")
|
||||
require.Equal(t, false, foundNode1, "Should have only one instance of node1") // Ensure no duplicates
|
||||
foundNode1 = true
|
||||
}
|
||||
if node.ID() == node2_seq1.ID() {
|
||||
foundNode2 = true
|
||||
}
|
||||
}
|
||||
require.Equal(t, true, foundNode1, "Should have node1")
|
||||
require.Equal(t, true, foundNode2, "Should have node2")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Mix of unique and duplicate nodes",
|
||||
nodes: []*enode.Node{
|
||||
node1_seq1,
|
||||
node2_seq1,
|
||||
node1_seq2, // Should replace node1_seq1
|
||||
node3_seq1,
|
||||
node1_seq3, // Should replace node1_seq2
|
||||
},
|
||||
missingPeers: 3,
|
||||
expectedCount: 3,
|
||||
description: "Should handle mix of unique nodes and duplicates correctly",
|
||||
eval: nil, // Basic count validation is sufficient
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
fakePeer := testp2p.NewTestP2P(t)
|
||||
|
||||
s := &Service{
|
||||
cfg: &Config{
|
||||
MaxPeers: 30,
|
||||
},
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
peers: peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{},
|
||||
}),
|
||||
host: fakePeer.BHost,
|
||||
}
|
||||
|
||||
localNode := createTestNodeRandom(t)
|
||||
mockIter := testp2p.NewMockIterator(tt.nodes)
|
||||
s.dv5Listener = testp2p.NewMockListener(localNode, mockIter)
|
||||
|
||||
ctxWithTimeout, cancel := context.WithTimeout(ctx, 1*time.Second)
|
||||
defer cancel()
|
||||
|
||||
result, err := s.findPeers(ctxWithTimeout, tt.missingPeers)
|
||||
|
||||
require.NoError(t, err, tt.description)
|
||||
require.Equal(t, tt.expectedCount, len(result), tt.description)
|
||||
|
||||
if tt.eval != nil {
|
||||
tt.eval(t, result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// callbackIterator allows us to execute callbacks at specific points during iteration
|
||||
type callbackIterator struct {
|
||||
nodes []*enode.Node
|
||||
index int
|
||||
callbacks map[int]func() // map from index to callback function
|
||||
}
|
||||
|
||||
func (c *callbackIterator) Next() bool {
|
||||
// Execute callback before checking if we can continue (if one exists)
|
||||
if callback, exists := c.callbacks[c.index]; exists {
|
||||
callback()
|
||||
}
|
||||
|
||||
return c.index < len(c.nodes)
|
||||
}
|
||||
|
||||
func (c *callbackIterator) Node() *enode.Node {
|
||||
if c.index >= len(c.nodes) {
|
||||
return nil
|
||||
}
|
||||
|
||||
node := c.nodes[c.index]
|
||||
c.index++
|
||||
return node
|
||||
}
|
||||
|
||||
func (c *callbackIterator) Close() {
|
||||
// Nothing to clean up for this simple implementation
|
||||
}
|
||||
|
||||
func TestFindPeers_received_bad_existing_node(t *testing.T) {
|
||||
// This test successfully triggers delete(nodeByNodeID, node.ID()) in subnets.go by:
|
||||
// 1. Processing node1_seq1 first (passes filterPeer, gets added to the map).
// 2. The callback marks the peer as bad before node1_seq2 is processed.
// 3. Processing node1_seq2 (fails filterPeer, triggers the delete since ok is true).
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cache.SubnetIDs.EmptyAllCaches()
|
||||
defer cache.SubnetIDs.EmptyAllCaches()
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Create LocalNode with same ID but different sequences
|
||||
localNode1 := createTestNodeWithID(t, "testnode")
|
||||
node1_seq1 := localNode1.Node() // Get current node
|
||||
currentSeq := node1_seq1.Seq()
|
||||
setNodeSeq(localNode1, currentSeq+1) // Increment sequence by 1
|
||||
node1_seq2 := localNode1.Node() // This should have higher seq
|
||||
|
||||
// Additional node to ensure we have enough peers to process
|
||||
localNode2 := createTestNodeWithID(t, "othernode")
|
||||
node2 := localNode2.Node()
|
||||
|
||||
fakePeer := testp2p.NewTestP2P(t)
|
||||
|
||||
service := &Service{
|
||||
cfg: &Config{
|
||||
MaxPeers: 30,
|
||||
},
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
peers: peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{},
|
||||
}),
|
||||
host: fakePeer.BHost,
|
||||
}
|
||||
|
||||
// Create iterator with callback that marks peer as bad before processing node1_seq2
|
||||
iter := &callbackIterator{
|
||||
nodes: []*enode.Node{node1_seq1, node1_seq2, node2},
|
||||
index: 0,
|
||||
callbacks: map[int]func(){
|
||||
1: func() { // Before processing node1_seq2 (index 1)
|
||||
// Mark peer as bad before processing node1_seq2
|
||||
peerData, _, _ := convertToAddrInfo(node1_seq2)
|
||||
if peerData != nil {
|
||||
service.peers.Add(node1_seq2.Record(), peerData.ID, nil, network.DirUnknown)
|
||||
// Mark as bad peer - need enough increments to exceed threshold (6)
|
||||
for i := 0; i < 10; i++ {
|
||||
service.peers.Scorers().BadResponsesScorer().Increment(peerData.ID)
|
||||
}
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
localNode := createTestNodeRandom(t)
|
||||
service.dv5Listener = testp2p.NewMockListener(localNode, iter)
|
||||
|
||||
// Run findPeers - node1_seq1 gets processed first, then callback marks peer bad, then node1_seq2 fails
|
||||
ctxWithTimeout, cancel := context.WithTimeout(ctx, 1*time.Second)
|
||||
defer cancel()
|
||||
|
||||
result, err := service.findPeers(ctxWithTimeout, 3)
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(result))
|
||||
}
|
||||
|
||||
@@ -3,12 +3,9 @@ package p2p
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/network/forks"
|
||||
pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
prysmTime "github.com/OffchainLabs/prysm/v6/time"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
@@ -16,8 +13,17 @@ import (
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// ENR key used for Ethereum consensus-related fork data.
|
||||
var eth2ENRKey = params.BeaconNetworkConfig().ETH2Key
|
||||
var (
|
||||
errForkScheduleMismatch = errors.New("peer fork schedule incompatible")
|
||||
errCurrentDigestMismatch = errors.Wrap(errForkScheduleMismatch, "current_fork_digest mismatch")
|
||||
errNextVersionMismatch = errors.Wrap(errForkScheduleMismatch, "next_fork_version mismatch")
|
||||
errNextDigestMismatch = errors.Wrap(errForkScheduleMismatch, "nfd (next fork digest) mismatch")
|
||||
)
|
||||
|
||||
const (
|
||||
eth2EnrKey = "eth2" // The `eth2` ENR entry advertizes the node's view of the fork schedule with an ssz-encoded ENRForkID value.
|
||||
nfdEnrKey = "nfd" // The `nfd` ENR entry separately advertizes the "next fork digest" aspect of the fork schedule.
|
||||
)
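A sketch of how the `nfd` entry can be read back out of a record. The PR's actual nfd helper is not shown in this hunk and may differ; the value is assumed here to be a 4-byte fork digest.

func loadNextForkDigest(record *enr.Record) ([4]byte, error) {
    var digest [4]byte
    if err := record.Load(enr.WithEntry(nfdEnrKey, &digest)); err != nil {
        // The entry is absent on nodes that have not yet scheduled the Fulu fork.
        return [4]byte{}, errors.Wrap(err, "load nfd entry")
    }
    return digest, nil
}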
|
||||
|
||||
// ForkDigest returns the current fork digest of
|
||||
// the node according to the local clock.
|
||||
@@ -25,97 +31,136 @@ func (s *Service) currentForkDigest() ([4]byte, error) {
|
||||
if !s.isInitialized() {
|
||||
return [4]byte{}, errors.New("state is not initialized")
|
||||
}
|
||||
return forks.CreateForkDigest(s.genesisTime, s.genesisValidatorsRoot)
|
||||
|
||||
currentSlot := slots.CurrentSlot(s.genesisTime)
|
||||
currentEpoch := slots.ToEpoch(currentSlot)
|
||||
return params.ForkDigest(currentEpoch), nil
|
||||
}
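With the schedule-based API, a digest can be derived for any epoch without the genesis validators root. A hypothetical illustration, not part of this change:

// digestAtSlot returns the fork digest in force at the given slot according to the
// network schedule.
func digestAtSlot(slot primitives.Slot) [4]byte {
    return params.ForkDigest(slots.ToEpoch(slot))
}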
|
||||
|
||||
// Compares fork ENRs between an incoming peer's record and our node's
|
||||
// local record values for current and next fork version/epoch.
|
||||
func (s *Service) compareForkENR(record *enr.Record) error {
|
||||
currentRecord := s.dv5Listener.LocalNode().Node().Record()
|
||||
peerForkENR, err := forkEntry(record)
|
||||
func compareForkENR(self, peer *enr.Record) error {
|
||||
peerEntry, err := forkEntry(peer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
currentForkENR, err := forkEntry(currentRecord)
|
||||
selfEntry, err := forkEntry(self)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
enrString, err := SerializeENR(record)
|
||||
peerString, err := SerializeENR(peer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Clients SHOULD connect to peers with current_fork_digest, next_fork_version,
|
||||
// and next_fork_epoch that match local values.
|
||||
if !bytes.Equal(peerForkENR.CurrentForkDigest, currentForkENR.CurrentForkDigest) {
|
||||
return fmt.Errorf(
|
||||
if !bytes.Equal(peerEntry.CurrentForkDigest, selfEntry.CurrentForkDigest) {
|
||||
return errors.Wrapf(errCurrentDigestMismatch,
|
||||
"fork digest of peer with ENR %s: %v, does not match local value: %v",
|
||||
enrString,
|
||||
peerForkENR.CurrentForkDigest,
|
||||
currentForkENR.CurrentForkDigest,
|
||||
peerString,
|
||||
peerEntry.CurrentForkDigest,
|
||||
selfEntry.CurrentForkDigest,
|
||||
)
|
||||
}
|
||||
|
||||
// Clients MAY connect to peers with the same current_fork_version but a
|
||||
// different next_fork_version/next_fork_epoch. Unless ENRForkID is manually
|
||||
// updated to matching prior to the earlier next_fork_epoch of the two clients,
|
||||
// these type of connecting clients will be unable to successfully interact
|
||||
// starting at the earlier next_fork_epoch.
|
||||
if peerForkENR.NextForkEpoch != currentForkENR.NextForkEpoch {
|
||||
if peerEntry.NextForkEpoch != selfEntry.NextForkEpoch {
|
||||
log.WithFields(logrus.Fields{
|
||||
"peerNextForkEpoch": peerForkENR.NextForkEpoch,
|
||||
"peerENR": enrString,
|
||||
"peerNextForkEpoch": peerEntry.NextForkEpoch,
|
||||
"peerNextForkVersion": peerEntry.NextForkVersion,
|
||||
"peerENR": peerString,
|
||||
}).Trace("Peer matches fork digest but has different next fork epoch")
|
||||
// We allow the connection even though the peer's view of the next fork epoch differs from ours. This
// could be due to peers that have not upgraded ahead of a fork or BPO schedule change, so
// we allow the connection to continue until the fork boundary.
|
||||
return nil
|
||||
}
|
||||
if !bytes.Equal(peerForkENR.NextForkVersion, currentForkENR.NextForkVersion) {
|
||||
log.WithFields(logrus.Fields{
|
||||
"peerNextForkVersion": peerForkENR.NextForkVersion,
|
||||
"peerENR": enrString,
|
||||
}).Trace("Peer matches fork digest but has different next fork version")
|
||||
|
||||
// Since we agree on the next fork epoch, we require next fork version to also be in agreement.
|
||||
if !bytes.Equal(peerEntry.NextForkVersion, selfEntry.NextForkVersion) {
|
||||
return errors.Wrapf(errNextVersionMismatch,
|
||||
"next fork version of peer with ENR %s: %#x, does not match local value: %#x",
|
||||
peerString, peerEntry.NextForkVersion, selfEntry.NextForkVersion)
|
||||
}
|
||||
|
||||
// Fulu adds the following to the spec:
|
||||
// ---
|
||||
// A new entry is added to the ENR under the key nfd, short for next fork digest. This entry
|
||||
// communicates the digest of the next scheduled fork, regardless of whether it is a regular
|
||||
// or a Blob-Parameters-Only fork. This new entry MUST be added once FULU_FORK_EPOCH is assigned
|
||||
// any value other than FAR_FUTURE_EPOCH. Adding this entry prior to the Fulu fork will not
|
||||
// impact peering as nodes will ignore unknown ENR entries and nfd mismatches do not cause
|
||||
// disconnects.
|
||||
// When discovering and interfacing with peers, nodes MUST evaluate nfd alongside their existing
|
||||
// consideration of the ENRForkID::next_* fields under the eth2 key, to form a more accurate
|
||||
// view of the peer's intended next fork for the purposes of sustained peering. If there is a
|
||||
// mismatch, the node MUST NOT disconnect before the fork boundary, but it MAY disconnect
|
||||
// at/after the fork boundary.
|
||||
|
||||
// Nodes unprepared to follow the Fulu fork will be unaware of nfd entries. However, their
|
||||
// existing comparison of eth2 entries (concretely next_fork_epoch) is sufficient to detect
|
||||
// upcoming divergence.
|
||||
// ---
|
||||
|
||||
// Because this is a new in-bound connection, we lean into the pre-fulu point that clients
|
||||
// MAY connect to peers with the same current_fork_version but a different
|
||||
// next_fork_version/next_fork_epoch, which implies we can chose to not connect to them when these
|
||||
// don't match.
|
||||
//
|
||||
// Given that the next_fork_epoch matches, we will require the next_fork_digest to match.
|
||||
if !params.FuluEnabled() {
|
||||
return nil
|
||||
}
|
||||
peerNFD, selfNFD := nfd(peer), nfd(self)
|
||||
if peerNFD != selfNFD {
|
||||
return errors.Wrapf(errNextDigestMismatch,
|
||||
"next fork digest of peer with ENR %s: %v, does not match local value: %v",
|
||||
peerString, peerNFD, selfNFD)
|
||||
}
|
||||
return nil
|
||||
}
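The comparison above reduces to three rules: the current fork digest must match exactly, a differing next_fork_epoch is tolerated (and only logged) until the fork boundary, and when the next fork epoch does agree, the next fork version — and, once Fulu is scheduled, the nfd digest — must agree as well. The sketch below is a minimal standalone restatement of that decision logic; the enrForkID struct and error values are simplified stand-ins for illustration, not the Prysm types.

package main

import (
	"bytes"
	"errors"
	"fmt"
)

// Simplified stand-in for the ssz-encoded ENRForkID carried under the eth2 ENR key.
type enrForkID struct {
	CurrentForkDigest []byte
	NextForkVersion   []byte
	NextForkEpoch     uint64
}

var (
	errCurrentDigestMismatch = errors.New("current fork digest mismatch")
	errNextVersionMismatch   = errors.New("next fork version mismatch")
	errNextDigestMismatch    = errors.New("next fork digest (nfd) mismatch")
)

// compareForkIDs mirrors the peering rules above: reject on a current digest mismatch,
// tolerate a differing next fork epoch, and require agreement on the next fork version
// (and nfd, once Fulu is scheduled) when the next fork epoch matches.
func compareForkIDs(self, peer enrForkID, selfNFD, peerNFD [4]byte, fuluScheduled bool) error {
	if !bytes.Equal(self.CurrentForkDigest, peer.CurrentForkDigest) {
		return errCurrentDigestMismatch
	}
	if self.NextForkEpoch != peer.NextForkEpoch {
		// Different view of the next fork; keep the peer until the fork boundary.
		return nil
	}
	if !bytes.Equal(self.NextForkVersion, peer.NextForkVersion) {
		return errNextVersionMismatch
	}
	if fuluScheduled && selfNFD != peerNFD {
		return errNextDigestMismatch
	}
	return nil
}

func main() {
	self := enrForkID{CurrentForkDigest: []byte{1, 2, 3, 4}, NextForkVersion: []byte{5, 0, 0, 0}, NextForkEpoch: 100}
	peer := self
	peer.NextForkEpoch = 200 // the peer has not yet picked up the next scheduled fork
	fmt.Println(compareForkIDs(self, peer, [4]byte{}, [4]byte{}, true)) // <nil>: allowed until the boundary
}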
|
||||
|
||||
// Adds a fork entry as an ENR record under the Ethereum consensus EnrKey for
// the local node. The fork entry is an ssz-encoded enrForkID type
// which combines the fork digest derived from the current epoch's fork version,
// the next fork version, and the next fork epoch.
|
||||
func addForkEntry(
|
||||
node *enode.LocalNode,
|
||||
genesisTime time.Time,
|
||||
genesisValidatorsRoot []byte,
|
||||
) (*enode.LocalNode, error) {
|
||||
digest, err := forks.CreateForkDigest(genesisTime, genesisValidatorsRoot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
currentSlot := slots.CurrentSlot(genesisTime)
|
||||
currentEpoch := slots.ToEpoch(currentSlot)
|
||||
if prysmTime.Now().Before(genesisTime) {
|
||||
currentEpoch = 0
|
||||
}
|
||||
nextForkVersion, nextForkEpoch, err := forks.NextForkData(currentEpoch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
func updateENR(node *enode.LocalNode, entry, next params.NetworkScheduleEntry) error {
|
||||
enrForkID := &pb.ENRForkID{
|
||||
CurrentForkDigest: digest[:],
|
||||
NextForkVersion: nextForkVersion[:],
|
||||
NextForkEpoch: nextForkEpoch,
|
||||
CurrentForkDigest: entry.ForkDigest[:],
|
||||
NextForkVersion: next.ForkVersion[:],
|
||||
NextForkEpoch: next.Epoch,
|
||||
}
|
||||
if entry.Epoch == next.Epoch {
|
||||
enrForkID.NextForkEpoch = params.BeaconConfig().FarFutureEpoch
|
||||
}
|
||||
logFields := logrus.Fields{
|
||||
"CurrentForkDigest": fmt.Sprintf("%#x", enrForkID.CurrentForkDigest),
|
||||
"NextForkVersion": fmt.Sprintf("%#x", enrForkID.NextForkVersion),
|
||||
"NextForkEpoch": fmt.Sprintf("%d", enrForkID.NextForkEpoch),
|
||||
}
|
||||
if params.BeaconConfig().FuluForkEpoch != params.BeaconConfig().FarFutureEpoch {
|
||||
if entry.ForkDigest == next.ForkDigest {
|
||||
node.Set(enr.WithEntry(nfdEnrKey, make([]byte, len(next.ForkDigest))))
|
||||
} else {
|
||||
node.Set(enr.WithEntry(nfdEnrKey, next.ForkDigest[:]))
|
||||
}
|
||||
logFields["NextForkDigest"] = fmt.Sprintf("%#x", next.ForkDigest)
|
||||
}
|
||||
log.WithFields(logFields).Info("Updating ENR Fork ID")
|
||||
enc, err := enrForkID.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
forkEntry := enr.WithEntry(eth2ENRKey, enc)
|
||||
forkEntry := enr.WithEntry(eth2EnrKey, enc)
|
||||
node.Set(forkEntry)
|
||||
return node, nil
|
||||
return nil
|
||||
}
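The branch above encodes two conventions that are easy to miss: when the current schedule entry is also the last one (no further fork is planned), next_fork_epoch is advertised as FAR_FUTURE_EPOCH, and once Fulu is scheduled the nfd entry is written as a zeroed digest rather than omitted when no digest change is coming. A small sketch of deriving those advertised values from a pair of current/next entries follows; scheduleEntry is a simplified local type, not the params API.

package main

import "fmt"

const farFutureEpoch = ^uint64(0)

// scheduleEntry is a simplified local stand-in for a network schedule entry.
type scheduleEntry struct {
	Epoch       uint64
	ForkVersion [4]byte
	ForkDigest  [4]byte
}

// advertised returns the next-fork fields a node would place in its ENR given the
// current and next schedule entries, following the conventions shown above.
func advertised(current, next scheduleEntry) (nextVersion [4]byte, nextEpoch uint64, nfd [4]byte) {
	nextVersion, nextEpoch = next.ForkVersion, next.Epoch
	if current.Epoch == next.Epoch {
		// No further fork is scheduled: advertise FAR_FUTURE_EPOCH.
		nextEpoch = farFutureEpoch
	}
	if current.ForkDigest != next.ForkDigest {
		nfd = next.ForkDigest // otherwise nfd stays zeroed
	}
	return nextVersion, nextEpoch, nfd
}

func main() {
	cur := scheduleEntry{Epoch: 100, ForkVersion: [4]byte{5, 0, 0, 0}, ForkDigest: [4]byte{1, 2, 3, 4}}
	nxt := scheduleEntry{Epoch: 200, ForkVersion: [4]byte{6, 0, 0, 0}, ForkDigest: [4]byte{9, 9, 9, 9}}
	v, e, d := advertised(cur, nxt)
	fmt.Printf("next_fork_version=%#x next_fork_epoch=%d nfd=%#x\n", v, e, d)
}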
|
||||
|
||||
// Retrieves an enrForkID from an ENR record by key lookup
// under the Ethereum consensus EnrKey
func forkEntry(record *enr.Record) (*pb.ENRForkID, error) {
sszEncodedForkEntry := make([]byte, 16)
entry := enr.WithEntry(eth2ENRKey, &sszEncodedForkEntry)
entry := enr.WithEntry(eth2EnrKey, &sszEncodedForkEntry)
err := record.Load(entry)
if err != nil {
return nil, err
@@ -126,3 +171,15 @@ func forkEntry(record *enr.Record) (*pb.ENRForkID, error) {
}
return forkEntry, nil
}

// nfd retrieves the value of the `nfd` ("next fork digest") key from an ENR record.
func nfd(record *enr.Record) [4]byte {
digest := [4]byte{}
entry := enr.WithEntry(nfdEnrKey, &digest)
if err := record.Load(entry); err != nil {
// Treat a missing nfd entry as an empty digest.
// We do this to avoid errors when checking peers that have not upgraded for fulu.
return [4]byte{}
}
return digest
}
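Reading and writing the nfd entry is plain ENR key/value handling, so it can be shown in isolation with go-ethereum's enode/enr packages, which this code already depends on. In the sketch below the key name "nfd" is taken from the spec excerpt above, the zero-digest fallback mirrors the tolerant behavior of the helper, and everything else is illustrative.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/enr"
)

const nfdKey = "nfd" // key name taken from the spec excerpt above

// setNFD writes the next fork digest under the nfd key on the local node's record.
func setNFD(node *enode.LocalNode, digest [4]byte) {
	node.Set(enr.WithEntry(nfdKey, digest[:]))
}

// readNFD loads the nfd entry from a record, returning an empty digest when absent.
func readNFD(record *enr.Record) [4]byte {
	var digest [4]byte
	if err := record.Load(enr.WithEntry(nfdKey, &digest)); err != nil {
		return [4]byte{}
	}
	return digest
}

func main() {
	db, err := enode.OpenDB("") // in-memory node database
	if err != nil {
		panic(err)
	}
	defer db.Close()
	key, err := crypto.GenerateKey()
	if err != nil {
		panic(err)
	}
	node := enode.NewLocalNode(db, key)
	setNFD(node, [4]byte{0xde, 0xad, 0xbe, 0xef})
	fmt.Printf("%#x\n", readNFD(node.Node().Record()))
}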
|
||||
|
||||
@@ -8,254 +8,152 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
|
||||
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/network/forks"
|
||||
pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
"github.com/sirupsen/logrus"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
func TestStartDiscv5_DifferentForkDigests(t *testing.T) {
|
||||
const port = 2000
|
||||
func TestCompareForkENR(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
|
||||
ipAddr, pkey := createAddrAndPrivKey(t)
|
||||
genesisTime := time.Now()
|
||||
genesisValidatorsRoot := make([]byte, fieldparams.RootLength)
|
||||
db := testDB.SetupDB(t)
|
||||
db, err := enode.OpenDB("")
|
||||
assert.NoError(t, err)
|
||||
_, k := createAddrAndPrivKey(t)
|
||||
clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot)
|
||||
current := params.GetNetworkScheduleEntry(clock.CurrentEpoch())
|
||||
next := params.NextNetworkScheduleEntry(clock.CurrentEpoch())
|
||||
self := enode.NewLocalNode(db, k)
|
||||
require.NoError(t, updateENR(self, current, next))
|
||||
|
||||
s := &Service{
|
||||
cfg: &Config{
|
||||
UDPPort: uint(port),
|
||||
StateNotifier: &mock.MockStateNotifier{},
|
||||
PingInterval: testPingInterval,
|
||||
DisableLivenessCheck: true,
|
||||
DB: db,
|
||||
cases := []struct {
|
||||
name string
|
||||
expectErr error
|
||||
expectLog string
|
||||
node func(t *testing.T) *enode.Node
|
||||
}{
|
||||
{
|
||||
name: "match",
|
||||
node: func(t *testing.T) *enode.Node {
|
||||
// Create a peer with the same current fork digest and next fork version/epoch.
|
||||
peer := enode.NewLocalNode(db, k)
|
||||
require.NoError(t, updateENR(peer, current, next))
|
||||
return peer.Node()
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "current digest mismatch",
|
||||
node: func(t *testing.T) *enode.Node {
|
||||
// Create a peer with the same current fork digest and next fork version/epoch.
|
||||
peer := enode.NewLocalNode(db, k)
|
||||
testDigest := [4]byte{0xFF, 0xFF, 0xFF, 0xFF}
|
||||
require.NotEqual(t, current.ForkDigest, testDigest, "ensure test fork digest is unique")
|
||||
currentCopy := current
|
||||
currentCopy.ForkDigest = testDigest
|
||||
require.NoError(t, updateENR(peer, currentCopy, next))
|
||||
return peer.Node()
|
||||
},
|
||||
expectErr: errCurrentDigestMismatch,
|
||||
},
|
||||
{
|
||||
name: "next_fork_epoch match, next_fork_version mismatch",
|
||||
node: func(t *testing.T) *enode.Node {
|
||||
// Create a peer with the same current fork digest and next fork version/epoch.
|
||||
peer := enode.NewLocalNode(db, k)
|
||||
testVersion := [4]byte{0xFF, 0xFF, 0xFF, 0xFF}
|
||||
require.NotEqual(t, next.ForkVersion, testVersion, "ensure test fork version is unique")
|
||||
nextCopy := next
|
||||
nextCopy.ForkVersion = testVersion
|
||||
require.NoError(t, updateENR(peer, current, nextCopy))
|
||||
return peer.Node()
|
||||
},
|
||||
expectErr: errNextVersionMismatch,
|
||||
},
|
||||
{
|
||||
name: "next fork epoch mismatch, next fork digest mismatch",
|
||||
node: func(t *testing.T) *enode.Node {
|
||||
// Create a peer with the same current fork digest and next fork version/epoch.
|
||||
peer := enode.NewLocalNode(db, k)
|
||||
nextCopy := next
|
||||
// next epoch does not match, and neither does the next fork digest.
|
||||
nextCopy.Epoch = nextCopy.Epoch + 1
|
||||
nfd := [4]byte{0xFF, 0xFF, 0xFF, 0xFF}
|
||||
require.NotEqual(t, next.ForkDigest, nfd)
|
||||
//peer.Set(enr.WithEntry(nfdEnrKey, nfd[:]))
|
||||
nextCopy.ForkDigest = nfd
|
||||
require.NoError(t, updateENR(peer, current, nextCopy))
|
||||
return peer.Node()
|
||||
},
|
||||
// no error because we allow a different next fork version / digest if the next fork epoch does not match
|
||||
},
|
||||
{
|
||||
name: "next fork epoch -match-, next fork digest mismatch",
|
||||
node: func(t *testing.T) *enode.Node {
|
||||
peer := enode.NewLocalNode(db, k)
|
||||
nextCopy := next
|
||||
nfd := [4]byte{0xFF, 0xFF, 0xFF, 0xFF}
|
||||
// next epoch *does match*, but the next fork digest doesn't - so we should get an error.
|
||||
require.NotEqual(t, next.ForkDigest, nfd)
|
||||
nextCopy.ForkDigest = nfd
|
||||
//peer.Set(enr.WithEntry(nfdEnrKey, nfd[:]))
|
||||
require.NoError(t, updateENR(peer, current, nextCopy))
|
||||
return peer.Node()
|
||||
},
|
||||
expectErr: errNextDigestMismatch,
|
||||
},
|
||||
genesisTime: genesisTime,
|
||||
genesisValidatorsRoot: genesisValidatorsRoot,
|
||||
custodyInfo: &custodyInfo{},
|
||||
}
|
||||
bootListener, err := s.createListener(ipAddr, pkey)
|
||||
require.NoError(t, err)
|
||||
defer bootListener.Close()
|
||||
|
||||
// Allow bootnode's table to have its initial refresh. This allows
|
||||
// inbound nodes to be added in.
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
bootNode := bootListener.Self()
|
||||
cfg := &Config{
|
||||
Discv5BootStrapAddrs: []string{bootNode.String()},
|
||||
UDPPort: uint(port),
|
||||
StateNotifier: &mock.MockStateNotifier{},
|
||||
PingInterval: testPingInterval,
|
||||
DisableLivenessCheck: true,
|
||||
DB: db,
|
||||
}
|
||||
|
||||
var listeners []*listenerWrapper
|
||||
for i := 1; i <= 5; i++ {
|
||||
port := 3000 + i
|
||||
cfg.UDPPort = uint(port)
|
||||
ipAddr, pkey := createAddrAndPrivKey(t)
|
||||
|
||||
// We give every peer a different genesis validators root, which
|
||||
// will cause each peer to have a different ForkDigest, preventing
|
||||
// them from connecting according to our discovery rules for Ethereum consensus.
|
||||
root := make([]byte, 32)
|
||||
copy(root, strconv.Itoa(port))
|
||||
s = &Service{
|
||||
cfg: cfg,
|
||||
genesisTime: genesisTime,
|
||||
genesisValidatorsRoot: root,
|
||||
custodyInfo: &custodyInfo{},
|
||||
}
|
||||
listener, err := s.startDiscoveryV5(ipAddr, pkey)
|
||||
assert.NoError(t, err, "Could not start discovery for node")
|
||||
listeners = append(listeners, listener)
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
peer := c.node(t)
|
||||
err := compareForkENR(self.Node().Record(), peer.Record())
|
||||
if c.expectErr != nil {
|
||||
require.ErrorIs(t, err, c.expectErr, "Expected error to match")
|
||||
} else {
|
||||
require.NoError(t, err, "Expected no error comparing fork ENRs")
|
||||
}
|
||||
})
|
||||
}
|
||||
defer func() {
|
||||
// Close down all peers.
|
||||
for _, listener := range listeners {
|
||||
listener.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
// Wait for the nodes to have their local routing tables to be populated with the other nodes
|
||||
time.Sleep(discoveryWaitTime)
|
||||
|
||||
lastListener := listeners[len(listeners)-1]
|
||||
nodes := lastListener.Lookup(bootNode.ID())
|
||||
if len(nodes) < 4 {
|
||||
t.Errorf("The node's local table doesn't have the expected number of nodes. "+
|
||||
"Expected more than or equal to %d but got %d", 4, len(nodes))
|
||||
}
|
||||
|
||||
// Now, we start a new p2p service. It should have no peers aside from the
|
||||
// bootnode given all nodes provided by discv5 will have different fork digests.
|
||||
cfg.UDPPort = 14000
|
||||
cfg.TCPPort = 14001
|
||||
cfg.MaxPeers = 30
|
||||
s, err = NewService(t.Context(), cfg)
|
||||
require.NoError(t, err)
|
||||
s.genesisTime = genesisTime
|
||||
s.genesisValidatorsRoot = make([]byte, 32)
|
||||
s.dv5Listener = lastListener
|
||||
|
||||
addrs := make([]ma.Multiaddr, 0)
|
||||
|
||||
for _, node := range nodes {
|
||||
if s.filterPeer(node) {
|
||||
nodeAddrs, err := retrieveMultiAddrsFromNode(node)
|
||||
require.NoError(t, err)
|
||||
addrs = append(addrs, nodeAddrs...)
|
||||
}
|
||||
}
|
||||
|
||||
// We should not have valid peers if the fork digest mismatched.
|
||||
assert.Equal(t, 0, len(addrs), "Expected 0 valid peers")
|
||||
require.NoError(t, s.Stop())
|
||||
}
|
||||
|
||||
func TestStartDiscv5_SameForkDigests_DifferentNextForkData(t *testing.T) {
|
||||
const port = 2000
|
||||
|
||||
func TestNfdSetAndLoad(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
hook := logTest.NewGlobal()
|
||||
logrus.SetLevel(logrus.TraceLevel)
|
||||
ipAddr, pkey := createAddrAndPrivKey(t)
|
||||
genesisTime := time.Now()
|
||||
genesisValidatorsRoot := make([]byte, 32)
|
||||
db := testDB.SetupDB(t)
|
||||
|
||||
s := &Service{
|
||||
cfg: &Config{UDPPort: uint(port), PingInterval: testPingInterval, DisableLivenessCheck: true, DB: db},
|
||||
genesisTime: genesisTime,
|
||||
genesisValidatorsRoot: genesisValidatorsRoot,
|
||||
custodyInfo: &custodyInfo{},
|
||||
}
|
||||
bootListener, err := s.createListener(ipAddr, pkey)
|
||||
require.NoError(t, err)
|
||||
defer bootListener.Close()
|
||||
|
||||
// Allow bootnode's table to have its initial refresh. This allows
|
||||
// inbound nodes to be added in.
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
bootNode := bootListener.Self()
|
||||
cfg := &Config{
|
||||
Discv5BootStrapAddrs: []string{bootNode.String()},
|
||||
UDPPort: uint(port),
|
||||
PingInterval: testPingInterval,
|
||||
DisableLivenessCheck: true,
|
||||
DB: db,
|
||||
}
|
||||
|
||||
var listeners []*listenerWrapper
|
||||
for i := 1; i <= 5; i++ {
|
||||
port := 3000 + i
|
||||
cfg.UDPPort = uint(port)
|
||||
ipAddr, pkey := createAddrAndPrivKey(t)
|
||||
c := params.BeaconConfig().Copy()
|
||||
nextForkEpoch := primitives.Epoch(i)
|
||||
c.ForkVersionSchedule[[4]byte{'A', 'B', 'C', 'D'}] = nextForkEpoch
|
||||
params.OverrideBeaconConfig(c)
|
||||
|
||||
// We give every peer a different genesis validators root, which
|
||||
// will cause each peer to have a different ForkDigest, preventing
|
||||
// them from connecting according to our discovery rules for Ethereum consensus.
|
||||
s = &Service{
|
||||
cfg: cfg,
|
||||
genesisTime: genesisTime,
|
||||
genesisValidatorsRoot: genesisValidatorsRoot,
|
||||
custodyInfo: &custodyInfo{},
|
||||
}
|
||||
listener, err := s.startDiscoveryV5(ipAddr, pkey)
|
||||
assert.NoError(t, err, "Could not start discovery for node")
|
||||
listeners = append(listeners, listener)
|
||||
}
|
||||
defer func() {
|
||||
// Close down all peers.
|
||||
for _, listener := range listeners {
|
||||
listener.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
// Wait for the nodes to have their local routing tables to be populated with the other nodes
|
||||
time.Sleep(discoveryWaitTime)
|
||||
|
||||
lastListener := listeners[len(listeners)-1]
|
||||
nodes := lastListener.Lookup(bootNode.ID())
|
||||
if len(nodes) < 4 {
|
||||
t.Errorf("The node's local table doesn't have the expected number of nodes. "+
|
||||
"Expected more than or equal to %d but got %d", 4, len(nodes))
|
||||
}
|
||||
|
||||
// Now, we start a new p2p service. It should have no peers aside from the
|
||||
// bootnode given all nodes provided by discv5 will have different fork digests.
|
||||
cfg.UDPPort = 14000
|
||||
cfg.TCPPort = 14001
|
||||
cfg.MaxPeers = 30
|
||||
cfg.StateNotifier = &mock.MockStateNotifier{}
|
||||
s, err = NewService(t.Context(), cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
s.genesisTime = genesisTime
|
||||
s.genesisValidatorsRoot = make([]byte, 32)
|
||||
s.dv5Listener = lastListener
|
||||
addrs := make([]ma.Multiaddr, 0, len(nodes))
|
||||
|
||||
for _, node := range nodes {
|
||||
if s.filterPeer(node) {
|
||||
nodeAddrs, err := retrieveMultiAddrsFromNode(node)
|
||||
require.NoError(t, err)
|
||||
addrs = append(addrs, nodeAddrs...)
|
||||
}
|
||||
}
|
||||
if len(addrs) == 0 {
|
||||
t.Error("Expected to have valid peers, got 0")
|
||||
}
|
||||
|
||||
require.LogsContain(t, hook, "Peer matches fork digest but has different next fork epoch")
|
||||
require.NoError(t, s.Stop())
|
||||
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
db, err := enode.OpenDB("")
|
||||
assert.NoError(t, err)
|
||||
_, k := createAddrAndPrivKey(t)
|
||||
clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot)
|
||||
current := params.GetNetworkScheduleEntry(clock.CurrentEpoch())
|
||||
next := params.NextNetworkScheduleEntry(clock.CurrentEpoch())
|
||||
next.ForkDigest = [4]byte{0xFF, 0xFF, 0xFF, 0xFF} // Ensure a unique digest for testing.
|
||||
self := enode.NewLocalNode(db, k)
|
||||
require.NoError(t, updateENR(self, current, next))
|
||||
n := nfd(self.Node().Record())
|
||||
assert.Equal(t, next.ForkDigest, n, "Expected nfd to match next fork digest")
|
||||
}
|
||||
|
||||
func TestDiscv5_AddRetrieveForkEntryENR(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
c := params.BeaconConfig().Copy()
|
||||
c.ForkVersionSchedule = map[[4]byte]primitives.Epoch{
|
||||
bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion): 0,
|
||||
{0, 0, 0, 1}: 1,
|
||||
}
|
||||
nextForkEpoch := primitives.Epoch(1)
|
||||
nextForkVersion := []byte{0, 0, 0, 1}
|
||||
params.OverrideBeaconConfig(c)
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
|
||||
genesisTime := time.Now()
|
||||
genesisValidatorsRoot := make([]byte, 32)
|
||||
digest, err := forks.CreateForkDigest(genesisTime, make([]byte, 32))
|
||||
require.NoError(t, err)
|
||||
clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot)
|
||||
current := params.GetNetworkScheduleEntry(clock.CurrentEpoch())
|
||||
next := params.NextNetworkScheduleEntry(clock.CurrentEpoch())
|
||||
enrForkID := &pb.ENRForkID{
|
||||
CurrentForkDigest: digest[:],
|
||||
NextForkVersion: nextForkVersion,
|
||||
NextForkEpoch: nextForkEpoch,
|
||||
CurrentForkDigest: current.ForkDigest[:],
|
||||
NextForkVersion: next.ForkVersion[:],
|
||||
NextForkEpoch: next.Epoch,
|
||||
}
|
||||
enc, err := enrForkID.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
entry := enr.WithEntry(eth2ENRKey, enc)
|
||||
// In epoch 1 of current time, the fork version should be
|
||||
// {0, 0, 0, 1} according to the configuration override above.
|
||||
entry := enr.WithEntry(eth2EnrKey, enc)
|
||||
temp := t.TempDir()
|
||||
randNum := rand.Int()
|
||||
tempPath := path.Join(temp, strconv.Itoa(randNum))
|
||||
@@ -267,18 +165,16 @@ func TestDiscv5_AddRetrieveForkEntryENR(t *testing.T) {
|
||||
localNode := enode.NewLocalNode(db, pkey)
|
||||
localNode.Set(entry)
|
||||
|
||||
want, err := signing.ComputeForkDigest([]byte{0, 0, 0, 0}, genesisValidatorsRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
resp, err := forkEntry(localNode.Node().Record())
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, want[:], resp.CurrentForkDigest)
|
||||
assert.DeepEqual(t, nextForkVersion, resp.NextForkVersion)
|
||||
assert.Equal(t, nextForkEpoch, resp.NextForkEpoch, "Unexpected next fork epoch")
|
||||
assert.Equal(t, hexutil.Encode(current.ForkDigest[:]), hexutil.Encode(resp.CurrentForkDigest))
|
||||
assert.Equal(t, hexutil.Encode(next.ForkVersion[:]), hexutil.Encode(resp.NextForkVersion))
|
||||
assert.Equal(t, next.Epoch, resp.NextForkEpoch, "Unexpected next fork epoch")
|
||||
}
|
||||
|
||||
func TestAddForkEntry_Genesis(t *testing.T) {
|
||||
func TestAddForkEntry_NextForkVersion(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
temp := t.TempDir()
|
||||
randNum := rand.Int()
|
||||
tempPath := path.Join(temp, strconv.Itoa(randNum))
|
||||
@@ -288,17 +184,135 @@ func TestAddForkEntry_Genesis(t *testing.T) {
|
||||
db, err := enode.OpenDB("")
|
||||
require.NoError(t, err)
|
||||
|
||||
bCfg := params.MainnetConfig()
|
||||
bCfg.ForkVersionSchedule = map[[4]byte]primitives.Epoch{}
|
||||
bCfg.ForkVersionSchedule[bytesutil.ToBytes4(params.BeaconConfig().GenesisForkVersion)] = bCfg.GenesisEpoch
|
||||
params.OverrideBeaconConfig(bCfg)
|
||||
|
||||
localNode := enode.NewLocalNode(db, pkey)
|
||||
localNode, err = addForkEntry(localNode, time.Now().Add(10*time.Second), bytesutil.PadTo([]byte{'A', 'B', 'C', 'D'}, 32))
|
||||
clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot)
|
||||
current := params.GetNetworkScheduleEntry(clock.CurrentEpoch())
|
||||
next := params.NextNetworkScheduleEntry(clock.CurrentEpoch())
|
||||
// Add the fork entry to the local node's ENR.
|
||||
require.NoError(t, updateENR(localNode, current, next))
|
||||
fe, err := forkEntry(localNode.Node().Record())
|
||||
require.NoError(t, err)
|
||||
forkEntry, err := forkEntry(localNode.Node().Record())
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t,
|
||||
params.BeaconConfig().GenesisForkVersion, forkEntry.NextForkVersion,
|
||||
assert.Equal(t,
|
||||
hexutil.Encode(params.BeaconConfig().AltairForkVersion), hexutil.Encode(fe.NextForkVersion),
|
||||
"Wanted Next Fork Version to be equal to genesis fork version")
|
||||
|
||||
last := params.LastForkEpoch()
|
||||
current = params.GetNetworkScheduleEntry(last)
|
||||
next = params.NextNetworkScheduleEntry(last)
|
||||
require.NoError(t, updateENR(localNode, current, next))
|
||||
entry := params.NextNetworkScheduleEntry(last)
|
||||
fe, err = forkEntry(localNode.Node().Record())
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t,
|
||||
hexutil.Encode(entry.ForkVersion[:]), hexutil.Encode(fe.NextForkVersion),
|
||||
"Wanted Next Fork Version to be equal to last entry in schedule")
|
||||
|
||||
}
|
||||
|
||||
func TestUpdateENR_FuluForkDigest(t *testing.T) {
|
||||
setupTest := func(t *testing.T, fuluEnabled bool) (*enode.LocalNode, func()) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
if fuluEnabled {
|
||||
cfg.FuluForkEpoch = 100
|
||||
} else {
|
||||
cfg.FuluForkEpoch = cfg.FarFutureEpoch
|
||||
}
|
||||
cfg.FuluForkVersion = []byte{5, 0, 0, 0}
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
cfg.InitializeForkSchedule()
|
||||
|
||||
pkey, err := privKey(&Config{DataDir: t.TempDir()})
|
||||
require.NoError(t, err, "Could not get private key")
|
||||
db, err := enode.OpenDB("")
|
||||
require.NoError(t, err)
|
||||
|
||||
localNode := enode.NewLocalNode(db, pkey)
|
||||
cleanup := func() {
|
||||
db.Close()
|
||||
}
|
||||
|
||||
return localNode, cleanup
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
fuluEnabled bool
|
||||
currentEntry params.NetworkScheduleEntry
|
||||
nextEntry params.NetworkScheduleEntry
|
||||
validateNFD func(t *testing.T, localNode *enode.LocalNode, nextEntry params.NetworkScheduleEntry)
|
||||
}{
|
||||
{
|
||||
name: "different digests sets nfd to next digest",
|
||||
fuluEnabled: true,
|
||||
currentEntry: params.NetworkScheduleEntry{
|
||||
Epoch: 50,
|
||||
ForkDigest: [4]byte{1, 2, 3, 4},
|
||||
ForkVersion: [4]byte{1, 0, 0, 0},
|
||||
},
|
||||
nextEntry: params.NetworkScheduleEntry{
|
||||
Epoch: 100,
|
||||
ForkDigest: [4]byte{5, 6, 7, 8}, // Different from current
|
||||
ForkVersion: [4]byte{2, 0, 0, 0},
|
||||
},
|
||||
validateNFD: func(t *testing.T, localNode *enode.LocalNode, nextEntry params.NetworkScheduleEntry) {
|
||||
var nfdValue []byte
|
||||
err := localNode.Node().Record().Load(enr.WithEntry(nfdEnrKey, &nfdValue))
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, nextEntry.ForkDigest[:], nfdValue, "nfd entry should equal next fork digest")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "same digests sets nfd to empty",
|
||||
fuluEnabled: true,
|
||||
currentEntry: params.NetworkScheduleEntry{
|
||||
Epoch: 50,
|
||||
ForkDigest: [4]byte{1, 2, 3, 4},
|
||||
ForkVersion: [4]byte{1, 0, 0, 0},
|
||||
},
|
||||
nextEntry: params.NetworkScheduleEntry{
|
||||
Epoch: 100,
|
||||
ForkDigest: [4]byte{1, 2, 3, 4}, // Same as current
|
||||
ForkVersion: [4]byte{2, 0, 0, 0},
|
||||
},
|
||||
validateNFD: func(t *testing.T, localNode *enode.LocalNode, nextEntry params.NetworkScheduleEntry) {
|
||||
var nfdValue []byte
|
||||
err := localNode.Node().Record().Load(enr.WithEntry(nfdEnrKey, &nfdValue))
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, make([]byte, len(nextEntry.ForkDigest)), nfdValue, "nfd entry should be empty bytes when digests are same")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "fulu disabled does not add nfd field",
|
||||
fuluEnabled: false,
|
||||
currentEntry: params.NetworkScheduleEntry{
|
||||
Epoch: 50,
|
||||
ForkDigest: [4]byte{1, 2, 3, 4},
|
||||
ForkVersion: [4]byte{1, 0, 0, 0},
|
||||
},
|
||||
nextEntry: params.NetworkScheduleEntry{
|
||||
Epoch: 100,
|
||||
ForkDigest: [4]byte{5, 6, 7, 8}, // Different from current
|
||||
ForkVersion: [4]byte{2, 0, 0, 0},
|
||||
},
|
||||
validateNFD: func(t *testing.T, localNode *enode.LocalNode, nextEntry params.NetworkScheduleEntry) {
|
||||
var nfdValue []byte
|
||||
err := localNode.Node().Record().Load(enr.WithEntry(nfdEnrKey, &nfdValue))
|
||||
require.ErrorContains(t, "missing ENR key", err, "nfd field should not be present when Fulu fork is disabled")
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
localNode, cleanup := setupTest(t, tt.fuluEnabled)
|
||||
defer cleanup()
|
||||
|
||||
currentEntry := tt.currentEntry
|
||||
nextEntry := tt.nextEntry
|
||||
require.NoError(t, updateENR(localNode, currentEntry, nextEntry))
|
||||
tt.validateNFD(t, localNode, nextEntry)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,27 +9,26 @@ import (
|
||||
// updates the node's discovery service to reflect any new fork version
// changes.
func (s *Service) forkWatcher() {
// Exit early if discovery is disabled - there's no ENR to update
if s.dv5Listener == nil {
log.Debug("Discovery disabled, exiting fork watcher")
return
}

slotTicker := slots.NewSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
var scheduleEntry params.NetworkScheduleEntry
for {
select {
case currSlot := <-slotTicker.C():
currEpoch := slots.ToEpoch(currSlot)
if currEpoch == params.BeaconConfig().AltairForkEpoch ||
currEpoch == params.BeaconConfig().BellatrixForkEpoch ||
currEpoch == params.BeaconConfig().CapellaForkEpoch ||
currEpoch == params.BeaconConfig().DenebForkEpoch ||
currEpoch == params.BeaconConfig().ElectraForkEpoch ||
currEpoch == params.BeaconConfig().FuluForkEpoch {
// If we are in the fork epoch, we update our ENR with
// the updated fork digest. This is done repeatedly
// over the epoch, which might be slightly wasteful
// but is fine nonetheless.
if s.dv5Listener != nil { // make sure it's not a local network
_, err := addForkEntry(s.dv5Listener.LocalNode(), s.genesisTime, s.genesisValidatorsRoot)
if err != nil {
log.WithError(err).Error("Could not add fork entry")
}
currentEpoch := slots.ToEpoch(currSlot)
newEntry := params.GetNetworkScheduleEntry(currentEpoch)
if newEntry.ForkDigest != scheduleEntry.ForkDigest {
nextEntry := params.NextNetworkScheduleEntry(currentEpoch)
if err := updateENR(s.dv5Listener.LocalNode(), newEntry, nextEntry); err != nil {
log.WithFields(newEntry.LogFields()).WithError(err).Error("Could not add fork entry")
continue // don't replace scheduleEntry until this succeeds
}
scheduleEntry = newEntry
}
case <-s.ctx.Done():
log.Debug("Context closed, exiting goroutine")
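The rewritten watcher replaces per-fork epoch comparisons with a simpler invariant: recompute the current schedule entry on every slot tick and rewrite the ENR only when the fork digest actually changes, keeping the cached entry unchanged when the update fails so that it is retried. A schematic version of that loop, with plain stand-ins for the ticker, schedule lookup, and ENR update, might look like this:

package main

import (
	"context"
	"fmt"
	"time"
)

type entry struct {
	Epoch      uint64
	ForkDigest [4]byte
}

// watchForks re-evaluates the schedule on every tick and applies an update only when
// the digest changes; on failure the cached entry is left untouched so the update retries.
func watchForks(ctx context.Context, tick <-chan time.Time, lookup func(time.Time) entry, update func(entry) error) {
	var cached entry
	for {
		select {
		case now := <-tick:
			current := lookup(now)
			if current.ForkDigest == cached.ForkDigest {
				continue
			}
			if err := update(current); err != nil {
				fmt.Println("could not update ENR:", err) // retry on the next tick
				continue
			}
			cached = current
		case <-ctx.Done():
			return
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	ticks := time.NewTicker(10 * time.Millisecond)
	defer ticks.Stop()
	lookup := func(time.Time) entry { return entry{Epoch: 1, ForkDigest: [4]byte{1, 2, 3, 4}} }
	watchForks(ctx, ticks.C, lookup, func(e entry) error {
		fmt.Printf("ENR updated to %#x\n", e.ForkDigest)
		return nil
	})
}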
|
||||
|
||||
@@ -51,7 +51,7 @@ type (
|
||||
BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.BlobSidecar) error
|
||||
BroadcastLightClientOptimisticUpdate(ctx context.Context, update interfaces.LightClientOptimisticUpdate) error
|
||||
BroadcastLightClientFinalityUpdate(ctx context.Context, update interfaces.LightClientFinalityUpdate) error
|
||||
BroadcastDataColumn(root [fieldparams.RootLength]byte, columnSubnet uint64, dataColumnSidecar *ethpb.DataColumnSidecar) error
|
||||
BroadcastDataColumnSidecar(root [fieldparams.RootLength]byte, columnSubnet uint64, dataColumnSidecar *ethpb.DataColumnSidecar) error
|
||||
}
|
||||
|
||||
// SetStreamHandler configures p2p to handle streams of a certain topic ID.
|
||||
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/crypto/hash"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/math"
|
||||
"github.com/OffchainLabs/prysm/v6/network/forks"
|
||||
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
|
||||
)
|
||||
|
||||
@@ -39,7 +38,7 @@ func MsgID(genesisValidatorsRoot []byte, pmsg *pubsubpb.Message) string {
|
||||
copy(msg, "invalid")
|
||||
return bytesutil.UnsafeCastToString(msg)
|
||||
}
|
||||
_, fEpoch, err := forks.RetrieveForkDataFromDigest(digest, genesisValidatorsRoot)
|
||||
_, fEpoch, err := params.ForkDataFromDigest(digest)
|
||||
if err != nil {
|
||||
// Impossible condition that should
|
||||
// never be hit.
|
||||
|
||||
@@ -7,10 +7,10 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/crypto/hash"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/network/forks"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
"github.com/golang/snappy"
|
||||
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
|
||||
@@ -18,28 +18,27 @@ import (
|
||||
|
||||
func TestMsgID_HashesCorrectly(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
genesisValidatorsRoot := bytesutil.PadTo([]byte{'A'}, 32)
|
||||
d, err := forks.CreateForkDigest(time.Now(), genesisValidatorsRoot)
|
||||
assert.NoError(t, err)
|
||||
clock := startup.NewClock(time.Now(), bytesutil.ToBytes32([]byte{'A'}))
|
||||
valRoot := clock.GenesisValidatorsRoot()
|
||||
d := params.ForkDigest(clock.CurrentEpoch())
|
||||
tpc := fmt.Sprintf(p2p.BlockSubnetTopicFormat, d)
|
||||
invalidSnappy := [32]byte{'J', 'U', 'N', 'K'}
|
||||
pMsg := &pubsubpb.Message{Data: invalidSnappy[:], Topic: &tpc}
|
||||
hashedData := hash.Hash(append(params.BeaconConfig().MessageDomainInvalidSnappy[:], pMsg.Data...))
|
||||
msgID := string(hashedData[:20])
|
||||
assert.Equal(t, msgID, p2p.MsgID(genesisValidatorsRoot, pMsg), "Got incorrect msg id")
|
||||
assert.Equal(t, msgID, p2p.MsgID(valRoot[:], pMsg), "Got incorrect msg id")
|
||||
|
||||
validObj := [32]byte{'v', 'a', 'l', 'i', 'd'}
|
||||
enc := snappy.Encode(nil, validObj[:])
|
||||
nMsg := &pubsubpb.Message{Data: enc, Topic: &tpc}
|
||||
hashedData = hash.Hash(append(params.BeaconConfig().MessageDomainValidSnappy[:], validObj[:]...))
|
||||
msgID = string(hashedData[:20])
|
||||
assert.Equal(t, msgID, p2p.MsgID(genesisValidatorsRoot, nMsg), "Got incorrect msg id")
|
||||
assert.Equal(t, msgID, p2p.MsgID(valRoot[:], nMsg), "Got incorrect msg id")
|
||||
}
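The assertions above pin down the phase0 message-id scheme: SHA-256 over a 4-byte message domain concatenated with the payload, truncated to 20 bytes (Altair and later additionally mix in the topic length and topic, as the following tests show). A standalone sketch of the base computation, using an illustrative domain value rather than the config constants:

package main

import (
	"crypto/sha256"
	"fmt"
)

// msgID returns the first 20 bytes of sha256(domain || payload), the scheme the
// test above exercises for both the valid- and invalid-snappy domains.
func msgID(domain [4]byte, payload []byte) []byte {
	h := sha256.Sum256(append(domain[:], payload...))
	return h[:20]
}

func main() {
	// The domain value is illustrative; the real ones come from the beacon config
	// (MessageDomainValidSnappy / MessageDomainInvalidSnappy).
	validDomain := [4]byte{0x01, 0x00, 0x00, 0x00}
	fmt.Printf("%x\n", msgID(validDomain, []byte("decoded payload")))
}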
|
||||
|
||||
func TestMessageIDFunction_HashesCorrectlyAltair(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
genesisValidatorsRoot := bytesutil.PadTo([]byte{'A'}, 32)
|
||||
d, err := signing.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, genesisValidatorsRoot)
|
||||
d, err := signing.ComputeForkDigest(params.BeaconConfig().AltairForkVersion, params.BeaconConfig().GenesisValidatorsRoot[:])
|
||||
assert.NoError(t, err)
|
||||
tpc := fmt.Sprintf(p2p.BlockSubnetTopicFormat, d)
|
||||
topicLen := uint64(len(tpc))
|
||||
@@ -52,7 +51,7 @@ func TestMessageIDFunction_HashesCorrectlyAltair(t *testing.T) {
|
||||
combinedObj = append(combinedObj, pMsg.Data...)
|
||||
hashedData := hash.Hash(combinedObj)
|
||||
msgID := string(hashedData[:20])
|
||||
assert.Equal(t, msgID, p2p.MsgID(genesisValidatorsRoot, pMsg), "Got incorrect msg id")
|
||||
assert.Equal(t, msgID, p2p.MsgID(params.BeaconConfig().GenesisValidatorsRoot[:], pMsg), "Got incorrect msg id")
|
||||
|
||||
validObj := [32]byte{'v', 'a', 'l', 'i', 'd'}
|
||||
enc := snappy.Encode(nil, validObj[:])
|
||||
@@ -63,13 +62,12 @@ func TestMessageIDFunction_HashesCorrectlyAltair(t *testing.T) {
|
||||
combinedObj = append(combinedObj, validObj[:]...)
|
||||
hashedData = hash.Hash(combinedObj)
|
||||
msgID = string(hashedData[:20])
|
||||
assert.Equal(t, msgID, p2p.MsgID(genesisValidatorsRoot, nMsg), "Got incorrect msg id")
|
||||
assert.Equal(t, msgID, p2p.MsgID(params.BeaconConfig().GenesisValidatorsRoot[:], nMsg), "Got incorrect msg id")
|
||||
}
|
||||
|
||||
func TestMessageIDFunction_HashesCorrectlyBellatrix(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
genesisValidatorsRoot := bytesutil.PadTo([]byte{'A'}, 32)
|
||||
d, err := signing.ComputeForkDigest(params.BeaconConfig().BellatrixForkVersion, genesisValidatorsRoot)
|
||||
d, err := signing.ComputeForkDigest(params.BeaconConfig().BellatrixForkVersion, params.BeaconConfig().GenesisValidatorsRoot[:])
|
||||
assert.NoError(t, err)
|
||||
tpc := fmt.Sprintf(p2p.BlockSubnetTopicFormat, d)
|
||||
topicLen := uint64(len(tpc))
|
||||
@@ -82,7 +80,7 @@ func TestMessageIDFunction_HashesCorrectlyBellatrix(t *testing.T) {
|
||||
combinedObj = append(combinedObj, pMsg.Data...)
|
||||
hashedData := hash.Hash(combinedObj)
|
||||
msgID := string(hashedData[:20])
|
||||
assert.Equal(t, msgID, p2p.MsgID(genesisValidatorsRoot, pMsg), "Got incorrect msg id")
|
||||
assert.Equal(t, msgID, p2p.MsgID(params.BeaconConfig().GenesisValidatorsRoot[:], pMsg), "Got incorrect msg id")
|
||||
|
||||
validObj := [32]byte{'v', 'a', 'l', 'i', 'd'}
|
||||
enc := snappy.Encode(nil, validObj[:])
|
||||
@@ -93,7 +91,7 @@ func TestMessageIDFunction_HashesCorrectlyBellatrix(t *testing.T) {
|
||||
combinedObj = append(combinedObj, validObj[:]...)
|
||||
hashedData = hash.Hash(combinedObj)
|
||||
msgID = string(hashedData[:20])
|
||||
assert.Equal(t, msgID, p2p.MsgID(genesisValidatorsRoot, nMsg), "Got incorrect msg id")
|
||||
assert.Equal(t, msgID, p2p.MsgID(params.BeaconConfig().GenesisValidatorsRoot[:], nMsg), "Got incorrect msg id")
|
||||
}
|
||||
|
||||
func TestMsgID_WithNilTopic(t *testing.T) {
|
||||
|
||||
@@ -42,7 +42,7 @@ func TestScorers_Gossip_Score(t *testing.T) {
|
||||
},
|
||||
check: func(scorer *scorers.GossipScorer) {
|
||||
assert.Equal(t, 10.0, scorer.Score("peer1"), "Unexpected score")
|
||||
assert.Equal(t, nil, scorer.IsBadPeer("peer1"), "Unexpected bad peer")
|
||||
assert.NoError(t, scorer.IsBadPeer("peer1"), "Unexpected bad peer")
|
||||
_, _, topicMap, err := scorer.GossipData("peer1")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(100), topicMap["a"].TimeInMesh, "incorrect time in mesh")
|
||||
|
||||
@@ -40,7 +40,7 @@ const (
|
||||
rSubD = 8 // random gossip target
|
||||
)
|
||||
|
||||
var errInvalidTopic = errors.New("invalid topic format")
|
||||
var ErrInvalidTopic = errors.New("invalid topic format")
|
||||
|
||||
// Specifies the fixed size context length.
|
||||
const digestLength = 4
|
||||
@@ -219,12 +219,12 @@ func convertTopicScores(topicMap map[string]*pubsub.TopicScoreSnapshot) map[stri
|
||||
func ExtractGossipDigest(topic string) ([4]byte, error) {
|
||||
// Ensure the topic prefix is correct.
|
||||
if len(topic) < len(gossipTopicPrefix)+1 || topic[:len(gossipTopicPrefix)] != gossipTopicPrefix {
|
||||
return [4]byte{}, errInvalidTopic
|
||||
return [4]byte{}, ErrInvalidTopic
|
||||
}
|
||||
start := len(gossipTopicPrefix)
|
||||
end := strings.Index(topic[start:], "/")
|
||||
if end == -1 { // Ensure a topic suffix exists.
|
||||
return [4]byte{}, errInvalidTopic
|
||||
return [4]byte{}, ErrInvalidTopic
|
||||
}
|
||||
end += start
|
||||
strDigest := topic[start:end]
|
||||
|
||||
@@ -1,15 +1,16 @@
|
||||
package p2p
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/network/forks"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
@@ -32,81 +33,76 @@ var _ pubsub.SubscriptionFilter = (*Service)(nil)
|
||||
// (Note: BlobSidecar is not included in this list since it is superseded by DataColumnSidecar)
|
||||
const pubsubSubscriptionRequestLimit = 500
|
||||
|
||||
func (s *Service) setAllForkDigests() {
entries := params.SortedNetworkScheduleEntries()
s.allForkDigests = make(map[[4]byte]struct{}, len(entries))
for _, entry := range entries {
s.allForkDigests[entry.ForkDigest] = struct{}{}
}
}
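Precomputing every scheduled fork digest into a set once at startup is what lets the new checkSubscribable validate a topic's digest with a single map lookup, instead of recomputing a digest per fork as the old CanSubscribe did. A minimal sketch of building and querying such a set; the digest values are illustrative.

package main

import (
	"encoding/hex"
	"fmt"
)

// buildDigestSet collects the fork digest of every schedule entry into a set
// so that topic digests can be validated with a single lookup.
func buildDigestSet(digests [][4]byte) map[[4]byte]struct{} {
	set := make(map[[4]byte]struct{}, len(digests))
	for _, d := range digests {
		set[d] = struct{}{}
	}
	return set
}

// knownDigest decodes the hex digest segment of a gossip topic and checks membership.
func knownDigest(set map[[4]byte]struct{}, hexDigest string) bool {
	raw, err := hex.DecodeString(hexDigest)
	if err != nil || len(raw) != 4 {
		return false
	}
	var d [4]byte
	copy(d[:], raw)
	_, ok := set[d]
	return ok
}

func main() {
	set := buildDigestSet([][4]byte{{0x6a, 0x95, 0xa1, 0xa9}, {0xaf, 0xca, 0xba, 0x0e}})
	fmt.Println(knownDigest(set, "6a95a1a9")) // true
	fmt.Println(knownDigest(set, "deadbeef")) // false
}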
|
||||
|
||||
var (
|
||||
errNotReadyToSubscribe = fmt.Errorf("not ready to subscribe, service is not initialized")
|
||||
errMissingLeadingSlash = fmt.Errorf("topic is missing leading slash")
|
||||
errTopicMissingProtocolVersion = fmt.Errorf("topic is missing protocol version (eth2)")
|
||||
errTopicPathWrongPartCount = fmt.Errorf("topic path has wrong part count")
|
||||
errDigestInvalid = fmt.Errorf("digest is invalid")
|
||||
errDigestUnexpected = fmt.Errorf("digest is unexpected")
|
||||
errSnappySuffixMissing = fmt.Errorf("snappy suffix is missing")
|
||||
errTopicNotFound = fmt.Errorf("topic not found in gossip topic mappings")
|
||||
)
|
||||
|
||||
// CanSubscribe returns true if the topic is of interest and we could subscribe to it.
|
||||
func (s *Service) CanSubscribe(topic string) bool {
|
||||
if !s.isInitialized() {
|
||||
if err := s.checkSubscribable(topic); err != nil {
|
||||
if !errors.Is(err, errNotReadyToSubscribe) {
|
||||
logrus.WithError(err).WithField("topic", topic).Debug("CanSubscribe failed")
|
||||
}
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (s *Service) checkSubscribable(topic string) error {
|
||||
if !s.isInitialized() {
|
||||
return errNotReadyToSubscribe
|
||||
}
|
||||
parts := strings.Split(topic, "/")
|
||||
if len(parts) != 5 {
|
||||
return false
|
||||
return errTopicPathWrongPartCount
|
||||
}
|
||||
// The topic must start with a slash, which means the first part will be empty.
|
||||
if parts[0] != "" {
|
||||
return false
|
||||
return errMissingLeadingSlash
|
||||
}
|
||||
if parts[1] != "eth2" {
|
||||
return false
|
||||
protocol, rawDigest, suffix := parts[1], parts[2], parts[4]
|
||||
if protocol != "eth2" {
|
||||
return errTopicMissingProtocolVersion
|
||||
}
|
||||
phase0ForkDigest, err := s.currentForkDigest()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not determine fork digest")
|
||||
return false
|
||||
}
|
||||
altairForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().AltairForkEpoch, s.genesisValidatorsRoot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not determine altair fork digest")
|
||||
return false
|
||||
}
|
||||
bellatrixForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().BellatrixForkEpoch, s.genesisValidatorsRoot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not determine Bellatrix fork digest")
|
||||
return false
|
||||
}
|
||||
capellaForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().CapellaForkEpoch, s.genesisValidatorsRoot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not determine Capella fork digest")
|
||||
return false
|
||||
}
|
||||
denebForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().DenebForkEpoch, s.genesisValidatorsRoot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not determine Deneb fork digest")
|
||||
return false
|
||||
}
|
||||
electraForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().ElectraForkEpoch, s.genesisValidatorsRoot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not determine Electra fork digest")
|
||||
return false
|
||||
}
|
||||
fuluForkDigest, err := forks.ForkDigestFromEpoch(params.BeaconConfig().FuluForkEpoch, s.genesisValidatorsRoot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not determine Fulu fork digest")
|
||||
return false
|
||||
}
|
||||
switch parts[2] {
|
||||
case fmt.Sprintf("%x", phase0ForkDigest):
|
||||
case fmt.Sprintf("%x", altairForkDigest):
|
||||
case fmt.Sprintf("%x", bellatrixForkDigest):
|
||||
case fmt.Sprintf("%x", capellaForkDigest):
|
||||
case fmt.Sprintf("%x", denebForkDigest):
|
||||
case fmt.Sprintf("%x", electraForkDigest):
|
||||
case fmt.Sprintf("%x", fuluForkDigest):
|
||||
default:
|
||||
return false
|
||||
if suffix != encoder.ProtocolSuffixSSZSnappy {
|
||||
return errSnappySuffixMissing
|
||||
}
|
||||
|
||||
if parts[4] != encoder.ProtocolSuffixSSZSnappy {
|
||||
return false
|
||||
var digest [4]byte
|
||||
dl, err := hex.Decode(digest[:], []byte(rawDigest))
|
||||
if err != nil {
|
||||
return errors.Wrapf(errDigestInvalid, "%v", err)
|
||||
}
|
||||
if dl != 4 {
|
||||
return errors.Wrapf(errDigestInvalid, "wrong byte length")
|
||||
}
|
||||
if _, ok := s.allForkDigests[digest]; !ok {
|
||||
return errDigestUnexpected
|
||||
}
|
||||
|
||||
// Check the incoming topic matches any topic mapping. This includes a check for part[3].
|
||||
for gt := range gossipTopicMappings {
|
||||
if _, err := scanfcheck(strings.Join(parts[0:4], "/"), gt); err == nil {
|
||||
return true
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
return errTopicNotFound
|
||||
}
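Topic validation above is a sequence of structural checks over a path of the form /eth2/<digest>/<topic>/ssz_snappy, with each failure mapped to a named error so rejected subscriptions can be logged with a precise reason. The sketch below reproduces that shape using only the standard library; the error names and the known-digest set are illustrative, and the final gossip-topic-mapping check is omitted.

package main

import (
	"encoding/hex"
	"errors"
	"fmt"
	"strings"
)

var (
	errWrongPartCount = errors.New("topic path has wrong part count")
	errNotEth2        = errors.New("topic is missing protocol version (eth2)")
	errBadDigest      = errors.New("digest is invalid")
	errUnknownDigest  = errors.New("digest is not a known fork digest")
	errBadSuffix      = errors.New("snappy suffix is missing")
)

// checkTopic validates /eth2/<digest>/<name>/ssz_snappy style gossip topics.
func checkTopic(topic string, knownDigests map[[4]byte]struct{}) error {
	parts := strings.Split(topic, "/")
	if len(parts) != 5 || parts[0] != "" {
		return errWrongPartCount
	}
	if parts[1] != "eth2" {
		return errNotEth2
	}
	if parts[4] != "ssz_snappy" {
		return errBadSuffix
	}
	raw, err := hex.DecodeString(parts[2])
	if err != nil || len(raw) != 4 {
		return errBadDigest
	}
	var digest [4]byte
	copy(digest[:], raw)
	if _, ok := knownDigests[digest]; !ok {
		return fmt.Errorf("%w: %#x", errUnknownDigest, digest)
	}
	return nil
}

func main() {
	known := map[[4]byte]struct{}{{0x01, 0x02, 0x03, 0x04}: {}}
	fmt.Println(checkTopic("/eth2/01020304/beacon_block/ssz_snappy", known)) // <nil>
	fmt.Println(checkTopic("/eth2/zzzz/beacon_block/ssz_snappy", known))     // digest is invalid
}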
|
||||
|
||||
// FilterIncomingSubscriptions is invoked for all RPCs containing subscription notifications.
|
||||
@@ -124,7 +120,22 @@ func (s *Service) FilterIncomingSubscriptions(peerID peer.ID, subs []*pubsubpb.R
|
||||
return nil, pubsub.ErrTooManySubscriptions
|
||||
}
|
||||
|
||||
return pubsub.FilterSubscriptions(subs, s.CanSubscribe), nil
|
||||
return pubsub.FilterSubscriptions(subs, s.logCheckSubscribableError(peerID)), nil
|
||||
}
|
||||
|
||||
func (s *Service) logCheckSubscribableError(pid peer.ID) func(string) bool {
|
||||
return func(topic string) bool {
|
||||
if err := s.checkSubscribable(topic); err != nil {
|
||||
if !errors.Is(err, errNotReadyToSubscribe) {
|
||||
log.WithError(err).WithFields(logrus.Fields{
|
||||
"peerID": pid,
|
||||
"topic": topic,
|
||||
}).Debug("Peer subscription rejected")
|
||||
}
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
}
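Keeping checkSubscribable as an error-returning function and adapting it to the bool predicate that the pubsub filter expects preserves the rejection reason for logging without changing the filter interface. The adapter is a small closure, sketched generically below with illustrative names.

package main

import (
	"errors"
	"fmt"
)

var errNotReady = errors.New("not ready to subscribe")

// asPredicate adapts an error-returning validator to a bool predicate while
// reporting (but not spamming) the reason for each rejection.
func asPredicate(check func(string) error, report func(topic string, err error)) func(string) bool {
	return func(topic string) bool {
		if err := check(topic); err != nil {
			if !errors.Is(err, errNotReady) {
				report(topic, err)
			}
			return false
		}
		return true
	}
}

func main() {
	check := func(topic string) error {
		if topic == "/eth2/01020304/beacon_block/ssz_snappy" {
			return nil
		}
		return errors.New("topic not found in gossip topic mappings")
	}
	pred := asPredicate(check, func(topic string, err error) { fmt.Println("rejected", topic, ":", err) })
	fmt.Println(pred("/eth2/01020304/beacon_block/ssz_snappy")) // true
	fmt.Println(pred("/bad/topic"))                             // false (and reported)
}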
|
||||
|
||||
// scanfcheck uses fmt.Sscanf to check that a given string matches expected format. This method
|
||||
|
||||
@@ -12,8 +12,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/network/forks"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
prysmTime "github.com/OffchainLabs/prysm/v6/time"
|
||||
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
|
||||
@@ -22,12 +20,11 @@ import (
|
||||
|
||||
func TestService_CanSubscribe(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
currentFork := [4]byte{0x01, 0x02, 0x03, 0x04}
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
validProtocolSuffix := "/" + encoder.ProtocolSuffixSSZSnappy
|
||||
genesisTime := time.Now()
|
||||
var valRoot [32]byte
|
||||
digest, err := forks.CreateForkDigest(genesisTime, valRoot[:])
|
||||
assert.NoError(t, err)
|
||||
clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot)
|
||||
currentFork := params.GetNetworkScheduleEntry(clock.CurrentEpoch()).ForkDigest
|
||||
digest := params.ForkDigest(clock.CurrentEpoch())
|
||||
type test struct {
|
||||
name string
|
||||
topic string
|
||||
@@ -109,12 +106,14 @@ func TestService_CanSubscribe(t *testing.T) {
|
||||
}
|
||||
tests = append(tests, tt)
|
||||
}
|
||||
valRoot := clock.GenesisValidatorsRoot()
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
s := &Service{
|
||||
genesisValidatorsRoot: valRoot[:],
|
||||
genesisTime: genesisTime,
|
||||
genesisTime: clock.GenesisTime(),
|
||||
}
|
||||
s.setAllForkDigests()
|
||||
if got := s.CanSubscribe(tt.topic); got != tt.want {
|
||||
t.Errorf("CanSubscribe(%s) = %v, want %v", tt.topic, got, tt.want)
|
||||
}
|
||||
@@ -220,11 +219,10 @@ func TestGossipTopicMapping_scanfcheck_GossipTopicFormattingSanityCheck(t *testi
|
||||
|
||||
func TestService_FilterIncomingSubscriptions(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot)
|
||||
digest := params.ForkDigest(clock.CurrentEpoch())
|
||||
validProtocolSuffix := "/" + encoder.ProtocolSuffixSSZSnappy
|
||||
genesisTime := time.Now()
|
||||
var valRoot [32]byte
|
||||
digest, err := forks.CreateForkDigest(genesisTime, valRoot[:])
|
||||
assert.NoError(t, err)
|
||||
type args struct {
|
||||
id peer.ID
|
||||
subs []*pubsubpb.RPC_SubOpts
|
||||
@@ -321,12 +319,14 @@ func TestService_FilterIncomingSubscriptions(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
valRoot := clock.GenesisValidatorsRoot()
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
s := &Service{
|
||||
genesisValidatorsRoot: valRoot[:],
|
||||
genesisTime: genesisTime,
|
||||
genesisTime: clock.GenesisTime(),
|
||||
}
|
||||
s.setAllForkDigests()
|
||||
got, err := s.FilterIncomingSubscriptions(tt.args.id, tt.args.subs)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("FilterIncomingSubscriptions() error = %v, wantErr %v", err, tt.wantErr)
|
||||
|
||||
@@ -169,7 +169,7 @@ var (
|
||||
RPCDataColumnSidecarsByRangeTopicV1: new(pb.DataColumnSidecarsByRangeRequest),
|
||||
|
||||
// DataColumnSidecarsByRoot v1 Message
|
||||
RPCDataColumnSidecarsByRootTopicV1: new(p2ptypes.DataColumnsByRootIdentifiers),
|
||||
RPCDataColumnSidecarsByRootTopicV1: p2ptypes.DataColumnsByRootIdentifiers{},
|
||||
}
|
||||
|
||||
// Maps all registered protocol prefixes.
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v6/config/features"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
@@ -63,42 +64,42 @@ var (
|
||||
)
|
||||
|
||||
// Service for managing peer to peer (p2p) networking.
|
||||
type (
|
||||
Service struct {
|
||||
started bool
|
||||
isPreGenesis bool
|
||||
pingMethod func(ctx context.Context, id peer.ID) error
|
||||
pingMethodLock sync.RWMutex
|
||||
cancel context.CancelFunc
|
||||
cfg *Config
|
||||
peers *peers.Status
|
||||
addrFilter *multiaddr.Filters
|
||||
ipLimiter *leakybucket.Collector
|
||||
privKey *ecdsa.PrivateKey
|
||||
metaData metadata.Metadata
|
||||
pubsub *pubsub.PubSub
|
||||
joinedTopics map[string]*pubsub.Topic
|
||||
joinedTopicsLock sync.RWMutex
|
||||
subnetsLock map[uint64]*sync.RWMutex
|
||||
subnetsLockLock sync.Mutex // Lock access to subnetsLock
|
||||
initializationLock sync.Mutex
|
||||
dv5Listener ListenerRebooter
|
||||
startupErr error
|
||||
ctx context.Context
|
||||
host host.Host
|
||||
genesisTime time.Time
|
||||
genesisValidatorsRoot []byte
|
||||
activeValidatorCount uint64
|
||||
peerDisconnectionTime *cache.Cache
|
||||
custodyInfo *custodyInfo
|
||||
custodyInfoLock sync.RWMutex // Lock access to custodyInfo
|
||||
}
|
||||
type Service struct {
|
||||
started bool
|
||||
isPreGenesis bool
|
||||
pingMethod func(ctx context.Context, id peer.ID) error
|
||||
pingMethodLock sync.RWMutex
|
||||
cancel context.CancelFunc
|
||||
cfg *Config
|
||||
peers *peers.Status
|
||||
addrFilter *multiaddr.Filters
|
||||
ipLimiter *leakybucket.Collector
|
||||
privKey *ecdsa.PrivateKey
|
||||
metaData metadata.Metadata
|
||||
pubsub *pubsub.PubSub
|
||||
joinedTopics map[string]*pubsub.Topic
|
||||
joinedTopicsLock sync.RWMutex
|
||||
subnetsLock map[uint64]*sync.RWMutex
|
||||
subnetsLockLock sync.Mutex // Lock access to subnetsLock
|
||||
initializationLock sync.Mutex
|
||||
dv5Listener ListenerRebooter
|
||||
startupErr error
|
||||
ctx context.Context
|
||||
host host.Host
|
||||
genesisTime time.Time
|
||||
genesisValidatorsRoot []byte
|
||||
activeValidatorCount uint64
|
||||
peerDisconnectionTime *cache.Cache
|
||||
custodyInfo *custodyInfo
|
||||
custodyInfoLock sync.RWMutex // Lock access to custodyInfo
|
||||
clock *startup.Clock
|
||||
allForkDigests map[[4]byte]struct{}
|
||||
}
|
||||
|
||||
custodyInfo struct {
|
||||
earliestAvailableSlot primitives.Slot
|
||||
groupCount uint64
|
||||
}
|
||||
)
|
||||
type custodyInfo struct {
|
||||
earliestAvailableSlot primitives.Slot
|
||||
groupCount uint64
|
||||
}
|
||||
|
||||
// NewService initializes a new p2p service compatible with shared.Service interface. No
|
||||
// connections are made until the Start function is called during the service registry startup.
|
||||
@@ -202,6 +203,7 @@ func (s *Service) Start() {
|
||||
// Waits until the state is initialized via an event feed.
|
||||
// Used for fork-related data when connecting peers.
|
||||
s.awaitStateInitialized()
|
||||
s.setAllForkDigests()
|
||||
s.isPreGenesis = false
|
||||
|
||||
var relayNodes []string
|
||||
@@ -455,7 +457,7 @@ func (s *Service) awaitStateInitialized() {
|
||||
s.genesisTime = clock.GenesisTime()
|
||||
gvr := clock.GenesisValidatorsRoot()
|
||||
s.genesisValidatorsRoot = gvr[:]
|
||||
_, err = s.currentForkDigest() // initialize fork digest cache
|
||||
_, err = s.currentForkDigest()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not initialize fork digest")
|
||||
}
|
||||
|
||||
@@ -13,15 +13,13 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
|
||||
testp2p "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/network/forks"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
prysmTime "github.com/OffchainLabs/prysm/v6/time"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/libp2p/go-libp2p"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
@@ -32,48 +30,6 @@ import (
|
||||
|
||||
const testPingInterval = 100 * time.Millisecond
|
||||
|
||||
type mockListener struct {
|
||||
localNode *enode.LocalNode
|
||||
}
|
||||
|
||||
func (m mockListener) Self() *enode.Node {
|
||||
return m.localNode.Node()
|
||||
}
|
||||
|
||||
func (mockListener) Close() {
|
||||
// no-op
|
||||
}
|
||||
|
||||
func (mockListener) Lookup(enode.ID) []*enode.Node {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (mockListener) ReadRandomNodes(_ []*enode.Node) int {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (mockListener) Resolve(*enode.Node) *enode.Node {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (mockListener) Ping(*enode.Node) error {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (mockListener) RequestENR(*enode.Node) (*enode.Node, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (mockListener) LocalNode() *enode.LocalNode {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (mockListener) RandomNodes() enode.Iterator {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (mockListener) RebootListener() error { panic("implement me") }
|
||||
|
||||
func createHost(t *testing.T, port uint) (host.Host, *ecdsa.PrivateKey, net.IP) {
|
||||
_, pkey := createAddrAndPrivKey(t)
|
||||
ipAddr := net.ParseIP("127.0.0.1")
|
||||
@@ -89,7 +45,7 @@ func TestService_Stop_SetsStartedToFalse(t *testing.T) {
|
||||
s, err := NewService(t.Context(), &Config{StateNotifier: &mock.MockStateNotifier{}, DB: testDB.SetupDB(t)})
|
||||
require.NoError(t, err)
|
||||
s.started = true
|
||||
s.dv5Listener = &mockListener{}
|
||||
s.dv5Listener = testp2p.NewMockListener(nil, nil)
|
||||
assert.NoError(t, s.Stop())
|
||||
assert.Equal(t, false, s.started)
|
||||
}
|
||||
@@ -115,7 +71,7 @@ func TestService_Start_OnlyStartsOnce(t *testing.T) {
|
||||
}
|
||||
s, err := NewService(t.Context(), cfg)
|
||||
require.NoError(t, err)
|
||||
s.dv5Listener = &mockListener{}
|
||||
s.dv5Listener = testp2p.NewMockListener(nil, nil)
|
||||
s.custodyInfo = &custodyInfo{}
|
||||
exitRoutine := make(chan bool)
|
||||
go func() {
|
||||
@@ -135,14 +91,14 @@ func TestService_Start_OnlyStartsOnce(t *testing.T) {
|
||||
func TestService_Status_NotRunning(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
s := &Service{started: false}
|
||||
s.dv5Listener = &mockListener{}
|
||||
s.dv5Listener = testp2p.NewMockListener(nil, nil)
|
||||
assert.ErrorContains(t, "not running", s.Status(), "Status returned wrong error")
|
||||
}
|
||||
|
||||
func TestService_Status_NoGenesisTimeSet(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
s := &Service{started: true}
|
||||
s.dv5Listener = &mockListener{}
|
||||
s.dv5Listener = testp2p.NewMockListener(nil, nil)
|
||||
assert.ErrorContains(t, "no genesis time set", s.Status(), "Status returned wrong error")
|
||||
|
||||
s.genesisTime = time.Now()
|
||||
@@ -346,14 +302,16 @@ func TestPeer_Disconnect(t *testing.T) {
|
||||
|
||||
func TestService_JoinLeaveTopic(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
ctx, cancel := context.WithTimeout(t.Context(), 3*time.Second)
|
||||
defer cancel()
|
||||
gs := startup.NewClockSynchronizer()
|
||||
s, err := NewService(ctx, &Config{StateNotifier: &mock.MockStateNotifier{}, ClockWaiter: gs, DB: testDB.SetupDB(t)})
|
||||
require.NoError(t, err)
|
||||
|
||||
go s.awaitStateInitialized()
|
||||
fd := initializeStateWithForkDigest(ctx, t, gs)
|
||||
s.setAllForkDigests()
|
||||
s.awaitStateInitialized()
|
||||
|
||||
assert.Equal(t, 0, len(s.joinedTopics))
|
||||
|
||||
@@ -382,15 +340,13 @@ func TestService_JoinLeaveTopic(t *testing.T) {
|
||||
// digest associated with that genesis event.
|
||||
func initializeStateWithForkDigest(_ context.Context, t *testing.T, gs startup.ClockSetter) [4]byte {
|
||||
gt := prysmTime.Now()
|
||||
gvr := bytesutil.ToBytes32(bytesutil.PadTo([]byte("genesis validators root"), 32))
|
||||
require.NoError(t, gs.SetClock(startup.NewClock(gt, gvr)))
|
||||
|
||||
fd, err := forks.CreateForkDigest(gt, gvr[:])
|
||||
require.NoError(t, err)
|
||||
gvr := params.BeaconConfig().GenesisValidatorsRoot
|
||||
clock := startup.NewClock(gt, gvr)
|
||||
require.NoError(t, gs.SetClock(clock))
|
||||
|
||||
time.Sleep(50 * time.Millisecond) // wait for pubsub filter to initialize.
|
||||
|
||||
return fd
|
||||
return params.ForkDigest(clock.CurrentEpoch())
|
||||
}
|
||||
|
||||
func TestService_connectWithPeer(t *testing.T) {
|
||||
|
||||
@@ -134,6 +134,24 @@ func (s *Service) FindAndDialPeersWithSubnets(
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateDefectiveSubnets updates the defective subnets map when a node with matching subnets is found.
// It decrements the defective count for each subnet the node satisfies and removes subnets
// that are fully satisfied (count reaches 0).
func updateDefectiveSubnets(
	nodeSubnets map[uint64]bool,
	defectiveSubnets map[uint64]int,
) {
	for subnet := range defectiveSubnets {
		if !nodeSubnets[subnet] {
			continue
		}
		defectiveSubnets[subnet]--
		if defectiveSubnets[subnet] == 0 {
			delete(defectiveSubnets, subnet)
		}
	}
}
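
As a quick illustration of how this helper trims the map as matching peers are found, here is a minimal standalone sketch; it reproduces the logic above verbatim, while the surrounding program is illustrative and not part of the diff:

package main

import "fmt"

func updateDefectiveSubnets(nodeSubnets map[uint64]bool, defectiveSubnets map[uint64]int) {
	for subnet := range defectiveSubnets {
		if !nodeSubnets[subnet] {
			continue
		}
		defectiveSubnets[subnet]--
		if defectiveSubnets[subnet] == 0 {
			delete(defectiveSubnets, subnet)
		}
	}
}

func main() {
	// Still need 2 peers for subnet 1 and 1 peer for subnet 2.
	defective := map[uint64]int{1: 2, 2: 1}
	// A discovered node subscribed to subnets 1 and 2.
	updateDefectiveSubnets(map[uint64]bool{1: true, 2: true}, defective)
	fmt.Println(defective) // map[1:1] — subnet 2 is fully satisfied and removed.
}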
|
||||
|
||||
// findPeersWithSubnets finds peers subscribed to defective subnets in batches
|
||||
// until enough peers are found or the context is canceled.
|
||||
// It returns new peers found during the search.
|
||||
@@ -169,6 +187,7 @@ func (s *Service) findPeersWithSubnets(
|
||||
|
||||
// Crawl the network for peers subscribed to the defective subnets.
|
||||
nodeByNodeID := make(map[enode.ID]*enode.Node)
|
||||
|
||||
for len(defectiveSubnets) > 0 && iterator.Next() {
|
||||
if err := ctx.Err(); err != nil {
|
||||
// Convert the map to a slice.
|
||||
@@ -180,14 +199,28 @@ func (s *Service) findPeersWithSubnets(
|
||||
return peersToDial, err
|
||||
}
|
||||
|
||||
// Get all needed subnets that the node is subscribed to.
|
||||
// Skip nodes that are not subscribed to any of the defective subnets.
|
||||
node := iterator.Node()
|
||||
|
||||
// Remove duplicates, keeping the node with higher seq.
|
||||
existing, ok := nodeByNodeID[node.ID()]
|
||||
if ok && existing.Seq() >= node.Seq() {
|
||||
continue // keep existing and skip.
|
||||
}
|
||||
|
||||
// Treat nodes that exist in nodeByNodeID with higher seq numbers as new peers
|
||||
// Skip peer not matching the filter.
|
||||
if !s.filterPeer(node) {
|
||||
if ok {
|
||||
// this means the existing peer with the lower sequence number is no longer valid
|
||||
delete(nodeByNodeID, existing.ID())
|
||||
// Note: We deliberately do not roll back changes to the defective subnets map here; s.defectiveSubnets is recomputed after dialing peers instead.
// This case should rarely happen and is handled by the second iteration in FindAndDialPeersWithSubnets.
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Get all needed subnets that the node is subscribed to.
|
||||
// Skip nodes that are not subscribed to any of the defective subnets.
|
||||
nodeSubnets, err := filter(node)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "filter node")
|
||||
@@ -196,30 +229,14 @@ func (s *Service) findPeersWithSubnets(
|
||||
continue
|
||||
}
|
||||
|
||||
// Remove duplicates, keeping the node with higher seq.
|
||||
existing, ok := nodeByNodeID[node.ID()]
|
||||
if ok && existing.Seq() > node.Seq() {
|
||||
continue
|
||||
}
|
||||
nodeByNodeID[node.ID()] = node
|
||||
|
||||
// We found a new peer. Modify the defective subnets map
|
||||
// and the filter accordingly.
|
||||
for subnet := range defectiveSubnets {
|
||||
if !nodeSubnets[subnet] {
|
||||
continue
|
||||
}
|
||||
nodeByNodeID[node.ID()] = node
|
||||
|
||||
defectiveSubnets[subnet]--
|
||||
|
||||
if defectiveSubnets[subnet] == 0 {
|
||||
delete(defectiveSubnets, subnet)
|
||||
}
|
||||
|
||||
filter, err = s.nodeFilter(topicFormat, defectiveSubnets)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "node filter")
|
||||
}
|
||||
updateDefectiveSubnets(nodeSubnets, defectiveSubnets)
|
||||
filter, err = s.nodeFilter(topicFormat, defectiveSubnets)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "node filter")
|
||||
}
|
||||
}
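
The dedup rule applied in this loop (keep the record with the higher ENR sequence number, and evict a previously accepted record if a newer one fails the peer filter) can be shown with a small standalone sketch; the names and types below are illustrative and not from the diff:

package main

import "fmt"

type record struct {
	id  string
	seq uint64
	ok  bool // whether the record passes the peer filter
}

func main() {
	byID := map[string]record{}
	for _, r := range []record{
		{id: "node1", seq: 1, ok: true},
		{id: "node1", seq: 2, ok: false}, // newer but invalid: evicts the stored entry
		{id: "node2", seq: 1, ok: true},
	} {
		if existing, found := byID[r.id]; found && existing.seq >= r.seq {
			continue // keep the existing, newer-or-equal record
		}
		if !r.ok {
			delete(byID, r.id) // a newer bad record invalidates the stored one
			continue
		}
		byID[r.id] = r
	}
	fmt.Println(len(byID)) // 1 — only node2 survives
}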
|
||||
|
||||
|
||||
@@ -3,7 +3,6 @@ package p2p
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -11,14 +10,19 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
|
||||
testp2p "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
ecdsaprysm "github.com/OffchainLabs/prysm/v6/crypto/ecdsa"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/libp2p/go-libp2p/core/crypto"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
)
|
||||
|
||||
@@ -36,17 +40,8 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
|
||||
// find and connect to a node already subscribed to a specific subnet.
|
||||
// In our case, node i is subscribed to subnet i, with i = 1, 2, 3.
|
||||
|
||||
// Define the genesis validators root, to ensure everybody is on the same network.
|
||||
const (
|
||||
genesisValidatorRootStr = "0xdeadbeefcafecafedeadbeefcafecafedeadbeefcafecafedeadbeefcafecafe"
|
||||
subnetCount = 3
|
||||
minimumPeersPerSubnet = 1
|
||||
)
|
||||
|
||||
genesisValidatorsRoot, err := hex.DecodeString(genesisValidatorRootStr[2:])
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a context.
|
||||
const subnetCount = 3
|
||||
const minimumPeersPerSubnet = 1
|
||||
ctx := t.Context()
|
||||
|
||||
// Use shorter period for testing.
|
||||
@@ -58,6 +53,7 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
|
||||
|
||||
// Create flags.
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.MinimumPeersPerSubnet = 1
|
||||
flags.Init(gFlags)
|
||||
@@ -74,7 +70,7 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
|
||||
bootNodeService := &Service{
|
||||
cfg: &Config{UDPPort: 2000, TCPPort: 3000, QUICPort: 3000, DisableLivenessCheck: true, PingInterval: testPingInterval},
|
||||
genesisTime: genesisTime,
|
||||
genesisValidatorsRoot: genesisValidatorsRoot,
|
||||
genesisValidatorsRoot: params.BeaconConfig().GenesisValidatorsRoot[:],
|
||||
custodyInfo: &custodyInfo{},
|
||||
}
|
||||
|
||||
@@ -111,7 +107,7 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
service.genesisTime = genesisTime
|
||||
service.genesisValidatorsRoot = genesisValidatorsRoot
|
||||
service.genesisValidatorsRoot = params.BeaconConfig().GenesisValidatorsRoot[:]
|
||||
service.custodyInfo = &custodyInfo{}
|
||||
|
||||
nodeForkDigest, err := service.currentForkDigest()
|
||||
@@ -158,11 +154,11 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
|
||||
DB: db,
|
||||
}
|
||||
|
||||
service, err := NewService(ctx, cfg)
|
||||
service, err := NewService(t.Context(), cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
service.genesisTime = genesisTime
|
||||
service.genesisValidatorsRoot = genesisValidatorsRoot
|
||||
service.genesisValidatorsRoot = params.BeaconConfig().GenesisValidatorsRoot[:]
|
||||
service.custodyInfo = &custodyInfo{}
|
||||
|
||||
service.Start()
|
||||
@@ -550,3 +546,552 @@ func TestInitializePersistentSubnets(t *testing.T) {
|
||||
assert.Equal(t, 2, len(subs))
|
||||
assert.Equal(t, true, expTime.After(time.Now()))
|
||||
}
|
||||
|
||||
func TestFindPeersWithSubnets_NodeDeduplication(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cache.SubnetIDs.EmptyAllCaches()
|
||||
defer cache.SubnetIDs.EmptyAllCaches()
|
||||
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
|
||||
localNode1 := createTestNodeWithID(t, "node1")
|
||||
localNode2 := createTestNodeWithID(t, "node2")
|
||||
localNode3 := createTestNodeWithID(t, "node3")
|
||||
|
||||
// Create different sequence versions of node1 with subnet 1
|
||||
setNodeSubnets(localNode1, []uint64{1})
|
||||
setNodeSeq(localNode1, 1)
|
||||
node1_seq1_subnet1 := localNode1.Node()
|
||||
setNodeSeq(localNode1, 2)
|
||||
node1_seq2_subnet1 := localNode1.Node() // Same ID, higher seq
|
||||
setNodeSeq(localNode1, 3)
|
||||
node1_seq3_subnet1 := localNode1.Node() // Same ID, even higher seq
|
||||
|
||||
// Node2 with different sequences and subnets
|
||||
setNodeSubnets(localNode2, []uint64{1})
|
||||
node2_seq1_subnet1 := localNode2.Node()
|
||||
setNodeSubnets(localNode2, []uint64{2}) // Different subnet
|
||||
setNodeSeq(localNode2, 2)
|
||||
node2_seq2_subnet2 := localNode2.Node()
|
||||
|
||||
// Node3 with multiple subnets
|
||||
setNodeSubnets(localNode3, []uint64{1, 2})
|
||||
node3_seq1_subnet1_2 := localNode3.Node()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
nodes []*enode.Node
|
||||
defectiveSubnets map[uint64]int
|
||||
expectedCount int
|
||||
description string
|
||||
eval func(t *testing.T, result []*enode.Node) // Custom validation function
|
||||
}{
|
||||
{
|
||||
name: "No duplicates - unique nodes with same subnet",
|
||||
nodes: []*enode.Node{
|
||||
node2_seq1_subnet1,
|
||||
node3_seq1_subnet1_2,
|
||||
},
|
||||
defectiveSubnets: map[uint64]int{1: 2},
|
||||
expectedCount: 2,
|
||||
description: "Should return all unique nodes subscribed to subnet",
|
||||
eval: nil, // No special validation needed
|
||||
},
|
||||
{
|
||||
name: "Duplicate with lower seq first - should replace",
|
||||
nodes: []*enode.Node{
|
||||
node1_seq1_subnet1,
|
||||
node1_seq2_subnet1, // Higher seq, should replace
|
||||
node2_seq1_subnet1, // Different node to ensure we process enough nodes
|
||||
},
|
||||
defectiveSubnets: map[uint64]int{1: 2}, // Need 2 peers for subnet 1
|
||||
expectedCount: 2,
|
||||
description: "Should replace with higher seq node for same subnet",
|
||||
eval: func(t *testing.T, result []*enode.Node) {
|
||||
found := false
|
||||
for _, node := range result {
|
||||
if node.ID() == node1_seq2_subnet1.ID() && node.Seq() == node1_seq2_subnet1.Seq() {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
require.Equal(t, true, found, "Should have node with higher seq")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Duplicate with higher seq first - should keep existing",
|
||||
nodes: []*enode.Node{
|
||||
node1_seq3_subnet1, // Higher seq
|
||||
node1_seq2_subnet1, // Lower seq, should be skipped (continue branch)
|
||||
node1_seq1_subnet1, // Even lower seq, should also be skipped (continue branch)
|
||||
node2_seq1_subnet1, // Different node
|
||||
},
|
||||
defectiveSubnets: map[uint64]int{1: 2},
|
||||
expectedCount: 2,
|
||||
description: "Should keep existing node with higher seq and skip lower seq duplicates",
|
||||
eval: func(t *testing.T, result []*enode.Node) {
|
||||
found := false
|
||||
for _, node := range result {
|
||||
if node.ID() == node1_seq3_subnet1.ID() && node.Seq() == node1_seq3_subnet1.Seq() {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
require.Equal(t, true, found, "Should have node with highest seq")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Multiple updates for same node",
|
||||
nodes: []*enode.Node{
|
||||
node1_seq1_subnet1,
|
||||
node1_seq2_subnet1, // Should replace seq1
|
||||
node1_seq3_subnet1, // Should replace seq2
|
||||
node2_seq1_subnet1, // Different node
|
||||
},
|
||||
defectiveSubnets: map[uint64]int{1: 2},
|
||||
expectedCount: 2,
|
||||
description: "Should keep updating to highest seq",
|
||||
eval: func(t *testing.T, result []*enode.Node) {
|
||||
found := false
|
||||
for _, node := range result {
|
||||
if node.ID() == node1_seq3_subnet1.ID() && node.Seq() == node1_seq3_subnet1.Seq() {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
require.Equal(t, true, found, "Should have node with highest seq")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Duplicate with equal seq in subnets - should skip",
|
||||
nodes: []*enode.Node{
|
||||
node1_seq2_subnet1, // First occurrence
|
||||
node1_seq2_subnet1, // Same exact node instance, should be skipped (continue branch)
|
||||
node2_seq1_subnet1, // Different node
|
||||
},
|
||||
defectiveSubnets: map[uint64]int{1: 2},
|
||||
expectedCount: 2,
|
||||
description: "Should skip duplicate with equal sequence number in subnet search",
|
||||
eval: func(t *testing.T, result []*enode.Node) {
|
||||
foundNode1 := false
|
||||
foundNode2 := false
|
||||
node1Count := 0
|
||||
for _, node := range result {
|
||||
if node.ID() == node1_seq2_subnet1.ID() {
|
||||
require.Equal(t, node1_seq2_subnet1.Seq(), node.Seq(), "Node1 should have expected seq")
|
||||
foundNode1 = true
|
||||
node1Count++
|
||||
}
|
||||
if node.ID() == node2_seq1_subnet1.ID() {
|
||||
foundNode2 = true
|
||||
}
|
||||
}
|
||||
require.Equal(t, true, foundNode1, "Should have node1")
|
||||
require.Equal(t, true, foundNode2, "Should have node2")
|
||||
require.Equal(t, 1, node1Count, "Should have exactly one instance of node1")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Mix with different subnets",
|
||||
nodes: []*enode.Node{
|
||||
node2_seq1_subnet1,
|
||||
node2_seq2_subnet2, // Higher seq but different subnet
|
||||
node3_seq1_subnet1_2,
|
||||
},
|
||||
defectiveSubnets: map[uint64]int{1: 2, 2: 1},
|
||||
expectedCount: 2, // node2 (latest) and node3
|
||||
description: "Should handle nodes with different subnet subscriptions",
|
||||
eval: nil, // Basic count validation is sufficient
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.MinimumPeersPerSubnet = 1
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(new(flags.GlobalFlags))
|
||||
|
||||
fakePeer := testp2p.NewTestP2P(t)
|
||||
|
||||
s := &Service{
|
||||
cfg: &Config{
|
||||
MaxPeers: 30,
|
||||
DB: db,
|
||||
},
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
peers: peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{},
|
||||
}),
|
||||
host: fakePeer.BHost,
|
||||
}
|
||||
|
||||
localNode := createTestNodeRandom(t)
|
||||
|
||||
mockIter := testp2p.NewMockIterator(tt.nodes)
|
||||
s.dv5Listener = testp2p.NewMockListener(localNode, mockIter)
|
||||
|
||||
digest, err := s.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
|
||||
ctxWithTimeout, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
|
||||
defer cancel()
|
||||
|
||||
result, err := s.findPeersWithSubnets(
|
||||
ctxWithTimeout,
|
||||
AttestationSubnetTopicFormat,
|
||||
digest,
|
||||
1,
|
||||
tt.defectiveSubnets,
|
||||
)
|
||||
|
||||
require.NoError(t, err, tt.description)
|
||||
require.Equal(t, tt.expectedCount, len(result), tt.description)
|
||||
|
||||
if tt.eval != nil {
|
||||
tt.eval(t, result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFindPeersWithSubnets_FilterPeerRemoval(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cache.SubnetIDs.EmptyAllCaches()
|
||||
defer cache.SubnetIDs.EmptyAllCaches()
|
||||
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
|
||||
localNode1 := createTestNodeWithID(t, "node1")
|
||||
localNode2 := createTestNodeWithID(t, "node2")
|
||||
localNode3 := createTestNodeWithID(t, "node3")
|
||||
|
||||
// Create versions of node1 with subnet 1
|
||||
setNodeSubnets(localNode1, []uint64{1})
|
||||
setNodeSeq(localNode1, 1)
|
||||
node1_seq1_valid_subnet1 := localNode1.Node()
|
||||
|
||||
// Create bad version (higher seq)
|
||||
setNodeSeq(localNode1, 2)
|
||||
node1_seq2_bad_subnet1 := localNode1.Node()
|
||||
|
||||
// Create another valid version
|
||||
setNodeSeq(localNode1, 3)
|
||||
node1_seq3_valid_subnet1 := localNode1.Node()
|
||||
|
||||
// Node2 with subnet 1
|
||||
setNodeSubnets(localNode2, []uint64{1})
|
||||
node2_seq1_valid_subnet1 := localNode2.Node()
|
||||
|
||||
// Node3 with subnet 1 and 2
|
||||
setNodeSubnets(localNode3, []uint64{1, 2})
|
||||
node3_seq1_valid_subnet1_2 := localNode3.Node()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
nodes []*enode.Node
|
||||
defectiveSubnets map[uint64]int
|
||||
expectedCount int
|
||||
description string
|
||||
eval func(t *testing.T, result []*enode.Node)
|
||||
}{
|
||||
{
|
||||
name: "Valid node in subnet followed by bad version - should remove",
|
||||
nodes: []*enode.Node{
|
||||
node1_seq1_valid_subnet1, // First add valid node with subnet 1
|
||||
node1_seq2_bad_subnet1, // Invalid version with higher seq - should delete
|
||||
node2_seq1_valid_subnet1, // Different valid node with subnet 1
|
||||
},
|
||||
defectiveSubnets: map[uint64]int{1: 2}, // Need 2 peers for subnet 1
|
||||
expectedCount: 1, // Only node2 should remain
|
||||
description: "Should remove node from map when bad version arrives, even if it has required subnet",
|
||||
eval: func(t *testing.T, result []*enode.Node) {
|
||||
foundNode1 := false
|
||||
foundNode2 := false
|
||||
for _, node := range result {
|
||||
if node.ID() == node1_seq1_valid_subnet1.ID() {
|
||||
foundNode1 = true
|
||||
}
|
||||
if node.ID() == node2_seq1_valid_subnet1.ID() {
|
||||
foundNode2 = true
|
||||
}
|
||||
}
|
||||
require.Equal(t, false, foundNode1, "Node1 should have been removed despite having subnet")
|
||||
require.Equal(t, true, foundNode2, "Node2 should be present")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Bad node with subnet stays bad even with higher seq",
|
||||
nodes: []*enode.Node{
|
||||
node1_seq2_bad_subnet1, // First bad node - not added
|
||||
node1_seq3_valid_subnet1, // Higher seq but same bad peer ID
|
||||
node2_seq1_valid_subnet1, // Different valid node
|
||||
},
|
||||
defectiveSubnets: map[uint64]int{1: 2},
|
||||
expectedCount: 1, // Only node2 (node1 remains bad)
|
||||
description: "Bad peer with subnet remains bad even with higher seq",
|
||||
eval: func(t *testing.T, result []*enode.Node) {
|
||||
foundNode1 := false
|
||||
foundNode2 := false
|
||||
for _, node := range result {
|
||||
if node.ID() == node1_seq3_valid_subnet1.ID() {
|
||||
foundNode1 = true
|
||||
}
|
||||
if node.ID() == node2_seq1_valid_subnet1.ID() {
|
||||
foundNode2 = true
|
||||
}
|
||||
}
|
||||
require.Equal(t, false, foundNode1, "Node1 should remain bad despite having subnet")
|
||||
require.Equal(t, true, foundNode2, "Node2 should be present")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Mixed valid and bad nodes with subnets",
|
||||
nodes: []*enode.Node{
|
||||
node1_seq1_valid_subnet1, // Add valid node1 with subnet
|
||||
node2_seq1_valid_subnet1, // Add valid node2 with subnet
|
||||
node1_seq2_bad_subnet1, // Invalid update for node1 - should remove
|
||||
node3_seq1_valid_subnet1_2, // Add valid node3 with multiple subnets
|
||||
},
|
||||
defectiveSubnets: map[uint64]int{1: 3}, // Need 3 peers for subnet 1
|
||||
expectedCount: 2, // Only node2 and node3 should remain
|
||||
description: "Should handle removal of nodes with subnets when they become bad",
|
||||
eval: func(t *testing.T, result []*enode.Node) {
|
||||
foundNode1 := false
|
||||
foundNode2 := false
|
||||
foundNode3 := false
|
||||
for _, node := range result {
|
||||
if node.ID() == node1_seq1_valid_subnet1.ID() {
|
||||
foundNode1 = true
|
||||
}
|
||||
if node.ID() == node2_seq1_valid_subnet1.ID() {
|
||||
foundNode2 = true
|
||||
}
|
||||
if node.ID() == node3_seq1_valid_subnet1_2.ID() {
|
||||
foundNode3 = true
|
||||
}
|
||||
}
|
||||
require.Equal(t, false, foundNode1, "Node1 should have been removed")
|
||||
require.Equal(t, true, foundNode2, "Node2 should be present")
|
||||
require.Equal(t, true, foundNode3, "Node3 should be present")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Node with subnet marked bad stays bad for all sequences",
|
||||
nodes: []*enode.Node{
|
||||
node1_seq1_valid_subnet1, // Add valid node1 with subnet
|
||||
node1_seq2_bad_subnet1, // Bad update - should remove and mark bad
|
||||
node1_seq3_valid_subnet1, // Higher seq but still same bad peer ID
|
||||
node2_seq1_valid_subnet1, // Different valid node
|
||||
},
|
||||
defectiveSubnets: map[uint64]int{1: 2},
|
||||
expectedCount: 1, // Only node2 (node1 stays bad)
|
||||
description: "Once marked bad, subnet peer stays bad for all sequences",
|
||||
eval: func(t *testing.T, result []*enode.Node) {
|
||||
foundNode1 := false
|
||||
foundNode2 := false
|
||||
for _, node := range result {
|
||||
if node.ID() == node1_seq3_valid_subnet1.ID() {
|
||||
foundNode1 = true
|
||||
}
|
||||
if node.ID() == node2_seq1_valid_subnet1.ID() {
|
||||
foundNode2 = true
|
||||
}
|
||||
}
|
||||
require.Equal(t, false, foundNode1, "Node1 should stay bad")
|
||||
require.Equal(t, true, foundNode2, "Node2 should be present")
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Initialize flags for subnet operations
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.MinimumPeersPerSubnet = 1
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(new(flags.GlobalFlags))
|
||||
|
||||
// Create test P2P instance
|
||||
fakePeer := testp2p.NewTestP2P(t)
|
||||
|
||||
// Create mock service
|
||||
s := &Service{
|
||||
cfg: &Config{
|
||||
MaxPeers: 30,
|
||||
DB: db,
|
||||
},
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
peers: peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{},
|
||||
}),
|
||||
host: fakePeer.BHost,
|
||||
}
|
||||
|
||||
// Mark specific node versions as "bad" to simulate filterPeer failures
|
||||
for _, node := range tt.nodes {
|
||||
if node == node1_seq2_bad_subnet1 {
|
||||
// Get peer ID from the node to mark it as bad
|
||||
peerData, _, _ := convertToAddrInfo(node)
|
||||
if peerData != nil {
|
||||
s.peers.Add(node.Record(), peerData.ID, nil, network.DirUnknown)
|
||||
// Mark as bad peer - this will make filterPeer return false
|
||||
s.peers.Scorers().BadResponsesScorer().Increment(peerData.ID)
|
||||
s.peers.Scorers().BadResponsesScorer().Increment(peerData.ID)
|
||||
s.peers.Scorers().BadResponsesScorer().Increment(peerData.ID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
localNode := createTestNodeRandom(t)
|
||||
|
||||
mockIter := testp2p.NewMockIterator(tt.nodes)
|
||||
s.dv5Listener = testp2p.NewMockListener(localNode, mockIter)
|
||||
|
||||
digest, err := s.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
|
||||
ctxWithTimeout, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
|
||||
defer cancel()
|
||||
|
||||
result, err := s.findPeersWithSubnets(
|
||||
ctxWithTimeout,
|
||||
AttestationSubnetTopicFormat,
|
||||
digest,
|
||||
1,
|
||||
tt.defectiveSubnets,
|
||||
)
|
||||
|
||||
require.NoError(t, err, tt.description)
|
||||
require.Equal(t, tt.expectedCount, len(result), tt.description)
|
||||
|
||||
if tt.eval != nil {
|
||||
tt.eval(t, result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// callbackIteratorForSubnets allows us to execute callbacks at specific points during iteration.
type callbackIteratorForSubnets struct {
	nodes     []*enode.Node
	index     int
	callbacks map[int]func() // map from index to callback function
}

func (c *callbackIteratorForSubnets) Next() bool {
	// Execute the callback for the current index, if one exists, before
	// checking whether iteration can continue.
	if callback, exists := c.callbacks[c.index]; exists {
		callback()
	}

	return c.index < len(c.nodes)
}

func (c *callbackIteratorForSubnets) Node() *enode.Node {
	if c.index >= len(c.nodes) {
		return nil
	}

	node := c.nodes[c.index]
	c.index++
	return node
}

func (c *callbackIteratorForSubnets) Close() {
	// Nothing to clean up for this simple implementation.
}
|
||||
|
||||
func TestFindPeersWithSubnets_received_bad_existing_node(t *testing.T) {
|
||||
// This test successfully triggers delete(nodeByNodeID, node.ID()) in subnets.go by:
|
||||
// 1. Processing node1_seq1 first (passes filterPeer, gets added to the map).
// 2. The callback marks the peer as bad before node1_seq2 is processed.
// 3. Processing node1_seq2 (fails filterPeer, triggers the delete since ok=true).
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cache.SubnetIDs.EmptyAllCaches()
|
||||
defer cache.SubnetIDs.EmptyAllCaches()
|
||||
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
|
||||
// Create LocalNode with same ID but different sequences
|
||||
localNode1 := createTestNodeWithID(t, "testnode")
|
||||
setNodeSubnets(localNode1, []uint64{1})
|
||||
node1_seq1 := localNode1.Node() // Get current node
|
||||
currentSeq := node1_seq1.Seq()
|
||||
setNodeSeq(localNode1, currentSeq+1) // Increment sequence by 1
|
||||
node1_seq2 := localNode1.Node() // This should have higher seq
|
||||
|
||||
// Additional node to ensure we have enough peers to process
|
||||
localNode2 := createTestNodeWithID(t, "othernode")
|
||||
setNodeSubnets(localNode2, []uint64{1})
|
||||
node2 := localNode2.Node()
|
||||
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.MinimumPeersPerSubnet = 1
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(new(flags.GlobalFlags))
|
||||
|
||||
fakePeer := testp2p.NewTestP2P(t)
|
||||
|
||||
service := &Service{
|
||||
cfg: &Config{
|
||||
MaxPeers: 30,
|
||||
DB: db,
|
||||
},
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
peers: peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{},
|
||||
}),
|
||||
host: fakePeer.BHost,
|
||||
}
|
||||
|
||||
// Create iterator with callback that marks peer as bad before processing node1_seq2
|
||||
iter := &callbackIteratorForSubnets{
|
||||
nodes: []*enode.Node{node1_seq1, node1_seq2, node2},
|
||||
index: 0,
|
||||
callbacks: map[int]func(){
|
||||
1: func() { // Before processing node1_seq2 (index 1)
|
||||
// Mark peer as bad before processing node1_seq2
|
||||
peerData, _, _ := convertToAddrInfo(node1_seq2)
|
||||
if peerData != nil {
|
||||
service.peers.Add(node1_seq2.Record(), peerData.ID, nil, network.DirUnknown)
|
||||
// Mark as bad peer - need enough increments to exceed threshold (6)
|
||||
for i := 0; i < 10; i++ {
|
||||
service.peers.Scorers().BadResponsesScorer().Increment(peerData.ID)
|
||||
}
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
localNode := createTestNodeRandom(t)
|
||||
service.dv5Listener = testp2p.NewMockListener(localNode, iter)
|
||||
|
||||
digest, err := service.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Run findPeersWithSubnets - node1_seq1 gets processed first, then callback marks peer bad, then node1_seq2 fails
|
||||
ctxWithTimeout, cancel := context.WithTimeout(ctx, 1*time.Second)
|
||||
defer cancel()
|
||||
|
||||
result, err := service.findPeersWithSubnets(
|
||||
ctxWithTimeout,
|
||||
AttestationSubnetTopicFormat,
|
||||
digest,
|
||||
1,
|
||||
map[uint64]int{1: 2}, // Need 2 peers for subnet 1
|
||||
)
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(result))
|
||||
require.Equal(t, localNode2.Node().ID(), result[0].ID()) // only node2 should remain
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ go_library(
|
||||
"fuzz_p2p.go",
|
||||
"mock_broadcaster.go",
|
||||
"mock_host.go",
|
||||
"mock_listener.go",
|
||||
"mock_metadataprovider.go",
|
||||
"mock_peermanager.go",
|
||||
"mock_peersprovider.go",
|
||||
|
||||
@@ -167,8 +167,8 @@ func (*FakeP2P) BroadcastLightClientFinalityUpdate(_ context.Context, _ interfac
|
||||
return nil
|
||||
}
|
||||
|
||||
// BroadcastDataColumn -- fake.
|
||||
func (*FakeP2P) BroadcastDataColumn(_ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar) error {
|
||||
// BroadcastDataColumnSidecar -- fake.
|
||||
func (*FakeP2P) BroadcastDataColumnSidecar(_ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -62,8 +62,8 @@ func (m *MockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
|
||||
return nil
|
||||
}
|
||||
|
||||
// BroadcastDataColumn broadcasts a data column for mock.
|
||||
func (m *MockBroadcaster) BroadcastDataColumn([fieldparams.RootLength]byte, uint64, *ethpb.DataColumnSidecar) error {
|
||||
// BroadcastDataColumnSidecar broadcasts a data column for mock.
|
||||
func (m *MockBroadcaster) BroadcastDataColumnSidecar([fieldparams.RootLength]byte, uint64, *ethpb.DataColumnSidecar) error {
|
||||
m.BroadcastCalled.Store(true)
|
||||
return nil
|
||||
}
|
||||
|
||||
128
beacon-chain/p2p/testing/mock_listener.go
Normal file
@@ -0,0 +1,128 @@
|
||||
package testing
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
)
|
||||
|
||||
// MockListener is a mock implementation of the Listener and ListenerRebooter interfaces
|
||||
// that can be used in tests. It provides configurable behavior for all methods.
|
||||
type MockListener struct {
|
||||
LocalNodeFunc func() *enode.LocalNode
|
||||
SelfFunc func() *enode.Node
|
||||
RandomNodesFunc func() enode.Iterator
|
||||
LookupFunc func(enode.ID) []*enode.Node
|
||||
ResolveFunc func(*enode.Node) *enode.Node
|
||||
PingFunc func(*enode.Node) error
|
||||
RequestENRFunc func(*enode.Node) (*enode.Node, error)
|
||||
RebootFunc func() error
|
||||
CloseFunc func()
|
||||
|
||||
// Default implementations
|
||||
localNode *enode.LocalNode
|
||||
iterator enode.Iterator
|
||||
}
|
||||
|
||||
// NewMockListener creates a new MockListener with default implementations
|
||||
func NewMockListener(localNode *enode.LocalNode, iterator enode.Iterator) *MockListener {
|
||||
return &MockListener{
|
||||
localNode: localNode,
|
||||
iterator: iterator,
|
||||
}
|
||||
}
|
||||
|
||||
func (m *MockListener) LocalNode() *enode.LocalNode {
|
||||
if m.LocalNodeFunc != nil {
|
||||
return m.LocalNodeFunc()
|
||||
}
|
||||
return m.localNode
|
||||
}
|
||||
|
||||
func (m *MockListener) Self() *enode.Node {
|
||||
if m.SelfFunc != nil {
|
||||
return m.SelfFunc()
|
||||
}
|
||||
if m.localNode != nil {
|
||||
return m.localNode.Node()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockListener) RandomNodes() enode.Iterator {
|
||||
if m.RandomNodesFunc != nil {
|
||||
return m.RandomNodesFunc()
|
||||
}
|
||||
return m.iterator
|
||||
}
|
||||
|
||||
func (m *MockListener) Lookup(id enode.ID) []*enode.Node {
|
||||
if m.LookupFunc != nil {
|
||||
return m.LookupFunc(id)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockListener) Resolve(node *enode.Node) *enode.Node {
|
||||
if m.ResolveFunc != nil {
|
||||
return m.ResolveFunc(node)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockListener) Ping(node *enode.Node) error {
|
||||
if m.PingFunc != nil {
|
||||
return m.PingFunc(node)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockListener) RequestENR(node *enode.Node) (*enode.Node, error) {
|
||||
if m.RequestENRFunc != nil {
|
||||
return m.RequestENRFunc(node)
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (m *MockListener) RebootListener() error {
|
||||
if m.RebootFunc != nil {
|
||||
return m.RebootFunc()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockListener) Close() {
|
||||
if m.CloseFunc != nil {
|
||||
m.CloseFunc()
|
||||
}
|
||||
}
|
||||
|
||||
// MockIterator is a mock implementation of enode.Iterator for testing.
type MockIterator struct {
	Nodes    []*enode.Node
	Position int
	Closed   bool
}

func NewMockIterator(nodes []*enode.Node) *MockIterator {
	return &MockIterator{
		Nodes: nodes,
	}
}

func (m *MockIterator) Next() bool {
	if m.Closed || m.Position >= len(m.Nodes) {
		return false
	}
	m.Position++
	return true
}

func (m *MockIterator) Node() *enode.Node {
	if m.Position == 0 || m.Position > len(m.Nodes) {
		return nil
	}
	return m.Nodes[m.Position-1]
}

func (m *MockIterator) Close() {
	m.Closed = true
}
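
A hedged sketch of how a test outside the package might exercise the new mocks; only the types added in mock_listener.go above and go-ethereum's enode package are used, and the test name and package are illustrative:

package p2p_test

import (
	"testing"

	testp2p "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
	"github.com/ethereum/go-ethereum/p2p/enode"
)

func TestMockListenerSketch(t *testing.T) {
	iter := testp2p.NewMockIterator(nil) // no nodes: Next() reports false immediately
	lis := testp2p.NewMockListener(nil, iter)

	// Per-test behavior can be overridden through the *Func hooks.
	lis.PingFunc = func(*enode.Node) error { return nil }

	if iter.Next() {
		t.Fatal("expected an empty iterator")
	}
	if err := lis.Ping(nil); err != nil {
		t.Fatal(err)
	}
	if got := lis.Self(); got != nil {
		t.Fatalf("expected nil Self() with no local node, got %v", got)
	}
}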
|
||||
@@ -228,8 +228,8 @@ func (p *TestP2P) BroadcastLightClientFinalityUpdate(_ context.Context, _ interf
|
||||
return nil
|
||||
}
|
||||
|
||||
// BroadcastDataColumn broadcasts a data column for mock.
|
||||
func (p *TestP2P) BroadcastDataColumn([fieldparams.RootLength]byte, uint64, *ethpb.DataColumnSidecar) error {
|
||||
// BroadcastDataColumnSidecar broadcasts a data column for mock.
|
||||
func (p *TestP2P) BroadcastDataColumnSidecar([fieldparams.RootLength]byte, uint64, *ethpb.DataColumnSidecar) error {
|
||||
p.BroadcastCalled.Store(true)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -72,6 +72,7 @@ go_library(
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"handlers_equivocation_test.go",
|
||||
"handlers_pool_test.go",
|
||||
"handlers_state_test.go",
|
||||
"handlers_test.go",
|
||||
|
||||
@@ -701,7 +701,7 @@ func (s *Server) publishBlockSSZ(ctx context.Context, w http.ResponseWriter, r *
|
||||
// Validate and optionally broadcast sidecars on equivocation.
|
||||
if err := s.validateBroadcast(ctx, r, genericBlock); err != nil {
|
||||
if errors.Is(err, errEquivocatedBlock) {
|
||||
b, err := blocks.NewSignedBeaconBlock(genericBlock)
|
||||
b, err := blocks.NewSignedBeaconBlock(genericBlock.Block)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
@@ -855,7 +855,7 @@ func (s *Server) publishBlock(ctx context.Context, w http.ResponseWriter, r *htt
|
||||
// Validate and optionally broadcast sidecars on equivocation.
|
||||
if err := s.validateBroadcast(ctx, r, genericBlock); err != nil {
|
||||
if errors.Is(err, errEquivocatedBlock) {
|
||||
b, err := blocks.NewSignedBeaconBlock(genericBlock)
|
||||
b, err := blocks.NewSignedBeaconBlock(genericBlock.Block)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
@@ -1078,6 +1078,10 @@ func (s *Server) validateBlobSidecars(blk interfaces.SignedBeaconBlock, blobs []
|
||||
if len(blobs) != len(proofs) || len(blobs) != len(kzgs) {
|
||||
return errors.New("number of blobs, proofs, and commitments do not match")
|
||||
}
|
||||
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(blk.Block().Slot())
|
||||
if len(blobs) > maxBlobsPerBlock {
|
||||
return fmt.Errorf("number of blobs over max, %d > %d", len(blobs), maxBlobsPerBlock)
|
||||
}
|
||||
for i, blob := range blobs {
|
||||
b := kzg4844.Blob(blob)
|
||||
if err := kzg4844.VerifyBlobProof(&b, kzg4844.Commitment(kzgs[i]), kzg4844.Proof(proofs[i])); err != nil {
|
||||
@@ -1220,7 +1224,7 @@ func (s *Server) GetStateFork(w http.ResponseWriter, r *http.Request) {
|
||||
fork := st.Fork()
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not check optimistic status"+err.Error(), http.StatusInternalServerError)
|
||||
helpers.HandleIsOptimisticError(w, err)
|
||||
return
|
||||
}
|
||||
blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
|
||||
@@ -1331,7 +1335,7 @@ func (s *Server) GetCommittees(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
|
||||
helpers.HandleIsOptimisticError(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1512,7 +1516,7 @@ func (s *Server) GetFinalityCheckpoints(w http.ResponseWriter, r *http.Request)
|
||||
}
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
|
||||
helpers.HandleIsOptimisticError(w, err)
|
||||
return
|
||||
}
|
||||
blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
|
||||
@@ -1686,7 +1690,7 @@ func (s *Server) GetPendingConsolidations(w http.ResponseWriter, r *http.Request
|
||||
} else {
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
|
||||
helpers.HandleIsOptimisticError(w, err)
|
||||
return
|
||||
}
|
||||
blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
|
||||
@@ -1742,7 +1746,7 @@ func (s *Server) GetPendingDeposits(w http.ResponseWriter, r *http.Request) {
|
||||
} else {
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
|
||||
helpers.HandleIsOptimisticError(w, err)
|
||||
return
|
||||
}
|
||||
blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
|
||||
@@ -1798,7 +1802,7 @@ func (s *Server) GetPendingPartialWithdrawals(w http.ResponseWriter, r *http.Req
|
||||
} else {
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
|
||||
helpers.HandleIsOptimisticError(w, err)
|
||||
return
|
||||
}
|
||||
blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
|
||||
@@ -1851,7 +1855,7 @@ func (s *Server) GetProposerLookahead(w http.ResponseWriter, r *http.Request) {
|
||||
} else {
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
|
||||
helpers.HandleIsOptimisticError(w, err)
|
||||
return
|
||||
}
|
||||
blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
|
||||
|
||||
35
beacon-chain/rpc/eth/beacon/handlers_equivocation_test.go
Normal file
@@ -0,0 +1,35 @@
|
||||
package beacon
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/api/server/structs"
|
||||
rpctesting "github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/eth/shared/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
)
|
||||
|
||||
// TestBlocks_NewSignedBeaconBlock_EquivocationFix tests that blocks.NewSignedBeaconBlock
|
||||
// correctly handles the fixed case where genericBlock.Block is passed instead of genericBlock
|
||||
func TestBlocks_NewSignedBeaconBlock_EquivocationFix(t *testing.T) {
|
||||
// Parse the Phase0 JSON block
|
||||
var block structs.SignedBeaconBlock
|
||||
err := json.Unmarshal([]byte(rpctesting.Phase0Block), &block)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Convert to generic format
|
||||
genericBlock, err := block.ToGeneric()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Test the FIX: pass genericBlock.Block instead of genericBlock
|
||||
// This is what our fix changed in handlers.go lines 704 and 858
|
||||
_, err = blocks.NewSignedBeaconBlock(genericBlock.Block)
|
||||
require.NoError(t, err, "NewSignedBeaconBlock should work with genericBlock.Block")
|
||||
|
||||
// Test the BROKEN version: pass genericBlock directly (this should fail)
|
||||
_, err = blocks.NewSignedBeaconBlock(genericBlock)
|
||||
if err == nil {
|
||||
t.Errorf("NewSignedBeaconBlock should fail with whole genericBlock but succeeded")
|
||||
}
|
||||
}
|
||||
@@ -56,7 +56,7 @@ func (s *Server) GetStateRoot(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
|
||||
helpers.HandleIsOptimisticError(w, err)
|
||||
return
|
||||
}
|
||||
blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
|
||||
@@ -125,7 +125,7 @@ func (s *Server) GetRandao(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
|
||||
helpers.HandleIsOptimisticError(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -227,7 +227,7 @@ func (s *Server) GetSyncCommittees(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
|
||||
helpers.HandleIsOptimisticError(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -4800,6 +4800,222 @@ func Test_validateBlobSidecars(t *testing.T) {
|
||||
b, err = blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.ErrorContains(t, "could not verify blob proof: can't verify opening proof", s.validateBlobSidecars(b, [][]byte{blob[:]}, [][]byte{proof[:]}))
|
||||
|
||||
blobs := [][]byte{}
|
||||
commitments := [][]byte{}
|
||||
proofs := [][]byte{}
|
||||
for i := 0; i < 10; i++ {
|
||||
blobs = append(blobs, blob[:])
|
||||
commitments = append(commitments, commitment[:])
|
||||
proofs = append(proofs, proof[:])
|
||||
}
|
||||
t.Run("pre-Deneb block should return early", func(t *testing.T) {
|
||||
// Create a pre-Deneb block (e.g., Capella)
|
||||
blk := util.NewBeaconBlockCapella()
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
// Should return nil for pre-Deneb blocks regardless of blobs
|
||||
require.NoError(t, s.validateBlobSidecars(b, [][]byte{}, [][]byte{}))
|
||||
require.NoError(t, s.validateBlobSidecars(b, blobs[:1], proofs[:1]))
|
||||
})
|
||||
|
||||
t.Run("Deneb block with valid single blob", func(t *testing.T) {
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Body.BlobKzgCommitments = [][]byte{commitment[:]}
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
require.NoError(t, s.validateBlobSidecars(b, [][]byte{blob[:]}, [][]byte{proof[:]}))
|
||||
})
|
||||
|
||||
t.Run("Deneb block with max blobs (6)", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 100
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Slot = 10 // Deneb slot
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:6]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
// Should pass with exactly 6 blobs
|
||||
require.NoError(t, s.validateBlobSidecars(b, blobs[:6], proofs[:6]))
|
||||
})
|
||||
|
||||
t.Run("Deneb block exceeding max blobs", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 100
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Slot = 10 // Deneb slot
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:7]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
// Should fail with 7 blobs when max is 6
|
||||
err = s.validateBlobSidecars(b, blobs[:7], proofs[:7])
|
||||
require.ErrorContains(t, "number of blobs over max, 7 > 6", err)
|
||||
})
|
||||
|
||||
t.Run("Electra block with valid blobs", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Set up Electra config with max 9 blobs
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 5
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
blk := util.NewBeaconBlockElectra()
|
||||
blk.Block.Slot = 160 // Electra slot (epoch 5+)
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:9]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
// Should pass with 9 blobs in Electra
|
||||
require.NoError(t, s.validateBlobSidecars(b, blobs[:9], proofs[:9]))
|
||||
})
|
||||
|
||||
t.Run("Electra block exceeding max blobs", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Set up Electra config with max 9 blobs
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 5
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
blk := util.NewBeaconBlockElectra()
|
||||
blk.Block.Slot = 160 // Electra slot
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:10]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
// Should fail with 10 blobs when max is 9
|
||||
err = s.validateBlobSidecars(b, blobs[:10], proofs[:10])
|
||||
require.ErrorContains(t, "number of blobs over max, 10 > 9", err)
|
||||
})
|
||||
|
||||
t.Run("Fulu block with valid blobs", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 5
|
||||
testCfg.FuluForkEpoch = 10
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
blk := util.NewBeaconBlockFulu()
|
||||
blk.Block.Slot = 320 // Fulu slot (epoch 10+)
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:9]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
// Should pass with 9 blobs in Fulu
|
||||
require.NoError(t, s.validateBlobSidecars(b, blobs[:9], proofs[:9]))
|
||||
})
|
||||
|
||||
t.Run("Fulu block exceeding max blobs", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 5
|
||||
testCfg.FuluForkEpoch = 10
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
blk := util.NewBeaconBlockFulu()
|
||||
blk.Block.Slot = 320 // Fulu slot (epoch 10+)
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:10]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
// Should fail with 10 blobs when max is 9
|
||||
err = s.validateBlobSidecars(b, blobs[:10], proofs[:10])
|
||||
require.ErrorContains(t, "number of blobs over max, 10 > 9", err)
|
||||
})
|
||||
|
||||
t.Run("BlobSchedule with progressive increases (BPO)", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Set up config with BlobSchedule (BPO - Blob Parameter Only forks)
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 100
|
||||
testCfg.FuluForkEpoch = 200
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
|
||||
// Define blob schedule with progressive increases
|
||||
testCfg.BlobSchedule = []params.BlobScheduleEntry{
|
||||
{Epoch: 0, MaxBlobsPerBlock: 3}, // Start with 3 blobs
|
||||
{Epoch: 10, MaxBlobsPerBlock: 5}, // Increase to 5 at epoch 10
|
||||
{Epoch: 20, MaxBlobsPerBlock: 7}, // Increase to 7 at epoch 20
|
||||
{Epoch: 30, MaxBlobsPerBlock: 9}, // Increase to 9 at epoch 30
|
||||
}
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
s := &Server{}
|
||||
|
||||
// Test epoch 0-9: max 3 blobs
|
||||
t.Run("epoch 0-9: max 3 blobs", func(t *testing.T) {
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Slot = 5 // Epoch 0
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:3]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.validateBlobSidecars(b, blobs[:3], proofs[:3]))
|
||||
|
||||
// Should fail with 4 blobs
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:4]
|
||||
b, err = blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
err = s.validateBlobSidecars(b, blobs[:4], proofs[:4])
|
||||
require.ErrorContains(t, "number of blobs over max, 4 > 3", err)
|
||||
})
|
||||
|
||||
// Test epoch 30+: max 9 blobs
|
||||
t.Run("epoch 30+: max 9 blobs", func(t *testing.T) {
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Slot = 960 // Epoch 30
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:9]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.validateBlobSidecars(b, blobs[:9], proofs[:9]))
|
||||
|
||||
// Should fail with 10 blobs
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:10]
|
||||
b, err = blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
err = s.validateBlobSidecars(b, blobs[:10], proofs[:10])
|
||||
require.ErrorContains(t, "number of blobs over max, 10 > 9", err)
|
||||
})
|
||||
})
|
||||
}
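
The schedule semantics exercised above (the latest entry whose epoch is at or below the block's epoch determines the limit) can be illustrated with a small standalone helper; this is a hedged sketch, not Prysm's implementation:

package main

import "fmt"

type blobScheduleEntry struct {
	Epoch            uint64
	MaxBlobsPerBlock int
}

// maxBlobsAt returns the limit from the last entry whose epoch is <= epoch,
// assuming the schedule is sorted by ascending epoch.
func maxBlobsAt(schedule []blobScheduleEntry, epoch uint64) int {
	limit := 0
	for _, e := range schedule {
		if e.Epoch <= epoch {
			limit = e.MaxBlobsPerBlock
		}
	}
	return limit
}

func main() {
	schedule := []blobScheduleEntry{
		{Epoch: 0, MaxBlobsPerBlock: 3},
		{Epoch: 10, MaxBlobsPerBlock: 5},
		{Epoch: 20, MaxBlobsPerBlock: 7},
		{Epoch: 30, MaxBlobsPerBlock: 9},
	}
	fmt.Println(maxBlobsAt(schedule, 5))  // 3
	fmt.Println(maxBlobsAt(schedule, 31)) // 9
}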
|
||||
|
||||
func TestGetPendingConsolidations(t *testing.T) {
|
||||
|
||||
@@ -44,7 +44,7 @@ func (s *Server) GetValidators(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
|
||||
helpers.HandleIsOptimisticError(w, err)
|
||||
return
|
||||
}
|
||||
blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
|
||||
@@ -222,7 +222,7 @@ func (s *Server) GetValidator(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
|
||||
helpers.HandleIsOptimisticError(w, err)
|
||||
return
|
||||
}
|
||||
blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
|
||||
@@ -258,7 +258,7 @@ func (s *Server) GetValidatorBalances(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
|
||||
helpers.HandleIsOptimisticError(w, err)
|
||||
return
|
||||
}
|
||||
blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
|
||||
@@ -419,7 +419,7 @@ func (s *Server) getValidatorIdentitiesJSON(
|
||||
) {
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
|
||||
helpers.HandleIsOptimisticError(w, err)
|
||||
return
|
||||
}
|
||||
blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
|
||||
|
||||
@@ -9,7 +9,6 @@ go_library(
|
||||
"//api/server/structs:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//monitoring/tracing/trace:go_default_library",
|
||||
"//network/forks:go_default_library",
|
||||
"//network/httputil:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
@@ -24,8 +23,6 @@ go_test(
|
||||
"//api/server/structs:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//network/forks:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
|
||||
@@ -11,7 +11,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/api/server/structs"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
|
||||
"github.com/OffchainLabs/prysm/v6/network/forks"
|
||||
"github.com/OffchainLabs/prysm/v6/network/httputil"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
log "github.com/sirupsen/logrus"
|
||||
@@ -35,34 +34,25 @@ func GetForkSchedule(w http.ResponseWriter, r *http.Request) {
|
||||
_, span := trace.StartSpan(r.Context(), "config.GetForkSchedule")
|
||||
defer span.End()
|
||||
|
||||
schedule := params.BeaconConfig().ForkVersionSchedule
|
||||
schedule := params.SortedForkSchedule()
|
||||
data := make([]*structs.Fork, 0, len(schedule))
|
||||
if len(schedule) == 0 {
|
||||
httputil.WriteJson(w, &structs.GetForkScheduleResponse{
|
||||
Data: make([]*structs.Fork, 0),
|
||||
Data: data,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
versions := forks.SortedForkVersions(schedule)
|
||||
chainForks := make([]*structs.Fork, len(schedule))
|
||||
var previous, current []byte
|
||||
for i, v := range versions {
|
||||
if i == 0 {
|
||||
previous = params.BeaconConfig().GenesisForkVersion
|
||||
} else {
|
||||
previous = current
|
||||
}
|
||||
copyV := v
|
||||
current = copyV[:]
|
||||
chainForks[i] = &structs.Fork{
|
||||
PreviousVersion: hexutil.Encode(previous),
|
||||
CurrentVersion: hexutil.Encode(current),
|
||||
Epoch: fmt.Sprintf("%d", schedule[v]),
|
||||
}
|
||||
previous := schedule[0]
|
||||
for _, entry := range schedule {
|
||||
data = append(data, &structs.Fork{
|
||||
PreviousVersion: hexutil.Encode(previous.ForkVersion[:]),
|
||||
CurrentVersion: hexutil.Encode(entry.ForkVersion[:]),
|
||||
Epoch: fmt.Sprintf("%d", entry.Epoch),
|
||||
})
|
||||
previous = entry
|
||||
}
|
||||
|
||||
httputil.WriteJson(w, &structs.GetForkScheduleResponse{
|
||||
Data: chainForks,
|
||||
Data: data,
|
||||
})
|
||||
}
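
For clarity, a standalone sketch of the previous/current pairing produced by the loop above; the entry struct is a hypothetical stand-in for whatever params.SortedForkSchedule returns, and only the Epoch and ForkVersion fields used above are assumed:

package main

import "fmt"

// entry is a stand-in for the schedule entries used above.
type entry struct {
	Epoch       uint64
	ForkVersion [4]byte
}

func main() {
	schedule := []entry{
		{Epoch: 0, ForkVersion: [4]byte{0x00, 0x00, 0x00, 0x00}}, // genesis
		{Epoch: 100, ForkVersion: [4]byte{0x01, 0x00, 0x00, 0x00}},
		{Epoch: 200, ForkVersion: [4]byte{0x02, 0x00, 0x00, 0x00}},
	}
	previous := schedule[0]
	for _, e := range schedule {
		// The first element pairs with itself, matching the genesis fork
		// (previous_version == current_version at the genesis epoch).
		fmt.Printf("previous=%x current=%x epoch=%d\n", previous.ForkVersion, e.ForkVersion, e.Epoch)
		previous = e
	}
}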
|
||||
|
||||
|
||||
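
Between the two versions above, the response is now built from a schedule that is already sorted by epoch. A minimal standalone sketch of that loop shape, where the `ScheduleEntry` type and the sample values are hypothetical stand-ins rather than Prysm's API:

```go
package main

import (
	"encoding/hex"
	"fmt"
)

// ScheduleEntry is a hypothetical stand-in for one entry of a fork
// schedule that is already sorted by activation epoch.
type ScheduleEntry struct {
	ForkVersion [4]byte
	Epoch       uint64
}

// forkScheduleResponse mirrors the handler's loop: each entry's
// "previous_version" is the version of the entry before it, and the
// first entry points at itself (the genesis fork).
func forkScheduleResponse(schedule []ScheduleEntry) []map[string]string {
	out := make([]map[string]string, 0, len(schedule))
	if len(schedule) == 0 {
		return out
	}
	previous := schedule[0]
	for _, entry := range schedule {
		out = append(out, map[string]string{
			"previous_version": "0x" + hex.EncodeToString(previous.ForkVersion[:]),
			"current_version":  "0x" + hex.EncodeToString(entry.ForkVersion[:]),
			"epoch":            fmt.Sprintf("%d", entry.Epoch),
		})
		previous = entry
	}
	return out
}

func main() {
	schedule := []ScheduleEntry{
		{ForkVersion: [4]byte{0x00, 0x00, 0x00, 0x00}, Epoch: 0},
		{ForkVersion: [4]byte{0x01, 0x00, 0x00, 0x00}, Epoch: 74240},
	}
	for _, f := range forkScheduleResponse(schedule) {
		fmt.Println(f["previous_version"], f["current_version"], f["epoch"])
	}
}
```
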
@@ -13,8 +13,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/api/server/structs"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/network/forks"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
@@ -592,43 +590,34 @@ func TestGetSpec(t *testing.T) {
|
||||
|
||||
func TestForkSchedule_Ok(t *testing.T) {
|
||||
t.Run("ok", func(t *testing.T) {
|
||||
genesisForkVersion := []byte("Genesis")
|
||||
firstForkVersion, firstForkEpoch := []byte("Firs"), primitives.Epoch(100)
|
||||
secondForkVersion, secondForkEpoch := []byte("Seco"), primitives.Epoch(200)
|
||||
thirdForkVersion, thirdForkEpoch := []byte("Thir"), primitives.Epoch(300)
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig().Copy()
|
||||
config.GenesisForkVersion = genesisForkVersion
|
||||
// Create fork schedule adding keys in non-sorted order.
|
||||
schedule := make(map[[4]byte]primitives.Epoch, 3)
|
||||
schedule[bytesutil.ToBytes4(secondForkVersion)] = secondForkEpoch
|
||||
schedule[bytesutil.ToBytes4(firstForkVersion)] = firstForkEpoch
|
||||
schedule[bytesutil.ToBytes4(thirdForkVersion)] = thirdForkEpoch
|
||||
config.ForkVersionSchedule = schedule
|
||||
params.OverrideBeaconConfig(config)
|
||||
config.InitializeForkSchedule()
|
||||
|
||||
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/config/fork_schedule", nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
genesisStr, firstStr, secondStr := hexutil.Encode(config.GenesisForkVersion), hexutil.Encode(config.AltairForkVersion), hexutil.Encode(config.BellatrixForkVersion)
|
||||
GetForkSchedule(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &structs.GetForkScheduleResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 3, len(resp.Data))
|
||||
schedule := params.SortedForkSchedule()
|
||||
require.Equal(t, len(schedule), len(resp.Data))
|
||||
fork := resp.Data[0]
|
||||
assert.DeepEqual(t, hexutil.Encode(genesisForkVersion), fork.PreviousVersion)
|
||||
assert.DeepEqual(t, hexutil.Encode(firstForkVersion), fork.CurrentVersion)
|
||||
assert.Equal(t, fmt.Sprintf("%d", firstForkEpoch), fork.Epoch)
|
||||
assert.Equal(t, genesisStr, fork.PreviousVersion)
|
||||
assert.Equal(t, genesisStr, fork.CurrentVersion)
|
||||
assert.Equal(t, fmt.Sprintf("%d", config.GenesisEpoch), fork.Epoch)
|
||||
fork = resp.Data[1]
|
||||
assert.DeepEqual(t, hexutil.Encode(firstForkVersion), fork.PreviousVersion)
|
||||
assert.DeepEqual(t, hexutil.Encode(secondForkVersion), fork.CurrentVersion)
|
||||
assert.Equal(t, fmt.Sprintf("%d", secondForkEpoch), fork.Epoch)
|
||||
assert.Equal(t, genesisStr, fork.PreviousVersion)
|
||||
assert.Equal(t, firstStr, fork.CurrentVersion)
|
||||
assert.Equal(t, fmt.Sprintf("%d", config.AltairForkEpoch), fork.Epoch)
|
||||
fork = resp.Data[2]
|
||||
assert.DeepEqual(t, hexutil.Encode(secondForkVersion), fork.PreviousVersion)
|
||||
assert.DeepEqual(t, hexutil.Encode(thirdForkVersion), fork.CurrentVersion)
|
||||
assert.Equal(t, fmt.Sprintf("%d", thirdForkEpoch), fork.Epoch)
|
||||
assert.Equal(t, firstStr, fork.PreviousVersion)
|
||||
assert.Equal(t, secondStr, fork.CurrentVersion)
|
||||
assert.Equal(t, fmt.Sprintf("%d", config.BellatrixForkEpoch), fork.Epoch)
|
||||
})
|
||||
t.Run("correct number of forks", func(t *testing.T) {
|
||||
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v1/config/fork_schedule", nil)
|
||||
@@ -639,8 +628,8 @@ func TestForkSchedule_Ok(t *testing.T) {
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &structs.GetForkScheduleResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
os := forks.NewOrderedSchedule(params.BeaconConfig())
|
||||
assert.Equal(t, os.Len(), len(resp.Data))
|
||||
os := params.SortedForkSchedule()
|
||||
assert.Equal(t, len(os), len(resp.Data))
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -46,7 +46,7 @@ func (s *Server) getBeaconStateV2(ctx context.Context, w http.ResponseWriter, id
|
||||
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, id, s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not check if state is optimistic: "+err.Error(), http.StatusInternalServerError)
|
||||
helpers.HandleIsOptimisticError(w, err)
|
||||
return
|
||||
}
|
||||
blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
|
||||
|
||||
@@ -12,6 +12,7 @@ go_library(
|
||||
deps = [
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/rpc/eth/shared:go_default_library",
|
||||
"//beacon-chain/rpc/lookup:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
@@ -21,6 +22,7 @@ go_library(
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/validator:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//network/httputil:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
@@ -40,6 +42,7 @@ go_test(
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
|
||||
"//beacon-chain/rpc/lookup:go_default_library",
|
||||
"//beacon-chain/rpc/testutil:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
@@ -57,5 +60,6 @@ go_test(
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -2,11 +2,14 @@ package helpers
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/http"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/eth/shared"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/lookup"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/network/httputil"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
@@ -28,6 +31,22 @@ func PrepareStateFetchGRPCError(err error) error {
return status.Errorf(codes.Internal, "Invalid state ID: %v", err)
}

// HandleIsOptimisticError handles errors from IsOptimistic function calls and writes appropriate HTTP responses.
func HandleIsOptimisticError(w http.ResponseWriter, err error) {
var fetchErr *lookup.FetchStateError
if errors.As(err, &fetchErr) {
shared.WriteStateFetchError(w, err)
return
}

var blockRootsNotFoundErr *lookup.BlockRootsNotFoundError
if errors.As(err, &blockRootsNotFoundErr) {
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusNotFound)
return
}
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
}

// IndexedVerificationFailure represents a collection of verification failures.
type IndexedVerificationFailure struct {
Failures []*SingleIndexedVerificationFailure `json:"failures"`

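
The new helper centralizes the `errors.As` dispatch so every caller maps lookup failures to the right HTTP status instead of always answering 500. A self-contained sketch of the same pattern, using a hypothetical error type rather than the real lookup package:

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
	"net/http/httptest"
)

// notFoundError is a hypothetical typed error standing in for the
// lookup errors the handler checks for with errors.As.
type notFoundError struct{ msg string }

func (e *notFoundError) Error() string { return e.msg }

// handleLookupError dispatches on the concrete error type: typed
// "not found" errors become 404s, everything else a 500.
func handleLookupError(w http.ResponseWriter, err error) {
	var nf *notFoundError
	if errors.As(err, &nf) {
		http.Error(w, "Could not check optimistic status: "+err.Error(), http.StatusNotFound)
		return
	}
	http.Error(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
}

func main() {
	rr := httptest.NewRecorder()
	handleLookupError(rr, fmt.Errorf("wrapped: %w", &notFoundError{msg: "no block roots"}))
	fmt.Println(rr.Code) // 404, because errors.As walks the wrap chain
}
```
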
@@ -56,7 +56,8 @@ func IsOptimistic(
|
||||
if bytesutil.IsHex(stateId) {
|
||||
id, err := hexutil.Decode(stateIdString)
|
||||
if err != nil {
|
||||
return false, err
|
||||
e := lookup.NewStateIdParseError(err)
|
||||
return false, &e
|
||||
}
|
||||
return isStateRootOptimistic(ctx, id, optimisticModeFetcher, stateFetcher, chainInfo, database)
|
||||
} else if len(stateId) == 32 {
|
||||
@@ -127,7 +128,7 @@ func isStateRootOptimistic(
|
||||
) (bool, error) {
|
||||
st, err := stateFetcher.State(ctx, stateId)
|
||||
if err != nil {
|
||||
return true, errors.Wrap(err, "could not fetch state")
|
||||
return true, lookup.NewFetchStateError(err)
|
||||
}
|
||||
if st.Slot() == chainInfo.HeadSlot() {
|
||||
return optimisticModeFetcher.IsOptimistic(ctx)
|
||||
@@ -137,7 +138,7 @@ func isStateRootOptimistic(
|
||||
return true, errors.Wrapf(err, "could not get block roots for slot %d", st.Slot())
|
||||
}
|
||||
if !has {
|
||||
return true, errors.New("no block roots returned from the database")
|
||||
return true, lookup.NewBlockRootsNotFoundError()
|
||||
}
|
||||
for _, r := range roots {
|
||||
b, err := database.Block(ctx, r)
|
||||
|
||||
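
On the hex branch above, a failed decode is now surfaced as a typed parse error rather than a bare error, so the HTTP layer can turn it into a client error. A rough standalone sketch of that parsing step with hypothetical names (the real code goes through hexutil and lookup.NewStateIdParseError):

```go
package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

// stateIDParseError is an illustrative typed error; a decode failure is
// wrapped in it so callers can branch on the failure kind.
type stateIDParseError struct{ cause error }

func (e *stateIDParseError) Error() string { return "could not parse state ID: " + e.cause.Error() }
func (e *stateIDParseError) Unwrap() error { return e.cause }

// parseStateID decodes "0x…" state IDs and passes named IDs through.
func parseStateID(id string) ([]byte, error) {
	if !strings.HasPrefix(id, "0x") {
		return []byte(id), nil // named IDs ("head", "finalized", …) are handled elsewhere
	}
	raw, err := hex.DecodeString(strings.TrimPrefix(id, "0x"))
	if err != nil {
		return nil, &stateIDParseError{cause: err}
	}
	return raw, nil
}

func main() {
	_, err := parseStateID("0xzz")
	fmt.Println(err) // could not parse state ID: encoding/hex: invalid byte ...
}
```
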
@@ -1,12 +1,15 @@
|
||||
package helpers
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
chainmock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
dbtest "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/lookup"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/testutil"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
|
||||
state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
|
||||
@@ -21,6 +24,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func TestIsOptimistic(t *testing.T) {
|
||||
@@ -226,7 +230,67 @@ func TestIsOptimistic(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, o)
|
||||
})
|
||||
t.Run("State not found", func(t *testing.T) {
|
||||
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
|
||||
require.NoError(t, err)
|
||||
b.SetStateRoot(bytesutil.PadTo([]byte("root"), 32))
|
||||
db := dbtest.SetupDB(t)
|
||||
require.NoError(t, db.SaveBlock(ctx, b))
|
||||
chainSt, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, chainSt.SetSlot(fieldparams.SlotsPerEpoch))
|
||||
bRoot, err := b.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
cs := &chainmock.ChainService{State: chainSt, OptimisticRoots: map[[32]byte]bool{bRoot: true}}
|
||||
mf := &testutil.MockStater{
|
||||
CustomError: lookup.NewFetchStateError(nil),
|
||||
}
|
||||
_, err = IsOptimistic(ctx, []byte(hexutil.Encode(bytesutil.PadTo([]byte("root"), 32))), cs, mf, cs, db)
|
||||
var fetchErr *lookup.FetchStateError
|
||||
require.Equal(t, true, errors.As(err, &fetchErr))
|
||||
})
|
||||
|
||||
t.Run("stateId invalid", func(t *testing.T) {
|
||||
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
|
||||
require.NoError(t, err)
|
||||
b.SetStateRoot(bytesutil.PadTo([]byte("root"), 32))
|
||||
db := dbtest.SetupDB(t)
|
||||
require.NoError(t, db.SaveBlock(ctx, b))
|
||||
chainSt, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, chainSt.SetSlot(fieldparams.SlotsPerEpoch))
|
||||
bRoot, err := b.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
cs := &chainmock.ChainService{State: chainSt, OptimisticRoots: map[[32]byte]bool{bRoot: true}}
|
||||
mf := &testutil.MockStater{
|
||||
CustomError: lookup.NewFetchStateError(nil),
|
||||
}
|
||||
_, err = IsOptimistic(ctx, []byte("0xabc"), cs, mf, cs, db)
|
||||
var fetchErr *lookup.FetchStateError
|
||||
require.Equal(t, false, errors.As(err, &fetchErr))
|
||||
})
|
||||
t.Run("block roots not found", func(t *testing.T) {
|
||||
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
|
||||
require.NoError(t, err)
|
||||
b.SetStateRoot(bytesutil.PadTo([]byte("root"), 32))
|
||||
db := dbtest.SetupDB(t)
|
||||
require.NoError(t, db.SaveBlock(ctx, b))
|
||||
chainSt, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, chainSt.SetSlot(fieldparams.SlotsPerEpoch))
|
||||
bRoot, err := b.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
cs := &chainmock.ChainService{State: chainSt, OptimisticRoots: map[[32]byte]bool{bRoot: true}}
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, st.SetSlot(primitives.Slot(fieldparams.SlotsPerEpoch+1)))
|
||||
require.NoError(t, err)
|
||||
mf := &testutil.MockStater{BeaconState: st}
|
||||
_, err = IsOptimistic(ctx, []byte(hexutil.Encode(bytesutil.PadTo([]byte("root"), 32))), cs, mf, cs, db)
|
||||
var blockRootsNotFoundErr *lookup.BlockRootsNotFoundError
|
||||
require.Equal(t, true, errors.As(err, &blockRootsNotFoundErr))
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("slot", func(t *testing.T) {
|
||||
t.Run("head is not optimistic", func(t *testing.T) {
|
||||
cs := &chainmock.ChainService{Optimistic: false}
|
||||
@@ -319,6 +383,36 @@ func TestIsOptimistic(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestHandleIsOptimisticError(t *testing.T) {
|
||||
t.Run("fetch-state error handled as 404", func(t *testing.T) {
|
||||
rr := httptest.NewRecorder()
|
||||
notFoundErr := lookup.StateNotFoundError{}
|
||||
fetchErr := lookup.NewFetchStateError(¬FoundErr)
|
||||
HandleIsOptimisticError(rr, fetchErr)
|
||||
|
||||
require.Equal(t, http.StatusNotFound, rr.Code)
|
||||
require.StringContains(t, notFoundErr.Error(), rr.Body.String())
|
||||
})
|
||||
t.Run("no block roots error handled as 404", func(t *testing.T) {
|
||||
rr := httptest.NewRecorder()
|
||||
blockRootsErr := lookup.NewBlockRootsNotFoundError()
|
||||
HandleIsOptimisticError(rr, blockRootsErr)
|
||||
|
||||
require.Equal(t, http.StatusNotFound, rr.Code)
|
||||
require.StringContains(t, blockRootsErr.Error(), rr.Body.String())
|
||||
})
|
||||
|
||||
t.Run("generic error handled as 500", func(t *testing.T) {
|
||||
rr := httptest.NewRecorder()
|
||||
genericErr := errors.New("boom")
|
||||
|
||||
HandleIsOptimisticError(rr, genericErr)
|
||||
|
||||
require.Equal(t, http.StatusInternalServerError, rr.Code)
|
||||
require.StringContains(t, "Could not check optimistic status: boom", rr.Body.String())
|
||||
})
|
||||
}
|
||||
|
||||
// prepareForkchoiceState prepares a beacon state with the given data to mock
|
||||
// insert into forkchoice
|
||||
func prepareForkchoiceState(
|
||||
|
||||
@@ -12,12 +12,10 @@ go_library(
|
||||
"//api:go_default_library",
|
||||
"//api/server/structs:go_default_library",
|
||||
"//beacon-chain/core/light-client:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/rpc/eth/shared:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//monitoring/tracing/trace:go_default_library",
|
||||
"//network/forks:go_default_library",
|
||||
"//network/httputil:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
|
||||
@@ -7,12 +7,10 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/api"
|
||||
"github.com/OffchainLabs/prysm/v6/api/server/structs"
|
||||
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/core/light-client"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/eth/shared"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
|
||||
"github.com/OffchainLabs/prysm/v6/network/forks"
|
||||
"github.com/OffchainLabs/prysm/v6/network/httputil"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
@@ -111,17 +109,7 @@ func (s *Server) GetLightClientUpdatesByRange(w http.ResponseWriter, req *http.R
|
||||
|
||||
updateSlot := update.AttestedHeader().Beacon().Slot
|
||||
updateEpoch := slots.ToEpoch(updateSlot)
|
||||
updateFork, err := forks.Fork(updateEpoch)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not get fork Version: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
forkDigest, err := signing.ComputeForkDigest(updateFork.CurrentVersion, params.BeaconConfig().GenesisValidatorsRoot[:])
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not compute fork digest: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
updateEntry := params.GetNetworkScheduleEntry(updateEpoch)
|
||||
updateSSZ, err := update.MarshalSSZ()
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not marshal update to SSZ: "+err.Error(), http.StatusInternalServerError)
|
||||
@@ -133,7 +121,7 @@ func (s *Server) GetLightClientUpdatesByRange(w http.ResponseWriter, req *http.R
|
||||
if _, err := w.Write(chunkLength); err != nil {
|
||||
httputil.HandleError(w, "Could not write chunk length: "+err.Error(), http.StatusInternalServerError)
|
||||
}
|
||||
if _, err := w.Write(forkDigest[:]); err != nil {
|
||||
if _, err := w.Write(updateEntry.ForkDigest[:]); err != nil {
|
||||
httputil.HandleError(w, "Could not write fork digest: "+err.Error(), http.StatusInternalServerError)
|
||||
}
|
||||
if _, err := w.Write(updateSSZ); err != nil {
|
||||
|
||||
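
The light-client handler now reads the fork digest from a precomputed schedule entry for the update's epoch instead of recomputing it per update. A rough sketch of an epoch-to-entry lookup over a sorted schedule; the entry type and values are illustrative, not the real params.GetNetworkScheduleEntry:

```go
package main

import (
	"fmt"
	"sort"
)

// entry is a hypothetical precomputed schedule record; the real entry
// carries more fields, this only sketches the epoch -> fork-digest lookup.
type entry struct {
	Epoch      uint64
	ForkDigest [4]byte
}

// entryForEpoch returns the last schedule entry activated at or before
// the given epoch (entries must be sorted by Epoch ascending).
func entryForEpoch(schedule []entry, epoch uint64) (entry, bool) {
	i := sort.Search(len(schedule), func(i int) bool { return schedule[i].Epoch > epoch })
	if i == 0 {
		return entry{}, false // epoch precedes the first scheduled fork
	}
	return schedule[i-1], true
}

func main() {
	schedule := []entry{
		{Epoch: 0, ForkDigest: [4]byte{0x01}},
		{Epoch: 100, ForkDigest: [4]byte{0x02}},
		{Epoch: 200, ForkDigest: [4]byte{0x03}},
	}
	if e, ok := entryForEpoch(schedule, 150); ok {
		fmt.Printf("%x\n", e.ForkDigest) // digest of the fork active at epoch 150
	}
}
```
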
@@ -23,6 +23,20 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type BlockRootsNotFoundError struct {
|
||||
message string
|
||||
}
|
||||
|
||||
func NewBlockRootsNotFoundError() *BlockRootsNotFoundError {
|
||||
return &BlockRootsNotFoundError{
|
||||
message: "no block roots returned from the database",
|
||||
}
|
||||
}
|
||||
|
||||
func (e BlockRootsNotFoundError) Error() string {
|
||||
return e.message
|
||||
}
|
||||
|
||||
// BlockIdParseError represents an error scenario where a block ID could not be parsed.
|
||||
type BlockIdParseError struct {
|
||||
message string
|
||||
@@ -341,7 +355,7 @@ func (p *BeaconDbBlocker) blobsFromStoredDataColumns(block blocks.ROBlock, indic
stored := summary.Stored()
count := uint64(len(stored))

if count < peerdas.MinimumColumnsCountToReconstruct() {
if count < peerdas.MinimumColumnCountToReconstruct() {
// There is no way to reconstruct the data columns.
return nil, &core.RpcError{
Err: errors.Errorf("the node does not custody enough data columns to reconstruct blobs - please start the beacon node with the `--%s` flag to ensure this call succeeds, or retry later if the flag is already set", flags.SubscribeAllDataSubnets.Name),

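
A minimal sketch of the guard above, assuming the PeerDAS rule that at least half of the data columns are needed to recover the rest; the column count and helper name are stand-ins for peerdas.MinimumColumnCountToReconstruct, not its actual implementation:

```go
package main

import (
	"errors"
	"fmt"
)

const numberOfColumns = 128 // assumption for the sketch

// minimumColumnCountToReconstruct models the 50% reconstruction
// threshold of the erasure-coded column extension.
func minimumColumnCountToReconstruct() uint64 { return numberOfColumns / 2 }

var errNotEnoughColumns = errors.New("not enough data columns stored to reconstruct blobs")

// checkReconstructable refuses reconstruction when too few columns are stored.
func checkReconstructable(storedColumns uint64) error {
	if storedColumns < minimumColumnCountToReconstruct() {
		return errNotEnoughColumns
	}
	return nil
}

func main() {
	fmt.Println(checkReconstructable(63)) // not enough data columns ...
	fmt.Println(checkReconstructable(64)) // <nil>
}
```
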
@@ -443,7 +443,7 @@ func TestGetBlob(t *testing.T) {
|
||||
setupFulu(t)
|
||||
|
||||
_, dataColumnStorage := filesystem.NewEphemeralDataColumnStorageAndFs(t)
|
||||
err = dataColumnStorage.Save(verifiedRoDataColumnSidecars[1 : peerdas.MinimumColumnsCountToReconstruct()+1])
|
||||
err = dataColumnStorage.Save(verifiedRoDataColumnSidecars[1 : peerdas.MinimumColumnCountToReconstruct()+1])
|
||||
require.NoError(t, err)
|
||||
|
||||
blocker := &BeaconDbBlocker{
|
||||
|
||||
@@ -21,6 +21,27 @@ import (
"github.com/pkg/errors"
)

type FetchStateError struct {
message string
cause error
}

func NewFetchStateError(cause error) *FetchStateError {
return &FetchStateError{
message: "could not fetch state",
cause: cause,
}
}

func (e *FetchStateError) Error() string {
if e.cause != nil {
return e.message + ": " + e.cause.Error()
}
return e.message
}

func (e *FetchStateError) Unwrap() error { return e.cause }

// StateIdParseError represents an error scenario where a state ID could not be parsed.
type StateIdParseError struct {
message string

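
FetchStateError carries its cause and exposes it through Unwrap, which is what lets errors.Is and errors.As keep working across the wrap. A small standalone illustration of the same shape with a hypothetical error type:

```go
package main

import (
	"errors"
	"fmt"
)

// fetchError is a hypothetical analogue of a typed error that wraps a
// cause and exposes it via Unwrap, so errors.Is/errors.As can walk
// through it.
type fetchError struct {
	msg   string
	cause error
}

func (e *fetchError) Error() string {
	if e.cause != nil {
		return e.msg + ": " + e.cause.Error()
	}
	return e.msg
}

func (e *fetchError) Unwrap() error { return e.cause }

var errNotFound = errors.New("state not found")

func main() {
	err := &fetchError{msg: "could not fetch state", cause: errNotFound}
	fmt.Println(err)                         // could not fetch state: state not found
	fmt.Println(errors.Is(err, errNotFound)) // true, thanks to Unwrap
	var fe *fetchError
	fmt.Println(errors.As(err, &fe)) // true, matches the concrete type
}
```
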
@@ -56,11 +56,7 @@ func (s *Server) GetValidatorCount(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateID), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
|
||||
if err != nil {
|
||||
errJson := &httputil.DefaultJsonError{
|
||||
Message: fmt.Sprintf("could not check if slot's block is optimistic: %v", err),
|
||||
Code: http.StatusInternalServerError,
|
||||
}
|
||||
httputil.WriteError(w, errJson)
|
||||
helpers.HandleIsOptimisticError(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -19,7 +19,10 @@ func (_ *Server) GetBeaconConfig(_ context.Context, _ *emptypb.Empty) (*ethpb.Be
numFields := val.Type().NumField()
res := make(map[string]string, numFields)
for i := 0; i < numFields; i++ {
res[val.Type().Field(i).Name] = fmt.Sprintf("%v", val.Field(i).Interface())
field := val.Type().Field(i)
if field.IsExported() {
res[field.Name] = fmt.Sprintf("%v", val.Field(i).Interface())
}
}
return &ethpb.BeaconConfig{
Config: res,

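
The handler now skips unexported fields before calling Interface(), which would otherwise panic for values read from unexported struct fields. A compact sketch of the same reflection pattern over a hypothetical config struct:

```go
package main

import (
	"fmt"
	"reflect"
)

// dumpExported flattens only the exported fields of a struct into a
// string map; calling Interface() on an unexported field would panic,
// which is why the filter with IsExported matters.
func dumpExported(v interface{}) map[string]string {
	val := reflect.ValueOf(v).Elem()
	out := make(map[string]string, val.NumField())
	for i := 0; i < val.NumField(); i++ {
		field := val.Type().Field(i)
		if field.IsExported() {
			out[field.Name] = fmt.Sprintf("%v", val.Field(i).Interface())
		}
	}
	return out
}

// sampleConfig is a hypothetical config with one unexported field.
type sampleConfig struct {
	Eth1FollowDistance uint64
	internalCache      map[string]int
}

func main() {
	cfg := &sampleConfig{Eth1FollowDistance: 2048}
	fmt.Println(dumpExported(cfg)) // map[Eth1FollowDistance:2048]
}
```
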
@@ -17,10 +17,19 @@ func TestServer_GetBeaconConfig(t *testing.T) {
|
||||
res, err := bs.GetBeaconConfig(ctx, &emptypb.Empty{})
|
||||
require.NoError(t, err)
|
||||
conf := params.BeaconConfig()
|
||||
numFields := reflect.TypeOf(conf).Elem().NumField()
|
||||
confType := reflect.TypeOf(conf).Elem()
|
||||
numFields := confType.NumField()
|
||||
|
||||
// Count only exported fields, as unexported fields are not included in the config
|
||||
exportedFields := 0
|
||||
for i := 0; i < numFields; i++ {
|
||||
if confType.Field(i).IsExported() {
|
||||
exportedFields++
|
||||
}
|
||||
}
|
||||
|
||||
// Check if the result has the same number of items as our config struct.
|
||||
assert.Equal(t, numFields, len(res.Config), "Unexpected number of items in config")
|
||||
// Check if the result has the same number of items as exported fields in our config struct.
|
||||
assert.Equal(t, exportedFields, len(res.Config), "Unexpected number of items in config")
|
||||
want := fmt.Sprintf("%d", conf.Eth1FollowDistance)
|
||||
|
||||
// Check that an element is properly populated from the config.
|
||||
|
||||
@@ -81,10 +81,10 @@ go_library(
|
||||
"//crypto/rand:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//encoding/ssz:go_default_library",
|
||||
"//genesis:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//monitoring/tracing:go_default_library",
|
||||
"//monitoring/tracing/trace:go_default_library",
|
||||
"//network/forks:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
@@ -155,6 +155,7 @@ common_deps = [
|
||||
"//crypto/bls/blst:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//encoding/ssz:go_default_library",
|
||||
"//genesis:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
|
||||
@@ -248,6 +248,8 @@ func (vs *Server) sendBlocks(stream ethpb.BeaconNodeValidator_StreamBlocksAltair
|
||||
b.Block = ðpb.StreamBlocksResponse_DenebBlock{DenebBlock: p}
|
||||
case *ethpb.SignedBeaconBlockElectra:
|
||||
b.Block = ðpb.StreamBlocksResponse_ElectraBlock{ElectraBlock: p}
|
||||
case *ethpb.SignedBeaconBlockFulu:
|
||||
b.Block = ðpb.StreamBlocksResponse_FuluBlock{FuluBlock: p}
|
||||
default:
|
||||
log.Errorf("Unknown block type %T", p)
|
||||
}
|
||||
|
||||
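
The stream's type switch gains a Fulu case; the shape is a plain Go type switch from concrete signed-block types to their response wrappers, sketched below with placeholder types instead of the generated protobuf ones:

```go
package main

import "fmt"

// Hypothetical stand-ins for the per-fork signed block types handled
// by the stream's type switch.
type denebBlock struct{}
type electraBlock struct{}
type fuluBlock struct{}

// wrap mirrors the switch in sendBlocks: every known concrete type is
// mapped to its response wrapper, unknown types surface an error.
func wrap(block interface{}) (string, error) {
	switch block.(type) {
	case *denebBlock:
		return "DenebBlock", nil
	case *electraBlock:
		return "ElectraBlock", nil
	case *fuluBlock:
		return "FuluBlock", nil
	default:
		return "", fmt.Errorf("unknown block type %T", block)
	}
}

func main() {
	name, err := wrap(&fuluBlock{})
	fmt.Println(name, err) // FuluBlock <nil>
}
```
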
@@ -381,3 +381,92 @@ func TestServer_StreamSlotsVerified_OnHeadUpdated(t *testing.T) {
|
||||
}
|
||||
<-exitRoutine
|
||||
}
|
||||
|
||||
func TestServer_StreamBlocksVerified_FuluBlock(t *testing.T) {
|
||||
db := dbTest.SetupDB(t)
|
||||
ctx := t.Context()
|
||||
beaconState, privs := util.DeterministicGenesisStateFulu(t, 32)
|
||||
c, err := altair.NextSyncCommittee(ctx, beaconState)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconState.SetCurrentSyncCommittee(c))
|
||||
|
||||
b, err := util.GenerateFullBlockFulu(beaconState, privs, util.DefaultBlockGenConfig(), 1)
|
||||
require.NoError(t, err)
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wrappedBlk := util.SaveBlock(t, ctx, db, b)
|
||||
chainService := &chainMock.ChainService{State: beaconState}
|
||||
server := &Server{
|
||||
Ctx: ctx,
|
||||
StateNotifier: chainService.StateNotifier(),
|
||||
HeadFetcher: chainService,
|
||||
}
|
||||
exitRoutine := make(chan bool)
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
mockStream := mock.NewMockBeaconNodeValidatorAltair_StreamBlocksServer(ctrl)
|
||||
mockStream.EXPECT().Send(ðpb.StreamBlocksResponse{Block: ðpb.StreamBlocksResponse_FuluBlock{FuluBlock: b}}).Do(func(arg0 interface{}) {
|
||||
exitRoutine <- true
|
||||
})
|
||||
mockStream.EXPECT().Context().Return(ctx).AnyTimes()
|
||||
|
||||
go func(tt *testing.T) {
|
||||
err := server.StreamBlocksAltair(ðpb.StreamBlocksRequest{VerifiedOnly: true}, mockStream)
|
||||
if s, _ := status.FromError(err); s.Code() != codes.Canceled {
|
||||
assert.NoError(tt, err)
|
||||
}
|
||||
}(t)
|
||||
// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
|
||||
for sent := 0; sent == 0; {
|
||||
sent = server.StateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{Slot: b.Block.Slot, BlockRoot: r, SignedBlock: wrappedBlk},
|
||||
})
|
||||
}
|
||||
<-exitRoutine
|
||||
}
|
||||
|
||||
func TestServer_StreamBlocks_FuluBlock(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.OverrideBeaconConfig(params.BeaconConfig())
|
||||
ctx := t.Context()
|
||||
beaconState, privs := util.DeterministicGenesisStateFulu(t, 64)
|
||||
c, err := altair.NextSyncCommittee(ctx, beaconState)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconState.SetCurrentSyncCommittee(c))
|
||||
|
||||
b, err := util.GenerateFullBlockFulu(beaconState, privs, util.DefaultBlockGenConfig(), 1)
|
||||
require.NoError(t, err)
|
||||
chainService := &chainMock.ChainService{State: beaconState}
|
||||
server := &Server{
|
||||
Ctx: ctx,
|
||||
BlockNotifier: chainService.BlockNotifier(),
|
||||
HeadFetcher: chainService,
|
||||
}
|
||||
exitRoutine := make(chan bool)
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
mockStream := mock.NewMockBeaconNodeValidatorAltair_StreamBlocksServer(ctrl)
|
||||
|
||||
mockStream.EXPECT().Send(ðpb.StreamBlocksResponse{Block: ðpb.StreamBlocksResponse_FuluBlock{FuluBlock: b}}).Do(func(arg0 interface{}) {
|
||||
exitRoutine <- true
|
||||
})
|
||||
mockStream.EXPECT().Context().Return(ctx).AnyTimes()
|
||||
|
||||
go func(tt *testing.T) {
|
||||
err := server.StreamBlocksAltair(ðpb.StreamBlocksRequest{}, mockStream)
|
||||
if s, _ := status.FromError(err); s.Code() != codes.Canceled {
|
||||
assert.NoError(tt, err)
|
||||
}
|
||||
}(t)
|
||||
wrappedBlk, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the block feed).
|
||||
for sent := 0; sent == 0; {
|
||||
sent = server.BlockNotifier.BlockFeed().Send(&feed.Event{
|
||||
Type: blockfeed.ReceivedBlock,
|
||||
Data: &blockfeed.ReceivedBlockData{SignedBlock: wrappedBlk},
|
||||
})
|
||||
}
|
||||
<-exitRoutine
|
||||
}
|
||||
|
||||
@@ -564,7 +564,7 @@ func TestBuildValidatorAssignmentMap(t *testing.T) {
|
||||
start := primitives.Slot(200)
|
||||
bySlot := [][][]primitives.ValidatorIndex{
|
||||
{{1, 2, 3}}, // slot 200, committee 0
|
||||
{{7, 8, 9}}, // slot 201, committee 0
|
||||
{{7, 8, 9}}, // slot 201, committee 0
|
||||
{{4, 5}, {10, 11}}, // slot 202, committee 0 & 1
|
||||
}
|
||||
|
||||
@@ -632,7 +632,7 @@ func TestGetValidatorAssignment_WithAssignmentMap(t *testing.T) {
|
||||
}
|
||||
|
||||
vs := &Server{}
|
||||
|
||||
|
||||
// Test existing validator (validator 2 is at position 1 in the committee, not position 2)
|
||||
assignment := vs.getValidatorAssignment(meta, primitives.ValidatorIndex(2))
|
||||
require.NotNil(t, assignment)
|
||||
@@ -662,7 +662,7 @@ func TestGetValidatorAssignment_WithoutAssignmentMap(t *testing.T) {
|
||||
}
|
||||
|
||||
vs := &Server{}
|
||||
|
||||
|
||||
// Test existing validator
|
||||
assignment := vs.getValidatorAssignment(meta, primitives.ValidatorIndex(5))
|
||||
require.NotNil(t, assignment)
|
||||
@@ -682,24 +682,24 @@ func TestLoadMetadata_ThresholdBehavior(t *testing.T) {
|
||||
epoch := primitives.Epoch(0)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
numValidators int
|
||||
expectAssignmentMap bool
|
||||
name string
|
||||
numValidators int
|
||||
expectAssignmentMap bool
|
||||
}{
|
||||
{
|
||||
name: "Small request - below threshold",
|
||||
numValidators: 100,
|
||||
expectAssignmentMap: false,
|
||||
name: "Small request - below threshold",
|
||||
numValidators: 100,
|
||||
expectAssignmentMap: false,
|
||||
},
|
||||
{
|
||||
name: "Large request - at threshold",
|
||||
numValidators: validatorLookupThreshold,
|
||||
expectAssignmentMap: true,
|
||||
name: "Large request - at threshold",
|
||||
numValidators: validatorLookupThreshold,
|
||||
expectAssignmentMap: true,
|
||||
},
|
||||
{
|
||||
name: "Large request - above threshold",
|
||||
numValidators: validatorLookupThreshold + 1000,
|
||||
expectAssignmentMap: true,
|
||||
name: "Large request - above threshold",
|
||||
numValidators: validatorLookupThreshold + 1000,
|
||||
expectAssignmentMap: true,
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@@ -286,6 +286,19 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.InvalidArgument, "%s: %v", "decode block failed", err)
|
||||
}
|
||||
root, err := block.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not hash tree root: %v", err)
|
||||
}
|
||||
|
||||
// For post-Fulu blinded blocks, submit to relay and return early
|
||||
if block.IsBlinded() && slots.ToEpoch(block.Block().Slot()) >= params.BeaconConfig().FuluForkEpoch {
|
||||
err := vs.BlockBuilder.SubmitBlindedBlockPostFulu(ctx, block)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not submit blinded block post-Fulu: %v", err)
|
||||
}
|
||||
return ðpb.ProposeResponse{BlockRoot: root[:]}, nil
|
||||
}
|
||||
|
||||
var sidecars []*ethpb.BlobSidecar
|
||||
if block.IsBlinded() {
|
||||
@@ -297,11 +310,6 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
|
||||
return nil, status.Errorf(codes.Internal, "%s: %v", "handle block failed", err)
|
||||
}
|
||||
|
||||
root, err := block.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not hash tree root: %v", err)
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
errChan := make(chan error, 1)
|
||||
|
||||
@@ -327,7 +335,8 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
|
||||
return ðpb.ProposeResponse{BlockRoot: root[:]}, nil
|
||||
}
|
||||
|
||||
// handleBlindedBlock processes blinded beacon blocks.
|
||||
// handleBlindedBlock processes blinded beacon blocks (pre-Fulu only).
|
||||
// Post-Fulu blinded blocks are handled directly in ProposeBeaconBlock.
|
||||
func (vs *Server) handleBlindedBlock(ctx context.Context, block interfaces.SignedBeaconBlock) (interfaces.SignedBeaconBlock, []*ethpb.BlobSidecar, error) {
|
||||
if block.Version() < version.Bellatrix {
|
||||
return nil, nil, errors.New("pre-Bellatrix blinded block")
|
||||
|
||||
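
The proposal path now computes the block root up front and, for blinded blocks at or after the Fulu fork epoch, submits straight to the relay and returns early. A trimmed, illustrative sketch of that control flow; the types, the relay interface and the epoch value below are assumptions, not Prysm's implementation:

```go
package main

import (
	"errors"
	"fmt"
)

// signedBlock and relay are illustrative stand-ins for the signed
// beacon block interface and the builder client.
type signedBlock struct {
	blinded bool
	epoch   uint64
	root    [32]byte
}

type relay interface {
	SubmitBlindedBlockPostFulu(b *signedBlock) error
}

const fuluForkEpoch = 5 // assumption for the sketch

// propose shows the early-return shape: post-Fulu blinded blocks are
// handed to the relay, everything else stays on the existing path.
func propose(b *signedBlock, r relay) ([32]byte, error) {
	if b.blinded && b.epoch >= fuluForkEpoch {
		if err := r.SubmitBlindedBlockPostFulu(b); err != nil {
			return [32]byte{}, fmt.Errorf("could not submit blinded block post-Fulu: %w", err)
		}
		return b.root, nil // no local unblinding or broadcast
	}
	// pre-Fulu blinded blocks and all unblinded blocks continue on the
	// existing unblind/broadcast/receive path, elided here.
	return b.root, nil
}

type okRelay struct{}

func (okRelay) SubmitBlindedBlockPostFulu(*signedBlock) error { return nil }

type failRelay struct{}

func (failRelay) SubmitBlindedBlockPostFulu(*signedBlock) error {
	return errors.New("builder submission failed")
}

func main() {
	blk := &signedBlock{blinded: true, epoch: 5}
	_, err := propose(blk, okRelay{})
	fmt.Println(err) // <nil>
	_, err = propose(blk, failRelay{})
	fmt.Println(err) // could not submit blinded block post-Fulu: builder submission failed
}
```
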
@@ -18,7 +18,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/ssz"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/tracing"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
|
||||
"github.com/OffchainLabs/prysm/v6/network/forks"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
@@ -220,16 +219,11 @@ func (vs *Server) getPayloadHeaderFromBuilder(
|
||||
return nil, errors.New("builder returned nil bid")
|
||||
}
|
||||
bidVersion := signedBid.Version()
|
||||
fork, err := forks.Fork(slots.ToEpoch(slot))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to get fork information")
|
||||
}
|
||||
forkVersion, ok := params.ConfigForkVersions(params.BeaconConfig())[bytesutil.ToBytes4(fork.CurrentVersion)]
|
||||
if !ok {
|
||||
return nil, errors.New("unable to find current fork in schedule")
|
||||
}
|
||||
epoch := slots.ToEpoch(slot)
|
||||
entry := params.GetNetworkScheduleEntry(epoch)
|
||||
forkVersion := entry.VersionEnum
|
||||
if !isVersionCompatible(bidVersion, forkVersion) {
|
||||
return nil, fmt.Errorf("builder bid response version: %d is not compatible with expected version: %d for epoch %d", bidVersion, forkVersion, slots.ToEpoch(slot))
|
||||
return nil, fmt.Errorf("builder bid response version: %d is not compatible with expected version: %d for epoch %d", bidVersion, forkVersion, epoch)
|
||||
}
|
||||
|
||||
bid, err := signedBid.Message()
|
||||
|
||||
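
The builder path now derives the expected fork version for the slot's epoch from the precomputed schedule entry and compares it with the bid's version. A strict-equality sketch of that check with made-up version enums; the real isVersionCompatible may be more permissive than plain equality:

```go
package main

import "fmt"

// Illustrative fork-version enums; the real values come from the
// runtime/version package, these are stand-ins.
const (
	versionDeneb = iota
	versionElectra
	versionFulu
)

// expectedVersionAt is a hypothetical epoch -> version lookup, playing
// the role of the schedule entry's VersionEnum.
func expectedVersionAt(epoch uint64) int {
	switch {
	case epoch >= 300:
		return versionFulu
	case epoch >= 200:
		return versionElectra
	default:
		return versionDeneb
	}
}

// checkBid rejects builder bids whose version does not match the fork
// expected for the proposal epoch.
func checkBid(bidVersion int, epoch uint64) error {
	expected := expectedVersionAt(epoch)
	if bidVersion != expected {
		return fmt.Errorf("builder bid response version: %d is not compatible with expected version: %d for epoch %d",
			bidVersion, expected, epoch)
	}
	return nil
}

func main() {
	fmt.Println(checkBid(versionElectra, 250)) // <nil>
	fmt.Println(checkBid(versionDeneb, 250))   // mismatch error
}
```
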
@@ -3286,3 +3286,252 @@ func TestProposer_ElectraBlobsAndProofs(t *testing.T) {
|
||||
require.Equal(t, 10, len(blobs))
|
||||
require.Equal(t, 10, len(proofs))
|
||||
}
|
||||
|
||||
func TestServer_ProposeBeaconBlock_PostFuluBlindedBlock(t *testing.T) {
|
||||
db := dbutil.SetupDB(t)
|
||||
ctx := t.Context()
|
||||
|
||||
beaconState, parentRoot, _ := util.DeterministicGenesisStateWithGenesisBlock(t, ctx, db, 100)
|
||||
require.NoError(t, beaconState.SetSlot(1))
|
||||
|
||||
t.Run("post-Fulu blinded block - early return success", func(t *testing.T) {
|
||||
// Set up config with Fulu fork at epoch 5
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.FuluForkEpoch = 5
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
mockBuilder := &builderTest.MockBuilderService{
|
||||
HasConfigured: true,
|
||||
Cfg: &builderTest.Config{BeaconDB: db},
|
||||
ErrSubmitBlindedBlockPostFulu: nil, // Success case
|
||||
}
|
||||
|
||||
c := &mock.ChainService{State: beaconState, Root: parentRoot[:]}
|
||||
proposerServer := &Server{
|
||||
ChainStartFetcher: &mockExecution.Chain{},
|
||||
Eth1InfoFetcher: &mockExecution.Chain{},
|
||||
Eth1BlockFetcher: &mockExecution.Chain{},
|
||||
BlockReceiver: c,
|
||||
BlobReceiver: c,
|
||||
HeadFetcher: c,
|
||||
BlockNotifier: c.BlockNotifier(),
|
||||
OperationNotifier: c.OperationNotifier(),
|
||||
StateGen: stategen.New(db, doublylinkedtree.New()),
|
||||
TimeFetcher: c,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BeaconDB: db,
|
||||
BlockBuilder: mockBuilder,
|
||||
P2P: &mockp2p.MockBroadcaster{},
|
||||
}
|
||||
|
||||
// Create a blinded block at slot 160 (epoch 5, which is >= FuluForkEpoch)
|
||||
blindedBlock := util.NewBlindedBeaconBlockDeneb()
|
||||
blindedBlock.Message.Slot = 160 // This puts us at epoch 5 (160/32 = 5)
|
||||
blindedBlock.Message.ProposerIndex = 0
|
||||
blindedBlock.Message.ParentRoot = parentRoot[:]
|
||||
blindedBlock.Message.StateRoot = make([]byte, 32)
|
||||
|
||||
req := ðpb.GenericSignedBeaconBlock{
|
||||
Block: ðpb.GenericSignedBeaconBlock_BlindedDeneb{BlindedDeneb: blindedBlock},
|
||||
}
|
||||
|
||||
// This should trigger the post-Fulu early return path
|
||||
res, err := proposerServer.ProposeBeaconBlock(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, res)
|
||||
require.NotEmpty(t, res.BlockRoot)
|
||||
})
|
||||
|
||||
t.Run("post-Fulu blinded block - builder submission error", func(t *testing.T) {
|
||||
// Set up config with Fulu fork at epoch 5
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.FuluForkEpoch = 5
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
mockBuilder := &builderTest.MockBuilderService{
|
||||
HasConfigured: true,
|
||||
Cfg: &builderTest.Config{BeaconDB: db},
|
||||
ErrSubmitBlindedBlockPostFulu: errors.New("post-Fulu builder submission failed"),
|
||||
}
|
||||
|
||||
c := &mock.ChainService{State: beaconState, Root: parentRoot[:]}
|
||||
proposerServer := &Server{
|
||||
ChainStartFetcher: &mockExecution.Chain{},
|
||||
Eth1InfoFetcher: &mockExecution.Chain{},
|
||||
Eth1BlockFetcher: &mockExecution.Chain{},
|
||||
BlockReceiver: c,
|
||||
BlobReceiver: c,
|
||||
HeadFetcher: c,
|
||||
BlockNotifier: c.BlockNotifier(),
|
||||
OperationNotifier: c.OperationNotifier(),
|
||||
StateGen: stategen.New(db, doublylinkedtree.New()),
|
||||
TimeFetcher: c,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BeaconDB: db,
|
||||
BlockBuilder: mockBuilder,
|
||||
P2P: &mockp2p.MockBroadcaster{},
|
||||
}
|
||||
|
||||
// Create a blinded block at slot 160 (epoch 5)
|
||||
blindedBlock := util.NewBlindedBeaconBlockDeneb()
|
||||
blindedBlock.Message.Slot = 160
|
||||
blindedBlock.Message.ProposerIndex = 0
|
||||
blindedBlock.Message.ParentRoot = parentRoot[:]
|
||||
blindedBlock.Message.StateRoot = make([]byte, 32)
|
||||
|
||||
req := ðpb.GenericSignedBeaconBlock{
|
||||
Block: ðpb.GenericSignedBeaconBlock_BlindedDeneb{BlindedDeneb: blindedBlock},
|
||||
}
|
||||
|
||||
_, err := proposerServer.ProposeBeaconBlock(ctx, req)
|
||||
require.ErrorContains(t, "Could not submit blinded block post-Fulu", err)
|
||||
require.ErrorContains(t, "post-Fulu builder submission failed", err)
|
||||
})
|
||||
|
||||
t.Run("pre-Fulu blinded block - uses regular handleBlindedBlock path", func(t *testing.T) {
|
||||
// Set up config with Fulu fork at epoch 10 (future)
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.FuluForkEpoch = 10
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
mockBuilder := &builderTest.MockBuilderService{
|
||||
HasConfigured: true,
|
||||
Cfg: &builderTest.Config{BeaconDB: db},
|
||||
PayloadDeneb: &enginev1.ExecutionPayloadDeneb{},
|
||||
BlobBundle: &enginev1.BlobsBundle{},
|
||||
}
|
||||
|
||||
c := &mock.ChainService{State: beaconState, Root: parentRoot[:]}
|
||||
proposerServer := &Server{
|
||||
ChainStartFetcher: &mockExecution.Chain{},
|
||||
Eth1InfoFetcher: &mockExecution.Chain{},
|
||||
Eth1BlockFetcher: &mockExecution.Chain{},
|
||||
BlockReceiver: c,
|
||||
BlobReceiver: c,
|
||||
HeadFetcher: c,
|
||||
BlockNotifier: c.BlockNotifier(),
|
||||
OperationNotifier: c.OperationNotifier(),
|
||||
StateGen: stategen.New(db, doublylinkedtree.New()),
|
||||
TimeFetcher: c,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BeaconDB: db,
|
||||
BlockBuilder: mockBuilder,
|
||||
P2P: &mockp2p.MockBroadcaster{},
|
||||
}
|
||||
|
||||
// Create a blinded block at slot 160 (epoch 5, which is < FuluForkEpoch=10)
|
||||
blindedBlock := util.NewBlindedBeaconBlockDeneb()
|
||||
blindedBlock.Message.Slot = 160
|
||||
blindedBlock.Message.ProposerIndex = 0
|
||||
blindedBlock.Message.ParentRoot = parentRoot[:]
|
||||
blindedBlock.Message.StateRoot = make([]byte, 32)
|
||||
|
||||
req := ðpb.GenericSignedBeaconBlock{
|
||||
Block: ðpb.GenericSignedBeaconBlock_BlindedDeneb{BlindedDeneb: blindedBlock},
|
||||
}
|
||||
|
||||
// This should NOT trigger the post-Fulu early return path, but use handleBlindedBlock instead
|
||||
res, err := proposerServer.ProposeBeaconBlock(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, res)
|
||||
require.NotEmpty(t, res.BlockRoot)
|
||||
})
|
||||
|
||||
t.Run("boundary test - exactly at Fulu fork epoch", func(t *testing.T) {
|
||||
// Set up config with Fulu fork at epoch 5
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.FuluForkEpoch = 5
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
mockBuilder := &builderTest.MockBuilderService{
|
||||
HasConfigured: true,
|
||||
Cfg: &builderTest.Config{BeaconDB: db},
|
||||
ErrSubmitBlindedBlockPostFulu: nil,
|
||||
}
|
||||
|
||||
c := &mock.ChainService{State: beaconState, Root: parentRoot[:]}
|
||||
proposerServer := &Server{
|
||||
ChainStartFetcher: &mockExecution.Chain{},
|
||||
Eth1InfoFetcher: &mockExecution.Chain{},
|
||||
Eth1BlockFetcher: &mockExecution.Chain{},
|
||||
BlockReceiver: c,
|
||||
BlobReceiver: c,
|
||||
HeadFetcher: c,
|
||||
BlockNotifier: c.BlockNotifier(),
|
||||
OperationNotifier: c.OperationNotifier(),
|
||||
StateGen: stategen.New(db, doublylinkedtree.New()),
|
||||
TimeFetcher: c,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BeaconDB: db,
|
||||
BlockBuilder: mockBuilder,
|
||||
P2P: &mockp2p.MockBroadcaster{},
|
||||
}
|
||||
|
||||
// Create a blinded block at slot 160 (exactly epoch 5)
|
||||
blindedBlock := util.NewBlindedBeaconBlockDeneb()
|
||||
blindedBlock.Message.Slot = 160 // 160/32 = 5 (exactly at FuluForkEpoch)
|
||||
blindedBlock.Message.ProposerIndex = 0
|
||||
blindedBlock.Message.ParentRoot = parentRoot[:]
|
||||
blindedBlock.Message.StateRoot = make([]byte, 32)
|
||||
|
||||
req := ðpb.GenericSignedBeaconBlock{
|
||||
Block: ðpb.GenericSignedBeaconBlock_BlindedDeneb{BlindedDeneb: blindedBlock},
|
||||
}
|
||||
|
||||
// Should trigger post-Fulu path since epoch 5 >= FuluForkEpoch (5)
|
||||
res, err := proposerServer.ProposeBeaconBlock(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, res)
|
||||
require.NotEmpty(t, res.BlockRoot)
|
||||
})
|
||||
|
||||
t.Run("unblinded block - not affected by post-Fulu condition", func(t *testing.T) {
|
||||
// Set up config with Fulu fork at epoch 5
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.FuluForkEpoch = 5
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
c := &mock.ChainService{State: beaconState, Root: parentRoot[:]}
|
||||
proposerServer := &Server{
|
||||
ChainStartFetcher: &mockExecution.Chain{},
|
||||
Eth1InfoFetcher: &mockExecution.Chain{},
|
||||
Eth1BlockFetcher: &mockExecution.Chain{},
|
||||
BlockReceiver: c,
|
||||
BlobReceiver: c,
|
||||
HeadFetcher: c,
|
||||
BlockNotifier: c.BlockNotifier(),
|
||||
OperationNotifier: c.OperationNotifier(),
|
||||
StateGen: stategen.New(db, doublylinkedtree.New()),
|
||||
TimeFetcher: c,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BeaconDB: db,
|
||||
P2P: &mockp2p.MockBroadcaster{},
|
||||
}
|
||||
|
||||
// Create an unblinded block at slot 160 (epoch 5)
|
||||
unblindedBlock := util.NewBeaconBlockDeneb()
unblindedBlock.Block.Slot = 160
unblindedBlock.Block.ProposerIndex = 0
unblindedBlock.Block.ParentRoot = parentRoot[:]
unblindedBlock.Block.StateRoot = make([]byte, 32)

req := &ethpb.GenericSignedBeaconBlock{
Block: &ethpb.GenericSignedBeaconBlock_Deneb{
Deneb: &ethpb.SignedBeaconBlockContentsDeneb{
Block: unblindedBlock,
},
|
||||
},
|
||||
}
|
||||
|
||||
// Unblinded blocks should not trigger post-Fulu condition, even at epoch >= FuluForkEpoch
|
||||
res, err := proposerServer.ProposeBeaconBlock(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, res)
|
||||
require.NotEmpty(t, res.BlockRoot)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
package validator
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"time"
|
||||
|
||||
@@ -28,10 +29,11 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/network/forks"
|
||||
"github.com/OffchainLabs/prysm/v6/genesis"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/types/known/emptypb"
|
||||
@@ -155,32 +157,31 @@ func (vs *Server) ValidatorIndex(ctx context.Context, req *ethpb.ValidatorIndexR
|
||||
//
|
||||
// DomainData fetches the current domain version information from the beacon state.
|
||||
func (vs *Server) DomainData(ctx context.Context, request *ethpb.DomainRequest) (*ethpb.DomainResponse, error) {
|
||||
fork, err := forks.Fork(request.Epoch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
headGenesisValidatorsRoot := vs.HeadFetcher.HeadGenesisValidatorsRoot()
|
||||
isExitDomain := [4]byte(request.Domain) == params.BeaconConfig().DomainVoluntaryExit
|
||||
if isExitDomain {
|
||||
epoch := request.Epoch
|
||||
rd := bytesutil.ToBytes4(request.Domain)
|
||||
if bytes.Equal(request.Domain, params.BeaconConfig().DomainVoluntaryExit[:]) {
|
||||
hs, err := vs.HeadFetcher.HeadStateReadOnly(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if hs.Version() >= version.Deneb {
|
||||
fork = ðpb.Fork{
|
||||
if slots.ToEpoch(hs.Slot()) >= params.BeaconConfig().DenebForkEpoch {
|
||||
return computeDomainData(rd, epoch, ðpb.Fork{
|
||||
PreviousVersion: params.BeaconConfig().CapellaForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().CapellaForkVersion,
|
||||
Epoch: params.BeaconConfig().CapellaForkEpoch,
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
dv, err := signing.Domain(fork, request.Epoch, bytesutil.ToBytes4(request.Domain), headGenesisValidatorsRoot[:])
|
||||
return computeDomainData(rd, epoch, params.ForkFromConfig(params.BeaconConfig(), epoch))
|
||||
}
|
||||
|
||||
func computeDomainData(domain [4]byte, epoch primitives.Epoch, fork *ethpb.Fork) (*ethpb.DomainResponse, error) {
|
||||
gvr := genesis.ValidatorsRoot()
|
||||
domainData, err := signing.Domain(fork, epoch, domain, gvr[:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ðpb.DomainResponse{
|
||||
SignatureDomain: dv,
|
||||
}, nil
|
||||
return ðpb.DomainResponse{SignatureDomain: domainData}, nil
|
||||
}
|
||||
|
||||
// Deprecated: The gRPC API will remain the default and fully supported through v8 (expected in 2026) but will be eventually removed in favor of REST API.
|
||||
|
||||
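
The rewritten DomainData keeps one special case: once the head is past the Deneb fork, voluntary-exit domains are still derived from the Capella fork version (the EIP-7044 behaviour), while every other domain follows the fork active at the requested epoch. A simplified sketch of just that selection; the versions and epochs below are illustrative, not the real signing code:

```go
package main

import "fmt"

var (
	domainVoluntaryExit = [4]byte{0x04, 0x00, 0x00, 0x00}
	domainDeposit       = [4]byte{0x03, 0x00, 0x00, 0x00}
	capellaForkVersion  = [4]byte{0x03, 0x00, 0x00, 0x00}
	denebForkVersion    = [4]byte{0x04, 0x00, 0x00, 0x00}
	denebForkEpoch      = uint64(100) // assumption for the sketch
)

// forkVersionAt is a hypothetical epoch -> fork-version lookup.
func forkVersionAt(epoch uint64) [4]byte {
	if epoch >= denebForkEpoch {
		return denebForkVersion
	}
	return capellaForkVersion
}

// versionForDomain picks the fork version the signature domain is
// derived from: exits stay pinned to Capella once the head is post-Deneb.
func versionForDomain(domain [4]byte, requestEpoch, headEpoch uint64) [4]byte {
	if domain == domainVoluntaryExit && headEpoch >= denebForkEpoch {
		return capellaForkVersion
	}
	return forkVersionAt(requestEpoch)
}

func main() {
	fmt.Printf("%x\n", versionForDomain(domainVoluntaryExit, 150, 150)) // Capella version
	fmt.Printf("%x\n", versionForDomain(domainDeposit, 150, 150))       // Deneb version
}
```
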
@@ -2,6 +2,7 @@ package validator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -17,11 +18,13 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/crypto/bls"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/genesis"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/mock"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
"go.uber.org/mock/gomock"
|
||||
"google.golang.org/grpc/codes"
|
||||
@@ -317,55 +320,63 @@ func TestWaitForChainStart_NotStartedThenLogFired(t *testing.T) {
|
||||
require.LogsContain(t, hook, "Sending genesis time")
|
||||
}
|
||||
|
||||
func TestServer_DomainData_Exits(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.ForkVersionSchedule = map[[4]byte]primitives.Epoch{
|
||||
[4]byte(cfg.GenesisForkVersion): primitives.Epoch(0),
|
||||
[4]byte(cfg.AltairForkVersion): primitives.Epoch(5),
|
||||
[4]byte(cfg.BellatrixForkVersion): primitives.Epoch(10),
|
||||
[4]byte(cfg.CapellaForkVersion): primitives.Epoch(15),
|
||||
[4]byte(cfg.DenebForkVersion): primitives.Epoch(20),
|
||||
}
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
beaconState := ðpb.BeaconStateBellatrix{
|
||||
Slot: 4000,
|
||||
}
|
||||
block := util.NewBeaconBlock()
|
||||
genesisRoot, err := block.Block.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
s, err := state_native.InitializeFromProtoUnsafeBellatrix(beaconState)
|
||||
func testSigDomainForSlot(t *testing.T, domain [4]byte, chsrv *mockChain.ChainService, epoch primitives.Epoch) *ethpb.DomainResponse {
|
||||
cfg := params.BeaconConfig()
|
||||
gvr := genesis.ValidatorsRoot()
|
||||
s, err := state_native.InitializeFromProtoUnsafeDeneb(ðpb.BeaconStateDeneb{
|
||||
Slot: primitives.Slot(epoch) * cfg.SlotsPerEpoch,
|
||||
GenesisValidatorsRoot: gvr[:],
|
||||
})
|
||||
require.NoError(t, err)
|
||||
chsrv.State = s
|
||||
vs := &Server{
|
||||
Ctx: t.Context(),
|
||||
ChainStartFetcher: &mockExecution.Chain{},
|
||||
HeadFetcher: &mockChain.ChainService{State: s, Root: genesisRoot[:]},
|
||||
HeadFetcher: chsrv,
|
||||
}
|
||||
|
||||
reqDomain, err := vs.DomainData(t.Context(), ðpb.DomainRequest{
|
||||
Epoch: 100,
|
||||
Domain: params.BeaconConfig().DomainDeposit[:],
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
wantedDomain, err := signing.ComputeDomain(params.BeaconConfig().DomainDeposit, params.BeaconConfig().DenebForkVersion, make([]byte, 32))
|
||||
assert.NoError(t, err)
|
||||
assert.DeepEqual(t, reqDomain.SignatureDomain, wantedDomain)
|
||||
|
||||
beaconStateNew := ðpb.BeaconStateDeneb{
|
||||
Slot: 4000,
|
||||
}
|
||||
s, err = state_native.InitializeFromProtoUnsafeDeneb(beaconStateNew)
|
||||
require.NoError(t, err)
|
||||
vs.HeadFetcher = &mockChain.ChainService{State: s, Root: genesisRoot[:]}
|
||||
|
||||
reqDomain, err = vs.DomainData(t.Context(), ðpb.DomainRequest{
|
||||
Epoch: 100,
|
||||
Domain: params.BeaconConfig().DomainVoluntaryExit[:],
|
||||
domainResp, err := vs.DomainData(t.Context(), ðpb.DomainRequest{
|
||||
Epoch: epoch,
|
||||
Domain: domain[:],
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
wantedDomain, err = signing.ComputeDomain(params.BeaconConfig().DomainVoluntaryExit, params.BeaconConfig().CapellaForkVersion, make([]byte, 32))
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.DeepEqual(t, reqDomain.SignatureDomain, wantedDomain)
|
||||
return domainResp
|
||||
}
|
||||
|
||||
func requireSigningEqual(t *testing.T, name string, domain [4]byte, req, want primitives.Epoch, chsrv *mockChain.ChainService) {
|
||||
t.Run(fmt.Sprintf("%s_%#x", name, domain), func(t *testing.T) {
|
||||
gvr := genesis.ValidatorsRoot()
|
||||
resp := testSigDomainForSlot(t, domain, chsrv, req)
|
||||
entry := params.GetNetworkScheduleEntry(want)
|
||||
wanted, err := signing.ComputeDomain(domain, entry.ForkVersion[:], gvr[:])
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, hexutil.Encode(wanted), hexutil.Encode(resp.SignatureDomain))
|
||||
})
|
||||
}
|
||||
|
||||
func TestServer_DomainData_Exits(t *testing.T) {
|
||||
// This test makes 2 sets of assertions:
|
||||
// - the deposit domain is always computed wrt the fork version at the given epoch
|
||||
// - the exit domain is the same until deneb, at which point it is always computed wrt the capella fork version
|
||||
params.SetActiveTestCleanup(t, params.MainnetConfig())
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
cfg := params.BeaconConfig()
|
||||
|
||||
block := util.NewBeaconBlock()
|
||||
genesisRoot, err := block.Block.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
chsrv := &mockChain.ChainService{Root: genesisRoot[:]}
|
||||
last := params.LastForkEpoch()
|
||||
requireSigningEqual(t, "genesis deposit", cfg.DomainDeposit, cfg.GenesisEpoch, cfg.GenesisEpoch, chsrv)
|
||||
requireSigningEqual(t, "altair deposit", cfg.DomainDeposit, cfg.AltairForkEpoch, cfg.AltairForkEpoch, chsrv)
|
||||
requireSigningEqual(t, "bellatrix deposit", cfg.DomainDeposit, cfg.BellatrixForkEpoch, cfg.BellatrixForkEpoch, chsrv)
|
||||
requireSigningEqual(t, "capella deposit", cfg.DomainDeposit, cfg.CapellaForkEpoch, cfg.CapellaForkEpoch, chsrv)
|
||||
requireSigningEqual(t, "deneb deposit", cfg.DomainDeposit, cfg.DenebForkEpoch, cfg.DenebForkEpoch, chsrv)
|
||||
requireSigningEqual(t, "last epoch deposit", cfg.DomainDeposit, last, last, chsrv)
|
||||
|
||||
requireSigningEqual(t, "genesis exit", cfg.DomainVoluntaryExit, cfg.GenesisEpoch, cfg.GenesisEpoch, chsrv)
|
||||
requireSigningEqual(t, "altair exit", cfg.DomainVoluntaryExit, cfg.AltairForkEpoch, cfg.AltairForkEpoch, chsrv)
|
||||
requireSigningEqual(t, "bellatrix exit", cfg.DomainVoluntaryExit, cfg.BellatrixForkEpoch, cfg.BellatrixForkEpoch, chsrv)
|
||||
requireSigningEqual(t, "capella exit", cfg.DomainVoluntaryExit, cfg.CapellaForkEpoch, cfg.CapellaForkEpoch, chsrv)
|
||||
requireSigningEqual(t, "deneb exit", cfg.DomainVoluntaryExit, cfg.DenebForkEpoch, cfg.CapellaForkEpoch, chsrv)
|
||||
requireSigningEqual(t, "last epoch exit", cfg.DomainVoluntaryExit, last, cfg.CapellaForkEpoch, chsrv)
|
||||
}
|
||||
|
||||
@@ -37,7 +37,7 @@ func TestUnblinder_UnblindBlobSidecars_InvalidBundle(t *testing.T) {
|
||||
func TestUnblindBlobsSidecars_WithBlobsBundler(t *testing.T) {
|
||||
// Test that the function accepts BlobsBundler interface
|
||||
// This test focuses on the interface change rather than full integration
|
||||
|
||||
|
||||
t.Run("Interface compatibility with BlobsBundle", func(t *testing.T) {
|
||||
// Create a simple pre-Deneb block that will return nil (no processing needed)
|
||||
wBlock, err := consensusblocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockCapella{
|
||||
@@ -87,7 +87,7 @@ func TestUnblindBlobsSidecars_WithBlobsBundler(t *testing.T) {
|
||||
t.Run("Function signature accepts BlobsBundler interface", func(t *testing.T) {
|
||||
// This test verifies that the function signature has been updated to accept BlobsBundler
|
||||
// We test this by verifying the code compiles with both types
|
||||
|
||||
|
||||
// Create a simple pre-Deneb block for the interface test
|
||||
wBlock, err := consensusblocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockCapella{
|
||||
Block: ðpb.BeaconBlockCapella{
|
||||
@@ -106,7 +106,7 @@ func TestUnblindBlobsSidecars_WithBlobsBundler(t *testing.T) {
|
||||
_, err = unblindBlobsSidecars(wBlock, regularBundle)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify function accepts BlobsBundleV2 through the interface
|
||||
// Verify function accepts BlobsBundleV2 through the interface
|
||||
var bundleV2 enginev1.BlobsBundler = &enginev1.BlobsBundleV2{
|
||||
KzgCommitments: [][]byte{make([]byte, 48)},
|
||||
Proofs: [][]byte{make([]byte, 48)},
|
||||
|
||||
@@ -120,6 +120,7 @@ type Config struct {
|
||||
Router *http.ServeMux
|
||||
ClockWaiter startup.ClockWaiter
|
||||
BlobStorage *filesystem.BlobStorage
|
||||
DataColumnStorage *filesystem.DataColumnStorage
|
||||
TrackedValidatorsCache *cache.TrackedValidatorsCache
|
||||
PayloadIDCache *cache.PayloadIDCache
|
||||
LCStore *lightClient.Store
|
||||
@@ -196,6 +197,7 @@ func NewService(ctx context.Context, cfg *Config) *Service {
|
||||
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
|
||||
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
|
||||
BlobStorage: s.cfg.BlobStorage,
|
||||
DataColumnStorage: s.cfg.DataColumnStorage,
|
||||
}
|
||||
rewardFetcher := &rewards.BlockRewardService{Replayer: ch, DB: s.cfg.BeaconDB}
|
||||
coreService := &core.Service{
|
||||
|
||||
@@ -15,10 +15,14 @@ type MockStater struct {
|
||||
BeaconStateRoot []byte
|
||||
StatesBySlot map[primitives.Slot]state.BeaconState
|
||||
StatesByRoot map[[32]byte]state.BeaconState
|
||||
CustomError error
|
||||
}
|
||||
|
||||
// State --
|
||||
func (m *MockStater) State(ctx context.Context, id []byte) (state.BeaconState, error) {
|
||||
if m.CustomError != nil {
|
||||
return nil, m.CustomError
|
||||
}
|
||||
if m.StateProviderFunc != nil {
|
||||
return m.StateProviderFunc(ctx, id)
|
||||
}
|
||||