Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 21:38:05 -05:00)

Compare commits: peerdas-sy ... peerDAS
247 commits (abbreviated SHA1):

814cec3a34, 8e52d0c3c6, d339e09509, 8ec460223c, 349d9d2fd0, e0aecb9c32, 4a1ab70929, ad427ab08f,
abe36ef5a5, 295c91ef7a, 6a8f3cacc2, 7324c16c36, 601afbe73d, 6d7cc7dcbc, 240cd1d058, 26d8b6b786,
92c359456e, 3e1a15a9d9, e97005c727, cfe29850b2, d48ed44c4c, dbab624f3d, 83e9cc3dbb, ee03c7cce2,
c5135f6995, 29aedac113, 08fb3812b7, 07738dd9a4, 53df29b07f, 00cf1f2507, 6528fb9cea, 5021131811,
26cec9d9c7, 4ed90a02ef, 7d528c75bb, e7b2953d5a, acf35e849e, c826d334a1, eace128ee9, 9161b80a32,
009a1507a1, fa71a6e117, d3763d56cf, 461fa50c34, 6d4e1d5f7a, 415622ec49, df65458834, 2005d5c6f2,
7d72fbebe7, 685761666d, a75974b5f5, 0725dff5e8, 0d95d3d022, ede560bee1, 34a1bf835a, b0bceac9c0,
e95d1c54cf, 4af3763013, a520db7276, f8abf0565f, 11a6af9bf9, 6f8a654874, f0c01fdb4b, a015ae6a29,
457aa117f3, d302b494df, b3db1b6b74, 66e4d5e816, 41f109aa5b, cfd4ceb4dd, df211c3384, 89e78d7da3,
e76ea84596, f10d6e8e16, 91eb43b595, 90710ec57d, 3dc65f991e, 4d9789401b, f72d59b004, e25497be3e,
8897a26f84, b2a26f2b62, 09659010f8, 589042df20, 312b93e9b1, f86f76e447, c311e652eb, 6a5d78a331,
a2fd30497e, a94561f8dc, af875b78c9, 61207bd3ac, 0b6fcd7d17, fe2766e716, 9135d765e1, eca87f29d1,
00821c8f55, 4b9e92bcd7, b01d9005b8, 8d812d5f0e, 24a3cb2a8b, 66d1d3e248, 99933678ea, 34f8e1e92b,
a6a41a8755, f110b94fac, 33023aa282, eeb3cdc99e, 1e7147f060, 8936beaff3, c00283f247, a4269cf308,
91f3c8a4d0, 30c7ee9c7b, 456d8b9eb9, 4fe3e6d31a, 01ee1c80b4, c14fe47a81, b9deabbf0a, 5d66a98e78,
2d46d6ffae, 57107e50a7, 47271254f6, f304028874, 8abc5e159a, b1ac53c4dd, 27ab68c856, ddf5a3953b,
92d2fc101d, 8996000d2b, a2fcba2349, abe8638991, 0b5064b474, da9d4cf5b9, a62cca15dd, ac04246a2a,
0923145bd7, a216cb4105, 01705d1f3d, 14f93b4e9d, ad11036c36, 632a06076b, 242c2b0268, 19662da905,
7faee5af35, 805ee1bf31, bea46fdfa1, f6b1fb1c88, 6fb349ea76, e5a425f5c7, f157d37e4c, 5f08559bef,
a082d2aecd, bcfaff8504, d8e09c346f, 876519731b, de05b83aca, 56c73e7193, 859ac008a8, f882bd27c8,
361e5759c1, 34ef0da896, 726e8b962f, 453ea01deb, 6537f8011e, 5f17317c1c, 3432ffa4a3, 9dac67635b,
9be69fbd07, e21261e893, da53a8fc48, a14634e656, 43761a8066, 01dbc337c0, 92f9b55fcb, f65f12f58b,
f2b61a3dcf, 77a6d29a2e, 31d16da3a0, 19221b77bd, 83df293647, c20c09ce36, 2191faaa3f, 2de1e6f3e4,
db44df3964, f92eb44c89, a26980b64d, f58cf7e626, 68da7dabe2, d1e43a2c02, 3652bec2f8, 81b7a1725f,
0c917079c4, a732fe7021, d75a7aae6a, e788a46e82, 199543125a, ca63efa770, 345e6edd9c, 6403064126,
0517d76631, 000d480f77, b40a8ed37e, d21c2bd63e, 7a256e93f7, 07fe76c2da, 54affa897f, ac4c5fae3c,
2845d87077, dc2c90b8ed, b469157e1f, 2697794e58, 48cf24edb4, 78f90db90b, d0a3b9bc1d, bfdb6dab86,
7dd2fd52af, b6bad9331b, 6e2122085d, 7a847292aa, 81f4db0afa, a7dc2e6c8b, 0a010b5088, 1e335e2cf2,
42f4c0f14e, d3c12abe25, b0ba05b4f4, e206506489, 013cb28663, 496914cb39, c032e78888, 5e4deff6fd,
6daa91c465, 32ce6423eb, b0ea450df5, 8bd10df423, dcbb543be2, be0580e1a9, 1355178115, b78c3485b9,
f503efc6ed, 1bfbd3980e, 3e722ea1bc, d844026433, 9ffc19d5ef, 3e23f6e879, c688c84393
.github/ISSUE_TEMPLATE/bug_report.yml (vendored, 2 changes)
@@ -1,6 +1,6 @@
 name: 🐞 Bug report
 description: Report a bug or problem with running Prysm
-labels: ["Bug"]
+type: "Bug"
 body:
   - type: markdown
     attributes:
.github/workflows/check-specrefs.yml (vendored, new file, 43 lines)
@@ -0,0 +1,43 @@
+name: Check Spec References
+on: [push, pull_request]
+
+jobs:
+  check-specrefs:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - name: Check version consistency
+        run: |
+          WORKSPACE_VERSION=$(grep 'consensus_spec_version = ' WORKSPACE | sed 's/.*"\(.*\)"/\1/')
+          ETHSPECIFY_VERSION=$(grep '^version:' specrefs/.ethspecify.yml | sed 's/version: //')
+          if [ "$WORKSPACE_VERSION" != "$ETHSPECIFY_VERSION" ]; then
+            echo "Version mismatch between WORKSPACE and ethspecify"
+            echo "  WORKSPACE: $WORKSPACE_VERSION"
+            echo "  specrefs/.ethspecify.yml: $ETHSPECIFY_VERSION"
+            exit 1
+          else
+            echo "Versions match: $WORKSPACE_VERSION"
+          fi
+
+      - name: Install ethspecify
+        run: python3 -mpip install ethspecify
+
+      - name: Update spec references
+        run: ethspecify process --path=specrefs
+
+      - name: Check for differences
+        run: |
+          if ! git diff --exit-code specrefs >/dev/null; then
+            echo "Spec references are out-of-date!"
+            echo ""
+            git --no-pager diff specrefs
+            exit 1
+          else
+            echo "Spec references are up-to-date!"
+          fi
+
+      - name: Check spec references
+        run: ethspecify check --path=specrefs
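The new workflow enforces that the spec version pinned in WORKSPACE stays in lockstep with specrefs/.ethspecify.yml before running ethspecify. A minimal Go sketch of that same consistency check, written as a hypothetical unit test (the two file paths and key names come from the workflow above; the test name and regular expressions are assumptions, not part of this change):

```go
package specrefs_test

import (
	"os"
	"regexp"
	"testing"
)

// TestSpecVersionConsistency mirrors the CI "Check version consistency" step:
// the consensus_spec_version in WORKSPACE must equal the version declared in
// specrefs/.ethspecify.yml.
func TestSpecVersionConsistency(t *testing.T) {
	ws, err := os.ReadFile("WORKSPACE")
	if err != nil {
		t.Fatal(err)
	}
	ym, err := os.ReadFile("specrefs/.ethspecify.yml")
	if err != nil {
		t.Fatal(err)
	}
	wsVersion := regexp.MustCompile(`consensus_spec_version = "([^"]+)"`).FindSubmatch(ws)
	ymVersion := regexp.MustCompile(`(?m)^version:\s*(\S+)`).FindSubmatch(ym)
	if wsVersion == nil || ymVersion == nil {
		t.Fatal("could not locate version strings")
	}
	if string(wsVersion[1]) != string(ymVersion[1]) {
		t.Fatalf("version mismatch: WORKSPACE=%s ethspecify=%s", wsVersion[1], ymVersion[1])
	}
}
```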
WORKSPACE (10 changes)
@@ -253,16 +253,16 @@ filegroup(
     url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
 )
 
-consensus_spec_version = "v1.6.0-alpha.1"
+consensus_spec_version = "v1.6.0-alpha.5"
 
 load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests")
 
 consensus_spec_tests(
     name = "consensus_spec_tests",
     flavors = {
-        "general": "sha256-o4t9p3R+fQHF4KOykGmwlG3zDw5wUdVWprkzId8aIsk=",
-        "minimal": "sha256-sU7ToI8t3MR8x0vVjC8ERmAHZDWpEmnAC9FWIpHi5x4=",
-        "mainnet": "sha256-YKS4wngg0LgI9Upp4MYJ77aG+8+e/G4YeqEIlp06LZw=",
+        "general": "sha256-BXuEb1XbeSft0qzVFnoB8KC0YR1qM3ybT5lKUDbUWn8=",
+        "minimal": "sha256-EjwSHgBbWSoy5hm9V+A/bVMabyojaKsBNPrRtuPVq4k=",
+        "mainnet": "sha256-OGWMzarzaV1B9mVpy48/DCUbhjfX+b64pAxWwPLWhAs=",
     },
     version = consensus_spec_version,
 )
@@ -278,7 +278,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-Nv4TEuEJPQIM4E6T9J0FOITsmappmXZjGtlhe1HEXnU=",
+    integrity = "sha256-FQWR5EZuVcQGR0ol9vpd7eunnfGexJ/7J3xycrFEJbU=",
     strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
     url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
 )
@@ -102,6 +102,7 @@ type BuilderClient interface {
     GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubkey [48]byte) (SignedBid, error)
     RegisterValidator(ctx context.Context, svr []*ethpb.SignedValidatorRegistrationV1) error
     SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error)
+    SubmitBlindedBlockPostFulu(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) error
     Status(ctx context.Context) error
 }
 
@@ -152,7 +153,8 @@ func (c *Client) NodeURL() string {
 type reqOption func(*http.Request)
 
 // do is a generic, opinionated request function to reduce boilerplate amongst the methods in this package api/client/builder.
-func (c *Client) do(ctx context.Context, method string, path string, body io.Reader, opts ...reqOption) (res []byte, header http.Header, err error) {
+// It validates that the HTTP response status matches the expectedStatus parameter.
+func (c *Client) do(ctx context.Context, method string, path string, body io.Reader, expectedStatus int, opts ...reqOption) (res []byte, header http.Header, err error) {
     ctx, span := trace.StartSpan(ctx, "builder.client.do")
     defer func() {
         tracing.AnnotateError(span, err)
@@ -187,8 +189,8 @@ func (c *Client) do(ctx context.Context, method string, path string, body io.Rea
             log.WithError(closeErr).Error("Failed to close response body")
         }
     }()
-    if r.StatusCode != http.StatusOK {
-        err = non200Err(r)
+    if r.StatusCode != expectedStatus {
+        err = unexpectedStatusErr(r, expectedStatus)
         return
     }
     res, err = io.ReadAll(io.LimitReader(r.Body, client.MaxBodySize))
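With expectedStatus threaded through do, each call site now states which status code it considers a success instead of assuming 200. A hedged sketch of the resulting calling convention, condensed into a hypothetical helper (the method and its parameters are illustrative; only do's signature and the 200/202 split are taken from this change):

```go
// submitWithExpected illustrates the new expectedStatus parameter: the caller,
// not do(), decides which HTTP status counts as success.
func (c *Client) submitWithExpected(ctx context.Context, path string, body io.Reader, postFulu bool, opts ...reqOption) error {
	expected := http.StatusOK // pre-Fulu: the relay returns a payload with 200
	if postFulu {
		expected = http.StatusAccepted // post-Fulu: the relay only acknowledges with 202
	}
	// Any other status is turned into unexpectedStatusErr(r, expected).
	_, _, err := c.do(ctx, http.MethodPost, path, body, expected, opts...)
	return err
}
```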
@@ -236,7 +238,7 @@ func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash
             r.Header.Set("Accept", api.JsonMediaType)
         }
     }
-    data, header, err := c.do(ctx, http.MethodGet, path, nil, getOpts)
+    data, header, err := c.do(ctx, http.MethodGet, path, nil, http.StatusOK, getOpts)
     if err != nil {
         return nil, errors.Wrap(err, "error getting header from builder server")
     }
@@ -409,7 +411,7 @@ func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValid
         }
     }
 
-    if _, _, err = c.do(ctx, http.MethodPost, postRegisterValidatorPath, bytes.NewBuffer(body), postOpts); err != nil {
+    if _, _, err = c.do(ctx, http.MethodPost, postRegisterValidatorPath, bytes.NewBuffer(body), http.StatusOK, postOpts); err != nil {
         return errors.Wrap(err, "do")
     }
     log.WithField("registrationCount", len(svr)).Debug("Successfully registered validator(s) on builder")
@@ -471,7 +473,7 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
 
     // post the blinded block - the execution payload response should contain the unblinded payload, along with the
     // blobs bundle if it is post deneb.
-    data, header, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), postOpts)
+    data, header, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), http.StatusOK, postOpts)
     if err != nil {
         return nil, nil, errors.Wrap(err, "error posting the blinded block to the builder api")
     }
@@ -501,6 +503,24 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
     return ed, blobs, nil
 }
 
+// SubmitBlindedBlockPostFulu calls the builder API endpoint post-Fulu where relays only return status codes.
+// This method is used after the Fulu fork when MEV-boost relays no longer return execution payloads.
+func (c *Client) SubmitBlindedBlockPostFulu(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) error {
+    body, postOpts, err := c.buildBlindedBlockRequest(sb)
+    if err != nil {
+        return err
+    }
+
+    // Post the blinded block - the response should only contain a status code (no payload)
+    _, _, err = c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), http.StatusAccepted, postOpts)
+    if err != nil {
+        return errors.Wrap(err, "error posting the blinded block to the builder api post-Fulu")
+    }
+
+    // Success is indicated by no error (status 202)
+    return nil
+}
+
 func (c *Client) checkBlockVersion(respBytes []byte, header http.Header) (int, error) {
     var versionHeader string
     if c.sszEnabled {
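Callers are expected to pick the submission path based on the block's fork version: payload-returning submission before Fulu, acknowledge-only submission afterwards. A hedged sketch of such a call site (the wrapper function is an assumption; only the two client methods, the version package, and the BlockBuilder interface are taken from this change):

```go
// submitBlinded is a hypothetical wrapper showing how a proposer might choose
// between the pre-Fulu and post-Fulu builder submission paths.
func submitBlinded(ctx context.Context, c builder.BlockBuilder, blk interfaces.ReadOnlySignedBeaconBlock) error {
	if blk.Version() >= version.Fulu {
		// Post-Fulu: the relay only acknowledges (202); the unblinded payload is obtained elsewhere.
		return c.SubmitBlindedBlockPostFulu(ctx, blk)
	}
	// Pre-Fulu: the relay unblinds the block and returns the execution payload (and blobs post-Deneb).
	payload, blobs, err := c.SubmitBlindedBlock(ctx, blk)
	if err != nil {
		return err
	}
	_ = payload // hand the payload and blobs to the regular proposal flow
	_ = blobs
	return nil
}
```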
@@ -657,11 +677,11 @@ func (c *Client) Status(ctx context.Context) error {
     getOpts := func(r *http.Request) {
         r.Header.Set("Accept", api.JsonMediaType)
     }
-    _, _, err := c.do(ctx, http.MethodGet, getStatus, nil, getOpts)
+    _, _, err := c.do(ctx, http.MethodGet, getStatus, nil, http.StatusOK, getOpts)
     return err
 }
 
-func non200Err(response *http.Response) error {
+func unexpectedStatusErr(response *http.Response, expected int) error {
     bodyBytes, err := io.ReadAll(io.LimitReader(response.Body, client.MaxErrBodySize))
     var errMessage ErrorMessage
     var body string
@@ -670,7 +690,7 @@ func non200Err(response *http.Response) error {
     } else {
         body = "response body:\n" + string(bodyBytes)
     }
-    msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
+    msg := fmt.Sprintf("expected=%d, got=%d, url=%s, body=%s", expected, response.StatusCode, response.Request.URL, body)
     switch response.StatusCode {
     case http.StatusUnsupportedMediaType:
         log.WithError(ErrUnsupportedMediaType).Debug(msg)
@@ -1555,6 +1555,89 @@ func testSignedBlindedBeaconBlockElectra(t *testing.T) *eth.SignedBlindedBeaconB
     }
 }
 
+func TestSubmitBlindedBlockPostFulu(t *testing.T) {
+    ctx := t.Context()
+
+    t.Run("success", func(t *testing.T) {
+        hc := &http.Client{
+            Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
+                require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
+                require.Equal(t, "bellatrix", r.Header.Get("Eth-Consensus-Version"))
+                require.Equal(t, api.JsonMediaType, r.Header.Get("Content-Type"))
+                require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
+                // Post-Fulu: only return status code, no payload
+                return &http.Response{
+                    StatusCode: http.StatusAccepted,
+                    Body:       io.NopCloser(bytes.NewBufferString("")),
+                    Request:    r.Clone(ctx),
+                }, nil
+            }),
+        }
+        c := &Client{
+            hc:      hc,
+            baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
+        }
+        sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
+        require.NoError(t, err)
+        err = c.SubmitBlindedBlockPostFulu(ctx, sbbb)
+        require.NoError(t, err)
+    })
+
+    t.Run("success_ssz", func(t *testing.T) {
+        hc := &http.Client{
+            Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
+                require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
+                require.Equal(t, "bellatrix", r.Header.Get(api.VersionHeader))
+                require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Content-Type"))
+                require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Accept"))
+                // Post-Fulu: only return status code, no payload
+                return &http.Response{
+                    StatusCode: http.StatusAccepted,
+                    Body:       io.NopCloser(bytes.NewBufferString("")),
+                    Request:    r.Clone(ctx),
+                }, nil
+            }),
+        }
+        c := &Client{
+            hc:         hc,
+            baseURL:    &url.URL{Host: "localhost:3500", Scheme: "http"},
+            sszEnabled: true,
+        }
+        sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
+        require.NoError(t, err)
+        err = c.SubmitBlindedBlockPostFulu(ctx, sbbb)
+        require.NoError(t, err)
+    })
+
+    t.Run("error_response", func(t *testing.T) {
+        hc := &http.Client{
+            Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
+                require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
+                require.Equal(t, "bellatrix", r.Header.Get("Eth-Consensus-Version"))
+                message := ErrorMessage{
+                    Code:    400,
+                    Message: "Bad Request",
+                }
+                resp, err := json.Marshal(message)
+                require.NoError(t, err)
+                return &http.Response{
+                    StatusCode: http.StatusBadRequest,
+                    Body:       io.NopCloser(bytes.NewBuffer(resp)),
+                    Request:    r.Clone(ctx),
+                }, nil
+            }),
+        }
+        c := &Client{
+            hc:      hc,
+            baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
+        }
+        sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
+        require.NoError(t, err)
+        err = c.SubmitBlindedBlockPostFulu(ctx, sbbb)
+        require.ErrorIs(t, err, ErrNotOK)
+    })
+}
+
 func TestRequestLogger(t *testing.T) {
     wo := WithObserver(&requestLogger{})
     c, err := NewClient("localhost:3500", wo)
@@ -45,6 +45,11 @@ func (MockClient) SubmitBlindedBlock(_ context.Context, _ interfaces.ReadOnlySig
     return nil, nil, nil
 }
 
+// SubmitBlindedBlockPostFulu --
+func (MockClient) SubmitBlindedBlockPostFulu(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock) error {
+    return nil
+}
+
 // Status --
 func (MockClient) Status(_ context.Context) error {
     return nil
@@ -1699,7 +1699,7 @@ func TestExecutionPayloadHeaderCapellaRoundtrip(t *testing.T) {
     require.DeepEqual(t, string(expected[0:len(expected)-1]), string(m))
 }
 
-func TestErrorMessage_non200Err(t *testing.T) {
+func TestErrorMessage_unexpectedStatusErr(t *testing.T) {
     mockRequest := &http.Request{
         URL: &url.URL{Path: "example.com"},
     }
@@ -1779,7 +1779,7 @@ func TestErrorMessage_non200Err(t *testing.T) {
     }
     for _, tt := range tests {
         t.Run(tt.name, func(t *testing.T) {
-            err := non200Err(tt.args)
+            err := unexpectedStatusErr(tt.args, http.StatusOK)
             if err != nil && tt.wantMessage != "" {
                 require.ErrorContains(t, tt.wantMessage, err)
             }
@@ -102,7 +102,6 @@ func VerifyCellKZGProofBatch(commitmentsBytes []Bytes48, cellIndices []uint64, c
     for i := range cells {
         ckzgCells[i] = ckzg4844.Cell(cells[i])
     }
-
     return ckzg4844.VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, ckzgCells, proofsBytes)
 }
 
@@ -1,32 +1,14 @@
 package kzg
 
 import (
+    "fmt"
+
     "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
     GoKZG "github.com/crate-crypto/go-kzg-4844"
+    ckzg4844 "github.com/ethereum/c-kzg-4844/v2/bindings/go"
+    "github.com/pkg/errors"
 )
 
-// Verify performs single or batch verification of commitments depending on the number of given BlobSidecars.
-func Verify(sidecars ...blocks.ROBlob) error {
-    if len(sidecars) == 0 {
-        return nil
-    }
-    if len(sidecars) == 1 {
-        return kzgContext.VerifyBlobKZGProof(
-            bytesToBlob(sidecars[0].Blob),
-            bytesToCommitment(sidecars[0].KzgCommitment),
-            bytesToKZGProof(sidecars[0].KzgProof))
-    }
-    blobs := make([]GoKZG.Blob, len(sidecars))
-    cmts := make([]GoKZG.KZGCommitment, len(sidecars))
-    proofs := make([]GoKZG.KZGProof, len(sidecars))
-    for i, sidecar := range sidecars {
-        blobs[i] = *bytesToBlob(sidecar.Blob)
-        cmts[i] = bytesToCommitment(sidecar.KzgCommitment)
-        proofs[i] = bytesToKZGProof(sidecar.KzgProof)
-    }
-    return kzgContext.VerifyBlobKZGProofBatch(blobs, cmts, proofs)
-}
-
 func bytesToBlob(blob []byte) *GoKZG.Blob {
     var ret GoKZG.Blob
     copy(ret[:], blob)
@@ -42,3 +24,144 @@ func bytesToKZGProof(proof []byte) (ret GoKZG.KZGProof) {
     copy(ret[:], proof)
     return
 }
+
+// Verify performs single or batch verification of commitments depending on the number of given BlobSidecars.
+func Verify(blobSidecars ...blocks.ROBlob) error {
+    if len(blobSidecars) == 0 {
+        return nil
+    }
+    if len(blobSidecars) == 1 {
+        return kzgContext.VerifyBlobKZGProof(
+            bytesToBlob(blobSidecars[0].Blob),
+            bytesToCommitment(blobSidecars[0].KzgCommitment),
+            bytesToKZGProof(blobSidecars[0].KzgProof))
+    }
+    blobs := make([]GoKZG.Blob, len(blobSidecars))
+    cmts := make([]GoKZG.KZGCommitment, len(blobSidecars))
+    proofs := make([]GoKZG.KZGProof, len(blobSidecars))
+    for i, sidecar := range blobSidecars {
+        blobs[i] = *bytesToBlob(sidecar.Blob)
+        cmts[i] = bytesToCommitment(sidecar.KzgCommitment)
+        proofs[i] = bytesToKZGProof(sidecar.KzgProof)
+    }
+    return kzgContext.VerifyBlobKZGProofBatch(blobs, cmts, proofs)
+}
+
+// VerifyBlobKZGProofBatch verifies KZG proofs for multiple blobs using batch verification.
+// This is more efficient than verifying each blob individually when len(blobs) > 1.
+// For single blob verification, it uses the optimized single verification path.
+func VerifyBlobKZGProofBatch(blobs [][]byte, commitments [][]byte, proofs [][]byte) error {
+    if len(blobs) != len(commitments) || len(blobs) != len(proofs) {
+        return errors.Errorf("number of blobs (%d), commitments (%d), and proofs (%d) must match", len(blobs), len(commitments), len(proofs))
+    }
+
+    if len(blobs) == 0 {
+        return nil
+    }
+
+    // Optimize for single blob case - use single verification to avoid batch overhead
+    if len(blobs) == 1 {
+        return kzgContext.VerifyBlobKZGProof(
+            bytesToBlob(blobs[0]),
+            bytesToCommitment(commitments[0]),
+            bytesToKZGProof(proofs[0]))
+    }
+
+    // Use batch verification for multiple blobs
+    ckzgBlobs := make([]ckzg4844.Blob, len(blobs))
+    ckzgCommitments := make([]ckzg4844.Bytes48, len(commitments))
+    ckzgProofs := make([]ckzg4844.Bytes48, len(proofs))
+
+    for i := range blobs {
+        if len(blobs[i]) != len(ckzg4844.Blob{}) {
+            return fmt.Errorf("blobs len (%d) differs from expected (%d)", len(blobs[i]), len(ckzg4844.Blob{}))
+        }
+        if len(commitments[i]) != len(ckzg4844.Bytes48{}) {
+            return fmt.Errorf("commitments len (%d) differs from expected (%d)", len(commitments[i]), len(ckzg4844.Blob{}))
+        }
+        if len(proofs[i]) != len(ckzg4844.Bytes48{}) {
+            return fmt.Errorf("proofs len (%d) differs from expected (%d)", len(proofs[i]), len(ckzg4844.Blob{}))
+        }
+        ckzgBlobs[i] = ckzg4844.Blob(blobs[i])
+        ckzgCommitments[i] = ckzg4844.Bytes48(commitments[i])
+        ckzgProofs[i] = ckzg4844.Bytes48(proofs[i])
+    }
+
+    valid, err := ckzg4844.VerifyBlobKZGProofBatch(ckzgBlobs, ckzgCommitments, ckzgProofs)
+    if err != nil {
+        return errors.Wrap(err, "batch verification")
+    }
+    if !valid {
+        return errors.New("batch KZG proof verification failed")
+    }
+
+    return nil
+}
+
+// VerifyCellKZGProofBatchFromBlobData verifies cell KZG proofs in batch format directly from blob data.
+// This is more efficient than reconstructing data column sidecars when you have the raw blob data and cell proofs.
+// For PeerDAS/Fulu, the execution client provides cell proofs in flattened format via BlobsBundleV2.
+// For single blob verification, it optimizes by computing cells once and verifying efficiently.
+func VerifyCellKZGProofBatchFromBlobData(blobs [][]byte, commitments [][]byte, cellProofs [][]byte, numberOfColumns uint64) error {
+    blobCount := uint64(len(blobs))
+    expectedCellProofs := blobCount * numberOfColumns
+
+    if uint64(len(cellProofs)) != expectedCellProofs {
+        return errors.Errorf("expected %d cell proofs, got %d", expectedCellProofs, len(cellProofs))
+    }
+
+    if len(commitments) != len(blobs) {
+        return errors.Errorf("number of commitments (%d) must match number of blobs (%d)", len(commitments), len(blobs))
+    }
+
+    if blobCount == 0 {
+        return nil
+    }
+
+    // Handle multiple blobs - compute cells for all blobs
+    allCells := make([]Cell, 0, expectedCellProofs)
+    allCommitments := make([]Bytes48, 0, expectedCellProofs)
+    allIndices := make([]uint64, 0, expectedCellProofs)
+    allProofs := make([]Bytes48, 0, expectedCellProofs)
+
+    for blobIndex := range blobs {
+        if len(blobs[blobIndex]) != len(Blob{}) {
+            return fmt.Errorf("blobs len (%d) differs from expected (%d)", len(blobs[blobIndex]), len(Blob{}))
+        }
+        // Convert blob to kzg.Blob type
+        blob := Blob(blobs[blobIndex])
+
+        // Compute cells for this blob
+        cells, err := ComputeCells(&blob)
+        if err != nil {
+            return errors.Wrapf(err, "failed to compute cells for blob %d", blobIndex)
+        }
+
+        // Add cells and corresponding data for each column
+        for columnIndex := range numberOfColumns {
+            cellProofIndex := uint64(blobIndex)*numberOfColumns + columnIndex
+            if len(commitments[blobIndex]) != len(Bytes48{}) {
+                return fmt.Errorf("commitments len (%d) differs from expected (%d)", len(commitments[blobIndex]), len(Bytes48{}))
+            }
+            if len(cellProofs[cellProofIndex]) != len(Bytes48{}) {
+                return fmt.Errorf("proofs len (%d) differs from expected (%d)", len(cellProofs[cellProofIndex]), len(Bytes48{}))
+            }
+            allCells = append(allCells, cells[columnIndex])
+            allCommitments = append(allCommitments, Bytes48(commitments[blobIndex]))
+            allIndices = append(allIndices, columnIndex)
+
+            allProofs = append(allProofs, Bytes48(cellProofs[cellProofIndex]))
+        }
+    }
+
+    // Batch verify all cells
+    valid, err := VerifyCellKZGProofBatch(allCommitments, allIndices, allCells, allProofs)
+    if err != nil {
+        return errors.Wrap(err, "cell batch verification")
+    }
+    if !valid {
+        return errors.New("cell KZG proof batch verification failed")
+    }
+
+    return nil
+}
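The flattened cell-proof layout assumed by VerifyCellKZGProofBatchFromBlobData is row-major: all proofs for blob 0 (one per column) come first, then all proofs for blob 1, and so on, which is why the function indexes proofs as blobIndex*numberOfColumns + columnIndex. A small hedged helper, not part of the change, that regroups such a flat slice per blob under that assumption:

```go
// groupCellProofs splits a flattened slice of cell proofs (len = blobCount*numberOfColumns,
// ordered blob-by-blob as described above) into one slice of proofs per blob.
// It is a hypothetical utility meant only to illustrate the indexing convention.
func groupCellProofs(cellProofs [][]byte, blobCount, numberOfColumns uint64) ([][][]byte, error) {
	if uint64(len(cellProofs)) != blobCount*numberOfColumns {
		return nil, fmt.Errorf("expected %d cell proofs, got %d", blobCount*numberOfColumns, len(cellProofs))
	}
	grouped := make([][][]byte, blobCount)
	for blobIndex := uint64(0); blobIndex < blobCount; blobIndex++ {
		start := blobIndex * numberOfColumns
		grouped[blobIndex] = cellProofs[start : start+numberOfColumns]
	}
	return grouped, nil
}
```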
@@ -22,8 +22,8 @@ func GenerateCommitmentAndProof(blob GoKZG.Blob) (GoKZG.KZGCommitment, GoKZG.KZG
 }
 
 func TestVerify(t *testing.T) {
-    sidecars := make([]blocks.ROBlob, 0)
-    require.NoError(t, Verify(sidecars...))
+    blobSidecars := make([]blocks.ROBlob, 0)
+    require.NoError(t, Verify(blobSidecars...))
 }
 
 func TestBytesToAny(t *testing.T) {
@@ -37,6 +37,7 @@ func TestBytesToAny(t *testing.T) {
 }
 
 func TestGenerateCommitmentAndProof(t *testing.T) {
+    require.NoError(t, Start())
     blob := random.GetRandBlob(123)
     commitment, proof, err := GenerateCommitmentAndProof(blob)
     require.NoError(t, err)
@@ -45,3 +46,432 @@ func TestGenerateCommitmentAndProof(t *testing.T) {
     require.Equal(t, expectedCommitment, commitment)
     require.Equal(t, expectedProof, proof)
 }
+
+func TestVerifyBlobKZGProofBatch(t *testing.T) {
+    // Initialize KZG for testing
+    require.NoError(t, Start())
+
+    t.Run("valid single blob batch", func(t *testing.T) {
+        blob := random.GetRandBlob(123)
+        commitment, proof, err := GenerateCommitmentAndProof(blob)
+        require.NoError(t, err)
+
+        blobs := [][]byte{blob[:]}
+        commitments := [][]byte{commitment[:]}
+        proofs := [][]byte{proof[:]}
+
+        err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
+        require.NoError(t, err)
+    })
+
+    t.Run("valid multiple blob batch", func(t *testing.T) {
+        blobCount := 3
+        blobs := make([][]byte, blobCount)
+        commitments := make([][]byte, blobCount)
+        proofs := make([][]byte, blobCount)
+
+        for i := 0; i < blobCount; i++ {
+            blob := random.GetRandBlob(int64(i))
+            commitment, proof, err := GenerateCommitmentAndProof(blob)
+            require.NoError(t, err)
+
+            blobs[i] = blob[:]
+            commitments[i] = commitment[:]
+            proofs[i] = proof[:]
+        }
+
+        err := VerifyBlobKZGProofBatch(blobs, commitments, proofs)
+        require.NoError(t, err)
+    })
+
+    t.Run("empty inputs should pass", func(t *testing.T) {
+        err := VerifyBlobKZGProofBatch([][]byte{}, [][]byte{}, [][]byte{})
+        require.NoError(t, err)
+    })
+
+    t.Run("mismatched input lengths", func(t *testing.T) {
+        blob := random.GetRandBlob(123)
+        commitment, proof, err := GenerateCommitmentAndProof(blob)
+        require.NoError(t, err)
+
+        // Test different mismatch scenarios
+        err = VerifyBlobKZGProofBatch(
+            [][]byte{blob[:]},
+            [][]byte{},
+            [][]byte{proof[:]},
+        )
+        require.ErrorContains(t, "number of blobs (1), commitments (0), and proofs (1) must match", err)
+
+        err = VerifyBlobKZGProofBatch(
+            [][]byte{blob[:], blob[:]},
+            [][]byte{commitment[:]},
+            [][]byte{proof[:], proof[:]},
+        )
+        require.ErrorContains(t, "number of blobs (2), commitments (1), and proofs (2) must match", err)
+    })
+
+    t.Run("invalid commitment should fail", func(t *testing.T) {
+        blob := random.GetRandBlob(123)
+        _, proof, err := GenerateCommitmentAndProof(blob)
+        require.NoError(t, err)
+
+        // Use a different blob's commitment (mismatch)
+        differentBlob := random.GetRandBlob(456)
+        wrongCommitment, _, err := GenerateCommitmentAndProof(differentBlob)
+        require.NoError(t, err)
+
+        blobs := [][]byte{blob[:]}
+        commitments := [][]byte{wrongCommitment[:]}
+        proofs := [][]byte{proof[:]}
+
+        err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
+        // Single blob optimization uses different error message
+        require.ErrorContains(t, "can't verify opening proof", err)
+    })
+
+    t.Run("invalid proof should fail", func(t *testing.T) {
+        blob := random.GetRandBlob(123)
+        commitment, _, err := GenerateCommitmentAndProof(blob)
+        require.NoError(t, err)
+
+        // Use wrong proof
+        invalidProof := make([]byte, 48) // All zeros
+
+        blobs := [][]byte{blob[:]}
+        commitments := [][]byte{commitment[:]}
+        proofs := [][]byte{invalidProof}
+
+        err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
+        require.ErrorContains(t, "short buffer", err)
+    })
+
+    t.Run("mixed valid and invalid proofs should fail", func(t *testing.T) {
+        // First blob - valid
+        blob1 := random.GetRandBlob(123)
+        commitment1, proof1, err := GenerateCommitmentAndProof(blob1)
+        require.NoError(t, err)
+
+        // Second blob - invalid proof
+        blob2 := random.GetRandBlob(456)
+        commitment2, _, err := GenerateCommitmentAndProof(blob2)
+        require.NoError(t, err)
+        invalidProof := make([]byte, 48) // All zeros
+
+        blobs := [][]byte{blob1[:], blob2[:]}
+        commitments := [][]byte{commitment1[:], commitment2[:]}
+        proofs := [][]byte{proof1[:], invalidProof}
+
+        err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
+        require.ErrorContains(t, "batch verification", err)
+    })
+
+    t.Run("batch KZG proof verification failed", func(t *testing.T) {
+        // Create multiple blobs with mismatched commitments and proofs to trigger batch verification failure
+        blob1 := random.GetRandBlob(123)
+        blob2 := random.GetRandBlob(456)
+
+        // Generate valid proof for blob1
+        commitment1, proof1, err := GenerateCommitmentAndProof(blob1)
+        require.NoError(t, err)
+
+        // Generate valid proof for blob2 but use wrong commitment (from blob1)
+        _, proof2, err := GenerateCommitmentAndProof(blob2)
+        require.NoError(t, err)
+
+        // Use blob2 data with blob1's commitment and blob2's proof - this should cause batch verification to fail
+        blobs := [][]byte{blob1[:], blob2[:]}
+        commitments := [][]byte{commitment1[:], commitment1[:]} // Wrong commitment for blob2
+        proofs := [][]byte{proof1[:], proof2[:]}
+
+        err = VerifyBlobKZGProofBatch(blobs, commitments, proofs)
+        require.ErrorContains(t, "batch KZG proof verification failed", err)
+    })
+}
+
+func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) {
+    // Initialize KZG for testing
+    require.NoError(t, Start())
+
+    t.Run("valid single blob cell verification", func(t *testing.T) {
+        numberOfColumns := uint64(128)
+
+        // Generate blob and commitment
+        randBlob := random.GetRandBlob(123)
+        var blob Blob
+        copy(blob[:], randBlob[:])
+        commitment, err := BlobToKZGCommitment(&blob)
+        require.NoError(t, err)
+
+        // Compute cells and proofs
+        cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob)
+        require.NoError(t, err)
+
+        // Create flattened cell proofs (like execution client format)
+        cellProofs := make([][]byte, numberOfColumns)
+        for i := range numberOfColumns {
+            cellProofs[i] = cellsAndProofs.Proofs[i][:]
+        }
+
+        blobs := [][]byte{blob[:]}
+        commitments := [][]byte{commitment[:]}
+
+        err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
+        require.NoError(t, err)
+    })
+
+    t.Run("valid multiple blob cell verification", func(t *testing.T) {
+        numberOfColumns := uint64(128)
+        blobCount := 2
+
+        blobs := make([][]byte, blobCount)
+        commitments := make([][]byte, blobCount)
+        var allCellProofs [][]byte
+
+        for i := range blobCount {
+            // Generate blob and commitment
+            randBlob := random.GetRandBlob(int64(i))
+            var blob Blob
+            copy(blob[:], randBlob[:])
+            commitment, err := BlobToKZGCommitment(&blob)
+            require.NoError(t, err)
+
+            // Compute cells and proofs
+            cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob)
+            require.NoError(t, err)
+
+            blobs[i] = blob[:]
+            commitments[i] = commitment[:]
+
+            // Add cell proofs for this blob
+            for j := range numberOfColumns {
+                allCellProofs = append(allCellProofs, cellsAndProofs.Proofs[j][:])
+            }
+        }
+
+        err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, allCellProofs, numberOfColumns)
+        require.NoError(t, err)
+    })
+
+    t.Run("empty inputs should pass", func(t *testing.T) {
+        err := VerifyCellKZGProofBatchFromBlobData([][]byte{}, [][]byte{}, [][]byte{}, 128)
+        require.NoError(t, err)
+    })
+
+    t.Run("mismatched blob and commitment count", func(t *testing.T) {
+        randBlob := random.GetRandBlob(123)
+        var blob Blob
+        copy(blob[:], randBlob[:])
+
+        err := VerifyCellKZGProofBatchFromBlobData(
+            [][]byte{blob[:]},
+            [][]byte{}, // Empty commitments
+            [][]byte{},
+            128,
+        )
+        require.ErrorContains(t, "expected 128 cell proofs", err)
+    })
+
+    t.Run("wrong cell proof count", func(t *testing.T) {
+        numberOfColumns := uint64(128)
+
+        randBlob := random.GetRandBlob(123)
+        var blob Blob
+        copy(blob[:], randBlob[:])
+        commitment, err := BlobToKZGCommitment(&blob)
+        require.NoError(t, err)
+
+        blobs := [][]byte{blob[:]}
+        commitments := [][]byte{commitment[:]}
+
+        // Wrong number of cell proofs - should be 128 for 1 blob, but provide 10
+        wrongCellProofs := make([][]byte, 10)
+
+        err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, wrongCellProofs, numberOfColumns)
+        require.ErrorContains(t, "expected 128 cell proofs, got 10", err)
+    })
+
+    t.Run("invalid cell proofs should fail", func(t *testing.T) {
+        numberOfColumns := uint64(128)
+
+        randBlob := random.GetRandBlob(123)
+        var blob Blob
+        copy(blob[:], randBlob[:])
+        commitment, err := BlobToKZGCommitment(&blob)
+        require.NoError(t, err)
+
+        blobs := [][]byte{blob[:]}
+        commitments := [][]byte{commitment[:]}
+
+        // Create invalid cell proofs (all zeros)
+        invalidCellProofs := make([][]byte, numberOfColumns)
+        for i := range numberOfColumns {
+            invalidCellProofs[i] = make([]byte, 48) // All zeros
+        }
+
+        err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, invalidCellProofs, numberOfColumns)
+        require.ErrorContains(t, "cell batch verification", err)
+    })
+
+    t.Run("mismatched commitment should fail", func(t *testing.T) {
+        numberOfColumns := uint64(128)
+
+        // Generate blob and correct cell proofs
+        randBlob := random.GetRandBlob(123)
+        var blob Blob
+        copy(blob[:], randBlob[:])
+        cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob)
+        require.NoError(t, err)
+
+        // Generate wrong commitment from different blob
+        randBlob2 := random.GetRandBlob(456)
+        var differentBlob Blob
+        copy(differentBlob[:], randBlob2[:])
+        wrongCommitment, err := BlobToKZGCommitment(&differentBlob)
+        require.NoError(t, err)
+
+        cellProofs := make([][]byte, numberOfColumns)
+        for i := range numberOfColumns {
+            cellProofs[i] = cellsAndProofs.Proofs[i][:]
+        }
+
+        blobs := [][]byte{blob[:]}
+        commitments := [][]byte{wrongCommitment[:]}
+
+        err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
+        require.ErrorContains(t, "cell KZG proof batch verification failed", err)
+    })
+
+    t.Run("invalid blob data that should cause ComputeCells to fail", func(t *testing.T) {
+        numberOfColumns := uint64(128)
+
+        // Create invalid blob (not properly formatted)
+        invalidBlobData := make([]byte, 10) // Too short
+        commitment := make([]byte, 48)      // Dummy commitment
+        cellProofs := make([][]byte, numberOfColumns)
+        for i := range numberOfColumns {
+            cellProofs[i] = make([]byte, 48)
+        }
+
+        blobs := [][]byte{invalidBlobData}
+        commitments := [][]byte{commitment}
+
+        err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
+        require.NotNil(t, err)
+        require.ErrorContains(t, "blobs len (10) differs from expected (131072)", err)
+    })
+
+    t.Run("invalid commitment size should fail", func(t *testing.T) {
+        numberOfColumns := uint64(128)
+
+        randBlob := random.GetRandBlob(123)
+        var blob Blob
+        copy(blob[:], randBlob[:])
+
+        // Create invalid commitment (wrong size)
+        invalidCommitment := make([]byte, 32) // Should be 48 bytes
+        cellProofs := make([][]byte, numberOfColumns)
+        for i := range numberOfColumns {
+            cellProofs[i] = make([]byte, 48)
+        }
+
+        blobs := [][]byte{blob[:]}
+        commitments := [][]byte{invalidCommitment}
+
+        err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, cellProofs, numberOfColumns)
+        require.ErrorContains(t, "commitments len (32) differs from expected (48)", err)
+    })
+
+    t.Run("invalid cell proof size should fail", func(t *testing.T) {
+        numberOfColumns := uint64(128)
+
+        randBlob := random.GetRandBlob(123)
+        var blob Blob
+        copy(blob[:], randBlob[:])
+        commitment, err := BlobToKZGCommitment(&blob)
+        require.NoError(t, err)
+
+        // Create invalid cell proofs (wrong size)
+        invalidCellProofs := make([][]byte, numberOfColumns)
+        for i := range numberOfColumns {
+            if i == 0 {
+                invalidCellProofs[i] = make([]byte, 32) // Wrong size - should be 48
+            } else {
+                invalidCellProofs[i] = make([]byte, 48)
+            }
+        }
+
+        blobs := [][]byte{blob[:]}
+        commitments := [][]byte{commitment[:]}
+
+        err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, invalidCellProofs, numberOfColumns)
+        require.ErrorContains(t, "proofs len (32) differs from expected (48)", err)
+    })
+
+    t.Run("multiple blobs with mixed invalid commitments", func(t *testing.T) {
+        numberOfColumns := uint64(128)
+        blobCount := 2
+
+        blobs := make([][]byte, blobCount)
+        commitments := make([][]byte, blobCount)
+        var allCellProofs [][]byte
+
+        // First blob - valid
+        randBlob1 := random.GetRandBlob(123)
+        var blob1 Blob
+        copy(blob1[:], randBlob1[:])
+        commitment1, err := BlobToKZGCommitment(&blob1)
+        require.NoError(t, err)
+        blobs[0] = blob1[:]
+        commitments[0] = commitment1[:]
+
+        // Second blob - use invalid commitment size
+        randBlob2 := random.GetRandBlob(456)
+        var blob2 Blob
+        copy(blob2[:], randBlob2[:])
+        blobs[1] = blob2[:]
+        commitments[1] = make([]byte, 32) // Wrong size
+
+        // Add cell proofs for both blobs
+        for i := 0; i < blobCount; i++ {
+            for j := uint64(0); j < numberOfColumns; j++ {
+                allCellProofs = append(allCellProofs, make([]byte, 48))
+            }
+        }
+
+        err = VerifyCellKZGProofBatchFromBlobData(blobs, commitments, allCellProofs, numberOfColumns)
+        require.ErrorContains(t, "commitments len (32) differs from expected (48)", err)
+    })
+
+    t.Run("multiple blobs with mixed invalid cell proof sizes", func(t *testing.T) {
+        numberOfColumns := uint64(128)
+        blobCount := 2
+
+        blobs := make([][]byte, blobCount)
+        commitments := make([][]byte, blobCount)
+        var allCellProofs [][]byte
+
+        for i := 0; i < blobCount; i++ {
+            randBlob := random.GetRandBlob(int64(i))
+            var blob Blob
+            copy(blob[:], randBlob[:])
+            commitment, err := BlobToKZGCommitment(&blob)
+            require.NoError(t, err)
+
+            blobs[i] = blob[:]
+            commitments[i] = commitment[:]
+
+            // Add cell proofs - make some invalid in the second blob
+            for j := uint64(0); j < numberOfColumns; j++ {
+                if i == 1 && j == 64 {
+                    // Invalid proof size in middle of second blob's proofs
+                    allCellProofs = append(allCellProofs, make([]byte, 20))
+                } else {
+                    allCellProofs = append(allCellProofs, make([]byte, 48))
+                }
+            }
+        }
+
+        err := VerifyCellKZGProofBatchFromBlobData(blobs, commitments, allCellProofs, numberOfColumns)
+        require.ErrorContains(t, "proofs len (20) differs from expected (48)", err)
+    })
+}
@@ -159,7 +159,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
     }
 
     // Fill in missing blocks
-    if err := s.fillInForkChoiceMissingBlocks(ctx, blks[0], preState.CurrentJustifiedCheckpoint(), preState.FinalizedCheckpoint()); err != nil {
+    if err := s.fillInForkChoiceMissingBlocks(ctx, blks[0], preState.FinalizedCheckpoint(), preState.CurrentJustifiedCheckpoint()); err != nil {
         return errors.Wrap(err, "could not fill in missing blocks to forkchoice")
     }
 
@@ -240,9 +240,10 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
         }
     }
 
-    if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), b); err != nil {
-        return errors.Wrapf(err, "could not validate sidecar availability at slot %d", b.Block().Slot())
+    if err := s.areSidecarsAvailable(ctx, avs, b); err != nil {
+        return errors.Wrapf(err, "could not validate sidecar availability for block %#x at slot %d", b.Root(), b.Block().Slot())
     }
 
     args := &forkchoicetypes.BlockAndCheckpoints{Block: b,
         JustifiedCheckpoint: jCheckpoints[i],
        FinalizedCheckpoint: fCheckpoints[i]}
@@ -308,6 +309,30 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
     return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
 }
 
+func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.AvailabilityStore, roBlock consensusblocks.ROBlock) error {
+    blockVersion := roBlock.Version()
+    block := roBlock.Block()
+    slot := block.Slot()
+
+    if blockVersion >= version.Fulu {
+        if err := s.areDataColumnsAvailable(ctx, roBlock.Root(), block); err != nil {
+            return errors.Wrapf(err, "are data columns available for block %#x with slot %d", roBlock.Root(), slot)
+        }
+
+        return nil
+    }
+
+    if blockVersion >= version.Deneb {
+        if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), roBlock); err != nil {
+            return errors.Wrapf(err, "could not validate sidecar availability at slot %d", slot)
+        }
+
+        return nil
+    }
+
+    return nil
+}
+
 func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.BeaconState) error {
     e := coreTime.CurrentEpoch(st)
     if err := helpers.UpdateCommitteeCache(ctx, st, e); err != nil {
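The dispatch above relies on the version package exposing forks as ordered integers, so a Fulu (or later) block is routed to the data-column check before the Deneb branch can match, and anything pre-Deneb has no sidecars to wait for. A hedged sketch of that routing reduced to a pure decision function (the enum and helper names are illustrative only, not part of the change):

```go
// sidecarKind is a hypothetical enum used only to illustrate how
// areSidecarsAvailable routes blocks by fork version.
type sidecarKind int

const (
	noSidecars     sidecarKind = iota // pre-Deneb: nothing to check
	blobSidecars                      // Deneb and later, pre-Fulu: blob sidecars via the availability store
	dataColumnSide                    // Fulu and later: PeerDAS data column sidecars
)

func sidecarKindForVersion(blockVersion int) sidecarKind {
	if blockVersion >= version.Fulu {
		return dataColumnSide
	}
	if blockVersion >= version.Deneb {
		return blobSidecars
	}
	return noSidecars
}
```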
@@ -584,7 +609,7 @@ func (s *Service) runLateBlockTasks() {
 // It returns a map where each key represents a missing BlobSidecar index.
 // An empty map means we have all indices; a non-empty map can be used to compare incoming
 // BlobSidecars against the set of known missing sidecars.
-func missingBlobIndices(bs *filesystem.BlobStorage, root [fieldparams.RootLength]byte, expected [][]byte, slot primitives.Slot) (map[uint64]bool, error) {
+func missingBlobIndices(store *filesystem.BlobStorage, root [fieldparams.RootLength]byte, expected [][]byte, slot primitives.Slot) (map[uint64]bool, error) {
     maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
     if len(expected) == 0 {
         return nil, nil
@@ -592,7 +617,7 @@ func missingBlobIndices(bs *filesystem.BlobStorage, root [fieldparams.RootLength
     if len(expected) > maxBlobsPerBlock {
         return nil, errMaxBlobsExceeded
     }
-    indices := bs.Summary(root)
+    indices := store.Summary(root)
     missing := make(map[uint64]bool, len(expected))
     for i := range expected {
         if len(expected[i]) > 0 && !indices.HasIndex(uint64(i)) {
@@ -607,7 +632,7 @@ func missingBlobIndices(bs *filesystem.BlobStorage, root [fieldparams.RootLength
 // It returns a map where each key represents a missing DataColumnSidecar index.
 // An empty map means we have all indices; a non-empty map can be used to compare incoming
 // DataColumns against the set of known missing sidecars.
-func missingDataColumnIndices(bs *filesystem.DataColumnStorage, root [fieldparams.RootLength]byte, expected map[uint64]bool) (map[uint64]bool, error) {
+func missingDataColumnIndices(store *filesystem.DataColumnStorage, root [fieldparams.RootLength]byte, expected map[uint64]bool) (map[uint64]bool, error) {
     if len(expected) == 0 {
         return nil, nil
     }
@@ -619,7 +644,7 @@ func missingDataColumnIndices(bs *filesystem.DataColumnStorage, root [fieldparam
     }
 
     // Get a summary of the data columns stored in the database.
-    summary := bs.Summary(root)
+    summary := store.Summary(root)
 
     // Check all expected data columns against the summary.
     missing := make(map[uint64]bool)
@@ -717,7 +742,7 @@ func (s *Service) areDataColumnsAvailable(
     summary := s.dataColumnStorage.Summary(root)
     storedDataColumnsCount := summary.Count()
 
-    minimumColumnCountToReconstruct := peerdas.MinimumColumnsCountToReconstruct()
+    minimumColumnCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
 
     // As soon as we have enough data column sidecars, we can reconstruct the missing ones.
     // We don't need to wait for the rest of the data columns to declare the block as available.
@@ -820,7 +845,7 @@ func (s *Service) areDataColumnsAvailable(
             missingIndices = uint64MapToSortedSlice(missingMap)
         }
 
-        return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing %v", block.Slot(), root, missingIndices)
+        return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing: %v", block.Slot(), root, missingIndices)
         }
     }
 }
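The availability check can stop waiting once the store holds MinimumColumnCountToReconstruct() columns, because the 2x extended erasure coding lets a node rebuild every remaining column from any half of them; with the usual NUMBER_OF_COLUMNS of 128 that threshold is 64. A hedged sketch of that early-exit rule as a standalone predicate (the function and parameter names are illustrative, not the ones used in this file):

```go
// canDeclareAvailable is a simplified illustration of the early-exit rule used
// while waiting for data column sidecars: either every expected column is
// already stored, or enough columns are stored to reconstruct the rest.
func canDeclareAvailable(storedCount, expectedCount, minimumToReconstruct uint64) bool {
	if storedCount >= expectedCount {
		return true // nothing is missing
	}
	// Reconstruction needs at least half of the extended columns (64 of 128 on mainnet),
	// so once that many are stored the missing ones can be recomputed locally.
	return storedCount >= minimumToReconstruct
}
```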
@@ -30,6 +30,10 @@ import (
     "github.com/sirupsen/logrus"
 )
 
+// ErrInvalidCheckpointArgs may be returned when the finalized checkpoint has an epoch greater than the justified checkpoint epoch.
+// If you are seeing this error, make sure you haven't mixed up the order of the arguments in the method you are calling.
+var ErrInvalidCheckpointArgs = errors.New("finalized checkpoint cannot be greater than justified checkpoint")
+
 // CurrentSlot returns the current slot based on time.
 func (s *Service) CurrentSlot() primitives.Slot {
     return slots.CurrentSlot(s.genesisTime)
@@ -454,6 +458,9 @@ func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot primitives.
 // This is useful for block tree visualizer and additional vote accounting.
 func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock,
     fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
+    if fCheckpoint.Epoch > jCheckpoint.Epoch {
+        return ErrInvalidCheckpointArgs
+    }
     pendingNodes := make([]*forkchoicetypes.BlockAndCheckpoints, 0)
 
     // Fork choice only matters from last finalized slot.
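Because the parameters are (fCheckpoint, jCheckpoint) in that order, a caller that accidentally swaps them now fails fast with ErrInvalidCheckpointArgs instead of silently inserting fork-choice nodes with inconsistent checkpoints, which is exactly the bug fixed in onBlockBatch above. A minimal hedged sketch of a correct call site (the wrapper itself is hypothetical):

```go
// insertMissingAncestors shows the corrected argument order: finalized first,
// then justified, matching fillInForkChoiceMissingBlocks' signature.
func (s *Service) insertMissingAncestors(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock, st state.BeaconState) error {
	// Swapping these two arguments would now surface as ErrInvalidCheckpointArgs.
	return s.fillInForkChoiceMissingBlocks(ctx, blk, st.FinalizedCheckpoint(), st.CurrentJustifiedCheckpoint())
}
```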
@@ -375,6 +375,81 @@ func TestFillForkChoiceMissingBlocks_FinalizedSibling(t *testing.T) {
     require.Equal(t, ErrNotDescendantOfFinalized.Error(), err.Error())
 }
 
+func TestFillForkChoiceMissingBlocks_ErrorCases(t *testing.T) {
+    tests := []struct {
+        name           string
+        finalizedEpoch primitives.Epoch
+        justifiedEpoch primitives.Epoch
+        expectedError  error
+    }{
+        {
+            name:           "finalized epoch greater than justified epoch",
+            finalizedEpoch: 5,
+            justifiedEpoch: 3,
+            expectedError:  ErrInvalidCheckpointArgs,
+        },
+        {
+            name:           "valid case - finalized equal to justified",
+            finalizedEpoch: 3,
+            justifiedEpoch: 3,
+            expectedError:  nil,
+        },
+        {
+            name:           "valid case - finalized less than justified",
+            finalizedEpoch: 2,
+            justifiedEpoch: 3,
+            expectedError:  nil,
+        },
+    }
+
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            service, tr := minimalTestService(t)
+            ctx, beaconDB := tr.ctx, tr.db
+
+            st, _ := util.DeterministicGenesisState(t, 64)
+            require.NoError(t, service.saveGenesisData(ctx, st))
+
+            // Create a simple block for testing
+            blk := util.NewBeaconBlock()
+            blk.Block.Slot = 10
+            blk.Block.ParentRoot = service.originBlockRoot[:]
+            wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
+            require.NoError(t, err)
+            util.SaveBlock(t, ctx, beaconDB, blk)
+
+            // Create checkpoints with test case epochs
+            finalizedCheckpoint := &ethpb.Checkpoint{
+                Epoch: tt.finalizedEpoch,
+                Root:  service.originBlockRoot[:],
+            }
+            justifiedCheckpoint := &ethpb.Checkpoint{
+                Epoch: tt.justifiedEpoch,
+                Root:  service.originBlockRoot[:],
+            }
+
+            // Set up forkchoice store to avoid other errors
+            fcp := &ethpb.Checkpoint{Epoch: 0, Root: service.originBlockRoot[:]}
+            state, blkRoot, err := prepareForkchoiceState(ctx, 0, service.originBlockRoot, service.originBlockRoot, [32]byte{}, fcp, fcp)
+            require.NoError(t, err)
+            require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
+
+            err = service.fillInForkChoiceMissingBlocks(
+                t.Context(), wsb, finalizedCheckpoint, justifiedCheckpoint)
+
+            if tt.expectedError != nil {
+                require.ErrorIs(t, err, tt.expectedError)
+            } else {
+                // For valid cases, we might get other errors (like block not being descendant of finalized)
+                // but we shouldn't get the checkpoint validation error
+                if err != nil && errors.Is(err, tt.expectedError) {
+                    t.Errorf("Unexpected checkpoint validation error: %v", err)
+                }
+            }
+        })
+    }
+}
+
 // blockTree1 constructs the following tree:
 //
 //    /- B1
@@ -2132,13 +2207,13 @@ func TestNoViableHead_Reboot(t *testing.T) {
 
     // Forkchoice has the genesisRoot loaded at startup
     require.Equal(t, genesisRoot, service.ensureRootNotZeros(service.cfg.ForkChoiceStore.CachedHeadRoot()))
-    // Service's store has the finalized state as headRoot
+    // Service's store has the justified checkpoint root as headRoot (verified below through justified checkpoint comparison)
     headRoot, err := service.HeadRoot(ctx)
     require.NoError(t, err)
-    require.Equal(t, genesisRoot, bytesutil.ToBytes32(headRoot))
+    require.NotEqual(t, bytesutil.ToBytes32(params.BeaconConfig().ZeroHash[:]), bytesutil.ToBytes32(headRoot)) // Ensure head is not zero
     optimistic, err := service.IsOptimistic(ctx)
     require.NoError(t, err)
-    require.Equal(t, false, optimistic)
+    require.Equal(t, true, optimistic) // Head is now optimistic when starting from justified checkpoint
 
     // Check that the node's justified checkpoint does not agree with the
     // last valid state's justified checkpoint
@@ -2889,7 +2964,7 @@ func TestIsDataAvailable(t *testing.T) {
     })
 
     t.Run("Fulu - more than half of the columns in custody", func(t *testing.T) {
-        minimumColumnsCountToReconstruct := peerdas.MinimumColumnsCountToReconstruct()
+        minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
         indices := make([]uint64, 0, minimumColumnsCountToReconstruct)
         for i := range minimumColumnsCountToReconstruct {
            indices = append(indices, i)
@@ -2974,7 +3049,7 @@ func TestIsDataAvailable(t *testing.T) {
 
     startWaiting := make(chan bool)
 
-    minimumColumnsCountToReconstruct := peerdas.MinimumColumnsCountToReconstruct()
+    minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
     indices := make([]uint64, 0, minimumColumnsCountToReconstruct-missingColumns)
 
     for i := range minimumColumnsCountToReconstruct - missingColumns {
@@ -17,7 +17,7 @@ func (s *Service) ReceiveDataColumns(dataColumnSidecars []blocks.VerifiedRODataC
 // ReceiveDataColumn receives a single data column.
 func (s *Service) ReceiveDataColumn(dataColumnSidecar blocks.VerifiedRODataColumn) error {
     if err := s.dataColumnStorage.Save([]blocks.VerifiedRODataColumn{dataColumnSidecar}); err != nil {
-        return errors.Wrap(err, "save data column sidecars")
+        return errors.Wrap(err, "save data column sidecar")
     }
 
     return nil
@@ -20,7 +20,7 @@ func (s *Service) setupForkchoice(st state.BeaconState) error {
|
||||
return errors.Wrap(err, "could not set up forkchoice checkpoints")
|
||||
}
|
||||
if err := s.setupForkchoiceTree(st); err != nil {
|
||||
return errors.Wrap(err, "could not set up forkchoice root")
|
||||
return errors.Wrap(err, "could not set up forkchoice tree")
|
||||
}
|
||||
if err := s.initializeHead(s.ctx, st); err != nil {
|
||||
return errors.Wrap(err, "could not initialize head from db")
|
||||
@@ -30,24 +30,24 @@ func (s *Service) setupForkchoice(st state.BeaconState) error {
|
||||
|
||||
func (s *Service) startupHeadRoot() [32]byte {
|
||||
headStr := features.Get().ForceHead
|
||||
cp := s.FinalizedCheckpt()
|
||||
fRoot := s.ensureRootNotZeros([32]byte(cp.Root))
|
||||
jp := s.CurrentJustifiedCheckpt()
|
||||
jRoot := s.ensureRootNotZeros([32]byte(jp.Root))
|
||||
if headStr == "" {
|
||||
return fRoot
|
||||
return jRoot
|
||||
}
|
||||
if headStr == "head" {
|
||||
root, err := s.cfg.BeaconDB.HeadBlockRoot()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get head block root, starting with finalized block as head")
|
||||
return fRoot
|
||||
log.WithError(err).Error("Could not get head block root, starting with justified block as head")
|
||||
return jRoot
|
||||
}
|
||||
log.Infof("Using Head root of %#x", root)
|
||||
return root
|
||||
}
|
||||
root, err := bytesutil.DecodeHexWithLength(headStr, 32)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not parse head root, starting with finalized block as head")
|
||||
return fRoot
|
||||
log.WithError(err).Error("Could not parse head root, starting with justified block as head")
|
||||
return jRoot
|
||||
}
|
||||
return [32]byte(root)
|
||||
}
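The three branches above reduce to a small decision table: an empty ForceHead value means start from the justified root, the literal value "head" means reuse the head root saved in the database (falling back to the justified root on error), and anything else is parsed as a 32-byte hex root. The standalone sketch below mirrors that logic; it is an illustration only, and it uses plain hex decoding rather than Prysm's bytesutil helper used in the real code.

package sketch

import (
	"encoding/hex"
	"strings"
)

// startupRoot mirrors the branching of startupHeadRoot above (illustration only).
// justified stands in for the justified checkpoint root and dbHead for
// BeaconDB.HeadBlockRoot.
func startupRoot(forceHead string, justified [32]byte, dbHead func() ([32]byte, error)) [32]byte {
	switch {
	case forceHead == "":
		return justified
	case forceHead == "head":
		root, err := dbHead()
		if err != nil {
			// Could not read the saved head root: fall back to the justified root.
			return justified
		}
		return root
	default:
		decoded, err := hex.DecodeString(strings.TrimPrefix(forceHead, "0x"))
		if err != nil || len(decoded) != 32 {
			// Malformed input: fall back to the justified root.
			return justified
		}
		return [32]byte(decoded)
	}
}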
@@ -32,7 +32,7 @@ func Test_startupHeadRoot(t *testing.T) {
|
||||
})
|
||||
defer resetCfg()
|
||||
require.Equal(t, service.startupHeadRoot(), gr)
|
||||
require.LogsContain(t, hook, "Could not get head block root, starting with finalized block as head")
|
||||
require.LogsContain(t, hook, "Could not get head block root, starting with justified block as head")
|
||||
})
|
||||
|
||||
st, _ := util.DeterministicGenesisState(t, 64)
|
||||
|
||||
@@ -89,7 +89,7 @@ func (mb *mockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mb *mockBroadcaster) BroadcastDataColumn(_ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar) error {
|
||||
func (mb *mockBroadcaster) BroadcastDataColumnSidecar(_ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar) error {
|
||||
mb.broadcastCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -25,6 +25,7 @@ var ErrNoBuilder = errors.New("builder endpoint not configured")
|
||||
// BlockBuilder defines the interface for interacting with the block builder
|
||||
type BlockBuilder interface {
|
||||
SubmitBlindedBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, v1.BlobsBundler, error)
|
||||
SubmitBlindedBlockPostFulu(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) error
|
||||
GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubKey [48]byte) (builder.SignedBid, error)
|
||||
RegisterValidator(ctx context.Context, reg []*ethpb.SignedValidatorRegistrationV1) error
|
||||
RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
|
||||
@@ -101,6 +102,22 @@ func (s *Service) SubmitBlindedBlock(ctx context.Context, b interfaces.ReadOnlyS
|
||||
return s.c.SubmitBlindedBlock(ctx, b)
|
||||
}
|
||||
|
||||
// SubmitBlindedBlockPostFulu submits a blinded block to the builder relay network post-Fulu.
|
||||
// After Fulu, relays only return status codes (no payload).
|
||||
func (s *Service) SubmitBlindedBlockPostFulu(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock) error {
|
||||
ctx, span := trace.StartSpan(ctx, "builder.SubmitBlindedBlockPostFulu")
|
||||
defer span.End()
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
submitBlindedBlockLatency.Observe(float64(time.Since(start).Milliseconds()))
|
||||
}()
|
||||
if s.c == nil {
|
||||
return ErrNoBuilder
|
||||
}
|
||||
|
||||
return s.c.SubmitBlindedBlockPostFulu(ctx, b)
|
||||
}
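As the comment above notes, post-Fulu relays return only a status code, so callers stop unwrapping an execution payload and blobs bundle. A hypothetical caller might branch as in the sketch below; the interface here mirrors only the two submission methods from this diff, with the concrete block, payload, and bundle types deliberately elided, so it is not the real API surface.

package sketch

import "context"

// relay mirrors only the two submission methods of the BlockBuilder interface
// shown in this diff; concrete types are elided for brevity.
type relay interface {
	SubmitBlindedBlock(ctx context.Context, blk any) (payload, blobsBundle any, err error)
	SubmitBlindedBlockPostFulu(ctx context.Context, blk any) error
}

// submitToRelay shows the intended split: post-Fulu the relay only acknowledges
// the submission, pre-Fulu it reveals the payload (and, post-Deneb, a blobs bundle).
func submitToRelay(ctx context.Context, r relay, blk any, postFulu bool) error {
	if postFulu {
		return r.SubmitBlindedBlockPostFulu(ctx, blk)
	}
	payload, blobsBundle, err := r.SubmitBlindedBlock(ctx, blk)
	if err != nil {
		return err
	}
	_, _ = payload, blobsBundle // handed off to the block proposal flow
	return nil
}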
// GetHeader retrieves the header for a given slot and parent hash from the builder relay network.
|
||||
func (s *Service) GetHeader(ctx context.Context, slot primitives.Slot, parentHash [32]byte, pubKey [48]byte) (builder.SignedBid, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "builder.GetHeader")
|
||||
|
||||
@@ -24,21 +24,22 @@ type Config struct {
|
||||
|
||||
// MockBuilderService to mock builder.
|
||||
type MockBuilderService struct {
|
||||
HasConfigured bool
|
||||
Payload *v1.ExecutionPayload
|
||||
PayloadCapella *v1.ExecutionPayloadCapella
|
||||
PayloadDeneb *v1.ExecutionPayloadDeneb
|
||||
BlobBundle *v1.BlobsBundle
|
||||
BlobBundleV2 *v1.BlobsBundleV2
|
||||
ErrSubmitBlindedBlock error
|
||||
Bid *ethpb.SignedBuilderBid
|
||||
BidCapella *ethpb.SignedBuilderBidCapella
|
||||
BidDeneb *ethpb.SignedBuilderBidDeneb
|
||||
BidElectra *ethpb.SignedBuilderBidElectra
|
||||
RegistrationCache *cache.RegistrationCache
|
||||
ErrGetHeader error
|
||||
ErrRegisterValidator error
|
||||
Cfg *Config
|
||||
HasConfigured bool
|
||||
Payload *v1.ExecutionPayload
|
||||
PayloadCapella *v1.ExecutionPayloadCapella
|
||||
PayloadDeneb *v1.ExecutionPayloadDeneb
|
||||
BlobBundle *v1.BlobsBundle
|
||||
BlobBundleV2 *v1.BlobsBundleV2
|
||||
ErrSubmitBlindedBlock error
|
||||
ErrSubmitBlindedBlockPostFulu error
|
||||
Bid *ethpb.SignedBuilderBid
|
||||
BidCapella *ethpb.SignedBuilderBidCapella
|
||||
BidDeneb *ethpb.SignedBuilderBidDeneb
|
||||
BidElectra *ethpb.SignedBuilderBidElectra
|
||||
RegistrationCache *cache.RegistrationCache
|
||||
ErrGetHeader error
|
||||
ErrRegisterValidator error
|
||||
Cfg *Config
|
||||
}
|
||||
|
||||
// Configured for mocking.
|
||||
@@ -115,3 +116,8 @@ func (s *MockBuilderService) RegistrationByValidatorID(ctx context.Context, id p
|
||||
func (s *MockBuilderService) RegisterValidator(context.Context, []*ethpb.SignedValidatorRegistrationV1) error {
|
||||
return s.ErrRegisterValidator
|
||||
}
|
||||
|
||||
// SubmitBlindedBlockPostFulu for mocking.
|
||||
func (s *MockBuilderService) SubmitBlindedBlockPostFulu(_ context.Context, _ interfaces.ReadOnlySignedBeaconBlock) error {
|
||||
return s.ErrSubmitBlindedBlockPostFulu
|
||||
}
|
||||
|
||||
@@ -78,6 +78,7 @@ func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
|
||||
|
||||
func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
params.SetupTestConfigCleanup(t)
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
@@ -264,6 +265,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
helpers.ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
|
||||
|
||||
@@ -223,6 +223,14 @@ func dataColumnsSidecars(
|
||||
cellsForRow := cellsAndProofs[rowIndex].Cells
|
||||
proofsForRow := cellsAndProofs[rowIndex].Proofs
|
||||
|
||||
// Validate that we have enough cells and proofs for this column index
|
||||
if columnIndex >= uint64(len(cellsForRow)) {
|
||||
return nil, errors.Errorf("column index %d exceeds cells length %d for blob %d", columnIndex, len(cellsForRow), rowIndex)
|
||||
}
|
||||
if columnIndex >= uint64(len(proofsForRow)) {
|
||||
return nil, errors.Errorf("column index %d exceeds proofs length %d for blob %d", columnIndex, len(proofsForRow), rowIndex)
|
||||
}
|
||||
|
||||
cell := cellsForRow[columnIndex]
|
||||
column = append(column, cell)
|
||||
|
||||
|
||||
@@ -67,6 +67,55 @@ func TestDataColumnSidecars(t *testing.T) {
|
||||
_, err = peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs)
|
||||
require.ErrorIs(t, err, peerdas.ErrSizeMismatch)
|
||||
})
|
||||
|
||||
t.Run("cells array too short for column index", func(t *testing.T) {
|
||||
// Create a Fulu block with a blob commitment.
|
||||
signedBeaconBlockPb := util.NewBeaconBlockFulu()
|
||||
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, 48)}
|
||||
|
||||
// Create a signed beacon block from the protobuf.
|
||||
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create cells and proofs with insufficient cells for the number of columns.
|
||||
// This simulates a scenario where cellsAndProofs has fewer cells than expected columns.
|
||||
cellsAndProofs := []kzg.CellsAndProofs{
|
||||
{
|
||||
Cells: make([]kzg.Cell, 10), // Only 10 cells
|
||||
Proofs: make([]kzg.Proof, 10), // Only 10 proofs
|
||||
},
|
||||
}
|
||||
|
||||
// This should fail because the function will try to access columns up to NumberOfColumns
|
||||
// but we only have 10 cells/proofs.
|
||||
_, err = peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs)
|
||||
require.ErrorContains(t, "column index", err)
|
||||
require.ErrorContains(t, "exceeds cells length", err)
|
||||
})
|
||||
|
||||
t.Run("proofs array too short for column index", func(t *testing.T) {
|
||||
// Create a Fulu block with a blob commitment.
|
||||
signedBeaconBlockPb := util.NewBeaconBlockFulu()
|
||||
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, 48)}
|
||||
|
||||
// Create a signed beacon block from the protobuf.
|
||||
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create cells and proofs with sufficient cells but insufficient proofs.
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
cellsAndProofs := []kzg.CellsAndProofs{
|
||||
{
|
||||
Cells: make([]kzg.Cell, numberOfColumns),
|
||||
Proofs: make([]kzg.Proof, 5), // Only 5 proofs, less than columns
|
||||
},
|
||||
}
|
||||
|
||||
// This should fail when trying to access proof beyond index 4.
|
||||
_, err = peerdas.DataColumnSidecars(signedBeaconBlock, cellsAndProofs)
|
||||
require.ErrorContains(t, "column index", err)
|
||||
require.ErrorContains(t, "exceeds proofs length", err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestComputeCustodyGroupForColumn(t *testing.T) {
|
||||
|
||||
@@ -18,8 +18,8 @@ var (
|
||||
ErrBlobsCellsProofsMismatch = errors.New("blobs and cells proofs mismatch")
|
||||
)
// MinimumColumnsCountToReconstruct returns the minimum number of columns needed to proceed to a reconstruction.
func MinimumColumnsCountToReconstruct() uint64 {
// MinimumColumnCountToReconstruct returns the minimum number of columns needed to proceed to a reconstruction.
func MinimumColumnCountToReconstruct() uint64 {
// If the number of columns is odd, then we need total / 2 + 1 columns to reconstruct.
// If the number of columns is even, then we need total / 2 columns to reconstruct.
return (params.BeaconConfig().NumberOfColumns + 1) / 2
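A worked example of the ceiling division above: assuming the mainnet value of 128 columns, the threshold is 64; with a hypothetical odd count of 7 it is 4, i.e. total/2 + 1. The standalone sketch below mirrors the formula without depending on the beacon config.

package main

import "fmt"

// minimumColumnCountToReconstruct mirrors the formula above:
// integer (n + 1) / 2, i.e. ceil(n / 2).
func minimumColumnCountToReconstruct(numberOfColumns uint64) uint64 {
	return (numberOfColumns + 1) / 2
}

func main() {
	fmt.Println(minimumColumnCountToReconstruct(128)) // even case: 64
	fmt.Println(minimumColumnCountToReconstruct(7))   // odd case (hypothetical): 4
}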
@@ -58,7 +58,7 @@ func ReconstructDataColumnSidecars(inVerifiedRoSidecars []blocks.VerifiedRODataC
// Check if there are enough sidecars to reconstruct the missing columns.
sidecarCount := len(sidecarByIndex)
if uint64(sidecarCount) < MinimumColumnsCountToReconstruct() {
if uint64(sidecarCount) < MinimumColumnCountToReconstruct() {
return nil, ErrNotEnoughDataColumnSidecars
}
@@ -48,7 +48,7 @@ func TestMinimumColumnsCountToReconstruct(t *testing.T) {
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Compute the minimum number of columns needed to reconstruct.
|
||||
actual := peerdas.MinimumColumnsCountToReconstruct()
|
||||
actual := peerdas.MinimumColumnCountToReconstruct()
|
||||
require.Equal(t, tc.expected, actual)
|
||||
})
|
||||
}
|
||||
@@ -100,7 +100,7 @@ func TestReconstructDataColumnSidecars(t *testing.T) {
|
||||
t.Run("not enough columns to enable reconstruction", func(t *testing.T) {
|
||||
_, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, 3)
|
||||
|
||||
minimum := peerdas.MinimumColumnsCountToReconstruct()
|
||||
minimum := peerdas.MinimumColumnCountToReconstruct()
|
||||
_, err := peerdas.ReconstructDataColumnSidecars(verifiedRoSidecars[:minimum-1])
|
||||
require.ErrorIs(t, err, peerdas.ErrNotEnoughDataColumnSidecars)
|
||||
})
|
||||
|
||||
@@ -4,7 +4,6 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"availability_blobs.go",
|
||||
"availability_columns.go",
|
||||
"blob_cache.go",
|
||||
"data_column_cache.go",
|
||||
"iface.go",
|
||||
@@ -13,7 +12,6 @@ go_library(
|
||||
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/das",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
@@ -23,7 +21,6 @@ go_library(
|
||||
"//runtime/logging:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
@@ -33,7 +30,6 @@ go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"availability_blobs_test.go",
|
||||
"availability_columns_test.go",
|
||||
"blob_cache_test.go",
|
||||
"data_column_cache_test.go",
|
||||
],
|
||||
@@ -49,7 +45,6 @@ go_test(
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -53,30 +53,25 @@ func NewLazilyPersistentStore(store *filesystem.BlobStorage, verifier BlobBatchV
|
||||
// Persist adds blobs to the working blob cache. Blobs stored in this cache will be persisted
|
||||
// for at least as long as the node is running. Once IsDataAvailable succeeds, all blobs referenced
|
||||
// by the given block are guaranteed to be persisted for the remainder of the retention period.
|
||||
func (s *LazilyPersistentStoreBlob) Persist(current primitives.Slot, sidecars ...blocks.ROSidecar) error {
|
||||
func (s *LazilyPersistentStoreBlob) Persist(current primitives.Slot, sidecars ...blocks.ROBlob) error {
|
||||
if len(sidecars) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
blobSidecars, err := blocks.BlobSidecarsFromSidecars(sidecars)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "blob sidecars from sidecars")
|
||||
}
|
||||
|
||||
if len(blobSidecars) > 1 {
|
||||
firstRoot := blobSidecars[0].BlockRoot()
|
||||
for _, sidecar := range blobSidecars[1:] {
|
||||
if len(sidecars) > 1 {
|
||||
firstRoot := sidecars[0].BlockRoot()
|
||||
for _, sidecar := range sidecars[1:] {
|
||||
if sidecar.BlockRoot() != firstRoot {
|
||||
return errMixedRoots
|
||||
}
|
||||
}
|
||||
}
|
||||
if !params.WithinDAPeriod(slots.ToEpoch(blobSidecars[0].Slot()), slots.ToEpoch(current)) {
|
||||
if !params.WithinDAPeriod(slots.ToEpoch(sidecars[0].Slot()), slots.ToEpoch(current)) {
|
||||
return nil
|
||||
}
|
||||
key := keyFromSidecar(blobSidecars[0])
|
||||
key := keyFromSidecar(sidecars[0])
|
||||
entry := s.cache.ensure(key)
|
||||
for _, blobSidecar := range blobSidecars {
|
||||
for _, blobSidecar := range sidecars {
|
||||
if err := entry.stash(&blobSidecar); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -118,23 +118,21 @@ func TestLazilyPersistent_Missing(t *testing.T) {
|
||||
|
||||
blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
|
||||
|
||||
scs := blocks.NewSidecarsFromBlobSidecars(blobSidecars)
|
||||
|
||||
mbv := &mockBlobBatchVerifier{t: t, scs: blobSidecars}
|
||||
as := NewLazilyPersistentStore(store, mbv)
|
||||
|
||||
// Only one commitment persisted, should return error with other indices
|
||||
require.NoError(t, as.Persist(1, scs[2]))
|
||||
require.NoError(t, as.Persist(1, blobSidecars[2]))
|
||||
err := as.IsDataAvailable(ctx, 1, blk)
|
||||
require.ErrorIs(t, err, errMissingSidecar)
|
||||
|
||||
// All but one persisted, return missing idx
|
||||
require.NoError(t, as.Persist(1, scs[0]))
|
||||
require.NoError(t, as.Persist(1, blobSidecars[0]))
|
||||
err = as.IsDataAvailable(ctx, 1, blk)
|
||||
require.ErrorIs(t, err, errMissingSidecar)
|
||||
|
||||
// All persisted, return nil
|
||||
require.NoError(t, as.Persist(1, scs...))
|
||||
require.NoError(t, as.Persist(1, blobSidecars...))
|
||||
|
||||
require.NoError(t, as.IsDataAvailable(ctx, 1, blk))
|
||||
}
|
||||
@@ -149,10 +147,8 @@ func TestLazilyPersistent_Mismatch(t *testing.T) {
|
||||
blobSidecars[0].KzgCommitment = bytesutil.PadTo([]byte("nope"), 48)
|
||||
as := NewLazilyPersistentStore(store, mbv)
|
||||
|
||||
scs := blocks.NewSidecarsFromBlobSidecars(blobSidecars)
|
||||
|
||||
// Only one commitment persisted, should return error with other indices
|
||||
require.NoError(t, as.Persist(1, scs[0]))
|
||||
require.NoError(t, as.Persist(1, blobSidecars[0]))
|
||||
err := as.IsDataAvailable(ctx, 1, blk)
|
||||
require.NotNil(t, err)
|
||||
require.ErrorIs(t, err, errCommitmentMismatch)
|
||||
@@ -161,29 +157,25 @@ func TestLazilyPersistent_Mismatch(t *testing.T) {
|
||||
func TestLazyPersistOnceCommitted(t *testing.T) {
|
||||
_, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 6)
|
||||
|
||||
scs := blocks.NewSidecarsFromBlobSidecars(blobSidecars)
|
||||
|
||||
as := NewLazilyPersistentStore(filesystem.NewEphemeralBlobStorage(t), &mockBlobBatchVerifier{})
|
||||
// stashes as expected
|
||||
require.NoError(t, as.Persist(1, scs...))
|
||||
require.NoError(t, as.Persist(1, blobSidecars...))
|
||||
// ignores duplicates
|
||||
require.ErrorIs(t, as.Persist(1, scs...), ErrDuplicateSidecar)
|
||||
require.ErrorIs(t, as.Persist(1, blobSidecars...), ErrDuplicateSidecar)
|
||||
|
||||
// ignores index out of bound
|
||||
blobSidecars[0].Index = 6
|
||||
require.ErrorIs(t, as.Persist(1, blocks.NewSidecarFromBlobSidecar(blobSidecars[0])), errIndexOutOfBounds)
|
||||
require.ErrorIs(t, as.Persist(1, blobSidecars[0]), errIndexOutOfBounds)
|
||||
|
||||
_, moreBlobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 4)
|
||||
|
||||
more := blocks.NewSidecarsFromBlobSidecars(moreBlobSidecars)
|
||||
|
||||
// ignores sidecars before the retention period
|
||||
slotOOB, err := slots.EpochStart(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, as.Persist(32+slotOOB, more[0]))
|
||||
require.NoError(t, as.Persist(32+slotOOB, moreBlobSidecars[0]))
|
||||
|
||||
// doesn't ignore new sidecars with a different block root
|
||||
require.NoError(t, as.Persist(1, more...))
|
||||
require.NoError(t, as.Persist(1, moreBlobSidecars...))
|
||||
}
|
||||
|
||||
type mockBlobBatchVerifier struct {
|
||||
|
||||
@@ -1,213 +0,0 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
errors "github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// LazilyPersistentStoreColumn is an implementation of AvailabilityStore to be used when batch syncing data columns.
|
||||
// This implementation will hold any data columns passed to Persist until the IsDataAvailable is called for their
|
||||
// block, at which time they will undergo full verification and be saved to the disk.
|
||||
type LazilyPersistentStoreColumn struct {
|
||||
store *filesystem.DataColumnStorage
|
||||
nodeID enode.ID
|
||||
cache *dataColumnCache
|
||||
newDataColumnsVerifier verification.NewDataColumnsVerifier
|
||||
custodyGroupCount uint64
|
||||
}
|
||||
|
||||
var _ AvailabilityStore = &LazilyPersistentStoreColumn{}
|
||||
|
||||
// DataColumnsVerifier enables LazilyPersistentStoreColumn to manage the verification process
|
||||
// going from RODataColumn->VerifiedRODataColumn, while avoiding the decision of which individual verifications
|
||||
// to run and in what order. Since LazilyPersistentStoreColumn always tries to verify and save data columns only when
|
||||
// they are all available, the interface takes a slice of data column sidecars.
|
||||
type DataColumnsVerifier interface {
|
||||
VerifiedRODataColumns(ctx context.Context, blk blocks.ROBlock, scs []blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error)
|
||||
}
|
||||
|
||||
// NewLazilyPersistentStoreColumn creates a new LazilyPersistentStoreColumn.
|
||||
// WARNING: The resulting LazilyPersistentStoreColumn is NOT thread-safe.
|
||||
func NewLazilyPersistentStoreColumn(
|
||||
store *filesystem.DataColumnStorage,
|
||||
nodeID enode.ID,
|
||||
newDataColumnsVerifier verification.NewDataColumnsVerifier,
|
||||
custodyGroupCount uint64,
|
||||
) *LazilyPersistentStoreColumn {
|
||||
return &LazilyPersistentStoreColumn{
|
||||
store: store,
|
||||
nodeID: nodeID,
|
||||
cache: newDataColumnCache(),
|
||||
newDataColumnsVerifier: newDataColumnsVerifier,
|
||||
custodyGroupCount: custodyGroupCount,
|
||||
}
|
||||
}
|
||||
|
||||
// PersistColumns adds columns to the working column cache. Columns stored in this cache will be persisted
|
||||
// for at least as long as the node is running. Once IsDataAvailable succeeds, all columns referenced
|
||||
// by the given block are guaranteed to be persisted for the remainder of the retention period.
|
||||
func (s *LazilyPersistentStoreColumn) Persist(current primitives.Slot, sidecars ...blocks.ROSidecar) error {
|
||||
if len(sidecars) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
dataColumnSidecars, err := blocks.DataColumnSidecarsFromSidecars(sidecars)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "blob sidecars from sidecars")
|
||||
}
|
||||
|
||||
// It is safe to retrieve the first sidecar.
|
||||
firstSidecar := dataColumnSidecars[0]
|
||||
|
||||
if len(sidecars) > 1 {
|
||||
firstRoot := firstSidecar.BlockRoot()
|
||||
for _, sidecar := range dataColumnSidecars[1:] {
|
||||
if sidecar.BlockRoot() != firstRoot {
|
||||
return errMixedRoots
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
firstSidecarEpoch, currentEpoch := slots.ToEpoch(firstSidecar.Slot()), slots.ToEpoch(current)
|
||||
if !params.WithinDAPeriod(firstSidecarEpoch, currentEpoch) {
|
||||
return nil
|
||||
}
|
||||
|
||||
key := cacheKey{slot: firstSidecar.Slot(), root: firstSidecar.BlockRoot()}
|
||||
entry := s.cache.ensure(key)
|
||||
|
||||
for _, sidecar := range dataColumnSidecars {
|
||||
if err := entry.stash(&sidecar); err != nil {
|
||||
return errors.Wrap(err, "stash DataColumnSidecar")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsDataAvailable returns nil if all the commitments in the given block are persisted to the db and have been verified.
|
||||
// DataColumnsSidecars already in the db are assumed to have been previously verified against the block.
|
||||
func (s *LazilyPersistentStoreColumn) IsDataAvailable(ctx context.Context, currentSlot primitives.Slot, block blocks.ROBlock) error {
|
||||
blockCommitments, err := s.fullCommitmentsToCheck(s.nodeID, block, currentSlot)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "full commitments to check with block root `%#x` and current slot `%d`", block.Root(), currentSlot)
|
||||
}
|
||||
|
||||
// Return early for blocks that do not have any commitments.
|
||||
if blockCommitments.count() == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get the root of the block.
|
||||
blockRoot := block.Root()
|
||||
|
||||
// Build the cache key for the block.
|
||||
key := cacheKey{slot: block.Block().Slot(), root: blockRoot}
|
||||
|
||||
// Retrieve the cache entry for the block, or create an empty one if it doesn't exist.
|
||||
entry := s.cache.ensure(key)
|
||||
|
||||
// Delete the cache entry for the block at the end.
|
||||
defer s.cache.delete(key)
|
||||
|
||||
// Set the disk summary for the block in the cache entry.
|
||||
entry.setDiskSummary(s.store.Summary(blockRoot))
|
||||
|
||||
// Verify we have all the expected sidecars, and fail fast if any are missing or inconsistent.
|
||||
// We don't try to salvage problematic batches because this indicates a misbehaving peer and we'd rather
|
||||
// ignore their response and decrease their peer score.
|
||||
roDataColumns, err := entry.filter(blockRoot, blockCommitments)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "entry filter")
|
||||
}
|
||||
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#datacolumnsidecarsbyrange-v1
|
||||
verifier := s.newDataColumnsVerifier(roDataColumns, verification.ByRangeRequestDataColumnSidecarRequirements)
|
||||
|
||||
if err := verifier.ValidFields(); err != nil {
|
||||
return errors.Wrap(err, "valid fields")
|
||||
}
|
||||
|
||||
if err := verifier.SidecarInclusionProven(); err != nil {
|
||||
return errors.Wrap(err, "sidecar inclusion proven")
|
||||
}
|
||||
|
||||
if err := verifier.SidecarKzgProofVerified(); err != nil {
|
||||
return errors.Wrap(err, "sidecar KZG proof verified")
|
||||
}
|
||||
|
||||
verifiedRoDataColumns, err := verifier.VerifiedRODataColumns()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "verified RO data columns - should never happen")
|
||||
}
|
||||
|
||||
if err := s.store.Save(verifiedRoDataColumns); err != nil {
|
||||
return errors.Wrap(err, "save data column sidecars")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// fullCommitmentsToCheck returns the commitments to check for a given block.
|
||||
func (s *LazilyPersistentStoreColumn) fullCommitmentsToCheck(nodeID enode.ID, block blocks.ROBlock, currentSlot primitives.Slot) (*safeCommitmentsArray, error) {
|
||||
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
|
||||
|
||||
// Return early for blocks that are pre-Fulu.
|
||||
if block.Version() < version.Fulu {
|
||||
return &safeCommitmentsArray{}, nil
|
||||
}
|
||||
|
||||
// Compute the block epoch.
|
||||
blockSlot := block.Block().Slot()
|
||||
blockEpoch := slots.ToEpoch(blockSlot)
|
||||
|
||||
// Compute the current epoch.
|
||||
currentEpoch := slots.ToEpoch(currentSlot)
|
||||
|
||||
// Return early if the request is out of the MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS window.
|
||||
if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
|
||||
return &safeCommitmentsArray{}, nil
|
||||
}
|
||||
|
||||
// Retrieve the KZG commitments for the block.
|
||||
kzgCommitments, err := block.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "blob KZG commitments")
|
||||
}
|
||||
|
||||
// Return early if there are no commitments in the block.
|
||||
if len(kzgCommitments) == 0 {
|
||||
return &safeCommitmentsArray{}, nil
|
||||
}
|
||||
|
||||
// Retrieve peer info.
|
||||
samplingSize := max(s.custodyGroupCount, samplesPerSlot)
|
||||
peerInfo, _, err := peerdas.Info(nodeID, samplingSize)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "peer info")
|
||||
}
|
||||
|
||||
// Create a safe commitments array for the custody columns.
|
||||
commitmentsArray := &safeCommitmentsArray{}
|
||||
commitmentsArraySize := uint64(len(commitmentsArray))
|
||||
|
||||
for column := range peerInfo.CustodyColumns {
|
||||
if column >= commitmentsArraySize {
|
||||
return nil, errors.Errorf("custody column index %d too high (max allowed %d) - should never happen", column, commitmentsArraySize)
|
||||
}
|
||||
|
||||
commitmentsArray[column] = kzgCommitments
|
||||
}
|
||||
|
||||
return commitmentsArray, nil
|
||||
}
|
||||
@@ -1,313 +0,0 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
)
|
||||
|
||||
var commitments = [][]byte{
|
||||
bytesutil.PadTo([]byte("a"), 48),
|
||||
bytesutil.PadTo([]byte("b"), 48),
|
||||
bytesutil.PadTo([]byte("c"), 48),
|
||||
bytesutil.PadTo([]byte("d"), 48),
|
||||
}
|
||||
|
||||
func TestPersist(t *testing.T) {
|
||||
t.Run("no sidecars", func(t *testing.T) {
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
|
||||
err := lazilyPersistentStoreColumns.Persist(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
|
||||
})
|
||||
|
||||
t.Run("mixed roots", func(t *testing.T) {
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
|
||||
dataColumnParamsByBlockRoot := []util.DataColumnParam{
|
||||
{Slot: 1, Index: 1},
|
||||
{Slot: 2, Index: 2},
|
||||
}
|
||||
|
||||
roSidecars, _ := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
|
||||
|
||||
err := lazilyPersistentStoreColumns.Persist(0, roSidecars...)
|
||||
require.ErrorIs(t, err, errMixedRoots)
|
||||
require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
|
||||
})
|
||||
|
||||
t.Run("outside DA period", func(t *testing.T) {
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
|
||||
dataColumnParamsByBlockRoot := []util.DataColumnParam{
|
||||
{Slot: 1, Index: 1},
|
||||
}
|
||||
|
||||
roSidecars, _ := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
|
||||
|
||||
err := lazilyPersistentStoreColumns.Persist(1_000_000, roSidecars...)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
|
||||
})
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
const slot = 42
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
|
||||
dataColumnParamsByBlockRoot := []util.DataColumnParam{
|
||||
{Slot: slot, Index: 1},
|
||||
{Slot: slot, Index: 5},
|
||||
}
|
||||
|
||||
roSidecars, roDataColumns := roSidecarsFromDataColumnParamsByBlockRoot(t, dataColumnParamsByBlockRoot)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, nil, 0)
|
||||
|
||||
err := lazilyPersistentStoreColumns.Persist(slot, roSidecars...)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(lazilyPersistentStoreColumns.cache.entries))
|
||||
|
||||
key := cacheKey{slot: slot, root: roDataColumns[0].BlockRoot()}
|
||||
entry, ok := lazilyPersistentStoreColumns.cache.entries[key]
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
// A call to Persist does NOT save the sidecars to disk.
|
||||
require.Equal(t, uint64(0), entry.diskSummary.Count())
|
||||
|
||||
require.DeepSSZEqual(t, roDataColumns[0], *entry.scs[1])
|
||||
require.DeepSSZEqual(t, roDataColumns[1], *entry.scs[5])
|
||||
|
||||
for i, roDataColumn := range entry.scs {
|
||||
if map[int]bool{1: true, 5: true}[i] {
|
||||
continue
|
||||
}
|
||||
|
||||
require.IsNil(t, roDataColumn)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestIsDataAvailable(t *testing.T) {
|
||||
newDataColumnsVerifier := func(dataColumnSidecars []blocks.RODataColumn, _ []verification.Requirement) verification.DataColumnsVerifier {
|
||||
return &mockDataColumnsVerifier{t: t, dataColumnSidecars: dataColumnSidecars}
|
||||
}
|
||||
|
||||
ctx := t.Context()
|
||||
|
||||
t.Run("without commitments", func(t *testing.T) {
|
||||
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
|
||||
signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)
|
||||
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, newDataColumnsVerifier, 0)
|
||||
|
||||
err := lazilyPersistentStoreColumns.IsDataAvailable(ctx, 0 /*current slot*/, signedRoBlock)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("with commitments", func(t *testing.T) {
|
||||
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
|
||||
signedBeaconBlockFulu.Block.Body.BlobKzgCommitments = commitments
|
||||
signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)
|
||||
block := signedRoBlock.Block()
|
||||
slot := block.Slot()
|
||||
proposerIndex := block.ProposerIndex()
|
||||
parentRoot := block.ParentRoot()
|
||||
stateRoot := block.StateRoot()
|
||||
bodyRoot, err := block.Body().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
root := signedRoBlock.Root()
|
||||
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, enode.ID{}, newDataColumnsVerifier, 0)
|
||||
|
||||
indices := [...]uint64{1, 17, 19, 42, 75, 87, 102, 117}
|
||||
dataColumnsParams := make([]util.DataColumnParam, 0, len(indices))
|
||||
for _, index := range indices {
|
||||
dataColumnParams := util.DataColumnParam{
|
||||
Index: index,
|
||||
KzgCommitments: commitments,
|
||||
|
||||
Slot: slot,
|
||||
ProposerIndex: proposerIndex,
|
||||
ParentRoot: parentRoot[:],
|
||||
StateRoot: stateRoot[:],
|
||||
BodyRoot: bodyRoot[:],
|
||||
}
|
||||
|
||||
dataColumnsParams = append(dataColumnsParams, dataColumnParams)
|
||||
}
|
||||
|
||||
_, verifiedRoDataColumns := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnsParams)
|
||||
|
||||
key := cacheKey{root: root}
|
||||
entry := lazilyPersistentStoreColumns.cache.ensure(key)
|
||||
defer lazilyPersistentStoreColumns.cache.delete(key)
|
||||
|
||||
for _, verifiedRoDataColumn := range verifiedRoDataColumns {
|
||||
err := entry.stash(&verifiedRoDataColumn.RODataColumn)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
err = lazilyPersistentStoreColumns.IsDataAvailable(ctx, slot, signedRoBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
actual, err := dataColumnStorage.Get(root, indices[:])
|
||||
require.NoError(t, err)
|
||||
|
||||
summary := dataColumnStorage.Summary(root)
|
||||
require.Equal(t, uint64(len(indices)), summary.Count())
|
||||
require.DeepSSZEqual(t, verifiedRoDataColumns, actual)
|
||||
})
|
||||
}
|
||||
|
||||
func TestFullCommitmentsToCheck(t *testing.T) {
|
||||
windowSlots, err := slots.EpochEnd(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
|
||||
require.NoError(t, err)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
commitments [][]byte
|
||||
block func(*testing.T) blocks.ROBlock
|
||||
slot primitives.Slot
|
||||
}{
|
||||
{
|
||||
name: "Pre-Fulu block",
|
||||
block: func(t *testing.T) blocks.ROBlock {
|
||||
return newSignedRoBlock(t, util.NewBeaconBlockElectra())
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Commitments outside data availability window",
|
||||
block: func(t *testing.T) blocks.ROBlock {
|
||||
beaconBlockElectra := util.NewBeaconBlockElectra()
|
||||
|
||||
// Block is from slot 0, "current slot" is window size +1 (so outside the window)
|
||||
beaconBlockElectra.Block.Body.BlobKzgCommitments = commitments
|
||||
|
||||
return newSignedRoBlock(t, beaconBlockElectra)
|
||||
},
|
||||
slot: windowSlots + 1,
|
||||
},
|
||||
{
|
||||
name: "Commitments within data availability window",
|
||||
block: func(t *testing.T) blocks.ROBlock {
|
||||
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
|
||||
signedBeaconBlockFulu.Block.Body.BlobKzgCommitments = commitments
|
||||
signedBeaconBlockFulu.Block.Slot = 100
|
||||
|
||||
return newSignedRoBlock(t, signedBeaconBlockFulu)
|
||||
},
|
||||
commitments: commitments,
|
||||
slot: 100,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
b := tc.block(t)
|
||||
s := NewLazilyPersistentStoreColumn(nil, enode.ID{}, nil, numberOfColumns)
|
||||
|
||||
commitmentsArray, err := s.fullCommitmentsToCheck(enode.ID{}, b, tc.slot)
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, commitments := range commitmentsArray {
|
||||
require.DeepEqual(t, tc.commitments, commitments)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func roSidecarsFromDataColumnParamsByBlockRoot(t *testing.T, parameters []util.DataColumnParam) ([]blocks.ROSidecar, []blocks.RODataColumn) {
|
||||
roDataColumns, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, parameters)
|
||||
|
||||
roSidecars := make([]blocks.ROSidecar, 0, len(roDataColumns))
|
||||
for _, roDataColumn := range roDataColumns {
|
||||
roSidecars = append(roSidecars, blocks.NewSidecarFromDataColumnSidecar(roDataColumn))
|
||||
}
|
||||
|
||||
return roSidecars, roDataColumns
|
||||
}
|
||||
|
||||
func newSignedRoBlock(t *testing.T, signedBeaconBlock interface{}) blocks.ROBlock {
|
||||
sb, err := blocks.NewSignedBeaconBlock(signedBeaconBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
rb, err := blocks.NewROBlock(sb)
|
||||
require.NoError(t, err)
|
||||
|
||||
return rb
|
||||
}
|
||||
|
||||
type mockDataColumnsVerifier struct {
|
||||
t *testing.T
|
||||
dataColumnSidecars []blocks.RODataColumn
|
||||
validCalled, SidecarInclusionProvenCalled, SidecarKzgProofVerifiedCalled bool
|
||||
}
|
||||
|
||||
var _ verification.DataColumnsVerifier = &mockDataColumnsVerifier{}
|
||||
|
||||
func (m *mockDataColumnsVerifier) VerifiedRODataColumns() ([]blocks.VerifiedRODataColumn, error) {
|
||||
require.Equal(m.t, true, m.validCalled && m.SidecarInclusionProvenCalled && m.SidecarKzgProofVerifiedCalled)
|
||||
|
||||
verifiedDataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, len(m.dataColumnSidecars))
|
||||
for _, dataColumnSidecar := range m.dataColumnSidecars {
|
||||
verifiedDataColumnSidecar := blocks.NewVerifiedRODataColumn(dataColumnSidecar)
|
||||
verifiedDataColumnSidecars = append(verifiedDataColumnSidecars, verifiedDataColumnSidecar)
|
||||
}
|
||||
|
||||
return verifiedDataColumnSidecars, nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) SatisfyRequirement(verification.Requirement) {}
|
||||
|
||||
func (m *mockDataColumnsVerifier) ValidFields() error {
|
||||
m.validCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) CorrectSubnet(dataColumnSidecarSubTopic string, expectedTopics []string) error {
|
||||
return nil
|
||||
}
|
||||
func (m *mockDataColumnsVerifier) NotFromFutureSlot() error { return nil }
|
||||
func (m *mockDataColumnsVerifier) SlotAboveFinalized() error { return nil }
|
||||
func (m *mockDataColumnsVerifier) ValidProposerSignature(ctx context.Context) error { return nil }
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarParentSeen(parentSeen func([fieldparams.RootLength]byte) bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarParentValid(badParent func([fieldparams.RootLength]byte) bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarParentSlotLower() error { return nil }
|
||||
func (m *mockDataColumnsVerifier) SidecarDescendsFromFinalized() error { return nil }
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarInclusionProven() error {
|
||||
m.SidecarInclusionProvenCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarKzgProofVerified() error {
|
||||
m.SidecarKzgProofVerifiedCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarProposerExpected(ctx context.Context) error { return nil }
|
||||
@@ -99,20 +99,26 @@ func (e *blobCacheEntry) filter(root [32]byte, kc [][]byte, slot primitives.Slot
|
||||
if e.diskSummary.HasIndex(i) {
|
||||
continue
|
||||
}
|
||||
// Check if e.scs has this index before accessing
|
||||
var sidecar *blocks.ROBlob
|
||||
if i < uint64(len(e.scs)) {
|
||||
sidecar = e.scs[i]
|
||||
}
|
||||
|
||||
if kc[i] == nil {
|
||||
if e.scs[i] != nil {
|
||||
return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, no block commitment", root, i, e.scs[i].KzgCommitment)
|
||||
if sidecar != nil {
|
||||
return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, no block commitment", root, i, sidecar.KzgCommitment)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if e.scs[i] == nil {
|
||||
if sidecar == nil {
|
||||
return nil, errors.Wrapf(errMissingSidecar, "root=%#x, index=%#x", root, i)
|
||||
}
|
||||
if !bytes.Equal(kc[i], e.scs[i].KzgCommitment) {
|
||||
return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, block commitment=%#x", root, i, e.scs[i].KzgCommitment, kc[i])
|
||||
if !bytes.Equal(kc[i], sidecar.KzgCommitment) {
|
||||
return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, block commitment=%#x", root, i, sidecar.KzgCommitment, kc[i])
|
||||
}
|
||||
scs = append(scs, *e.scs[i])
|
||||
scs = append(scs, *sidecar)
|
||||
}
|
||||
|
||||
return scs, nil
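The hunk above replaces direct e.scs[i] reads with a nil sidecar whenever i is past the end of the slice, which is the access that previously panicked. The generic helper below is a minimal sketch of the same guard pattern; it is not code from this change.

package sketch

// safeAt returns s[i] when i is in range and nil otherwise, mirroring the
// "i < uint64(len(e.scs))" check introduced in the hunk above.
func safeAt[T any](s []*T, i uint64) *T {
	if i < uint64(len(s)) {
		return s[i]
	}
	return nil
}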
@@ -155,6 +155,41 @@ func TestFilter(t *testing.T) {
|
||||
},
|
||||
err: errCommitmentMismatch,
|
||||
},
|
||||
{
|
||||
name: "empty scs array with commitments",
|
||||
setup: func(t *testing.T) (*blobCacheEntry, [][]byte, []blocks.ROBlob) {
|
||||
// This reproduces the panic condition where entry.scs is empty or nil
|
||||
// but we have commitments to check
|
||||
entry := &blobCacheEntry{
|
||||
scs: nil, // Empty/nil array that caused the panic
|
||||
}
|
||||
// Create a commitment that would trigger the check at index 0
|
||||
commits := [][]byte{
|
||||
bytesutil.PadTo([]byte("commitment1"), 48),
|
||||
}
|
||||
return entry, commits, nil
|
||||
},
|
||||
err: errMissingSidecar,
|
||||
},
|
||||
{
|
||||
name: "scs array shorter than commitments",
|
||||
setup: func(t *testing.T) (*blobCacheEntry, [][]byte, []blocks.ROBlob) {
|
||||
// This reproduces the condition where entry.scs exists but is shorter
|
||||
// than the number of commitments we're checking
|
||||
entry := &blobCacheEntry{
|
||||
scs: make([]*blocks.ROBlob, 2), // Only 2 slots
|
||||
}
|
||||
// Create 4 commitments, accessing index 2 and 3 would have panicked
|
||||
commits := [][]byte{
|
||||
nil,
|
||||
nil,
|
||||
bytesutil.PadTo([]byte("commitment3"), 48),
|
||||
bytesutil.PadTo([]byte("commitment4"), 48),
|
||||
}
|
||||
return entry, commits, nil
|
||||
},
|
||||
err: errMissingSidecar,
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
|
||||
@@ -15,5 +15,5 @@ import (
// durably persisted before returning a non-error value.
type AvailabilityStore interface {
IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
Persist(current primitives.Slot, sc ...blocks.ROSidecar) error
Persist(current primitives.Slot, blobSidecar ...blocks.ROBlob) error
}
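The interface above implies a two-step contract: stash sidecars with Persist, then gate block import on IsDataAvailable. The sketch below illustrates that call order using the das, blocks, and primitives packages whose import paths appear elsewhere in this diff; it is an illustration of intended usage, not code from this change.

package sketch

import (
	"context"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/das"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
)

// importWithDA persists the blob sidecars first, then blocks until every
// commitment in blk is backed by a verified, durably persisted sidecar.
func importWithDA(ctx context.Context, store das.AvailabilityStore, current primitives.Slot, blk blocks.ROBlock, sidecars []blocks.ROBlob) error {
	if err := store.Persist(current, sidecars...); err != nil {
		return err
	}
	return store.IsDataAvailable(ctx, current, blk)
}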
@@ -5,13 +5,12 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
errors "github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// MockAvailabilityStore is an implementation of AvailabilityStore that can be used by other packages in tests.
|
||||
type MockAvailabilityStore struct {
|
||||
VerifyAvailabilityCallback func(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
|
||||
PersistBlobsCallback func(current primitives.Slot, sc ...blocks.ROBlob) error
|
||||
PersistBlobsCallback func(current primitives.Slot, blobSidecar ...blocks.ROBlob) error
|
||||
}
|
||||
|
||||
var _ AvailabilityStore = &MockAvailabilityStore{}
|
||||
@@ -25,13 +24,9 @@ func (m *MockAvailabilityStore) IsDataAvailable(ctx context.Context, current pri
|
||||
}
|
||||
|
||||
// Persist satisfies the corresponding method of the AvailabilityStore interface in a way that is useful for tests.
|
||||
func (m *MockAvailabilityStore) Persist(current primitives.Slot, sc ...blocks.ROSidecar) error {
|
||||
blobSidecars, err := blocks.BlobSidecarsFromSidecars(sc)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "blob sidecars from sidecars")
|
||||
}
|
||||
func (m *MockAvailabilityStore) Persist(current primitives.Slot, blobSidecar ...blocks.ROBlob) error {
|
||||
if m.PersistBlobsCallback != nil {
|
||||
return m.PersistBlobsCallback(current, blobSidecars...)
|
||||
return m.PersistBlobsCallback(current, blobSidecar...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -100,6 +100,14 @@ type (
|
||||
}
|
||||
)
|
||||
|
||||
// DataColumnStorageReader is an interface to read data column sidecars from the filesystem.
type DataColumnStorageReader interface {
Summary(root [fieldparams.RootLength]byte) DataColumnStorageSummary
Get(root [fieldparams.RootLength]byte, indices []uint64) ([]blocks.VerifiedRODataColumn, error)
}

var _ DataColumnStorageReader = &DataColumnStorage{}
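A hypothetical consumer of this reader might first consult the summary and then fetch only the indices reported as stored, as sketched below. The import paths and the Stored() accessor are taken from elsewhere in this diff; the helper itself is not part of this change.

package sketch

import (
	"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
)

// storedColumns fetches every data column sidecar the summary reports as
// stored for the given block root.
func storedColumns(r filesystem.DataColumnStorageReader, root [fieldparams.RootLength]byte) ([]blocks.VerifiedRODataColumn, error) {
	summary := r.Summary(root)
	indices := make([]uint64, 0)
	for index := range summary.Stored() {
		indices = append(indices, index)
	}
	return r.Get(root, indices)
}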
// WithDataColumnBasePath is a required option that sets the base path of data column storage.
|
||||
func WithDataColumnBasePath(base string) DataColumnStorageOption {
|
||||
return func(b *DataColumnStorage) error {
|
||||
|
||||
@@ -84,12 +84,6 @@ func (s DataColumnStorageSummary) Stored() map[uint64]bool {
|
||||
return stored
|
||||
}
|
||||
|
||||
// DataColumnStorageSummarizer can be used to receive a summary of metadata about data columns on disk for a given root.
|
||||
// The DataColumnStorageSummary can be used to check which indices (if any) are available for a given block by root.
|
||||
type DataColumnStorageSummarizer interface {
|
||||
Summary(root [fieldparams.RootLength]byte) DataColumnStorageSummary
|
||||
}
|
||||
|
||||
type dataColumnStorageSummaryCache struct {
|
||||
mu sync.RWMutex
|
||||
dataColumnCount float64
|
||||
@@ -98,8 +92,6 @@ type dataColumnStorageSummaryCache struct {
|
||||
cache map[[fieldparams.RootLength]byte]DataColumnStorageSummary
|
||||
}
|
||||
|
||||
var _ DataColumnStorageSummarizer = &dataColumnStorageSummaryCache{}
|
||||
|
||||
func newDataColumnStorageSummaryCache() *dataColumnStorageSummaryCache {
|
||||
return &dataColumnStorageSummaryCache{
|
||||
cache: make(map[[fieldparams.RootLength]byte]DataColumnStorageSummary),
|
||||
|
||||
@@ -144,14 +144,3 @@ func NewEphemeralDataColumnStorageWithMocker(t testing.TB) (*DataColumnMocker, *
|
||||
fs, dcs := NewEphemeralDataColumnStorageAndFs(t)
|
||||
return &DataColumnMocker{fs: fs, dcs: dcs}, dcs
|
||||
}
|
||||
|
||||
func NewMockDataColumnStorageSummarizer(t *testing.T, set map[[fieldparams.RootLength]byte][]uint64) DataColumnStorageSummarizer {
|
||||
c := newDataColumnStorageSummaryCache()
|
||||
for root, indices := range set {
|
||||
if err := c.set(DataColumnsIdent{Root: root, Epoch: 0, Indices: indices}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
@@ -115,6 +115,17 @@ type NoHeadAccessDatabase interface {
|
||||
CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error
|
||||
DeleteHistoricalDataBeforeSlot(ctx context.Context, slot primitives.Slot, batchSize int) (int, error)
|
||||
|
||||
// Genesis operations.
|
||||
LoadGenesis(ctx context.Context, stateBytes []byte) error
|
||||
SaveGenesisData(ctx context.Context, state state.BeaconState) error
|
||||
EnsureEmbeddedGenesis(ctx context.Context) error
|
||||
|
||||
// Support for checkpoint sync and backfill.
|
||||
SaveOriginCheckpointBlockRoot(ctx context.Context, blockRoot [32]byte) error
|
||||
SaveOrigin(ctx context.Context, serState, serBlock []byte) error
|
||||
SaveBackfillStatus(context.Context, *dbval.BackfillStatus) error
|
||||
BackfillFinalizedIndex(ctx context.Context, blocks []blocks.ROBlock, finalizedChildRoot [32]byte) error
|
||||
|
||||
// Custody operations.
|
||||
UpdateSubscribedToAllDataSubnets(ctx context.Context, subscribed bool) (bool, error)
|
||||
UpdateCustodyInfo(ctx context.Context, earliestAvailableSlot primitives.Slot, custodyGroupCount uint64) (primitives.Slot, uint64, error)
|
||||
@@ -131,16 +142,6 @@ type HeadAccessDatabase interface {
|
||||
HeadBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, error)
|
||||
HeadBlockRoot() ([32]byte, error)
|
||||
SaveHeadBlockRoot(ctx context.Context, blockRoot [32]byte) error
|
||||
|
||||
// Genesis operations.
|
||||
LoadGenesis(ctx context.Context, stateBytes []byte) error
|
||||
SaveGenesisData(ctx context.Context, state state.BeaconState) error
|
||||
EnsureEmbeddedGenesis(ctx context.Context) error
|
||||
|
||||
// Support for checkpoint sync and backfill.
|
||||
SaveOrigin(ctx context.Context, serState, serBlock []byte) error
|
||||
SaveBackfillStatus(context.Context, *dbval.BackfillStatus) error
|
||||
BackfillFinalizedIndex(ctx context.Context, blocks []blocks.ROBlock, finalizedChildRoot [32]byte) error
|
||||
}
|
||||
|
||||
// SlasherDatabase interface for persisting data related to detecting slashable offenses on Ethereum.
|
||||
|
||||
@@ -318,6 +318,7 @@ func startBaseServices(cliCtx *cli.Context, beacon *BeaconNode, depositAddress s
|
||||
}
|
||||
|
||||
beacon.BlobStorage.WarmCache()
|
||||
beacon.DataColumnStorage.WarmCache()
|
||||
|
||||
log.Debugln("Starting Slashing DB")
|
||||
if err := beacon.startSlasherDB(cliCtx, clearer); err != nil {
|
||||
@@ -845,6 +846,7 @@ func (b *BeaconNode) registerInitialSyncService(complete chan struct{}) error {
|
||||
ClockWaiter: b.clockWaiter,
|
||||
InitialSyncComplete: complete,
|
||||
BlobStorage: b.BlobStorage,
|
||||
DataColumnStorage: b.DataColumnStorage,
|
||||
}, opts...)
|
||||
return b.services.RegisterService(is)
|
||||
}
|
||||
@@ -939,6 +941,7 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
|
||||
FinalizationFetcher: chainService,
|
||||
BlockReceiver: chainService,
|
||||
BlobReceiver: chainService,
|
||||
DataColumnReceiver: chainService,
|
||||
AttestationReceiver: chainService,
|
||||
GenesisTimeFetcher: chainService,
|
||||
GenesisFetcher: chainService,
|
||||
@@ -966,6 +969,7 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
|
||||
Router: router,
|
||||
ClockWaiter: b.clockWaiter,
|
||||
BlobStorage: b.BlobStorage,
|
||||
DataColumnStorage: b.DataColumnStorage,
|
||||
TrackedValidatorsCache: b.trackedValidatorsCache,
|
||||
PayloadIDCache: b.payloadIDCache,
|
||||
LCStore: b.lcStore,
|
||||
|
||||
@@ -195,7 +195,6 @@ go_test(
|
||||
"@com_github_multiformats_go_multiaddr//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
],
|
||||
|
||||
@@ -305,15 +305,15 @@ func (s *Service) BroadcastLightClientFinalityUpdate(ctx context.Context, update
|
||||
return nil
|
||||
}
|
||||
|
||||
// BroadcastDataColumn broadcasts a data column to the p2p network, the message is assumed to be
|
||||
// BroadcastDataColumnSidecar broadcasts a data column to the p2p network, the message is assumed to be
|
||||
// broadcasted to the current fork and to the input column subnet.
|
||||
func (s *Service) BroadcastDataColumn(
|
||||
func (s *Service) BroadcastDataColumnSidecar(
|
||||
root [fieldparams.RootLength]byte,
|
||||
dataColumnSubnet uint64,
|
||||
dataColumnSidecar *ethpb.DataColumnSidecar,
|
||||
) error {
|
||||
// Add tracing to the function.
|
||||
ctx, span := trace.StartSpan(s.ctx, "p2p.BroadcastDataColumn")
|
||||
ctx, span := trace.StartSpan(s.ctx, "p2p.BroadcastDataColumnSidecar")
|
||||
defer span.End()
|
||||
|
||||
// Ensure the data column sidecar is not nil.
|
||||
@@ -330,12 +330,12 @@ func (s *Service) BroadcastDataColumn(
|
||||
}
|
||||
|
||||
// Non-blocking broadcast, with attempts to discover a column subnet peer if none available.
|
||||
go s.internalBroadcastDataColumn(ctx, root, dataColumnSubnet, dataColumnSidecar, forkDigest)
|
||||
go s.internalBroadcastDataColumnSidecar(ctx, root, dataColumnSubnet, dataColumnSidecar, forkDigest)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) internalBroadcastDataColumn(
|
||||
func (s *Service) internalBroadcastDataColumnSidecar(
|
||||
ctx context.Context,
|
||||
root [fieldparams.RootLength]byte,
|
||||
columnSubnet uint64,
|
||||
@@ -343,7 +343,7 @@ func (s *Service) internalBroadcastDataColumn(
|
||||
forkDigest [fieldparams.VersionLength]byte,
|
||||
) {
|
||||
// Add tracing to the function.
|
||||
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastDataColumn")
|
||||
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastDataColumnSidecar")
|
||||
defer span.End()
|
||||
|
||||
// Increase the number of broadcast attempts.
|
||||
|
||||
@@ -716,7 +716,7 @@ func TestService_BroadcastDataColumn(t *testing.T) {
|
||||
|
||||
// Attempt to broadcast nil object should fail.
|
||||
var emptyRoot [fieldparams.RootLength]byte
|
||||
err = service.BroadcastDataColumn(emptyRoot, subnet, nil)
|
||||
err = service.BroadcastDataColumnSidecar(emptyRoot, subnet, nil)
|
||||
require.ErrorContains(t, "attempted to broadcast nil", err)
|
||||
|
||||
// Subscribe to the topic.
|
||||
@@ -727,7 +727,7 @@ func TestService_BroadcastDataColumn(t *testing.T) {
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
|
||||
// Broadcast to peers and wait.
|
||||
err = service.BroadcastDataColumn(emptyRoot, subnet, sidecar)
|
||||
err = service.BroadcastDataColumnSidecar(emptyRoot, subnet, sidecar)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Receive the message.
|
||||
|
||||
@@ -155,6 +155,7 @@ func (s *Service) custodyGroupCountFromPeerENR(pid peer.ID) uint64 {
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"peerID": pid,
|
||||
"defaultValue": custodyRequirement,
|
||||
"agent": agentString(pid, s.Host()),
|
||||
})
|
||||
|
||||
// Retrieve the ENR of the peer.
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
|
||||
testp2p "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/wrapper"
|
||||
@@ -269,6 +270,7 @@ func TestCustodyGroupCountFromPeer(t *testing.T) {
|
||||
service := &Service{
|
||||
peers: peers,
|
||||
metaData: tc.metadata,
|
||||
host: testp2p.NewTestP2P(t).Host(),
|
||||
}
|
||||
|
||||
// Retrieve the custody count from the remote peer.
|
||||
@@ -329,6 +331,7 @@ func TestCustodyGroupCountFromPeerENR(t *testing.T) {
|
||||
|
||||
service := &Service{
|
||||
peers: peers,
|
||||
host: testp2p.NewTestP2P(t).Host(),
|
||||
}
|
||||
|
||||
actual := service.custodyGroupCountFromPeerENR(pid)
|
||||
|
||||
@@ -443,20 +443,27 @@ func (s *Service) findPeers(ctx context.Context, missingPeerCount uint) ([]*enod
|
||||
return peersToDial, ctx.Err()
|
||||
}
|
||||
|
||||
// Skip peer not matching the filter.
|
||||
node := iterator.Node()
|
||||
if !s.filterPeer(node) {
|
||||
continue
|
||||
}
|
||||
|
||||
// Remove duplicates, keeping the node with higher seq.
|
||||
existing, ok := nodeByNodeID[node.ID()]
|
||||
if ok && existing.Seq() > node.Seq() {
|
||||
if ok && existing.Seq() >= node.Seq() {
|
||||
continue // keep existing and skip.
|
||||
}
|
||||
|
||||
// Treat nodes that exist in nodeByNodeID with higher seq numbers as new peers
|
||||
// Skip peer not matching the filter.
|
||||
if !s.filterPeer(node) {
|
||||
if ok {
|
||||
// this means the existing peer with the lower sequence number is no longer valid
|
||||
delete(nodeByNodeID, existing.ID())
|
||||
missingPeerCount++
|
||||
}
|
||||
continue
|
||||
}
|
||||
nodeByNodeID[node.ID()] = node
|
||||
|
||||
// We found a new peer. Decrease the missing peer count.
|
||||
nodeByNodeID[node.ID()] = node
|
||||
missingPeerCount--
|
||||
}
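The reworked loop above keeps, per node ID, only the record with the highest ENR sequence number, and when a newer record for an already-accepted ID fails the filter it evicts the stale entry and returns that slot to the missing-peer budget. The standalone sketch below mirrors that bookkeeping over a plain slice; it is a simplified illustration, not the discovery code itself.

package sketch

// record is a minimal stand-in for an ENR: a node ID, a sequence number, and
// whether the record passes the peer filter.
type record struct {
	id     string
	seq    uint64
	usable bool
}

// dedupeBySeq mirrors the bookkeeping in the discovery loop above: keep the
// highest-seq record per ID, and drop a previously accepted ID when its newer
// record is no longer usable.
func dedupeBySeq(records []record, missing int) (map[string]record, int) {
	byID := make(map[string]record)
	for _, r := range records {
		existing, ok := byID[r.id]
		if ok && existing.seq >= r.seq {
			continue // already holding an equal-or-newer record
		}
		if !r.usable {
			if ok {
				// The newer record invalidates the stale entry we had accepted.
				delete(byID, r.id)
				missing++
			}
			continue
		}
		byID[r.id] = r
		missing-- // one fewer peer to find
	}
	return byID, missing
}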
@@ -677,7 +684,7 @@ func (s *Service) filterPeer(node *enode.Node) bool {
|
||||
|
||||
peerData, multiAddrs, err := convertToAddrInfo(node)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Could not convert to peer data")
|
||||
log.WithError(err).WithField("node", node.String()).Debug("Could not convert to peer data")
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -844,7 +851,7 @@ func convertToMultiAddr(nodes []*enode.Node) []ma.Multiaddr {
|
||||
func convertToAddrInfo(node *enode.Node) (*peer.AddrInfo, []ma.Multiaddr, error) {
|
||||
multiAddrs, err := retrieveMultiAddrsFromNode(node)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, nil, errors.Wrap(err, "retrieve multiaddrs from node")
|
||||
}
|
||||
|
||||
if len(multiAddrs) == 0 {
|
||||
|
||||
@@ -1,9 +1,11 @@
|
||||
package p2p
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
mathRand "math/rand"
|
||||
"net"
|
||||
@@ -58,6 +60,81 @@ func createAddrAndPrivKey(t *testing.T) (net.IP, *ecdsa.PrivateKey) {
|
||||
return ipAddr, pkey
|
||||
}
|
||||
|
||||
// createTestNodeWithID creates a LocalNode for testing with a deterministic private key.
// This is needed for deduplication tests that require the same node ID across different sequence numbers.
|
||||
func createTestNodeWithID(t *testing.T, id string) *enode.LocalNode {
|
||||
// Create a deterministic reader based on the ID for consistent key generation
|
||||
h := sha256.New()
|
||||
h.Write([]byte(id))
|
||||
seedBytes := h.Sum(nil)
|
||||
|
||||
// Create a deterministic reader using the seed
|
||||
deterministicReader := bytes.NewReader(seedBytes)
|
||||
|
||||
// Generate the private key using the same approach as the production code
|
||||
privKey, _, err := crypto.GenerateSecp256k1Key(deterministicReader)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Convert to ECDSA private key for enode usage
|
||||
ecdsaPrivKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(privKey)
|
||||
require.NoError(t, err)
|
||||
|
||||
db, err := enode.OpenDB("")
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() { db.Close() })
|
||||
|
||||
localNode := enode.NewLocalNode(db, ecdsaPrivKey)
|
||||
|
||||
// Set basic properties
|
||||
localNode.SetStaticIP(net.ParseIP("127.0.0.1"))
|
||||
localNode.Set(enr.TCP(3000))
|
||||
localNode.Set(enr.UDP(3000))
|
||||
localNode.Set(enr.WithEntry(eth2EnrKey, make([]byte, 16)))
|
||||
|
||||
return localNode
|
||||
}
|
||||
|
||||
// createTestNodeRandom creates a LocalNode for testing using the existing createAddrAndPrivKey function
|
||||
func createTestNodeRandom(t *testing.T) *enode.LocalNode {
|
||||
_, privKey := createAddrAndPrivKey(t)
|
||||
|
||||
db, err := enode.OpenDB("")
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() { db.Close() })
|
||||
|
||||
localNode := enode.NewLocalNode(db, privKey)
|
||||
|
||||
// Set basic properties
|
||||
localNode.SetStaticIP(net.ParseIP("127.0.0.1"))
|
||||
localNode.Set(enr.TCP(3000))
|
||||
localNode.Set(enr.UDP(3000))
|
||||
localNode.Set(enr.WithEntry(eth2EnrKey, make([]byte, 16)))
|
||||
|
||||
return localNode
|
||||
}
|
||||
|
||||
// setNodeSeq updates a LocalNode to have the specified sequence number
|
||||
func setNodeSeq(localNode *enode.LocalNode, seq uint64) {
|
||||
// Force the sequence number by updating the record until it reaches the target;
// a fresh record starts at seq 1, so this takes seq-1 updates.
|
||||
currentSeq := localNode.Node().Seq()
|
||||
for currentSeq < seq {
|
||||
localNode.Set(enr.WithEntry("dummy", currentSeq))
|
||||
currentSeq++
|
||||
}
|
||||
}
|
||||
|
||||
// setNodeSubnets sets the attestation subnets for a LocalNode
|
||||
func setNodeSubnets(localNode *enode.LocalNode, attSubnets []uint64) {
|
||||
if len(attSubnets) > 0 {
|
||||
bitV := bitfield.NewBitvector64()
|
||||
for _, subnet := range attSubnets {
|
||||
bitV.SetBitAt(subnet, true)
|
||||
}
|
||||
localNode.Set(enr.WithEntry(attSubnetEnrKey, &bitV))
|
||||
}
|
||||
}
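// Usage sketch for the helpers above (hypothetical snippet mirroring the
// deduplication tests below; not part of the production code):
//
//	ln := createTestNodeWithID(t, "node1")
//	setNodeSubnets(ln, []uint64{1, 2})
//	setNodeSeq(ln, 3)     // bump the ENR record until Seq() >= 3
//	snapshot := ln.Node() // record snapshot with the current seq and subnets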
|
||||
|
||||
func TestCreateListener(t *testing.T) {
|
||||
port := 1024
|
||||
ipAddr, pkey := createAddrAndPrivKey(t)
|
||||
@@ -241,7 +318,7 @@ func TestCreateLocalNode(t *testing.T) {
|
||||
|
||||
// Check fork is set.
|
||||
fork := new([]byte)
|
||||
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(eth2ENRKey, fork)))
|
||||
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(eth2EnrKey, fork)))
|
||||
require.NotEmpty(t, *fork)
|
||||
|
||||
// Check att subnets.
|
||||
@@ -492,7 +569,7 @@ func TestMultipleDiscoveryAddresses(t *testing.T) {
|
||||
node := enode.NewLocalNode(db, key)
|
||||
node.Set(enr.IPv4{127, 0, 0, 1})
|
||||
node.Set(enr.IPv6{0x20, 0x01, 0x48, 0x60, 0, 0, 0x20, 0x01, 0, 0, 0, 0, 0, 0, 0x00, 0x68})
|
||||
s := &Service{dv5Listener: mockListener{localNode: node}}
|
||||
s := &Service{dv5Listener: testp2p.NewMockListener(node, nil)}
|
||||
|
||||
multiAddresses, err := s.DiscoveryAddresses()
|
||||
require.NoError(t, err)
|
||||
@@ -517,7 +594,7 @@ func TestDiscoveryV5_SeqNumber(t *testing.T) {
|
||||
node := enode.NewLocalNode(db, key)
|
||||
node.Set(enr.IPv4{127, 0, 0, 1})
|
||||
currentSeq := node.Seq()
|
||||
s := &Service{dv5Listener: mockListener{localNode: node}}
|
||||
s := &Service{dv5Listener: testp2p.NewMockListener(node, nil)}
|
||||
_, err = s.DiscoveryAddresses()
|
||||
require.NoError(t, err)
|
||||
newSeq := node.Seq()
|
||||
@@ -529,7 +606,7 @@ func TestDiscoveryV5_SeqNumber(t *testing.T) {
|
||||
nodeTwo.Set(enr.IPv6{0x20, 0x01, 0x48, 0x60, 0, 0, 0x20, 0x01, 0, 0, 0, 0, 0, 0, 0x00, 0x68})
|
||||
seqTwo := nodeTwo.Seq()
|
||||
assert.NotEqual(t, seqTwo, newSeq)
|
||||
sTwo := &Service{dv5Listener: mockListener{localNode: nodeTwo}}
|
||||
sTwo := &Service{dv5Listener: testp2p.NewMockListener(nodeTwo, nil)}
|
||||
_, err = sTwo.DiscoveryAddresses()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, seqTwo+1, nodeTwo.Seq())
|
||||
@@ -886,3 +963,289 @@ func TestRefreshPersistentSubnets(t *testing.T) {
|
||||
// Reset the config.
|
||||
params.OverrideBeaconConfig(defaultCfg)
|
||||
}
|
||||
|
||||
func TestFindPeers_NodeDeduplication(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cache.SubnetIDs.EmptyAllCaches()
|
||||
defer cache.SubnetIDs.EmptyAllCaches()
|
||||
|
||||
ctx := t.Context()
|
||||
|
||||
// Create LocalNodes and manipulate sequence numbers
|
||||
localNode1 := createTestNodeWithID(t, "node1")
|
||||
localNode2 := createTestNodeWithID(t, "node2")
|
||||
localNode3 := createTestNodeWithID(t, "node3")
|
||||
|
||||
// Create different sequence versions of node1
|
||||
setNodeSeq(localNode1, 1)
|
||||
node1_seq1 := localNode1.Node()
|
||||
setNodeSeq(localNode1, 2)
|
||||
node1_seq2 := localNode1.Node() // Same ID, higher seq
|
||||
setNodeSeq(localNode1, 3)
|
||||
node1_seq3 := localNode1.Node() // Same ID, even higher seq
|
||||
|
||||
// Other nodes with seq 1
|
||||
node2_seq1 := localNode2.Node()
|
||||
node3_seq1 := localNode3.Node()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
nodes []*enode.Node
|
||||
missingPeers uint
|
||||
expectedCount int
|
||||
description string
|
||||
eval func(t *testing.T, result []*enode.Node)
|
||||
}{
|
||||
{
|
||||
name: "No duplicates - all unique nodes",
|
||||
nodes: []*enode.Node{
|
||||
node2_seq1,
|
||||
node3_seq1,
|
||||
},
|
||||
missingPeers: 2,
|
||||
expectedCount: 2,
|
||||
description: "Should return all unique nodes without deduplication",
|
||||
eval: nil, // No special validation needed
|
||||
},
|
||||
{
|
||||
name: "Duplicate with lower seq comes first - should replace",
|
||||
nodes: []*enode.Node{
|
||||
node1_seq1,
|
||||
node1_seq2, // Higher seq, should replace
|
||||
node2_seq1, // Different node added after duplicates are processed
|
||||
},
|
||||
missingPeers: 2, // Need 2 peers so we process all nodes
|
||||
expectedCount: 2, // Should get node1 (with higher seq) and node2
|
||||
description: "Should keep node with higher sequence number when duplicate found",
|
||||
eval: func(t *testing.T, result []*enode.Node) {
|
||||
// Should have node2 and node1 with higher seq (node1_seq2)
|
||||
foundNode1WithHigherSeq := false
|
||||
for _, node := range result {
|
||||
if node.ID() == node1_seq2.ID() {
|
||||
require.Equal(t, node1_seq2.Seq(), node.Seq(), "Node1 should have higher seq")
|
||||
foundNode1WithHigherSeq = true
|
||||
}
|
||||
}
|
||||
require.Equal(t, true, foundNode1WithHigherSeq, "Should have node1 with higher seq")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Duplicate with higher seq comes first - should keep existing",
|
||||
nodes: []*enode.Node{
|
||||
node1_seq3, // Higher seq
|
||||
node1_seq2, // Lower seq, should be skipped (continue branch)
|
||||
node1_seq1, // Even lower seq, should also be skipped (continue branch)
|
||||
node2_seq1, // Different node added after duplicates are processed
|
||||
},
|
||||
missingPeers: 2,
|
||||
expectedCount: 2,
|
||||
description: "Should keep existing node when it has higher sequence number and skip all lower seq duplicates",
|
||||
eval: func(t *testing.T, result []*enode.Node) {
|
||||
// Should have kept the node with highest seq (node1_seq3)
|
||||
foundNode1WithHigherSeq := false
|
||||
for _, node := range result {
|
||||
if node.ID() == node1_seq3.ID() {
|
||||
require.Equal(t, node1_seq3.Seq(), node.Seq(), "Node1 should have highest seq")
|
||||
foundNode1WithHigherSeq = true
|
||||
}
|
||||
}
|
||||
require.Equal(t, true, foundNode1WithHigherSeq, "Should have node1 with highest seq")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Multiple duplicates with increasing seq",
|
||||
nodes: []*enode.Node{
|
||||
node1_seq1,
|
||||
node1_seq2, // Should replace seq1
|
||||
node1_seq3, // Should replace seq2
|
||||
node2_seq1, // Different node added after duplicates are processed
|
||||
},
|
||||
missingPeers: 2,
|
||||
expectedCount: 2,
|
||||
description: "Should keep updating to highest sequence number",
|
||||
eval: func(t *testing.T, result []*enode.Node) {
|
||||
// Should have the node with highest seq (node1_seq3)
|
||||
foundNode1WithHigherSeq := false
|
||||
for _, node := range result {
|
||||
if node.ID() == node1_seq3.ID() {
|
||||
require.Equal(t, node1_seq3.Seq(), node.Seq(), "Node1 should have highest seq")
|
||||
foundNode1WithHigherSeq = true
|
||||
}
|
||||
}
|
||||
require.Equal(t, true, foundNode1WithHigherSeq, "Should have node1 with highest seq")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Duplicate with equal seq comes after - should skip",
|
||||
nodes: []*enode.Node{
|
||||
node1_seq2, // First occurrence
|
||||
node1_seq2, // Same exact node instance, should be skipped (continue branch for >= case)
|
||||
node2_seq1, // Different node
|
||||
},
|
||||
missingPeers: 2,
|
||||
expectedCount: 2,
|
||||
description: "Should skip duplicate with equal sequence number",
|
||||
eval: func(t *testing.T, result []*enode.Node) {
|
||||
// Should have exactly one instance of node1_seq2 and one instance of node2_seq1
|
||||
foundNode1 := false
|
||||
foundNode2 := false
|
||||
for _, node := range result {
|
||||
if node.ID() == node1_seq2.ID() {
|
||||
require.Equal(t, node1_seq2.Seq(), node.Seq(), "Node1 should have the expected seq")
|
||||
require.Equal(t, false, foundNode1, "Should have only one instance of node1") // Ensure no duplicates
|
||||
foundNode1 = true
|
||||
}
|
||||
if node.ID() == node2_seq1.ID() {
|
||||
foundNode2 = true
|
||||
}
|
||||
}
|
||||
require.Equal(t, true, foundNode1, "Should have node1")
|
||||
require.Equal(t, true, foundNode2, "Should have node2")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Mix of unique and duplicate nodes",
|
||||
nodes: []*enode.Node{
|
||||
node1_seq1,
|
||||
node2_seq1,
|
||||
node1_seq2, // Should replace node1_seq1
|
||||
node3_seq1,
|
||||
node1_seq3, // Should replace node1_seq2
|
||||
},
|
||||
missingPeers: 3,
|
||||
expectedCount: 3,
|
||||
description: "Should handle mix of unique nodes and duplicates correctly",
|
||||
eval: nil, // Basic count validation is sufficient
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
fakePeer := testp2p.NewTestP2P(t)
|
||||
|
||||
s := &Service{
|
||||
cfg: &Config{
|
||||
MaxPeers: 30,
|
||||
},
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
peers: peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{},
|
||||
}),
|
||||
host: fakePeer.BHost,
|
||||
}
|
||||
|
||||
localNode := createTestNodeRandom(t)
|
||||
mockIter := testp2p.NewMockIterator(tt.nodes)
|
||||
s.dv5Listener = testp2p.NewMockListener(localNode, mockIter)
|
||||
|
||||
ctxWithTimeout, cancel := context.WithTimeout(ctx, 1*time.Second)
|
||||
defer cancel()
|
||||
|
||||
result, err := s.findPeers(ctxWithTimeout, tt.missingPeers)
|
||||
|
||||
require.NoError(t, err, tt.description)
|
||||
require.Equal(t, tt.expectedCount, len(result), tt.description)
|
||||
|
||||
if tt.eval != nil {
|
||||
tt.eval(t, result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// callbackIterator allows us to execute callbacks at specific points during iteration
|
||||
type callbackIterator struct {
|
||||
nodes []*enode.Node
|
||||
index int
|
||||
callbacks map[int]func() // map from index to callback function
|
||||
}
|
||||
|
||||
func (c *callbackIterator) Next() bool {
|
||||
// Execute callback before checking if we can continue (if one exists)
|
||||
if callback, exists := c.callbacks[c.index]; exists {
|
||||
callback()
|
||||
}
|
||||
|
||||
return c.index < len(c.nodes)
|
||||
}
|
||||
|
||||
func (c *callbackIterator) Node() *enode.Node {
|
||||
if c.index >= len(c.nodes) {
|
||||
return nil
|
||||
}
|
||||
|
||||
node := c.nodes[c.index]
|
||||
c.index++
|
||||
return node
|
||||
}
|
||||
|
||||
func (c *callbackIterator) Close() {
|
||||
// Nothing to clean up for this simple implementation
|
||||
}
|
||||
|
||||
func TestFindPeers_received_bad_existing_node(t *testing.T) {
|
||||
// This test triggers delete(nodeByNodeID, node.ID()) in subnets.go by:
// 1. Processing node1_seq1 first (passes filterPeer, gets added to the map).
// 2. Marking the peer as bad via the callback before node1_seq2 is processed.
// 3. Processing node1_seq2 (fails filterPeer, triggering the delete since ok=true).
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cache.SubnetIDs.EmptyAllCaches()
|
||||
defer cache.SubnetIDs.EmptyAllCaches()
|
||||
|
||||
// Create LocalNode with same ID but different sequences
|
||||
localNode1 := createTestNodeWithID(t, "testnode")
|
||||
node1_seq1 := localNode1.Node() // Get current node
|
||||
currentSeq := node1_seq1.Seq()
|
||||
setNodeSeq(localNode1, currentSeq+1) // Increment sequence by 1
|
||||
node1_seq2 := localNode1.Node() // This should have higher seq
|
||||
|
||||
// Additional node to ensure we have enough peers to process
|
||||
localNode2 := createTestNodeWithID(t, "othernode")
|
||||
node2 := localNode2.Node()
|
||||
|
||||
fakePeer := testp2p.NewTestP2P(t)
|
||||
|
||||
service := &Service{
|
||||
cfg: &Config{
|
||||
MaxPeers: 30,
|
||||
},
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
peers: peers.NewStatus(t.Context(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{},
|
||||
}),
|
||||
host: fakePeer.BHost,
|
||||
}
|
||||
|
||||
// Create iterator with callback that marks peer as bad before processing node1_seq2
|
||||
iter := &callbackIterator{
|
||||
nodes: []*enode.Node{node1_seq1, node1_seq2, node2},
|
||||
index: 0,
|
||||
callbacks: map[int]func(){
|
||||
1: func() { // Before processing node1_seq2 (index 1)
|
||||
// Mark peer as bad before processing node1_seq2
|
||||
peerData, _, _ := convertToAddrInfo(node1_seq2)
|
||||
if peerData != nil {
|
||||
service.peers.Add(node1_seq2.Record(), peerData.ID, nil, network.DirUnknown)
|
||||
// Mark as bad peer - need enough increments to exceed threshold (6)
|
||||
for i := 0; i < 10; i++ {
|
||||
service.peers.Scorers().BadResponsesScorer().Increment(peerData.ID)
|
||||
}
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
localNode := createTestNodeRandom(t)
|
||||
service.dv5Listener = testp2p.NewMockListener(localNode, iter)
|
||||
|
||||
// Run findPeers - node1_seq1 gets processed first, then callback marks peer bad, then node1_seq2 fails
|
||||
ctxWithTimeout, cancel := context.WithTimeout(t.Context(), 1*time.Second)
|
||||
defer cancel()
|
||||
|
||||
result, err := service.findPeers(ctxWithTimeout, 3)
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(result))
|
||||
}
|
||||
|
||||
@@ -13,10 +13,17 @@ import (
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var errEth2ENRDigestMismatch = errors.New("fork digest of peer does not match local value")
|
||||
var (
|
||||
errForkScheduleMismatch = errors.New("peer fork schedule incompatible")
|
||||
errCurrentDigestMismatch = errors.Wrap(errForkScheduleMismatch, "current_fork_digest mismatch")
|
||||
errNextVersionMismatch = errors.Wrap(errForkScheduleMismatch, "next_fork_version mismatch")
|
||||
errNextDigestMismatch = errors.Wrap(errForkScheduleMismatch, "nfd (next fork digest) mismatch")
|
||||
)
|
||||
|
||||
// ENR key used for Ethereum consensus-related fork data.
|
||||
var eth2ENRKey = params.BeaconNetworkConfig().ETH2Key
|
||||
const (
|
||||
eth2EnrKey = "eth2" // The `eth2` ENR entry advertises the node's view of the fork schedule with an ssz-encoded ENRForkID value.
nfdEnrKey = "nfd" // The `nfd` ENR entry separately advertises the "next fork digest" aspect of the fork schedule.
|
||||
)
|
||||
|
||||
// ForkDigest returns the current fork digest of
|
||||
// the node according to the local clock.
|
||||
@@ -33,44 +40,86 @@ func (s *Service) currentForkDigest() ([4]byte, error) {
|
||||
// Compares fork ENRs between an incoming peer's record and our node's
|
||||
// local record values for current and next fork version/epoch.
|
||||
func compareForkENR(self, peer *enr.Record) error {
|
||||
peerForkENR, err := forkEntry(peer)
|
||||
peerEntry, err := forkEntry(peer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
currentForkENR, err := forkEntry(self)
|
||||
selfEntry, err := forkEntry(self)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
enrString, err := SerializeENR(peer)
|
||||
peerString, err := SerializeENR(peer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Clients SHOULD connect to peers with current_fork_digest, next_fork_version,
|
||||
// and next_fork_epoch that match local values.
|
||||
if !bytes.Equal(peerForkENR.CurrentForkDigest, currentForkENR.CurrentForkDigest) {
|
||||
return errors.Wrapf(errEth2ENRDigestMismatch,
|
||||
if !bytes.Equal(peerEntry.CurrentForkDigest, selfEntry.CurrentForkDigest) {
|
||||
return errors.Wrapf(errCurrentDigestMismatch,
|
||||
"fork digest of peer with ENR %s: %v, does not match local value: %v",
|
||||
enrString,
|
||||
peerForkENR.CurrentForkDigest,
|
||||
currentForkENR.CurrentForkDigest,
|
||||
peerString,
|
||||
peerEntry.CurrentForkDigest,
|
||||
selfEntry.CurrentForkDigest,
|
||||
)
|
||||
}
|
||||
|
||||
// Clients MAY connect to peers with the same current_fork_version but a
|
||||
// different next_fork_version/next_fork_epoch. Unless ENRForkID is manually
|
||||
// updated to matching prior to the earlier next_fork_epoch of the two clients,
|
||||
// these type of connecting clients will be unable to successfully interact
|
||||
// starting at the earlier next_fork_epoch.
|
||||
if peerForkENR.NextForkEpoch != currentForkENR.NextForkEpoch {
|
||||
if peerEntry.NextForkEpoch != selfEntry.NextForkEpoch {
|
||||
log.WithFields(logrus.Fields{
|
||||
"peerNextForkEpoch": peerForkENR.NextForkEpoch,
|
||||
"peerENR": enrString,
|
||||
"peerNextForkEpoch": peerEntry.NextForkEpoch,
|
||||
"peerNextForkVersion": peerEntry.NextForkVersion,
|
||||
"peerENR": peerString,
|
||||
}).Trace("Peer matches fork digest but has different next fork epoch")
|
||||
// We allow the connection even though we have a different view of the next fork epoch.
// This can happen with peers that have not yet upgraded ahead of a fork or BPO schedule
// change, so the connection is allowed to continue until the fork boundary.
|
||||
return nil
|
||||
}
|
||||
if !bytes.Equal(peerForkENR.NextForkVersion, currentForkENR.NextForkVersion) {
|
||||
log.WithFields(logrus.Fields{
|
||||
"peerNextForkVersion": peerForkENR.NextForkVersion,
|
||||
"peerENR": enrString,
|
||||
}).Trace("Peer matches fork digest but has different next fork version")
|
||||
|
||||
// Since we agree on the next fork epoch, we require next fork version to also be in agreement.
|
||||
if !bytes.Equal(peerEntry.NextForkVersion, selfEntry.NextForkVersion) {
|
||||
return errors.Wrapf(errNextVersionMismatch,
|
||||
"next fork version of peer with ENR %s: %#x, does not match local value: %#x",
|
||||
peerString, peerEntry.NextForkVersion, selfEntry.NextForkVersion)
|
||||
}
|
||||
|
||||
// Fulu adds the following to the spec:
|
||||
// ---
|
||||
// A new entry is added to the ENR under the key nfd, short for next fork digest. This entry
|
||||
// communicates the digest of the next scheduled fork, regardless of whether it is a regular
|
||||
// or a Blob-Parameters-Only fork. This new entry MUST be added once FULU_FORK_EPOCH is assigned
|
||||
// any value other than FAR_FUTURE_EPOCH. Adding this entry prior to the Fulu fork will not
|
||||
// impact peering as nodes will ignore unknown ENR entries and nfd mismatches do not cause
|
||||
// disconnects.
|
||||
// When discovering and interfacing with peers, nodes MUST evaluate nfd alongside their existing
|
||||
// consideration of the ENRForkID::next_* fields under the eth2 key, to form a more accurate
|
||||
// view of the peer's intended next fork for the purposes of sustained peering. If there is a
|
||||
// mismatch, the node MUST NOT disconnect before the fork boundary, but it MAY disconnect
|
||||
// at/after the fork boundary.
|
||||
|
||||
// Nodes unprepared to follow the Fulu fork will be unaware of nfd entries. However, their
|
||||
// existing comparison of eth2 entries (concretely next_fork_epoch) is sufficient to detect
|
||||
// upcoming divergence.
|
||||
// ---
|
||||
|
||||
// Because this is a new inbound connection, we lean on the pre-Fulu point that clients
// MAY connect to peers with the same current_fork_version but a different
// next_fork_version/next_fork_epoch, which implies we can choose not to connect to them
// when these don't match.
|
||||
//
|
||||
// Given that the next_fork_epoch matches, we will require the next_fork_digest to match.
|
||||
if !params.FuluEnabled() {
|
||||
return nil
|
||||
}
|
||||
peerNFD, selfNFD := nfd(peer), nfd(self)
|
||||
if peerNFD != selfNFD {
|
||||
return errors.Wrapf(errNextDigestMismatch,
|
||||
"next fork digest of peer with ENR %s: %v, does not match local value: %v",
|
||||
peerString, peerNFD, selfNFD)
|
||||
}
|
||||
return nil
|
||||
}
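// Summary of the checks above, in order (no additional checks are implied):
//
//	current_fork_digest mismatch -> errCurrentDigestMismatch
//	next_fork_epoch mismatch     -> nil (trace log only; allowed until the fork boundary)
//	next_fork_version mismatch   -> errNextVersionMismatch
//	Fulu enabled, nfd mismatch   -> errNextDigestMismatch
//	otherwise                    -> nil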
|
||||
@@ -81,16 +130,28 @@ func updateENR(node *enode.LocalNode, entry, next params.NetworkScheduleEntry) e
|
||||
NextForkVersion: next.ForkVersion[:],
|
||||
NextForkEpoch: next.Epoch,
|
||||
}
|
||||
log.
|
||||
WithField("CurrentForkDigest", fmt.Sprintf("%#x", enrForkID.CurrentForkDigest)).
|
||||
WithField("NextForkVersion", fmt.Sprintf("%#x", enrForkID.NextForkVersion)).
|
||||
WithField("NextForkEpoch", fmt.Sprintf("%d", enrForkID.NextForkEpoch)).
|
||||
Info("Updating ENR Fork ID")
|
||||
if entry.Epoch == next.Epoch {
|
||||
enrForkID.NextForkEpoch = params.BeaconConfig().FarFutureEpoch
|
||||
}
|
||||
logFields := logrus.Fields{
|
||||
"CurrentForkDigest": fmt.Sprintf("%#x", enrForkID.CurrentForkDigest),
|
||||
"NextForkVersion": fmt.Sprintf("%#x", enrForkID.NextForkVersion),
|
||||
"NextForkEpoch": fmt.Sprintf("%d", enrForkID.NextForkEpoch),
|
||||
}
|
||||
if params.BeaconConfig().FuluForkEpoch != params.BeaconConfig().FarFutureEpoch {
|
||||
if entry.ForkDigest == next.ForkDigest {
|
||||
node.Set(enr.WithEntry(nfdEnrKey, make([]byte, len(next.ForkDigest))))
|
||||
} else {
|
||||
node.Set(enr.WithEntry(nfdEnrKey, next.ForkDigest[:]))
|
||||
}
|
||||
logFields["NextForkDigest"] = fmt.Sprintf("%#x", next.ForkDigest)
|
||||
}
|
||||
log.WithFields(logFields).Info("Updating ENR Fork ID")
|
||||
enc, err := enrForkID.MarshalSSZ()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
forkEntry := enr.WithEntry(eth2ENRKey, enc)
|
||||
forkEntry := enr.WithEntry(eth2EnrKey, enc)
|
||||
node.Set(forkEntry)
|
||||
return nil
|
||||
}
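// Round-trip sketch for the nfd entry written above (assumes a LocalNode and
// schedule entries like those in the tests; illustrative only):
//
//	require.NoError(t, updateENR(node, current, next))
//	var got []byte
//	_ = node.Node().Record().Load(enr.WithEntry(nfdEnrKey, &got))
//	// got == next.ForkDigest[:] when the digests differ,
//	// got == make([]byte, 4)    when current and next share a digest,
//	// and the entry is absent entirely when FuluForkEpoch == FarFutureEpoch.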
|
||||
@@ -99,7 +160,7 @@ func updateENR(node *enode.LocalNode, entry, next params.NetworkScheduleEntry) e
|
||||
// under the Ethereum consensus EnrKey
|
||||
func forkEntry(record *enr.Record) (*pb.ENRForkID, error) {
|
||||
sszEncodedForkEntry := make([]byte, 16)
|
||||
entry := enr.WithEntry(eth2ENRKey, &sszEncodedForkEntry)
|
||||
entry := enr.WithEntry(eth2EnrKey, &sszEncodedForkEntry)
|
||||
err := record.Load(entry)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -110,3 +171,15 @@ func forkEntry(record *enr.Record) (*pb.ENRForkID, error) {
|
||||
}
|
||||
return forkEntry, nil
|
||||
}
|
||||
|
||||
// nfd retrieves the value of the `nfd` ("next fork digest") key from an ENR record.
|
||||
func nfd(record *enr.Record) [4]byte {
|
||||
digest := [4]byte{}
|
||||
entry := enr.WithEntry(nfdEnrKey, &digest)
|
||||
if err := record.Load(entry); err != nil {
|
||||
// Treat a missing nfd entry as an empty digest.
|
||||
// We do this to avoid errors when checking peers that have not yet upgraded for Fulu.
|
||||
return [4]byte{}
|
||||
}
|
||||
return digest
|
||||
}
|
||||
|
||||
@@ -16,14 +16,12 @@ import (
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/sirupsen/logrus"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
func TestCompareForkENR(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
logrus.SetLevel(logrus.TraceLevel)
|
||||
|
||||
db, err := enode.OpenDB("")
|
||||
assert.NoError(t, err)
|
||||
@@ -61,10 +59,10 @@ func TestCompareForkENR(t *testing.T) {
|
||||
require.NoError(t, updateENR(peer, currentCopy, next))
|
||||
return peer.Node()
|
||||
},
|
||||
expectErr: errEth2ENRDigestMismatch,
|
||||
expectErr: errCurrentDigestMismatch,
|
||||
},
|
||||
{
|
||||
name: "next fork version mismatch",
|
||||
name: "next_fork_epoch match, next_fork_version mismatch",
|
||||
node: func(t *testing.T) *enode.Node {
|
||||
// Create a peer with the same current fork digest and next fork version/epoch.
|
||||
peer := enode.NewLocalNode(db, k)
|
||||
@@ -75,25 +73,44 @@ func TestCompareForkENR(t *testing.T) {
|
||||
require.NoError(t, updateENR(peer, current, nextCopy))
|
||||
return peer.Node()
|
||||
},
|
||||
expectLog: "Peer matches fork digest but has different next fork version",
|
||||
expectErr: errNextVersionMismatch,
|
||||
},
|
||||
{
|
||||
name: "next fork epoch mismatch",
|
||||
name: "next fork epoch mismatch, next fork digest mismatch",
|
||||
node: func(t *testing.T) *enode.Node {
|
||||
// Create a peer with the same current fork digest and next fork version/epoch.
|
||||
peer := enode.NewLocalNode(db, k)
|
||||
nextCopy := next
|
||||
// next epoch does not match, and neither does the next fork digest.
|
||||
nextCopy.Epoch = nextCopy.Epoch + 1
|
||||
nfd := [4]byte{0xFF, 0xFF, 0xFF, 0xFF}
|
||||
require.NotEqual(t, next.ForkDigest, nfd)
|
||||
//peer.Set(enr.WithEntry(nfdEnrKey, nfd[:]))
|
||||
nextCopy.ForkDigest = nfd
|
||||
require.NoError(t, updateENR(peer, current, nextCopy))
|
||||
return peer.Node()
|
||||
},
|
||||
expectLog: "Peer matches fork digest but has different next fork epoch",
|
||||
// no error because we allow a different next fork version / digest if the next fork epoch does not match
|
||||
},
|
||||
{
|
||||
name: "next fork epoch -match-, next fork digest mismatch",
|
||||
node: func(t *testing.T) *enode.Node {
|
||||
peer := enode.NewLocalNode(db, k)
|
||||
nextCopy := next
|
||||
nfd := [4]byte{0xFF, 0xFF, 0xFF, 0xFF}
|
||||
// next epoch *does match*, but the next fork digest doesn't - so we should get an error.
|
||||
require.NotEqual(t, next.ForkDigest, nfd)
|
||||
nextCopy.ForkDigest = nfd
|
||||
//peer.Set(enr.WithEntry(nfdEnrKey, nfd[:]))
|
||||
require.NoError(t, updateENR(peer, current, nextCopy))
|
||||
return peer.Node()
|
||||
},
|
||||
expectErr: errNextDigestMismatch,
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
peer := c.node(t)
|
||||
err := compareForkENR(self.Node().Record(), peer.Record())
|
||||
if c.expectErr != nil {
|
||||
@@ -101,13 +118,27 @@ func TestCompareForkENR(t *testing.T) {
|
||||
} else {
|
||||
require.NoError(t, err, "Expected no error comparing fork ENRs")
|
||||
}
|
||||
if c.expectLog != "" {
|
||||
require.LogsContain(t, hook, c.expectLog, "Expected log message not found")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNfdSetAndLoad(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
db, err := enode.OpenDB("")
|
||||
assert.NoError(t, err)
|
||||
_, k := createAddrAndPrivKey(t)
|
||||
clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot)
|
||||
current := params.GetNetworkScheduleEntry(clock.CurrentEpoch())
|
||||
next := params.NextNetworkScheduleEntry(clock.CurrentEpoch())
|
||||
next.ForkDigest = [4]byte{0xFF, 0xFF, 0xFF, 0xFF} // Ensure a unique digest for testing.
|
||||
self := enode.NewLocalNode(db, k)
|
||||
require.NoError(t, updateENR(self, current, next))
|
||||
n := nfd(self.Node().Record())
|
||||
assert.Equal(t, next.ForkDigest, n, "Expected nfd to match next fork digest")
|
||||
}
|
||||
|
||||
func TestDiscv5_AddRetrieveForkEntryENR(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
@@ -122,7 +153,7 @@ func TestDiscv5_AddRetrieveForkEntryENR(t *testing.T) {
|
||||
}
|
||||
enc, err := enrForkID.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
entry := enr.WithEntry(eth2ENRKey, enc)
|
||||
entry := enr.WithEntry(eth2EnrKey, enc)
|
||||
temp := t.TempDir()
|
||||
randNum := rand.Int()
|
||||
tempPath := path.Join(temp, strconv.Itoa(randNum))
|
||||
@@ -177,3 +208,111 @@ func TestAddForkEntry_NextForkVersion(t *testing.T) {
|
||||
"Wanted Next Fork Version to be equal to last entry in schedule")
|
||||
|
||||
}
|
||||
|
||||
func TestUpdateENR_FuluForkDigest(t *testing.T) {
|
||||
setupTest := func(t *testing.T, fuluEnabled bool) (*enode.LocalNode, func()) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
if fuluEnabled {
|
||||
cfg.FuluForkEpoch = 100
|
||||
} else {
|
||||
cfg.FuluForkEpoch = cfg.FarFutureEpoch
|
||||
}
|
||||
cfg.FuluForkVersion = []byte{5, 0, 0, 0}
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
cfg.InitializeForkSchedule()
|
||||
|
||||
pkey, err := privKey(&Config{DataDir: t.TempDir()})
|
||||
require.NoError(t, err, "Could not get private key")
|
||||
db, err := enode.OpenDB("")
|
||||
require.NoError(t, err)
|
||||
|
||||
localNode := enode.NewLocalNode(db, pkey)
|
||||
cleanup := func() {
|
||||
db.Close()
|
||||
}
|
||||
|
||||
return localNode, cleanup
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
fuluEnabled bool
|
||||
currentEntry params.NetworkScheduleEntry
|
||||
nextEntry params.NetworkScheduleEntry
|
||||
validateNFD func(t *testing.T, localNode *enode.LocalNode, nextEntry params.NetworkScheduleEntry)
|
||||
}{
|
||||
{
|
||||
name: "different digests sets nfd to next digest",
|
||||
fuluEnabled: true,
|
||||
currentEntry: params.NetworkScheduleEntry{
|
||||
Epoch: 50,
|
||||
ForkDigest: [4]byte{1, 2, 3, 4},
|
||||
ForkVersion: [4]byte{1, 0, 0, 0},
|
||||
},
|
||||
nextEntry: params.NetworkScheduleEntry{
|
||||
Epoch: 100,
|
||||
ForkDigest: [4]byte{5, 6, 7, 8}, // Different from current
|
||||
ForkVersion: [4]byte{2, 0, 0, 0},
|
||||
},
|
||||
validateNFD: func(t *testing.T, localNode *enode.LocalNode, nextEntry params.NetworkScheduleEntry) {
|
||||
var nfdValue []byte
|
||||
err := localNode.Node().Record().Load(enr.WithEntry(nfdEnrKey, &nfdValue))
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, nextEntry.ForkDigest[:], nfdValue, "nfd entry should equal next fork digest")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "same digests sets nfd to empty",
|
||||
fuluEnabled: true,
|
||||
currentEntry: params.NetworkScheduleEntry{
|
||||
Epoch: 50,
|
||||
ForkDigest: [4]byte{1, 2, 3, 4},
|
||||
ForkVersion: [4]byte{1, 0, 0, 0},
|
||||
},
|
||||
nextEntry: params.NetworkScheduleEntry{
|
||||
Epoch: 100,
|
||||
ForkDigest: [4]byte{1, 2, 3, 4}, // Same as current
|
||||
ForkVersion: [4]byte{2, 0, 0, 0},
|
||||
},
|
||||
validateNFD: func(t *testing.T, localNode *enode.LocalNode, nextEntry params.NetworkScheduleEntry) {
|
||||
var nfdValue []byte
|
||||
err := localNode.Node().Record().Load(enr.WithEntry(nfdEnrKey, &nfdValue))
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, make([]byte, len(nextEntry.ForkDigest)), nfdValue, "nfd entry should be empty bytes when digests are same")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "fulu disabled does not add nfd field",
|
||||
fuluEnabled: false,
|
||||
currentEntry: params.NetworkScheduleEntry{
|
||||
Epoch: 50,
|
||||
ForkDigest: [4]byte{1, 2, 3, 4},
|
||||
ForkVersion: [4]byte{1, 0, 0, 0},
|
||||
},
|
||||
nextEntry: params.NetworkScheduleEntry{
|
||||
Epoch: 100,
|
||||
ForkDigest: [4]byte{5, 6, 7, 8}, // Different from current
|
||||
ForkVersion: [4]byte{2, 0, 0, 0},
|
||||
},
|
||||
validateNFD: func(t *testing.T, localNode *enode.LocalNode, nextEntry params.NetworkScheduleEntry) {
|
||||
var nfdValue []byte
|
||||
err := localNode.Node().Record().Load(enr.WithEntry(nfdEnrKey, &nfdValue))
|
||||
require.ErrorContains(t, "missing ENR key", err, "nfd field should not be present when Fulu fork is disabled")
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
localNode, cleanup := setupTest(t, tt.fuluEnabled)
|
||||
defer cleanup()
|
||||
|
||||
currentEntry := tt.currentEntry
|
||||
nextEntry := tt.nextEntry
|
||||
require.NoError(t, updateENR(localNode, currentEntry, nextEntry))
|
||||
tt.validateNFD(t, localNode, nextEntry)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -51,7 +51,7 @@ type (
|
||||
BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.BlobSidecar) error
|
||||
BroadcastLightClientOptimisticUpdate(ctx context.Context, update interfaces.LightClientOptimisticUpdate) error
|
||||
BroadcastLightClientFinalityUpdate(ctx context.Context, update interfaces.LightClientFinalityUpdate) error
|
||||
BroadcastDataColumn(root [fieldparams.RootLength]byte, columnSubnet uint64, dataColumnSidecar *ethpb.DataColumnSidecar) error
|
||||
BroadcastDataColumnSidecar(root [fieldparams.RootLength]byte, columnSubnet uint64, dataColumnSidecar *ethpb.DataColumnSidecar) error
|
||||
}
|
||||
|
||||
// SetStreamHandler configures p2p to handle streams of a certain topic ID.
|
||||
|
||||
@@ -42,7 +42,7 @@ func TestScorers_Gossip_Score(t *testing.T) {
|
||||
},
|
||||
check: func(scorer *scorers.GossipScorer) {
|
||||
assert.Equal(t, 10.0, scorer.Score("peer1"), "Unexpected score")
|
||||
assert.Equal(t, nil, scorer.IsBadPeer("peer1"), "Unexpected bad peer")
|
||||
assert.NoError(t, scorer.IsBadPeer("peer1"), "Unexpected bad peer")
|
||||
_, _, topicMap, err := scorer.GossipData("peer1")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(100), topicMap["a"].TimeInMesh, "incorrect time in mesh")
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
@@ -40,49 +41,68 @@ func (s *Service) setAllForkDigests() {
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
errNotReadyToSubscribe = fmt.Errorf("not ready to subscribe, service is not initialized")
|
||||
errMissingLeadingSlash = fmt.Errorf("topic is missing leading slash")
|
||||
errTopicMissingProtocolVersion = fmt.Errorf("topic is missing protocol version (eth2)")
|
||||
errTopicPathWrongPartCount = fmt.Errorf("topic path has wrong part count")
|
||||
errDigestInvalid = fmt.Errorf("digest is invalid")
|
||||
errDigestUnexpected = fmt.Errorf("digest is unexpected")
|
||||
errSnappySuffixMissing = fmt.Errorf("snappy suffix is missing")
|
||||
errTopicNotFound = fmt.Errorf("topic not found in gossip topic mappings")
|
||||
)
|
||||
|
||||
// CanSubscribe returns true if the topic is of interest and we could subscribe to it.
|
||||
func (s *Service) CanSubscribe(topic string) bool {
|
||||
if !s.isInitialized() {
|
||||
if err := s.checkSubscribable(topic); err != nil {
|
||||
if !errors.Is(err, errNotReadyToSubscribe) {
|
||||
logrus.WithError(err).WithField("topic", topic).Debug("CanSubscribe failed")
|
||||
}
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (s *Service) checkSubscribable(topic string) error {
|
||||
if !s.isInitialized() {
|
||||
return errNotReadyToSubscribe
|
||||
}
|
||||
parts := strings.Split(topic, "/")
|
||||
if len(parts) != 5 {
|
||||
return false
|
||||
return errTopicPathWrongPartCount
|
||||
}
|
||||
// The topic must start with a slash, which means the first part will be empty.
|
||||
if parts[0] != "" {
|
||||
return false
|
||||
return errMissingLeadingSlash
|
||||
}
|
||||
if parts[1] != "eth2" {
|
||||
return false
|
||||
protocol, rawDigest, suffix := parts[1], parts[2], parts[4]
|
||||
if protocol != "eth2" {
|
||||
return errTopicMissingProtocolVersion
|
||||
}
|
||||
if suffix != encoder.ProtocolSuffixSSZSnappy {
|
||||
return errSnappySuffixMissing
|
||||
}
|
||||
|
||||
var digest [4]byte
|
||||
dl, err := hex.Decode(digest[:], []byte(parts[2]))
|
||||
if err == nil && dl != 4 {
|
||||
err = fmt.Errorf("expected 4 bytes, got %d", dl)
|
||||
}
|
||||
dl, err := hex.Decode(digest[:], []byte(rawDigest))
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("topic", topic).WithField("digest", parts[2]).Error("CanSubscribe failed to parse message")
|
||||
return false
|
||||
return errors.Wrapf(errDigestInvalid, "%v", err)
|
||||
}
|
||||
if dl != 4 {
|
||||
return errors.Wrapf(errDigestInvalid, "wrong byte length")
|
||||
}
|
||||
if _, ok := s.allForkDigests[digest]; !ok {
|
||||
log.WithField("topic", topic).WithField("digest", fmt.Sprintf("%#x", digest)).Error("CanSubscribe failed to find digest in allForkDigests")
|
||||
return false
|
||||
}
|
||||
|
||||
if parts[4] != encoder.ProtocolSuffixSSZSnappy {
|
||||
return false
|
||||
return errDigestUnexpected
|
||||
}
|
||||
|
||||
// Check that the incoming topic matches a known topic mapping. This includes a check for parts[3].
|
||||
for gt := range gossipTopicMappings {
|
||||
if _, err := scanfcheck(strings.Join(parts[0:4], "/"), gt); err == nil {
|
||||
return true
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
return errTopicNotFound
|
||||
}
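// Shape of a subscribable topic (the digest value below is hypothetical):
//
//	/eth2/6a95a1a9/beacon_block/ssz_snappy
//
//	parts[0] is "" (leading slash), parts[1] is "eth2", parts[2] is a 4-byte hex
//	fork digest present in s.allForkDigests, parts[3] is the topic name checked
//	(together with the rest of the prefix) against gossipTopicMappings via
//	scanfcheck, and parts[4] must be encoder.ProtocolSuffixSSZSnappy.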
|
||||
|
||||
// FilterIncomingSubscriptions is invoked for all RPCs containing subscription notifications.
|
||||
@@ -100,7 +120,22 @@ func (s *Service) FilterIncomingSubscriptions(peerID peer.ID, subs []*pubsubpb.R
|
||||
return nil, pubsub.ErrTooManySubscriptions
|
||||
}
|
||||
|
||||
return pubsub.FilterSubscriptions(subs, s.CanSubscribe), nil
|
||||
return pubsub.FilterSubscriptions(subs, s.logCheckSubscribableError(peerID)), nil
|
||||
}
|
||||
|
||||
func (s *Service) logCheckSubscribableError(pid peer.ID) func(string) bool {
|
||||
return func(topic string) bool {
|
||||
if err := s.checkSubscribable(topic); err != nil {
|
||||
if !errors.Is(err, errNotReadyToSubscribe) {
|
||||
log.WithError(err).WithFields(logrus.Fields{
|
||||
"peerID": pid,
|
||||
"topic": topic,
|
||||
}).Debug("Peer subscription rejected")
|
||||
}
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// scanfcheck uses fmt.Sscanf to check that a given string matches expected format. This method
|
||||
|
||||
@@ -169,7 +169,7 @@ var (
|
||||
RPCDataColumnSidecarsByRangeTopicV1: new(pb.DataColumnSidecarsByRangeRequest),
|
||||
|
||||
// DataColumnSidecarsByRoot v1 Message
|
||||
RPCDataColumnSidecarsByRootTopicV1: new(p2ptypes.DataColumnsByRootIdentifiers),
|
||||
RPCDataColumnSidecarsByRootTopicV1: p2ptypes.DataColumnsByRootIdentifiers{},
|
||||
}
|
||||
|
||||
// Maps all registered protocol prefixes.
|
||||
|
||||
@@ -13,13 +13,13 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
|
||||
testp2p "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
prysmTime "github.com/OffchainLabs/prysm/v6/time"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/libp2p/go-libp2p"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
@@ -30,48 +30,6 @@ import (
|
||||
|
||||
const testPingInterval = 100 * time.Millisecond
|
||||
|
||||
type mockListener struct {
|
||||
localNode *enode.LocalNode
|
||||
}
|
||||
|
||||
func (m mockListener) Self() *enode.Node {
|
||||
return m.localNode.Node()
|
||||
}
|
||||
|
||||
func (mockListener) Close() {
|
||||
// no-op
|
||||
}
|
||||
|
||||
func (mockListener) Lookup(enode.ID) []*enode.Node {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (mockListener) ReadRandomNodes(_ []*enode.Node) int {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (mockListener) Resolve(*enode.Node) *enode.Node {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (mockListener) Ping(*enode.Node) error {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (mockListener) RequestENR(*enode.Node) (*enode.Node, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (mockListener) LocalNode() *enode.LocalNode {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (mockListener) RandomNodes() enode.Iterator {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (mockListener) RebootListener() error { panic("implement me") }
|
||||
|
||||
func createHost(t *testing.T, port uint) (host.Host, *ecdsa.PrivateKey, net.IP) {
|
||||
_, pkey := createAddrAndPrivKey(t)
|
||||
ipAddr := net.ParseIP("127.0.0.1")
|
||||
@@ -87,7 +45,7 @@ func TestService_Stop_SetsStartedToFalse(t *testing.T) {
|
||||
s, err := NewService(t.Context(), &Config{StateNotifier: &mock.MockStateNotifier{}, DB: testDB.SetupDB(t)})
|
||||
require.NoError(t, err)
|
||||
s.started = true
|
||||
s.dv5Listener = &mockListener{}
|
||||
s.dv5Listener = testp2p.NewMockListener(nil, nil)
|
||||
assert.NoError(t, s.Stop())
|
||||
assert.Equal(t, false, s.started)
|
||||
}
|
||||
@@ -113,7 +71,7 @@ func TestService_Start_OnlyStartsOnce(t *testing.T) {
|
||||
}
|
||||
s, err := NewService(t.Context(), cfg)
|
||||
require.NoError(t, err)
|
||||
s.dv5Listener = &mockListener{}
|
||||
s.dv5Listener = testp2p.NewMockListener(nil, nil)
|
||||
s.custodyInfo = &custodyInfo{}
|
||||
exitRoutine := make(chan bool)
|
||||
go func() {
|
||||
@@ -133,14 +91,14 @@ func TestService_Start_OnlyStartsOnce(t *testing.T) {
|
||||
func TestService_Status_NotRunning(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
s := &Service{started: false}
|
||||
s.dv5Listener = &mockListener{}
|
||||
s.dv5Listener = testp2p.NewMockListener(nil, nil)
|
||||
assert.ErrorContains(t, "not running", s.Status(), "Status returned wrong error")
|
||||
}
|
||||
|
||||
func TestService_Status_NoGenesisTimeSet(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
s := &Service{started: true}
|
||||
s.dv5Listener = &mockListener{}
|
||||
s.dv5Listener = testp2p.NewMockListener(nil, nil)
|
||||
assert.ErrorContains(t, "no genesis time set", s.Status(), "Status returned wrong error")
|
||||
|
||||
s.genesisTime = time.Now()
|
||||
|
||||
@@ -134,6 +134,24 @@ func (s *Service) FindAndDialPeersWithSubnets(
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateDefectiveSubnets updates the defective subnets map when a node with matching subnets is found.
|
||||
// It decrements the defective count for each subnet the node satisfies and removes subnets
|
||||
// that are fully satisfied (count reaches 0).
|
||||
func updateDefectiveSubnets(
|
||||
nodeSubnets map[uint64]bool,
|
||||
defectiveSubnets map[uint64]int,
|
||||
) {
|
||||
for subnet := range defectiveSubnets {
|
||||
if !nodeSubnets[subnet] {
|
||||
continue
|
||||
}
|
||||
defectiveSubnets[subnet]--
|
||||
if defectiveSubnets[subnet] == 0 {
|
||||
delete(defectiveSubnets, subnet)
|
||||
}
|
||||
}
|
||||
}
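// Worked example (hypothetical values): a node subscribed to subnets {1, 3}
// applied to defective counts {1: 2, 2: 1, 3: 1}:
//
//	defective := map[uint64]int{1: 2, 2: 1, 3: 1}
//	updateDefectiveSubnets(map[uint64]bool{1: true, 3: true}, defective)
//	// defective is now map[uint64]int{1: 1, 2: 1}; subnet 3 is fully satisfied and removed.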
|
||||
|
||||
// findPeersWithSubnets finds peers subscribed to defective subnets in batches
|
||||
// until enough peers are found or the context is canceled.
|
||||
// It returns new peers found during the search.
|
||||
@@ -169,6 +187,7 @@ func (s *Service) findPeersWithSubnets(
|
||||
|
||||
// Crawl the network for peers subscribed to the defective subnets.
|
||||
nodeByNodeID := make(map[enode.ID]*enode.Node)
|
||||
|
||||
for len(defectiveSubnets) > 0 && iterator.Next() {
|
||||
if err := ctx.Err(); err != nil {
|
||||
// Convert the map to a slice.
|
||||
@@ -180,14 +199,28 @@ func (s *Service) findPeersWithSubnets(
|
||||
return peersToDial, err
|
||||
}
|
||||
|
||||
// Get all needed subnets that the node is subscribed to.
|
||||
// Skip nodes that are not subscribed to any of the defective subnets.
|
||||
node := iterator.Node()
|
||||
|
||||
// Remove duplicates, keeping the node with higher seq.
|
||||
existing, ok := nodeByNodeID[node.ID()]
|
||||
if ok && existing.Seq() >= node.Seq() {
|
||||
continue // keep existing and skip.
|
||||
}
|
||||
|
||||
// A node already present in nodeByNodeID that reappears with a higher seq is treated as a new peer.
// Skip peers not matching the filter.
|
||||
if !s.filterPeer(node) {
|
||||
if ok {
|
||||
// this means the existing peer with the lower sequence number is no longer valid
|
||||
delete(nodeByNodeID, existing.ID())
|
||||
// Note: we choose not to roll back changes to the defective subnets map; instead, s.defectiveSubnets is computed again after dialing peers.
// This case should rarely happen and is handled by a second iteration in FindAndDialPeersWithSubnets.
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Get all needed subnets that the node is subscribed to.
|
||||
// Skip nodes that are not subscribed to any of the defective subnets.
|
||||
nodeSubnets, err := filter(node)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "filter node")
|
||||
@@ -196,30 +229,14 @@ func (s *Service) findPeersWithSubnets(
|
||||
continue
|
||||
}
|
||||
|
||||
// Remove duplicates, keeping the node with higher seq.
|
||||
existing, ok := nodeByNodeID[node.ID()]
|
||||
if ok && existing.Seq() > node.Seq() {
|
||||
continue
|
||||
}
|
||||
nodeByNodeID[node.ID()] = node
|
||||
|
||||
// We found a new peer. Modify the defective subnets map
|
||||
// and the filter accordingly.
|
||||
for subnet := range defectiveSubnets {
|
||||
if !nodeSubnets[subnet] {
|
||||
continue
|
||||
}
|
||||
nodeByNodeID[node.ID()] = node
|
||||
|
||||
defectiveSubnets[subnet]--
|
||||
|
||||
if defectiveSubnets[subnet] == 0 {
|
||||
delete(defectiveSubnets, subnet)
|
||||
}
|
||||
|
||||
filter, err = s.nodeFilter(topicFormat, defectiveSubnets)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "node filter")
|
||||
}
|
||||
updateDefectiveSubnets(nodeSubnets, defectiveSubnets)
|
||||
filter, err = s.nodeFilter(topicFormat, defectiveSubnets)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "node filter")
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -10,14 +10,19 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
testDB "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
|
||||
testp2p "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
ecdsaprysm "github.com/OffchainLabs/prysm/v6/crypto/ecdsa"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/libp2p/go-libp2p/core/crypto"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
)
|
||||
|
||||
@@ -541,3 +546,552 @@ func TestInitializePersistentSubnets(t *testing.T) {
|
||||
assert.Equal(t, 2, len(subs))
|
||||
assert.Equal(t, true, expTime.After(time.Now()))
|
||||
}
|
||||
|
||||
func TestFindPeersWithSubnets_NodeDeduplication(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cache.SubnetIDs.EmptyAllCaches()
|
||||
defer cache.SubnetIDs.EmptyAllCaches()
|
||||
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
|
||||
localNode1 := createTestNodeWithID(t, "node1")
|
||||
localNode2 := createTestNodeWithID(t, "node2")
|
||||
localNode3 := createTestNodeWithID(t, "node3")
|
||||
|
||||
// Create different sequence versions of node1 with subnet 1
|
||||
setNodeSubnets(localNode1, []uint64{1})
|
||||
setNodeSeq(localNode1, 1)
|
||||
node1_seq1_subnet1 := localNode1.Node()
|
||||
setNodeSeq(localNode1, 2)
|
||||
node1_seq2_subnet1 := localNode1.Node() // Same ID, higher seq
|
||||
setNodeSeq(localNode1, 3)
|
||||
node1_seq3_subnet1 := localNode1.Node() // Same ID, even higher seq
|
||||
|
||||
// Node2 with different sequences and subnets
|
||||
setNodeSubnets(localNode2, []uint64{1})
|
||||
node2_seq1_subnet1 := localNode2.Node()
|
||||
setNodeSubnets(localNode2, []uint64{2}) // Different subnet
|
||||
setNodeSeq(localNode2, 2)
|
||||
node2_seq2_subnet2 := localNode2.Node()
|
||||
|
||||
// Node3 with multiple subnets
|
||||
setNodeSubnets(localNode3, []uint64{1, 2})
|
||||
node3_seq1_subnet1_2 := localNode3.Node()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
nodes []*enode.Node
|
||||
defectiveSubnets map[uint64]int
|
||||
expectedCount int
|
||||
description string
|
||||
eval func(t *testing.T, result []*enode.Node) // Custom validation function
|
||||
}{
|
||||
{
|
||||
name: "No duplicates - unique nodes with same subnet",
|
||||
nodes: []*enode.Node{
|
||||
node2_seq1_subnet1,
|
||||
node3_seq1_subnet1_2,
|
||||
},
|
||||
defectiveSubnets: map[uint64]int{1: 2},
|
||||
expectedCount: 2,
|
||||
description: "Should return all unique nodes subscribed to subnet",
|
||||
eval: nil, // No special validation needed
|
||||
},
|
||||
{
|
||||
name: "Duplicate with lower seq first - should replace",
|
||||
nodes: []*enode.Node{
|
||||
node1_seq1_subnet1,
|
||||
node1_seq2_subnet1, // Higher seq, should replace
|
||||
node2_seq1_subnet1, // Different node to ensure we process enough nodes
|
||||
},
|
||||
defectiveSubnets: map[uint64]int{1: 2}, // Need 2 peers for subnet 1
|
||||
expectedCount: 2,
|
||||
description: "Should replace with higher seq node for same subnet",
|
||||
eval: func(t *testing.T, result []*enode.Node) {
|
||||
found := false
|
||||
for _, node := range result {
|
||||
if node.ID() == node1_seq2_subnet1.ID() && node.Seq() == node1_seq2_subnet1.Seq() {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
require.Equal(t, true, found, "Should have node with higher seq")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Duplicate with higher seq first - should keep existing",
|
||||
nodes: []*enode.Node{
|
||||
node1_seq3_subnet1, // Higher seq
|
||||
node1_seq2_subnet1, // Lower seq, should be skipped (continue branch)
|
||||
node1_seq1_subnet1, // Even lower seq, should also be skipped (continue branch)
|
||||
node2_seq1_subnet1, // Different node
|
||||
},
|
||||
defectiveSubnets: map[uint64]int{1: 2},
|
||||
expectedCount: 2,
|
||||
description: "Should keep existing node with higher seq and skip lower seq duplicates",
|
||||
eval: func(t *testing.T, result []*enode.Node) {
|
||||
found := false
|
||||
for _, node := range result {
|
||||
if node.ID() == node1_seq3_subnet1.ID() && node.Seq() == node1_seq3_subnet1.Seq() {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
require.Equal(t, true, found, "Should have node with highest seq")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Multiple updates for same node",
|
||||
nodes: []*enode.Node{
|
||||
node1_seq1_subnet1,
|
||||
node1_seq2_subnet1, // Should replace seq1
|
||||
node1_seq3_subnet1, // Should replace seq2
|
||||
node2_seq1_subnet1, // Different node
|
||||
},
|
||||
defectiveSubnets: map[uint64]int{1: 2},
|
||||
expectedCount: 2,
|
||||
description: "Should keep updating to highest seq",
|
||||
eval: func(t *testing.T, result []*enode.Node) {
|
||||
found := false
|
||||
for _, node := range result {
|
||||
if node.ID() == node1_seq3_subnet1.ID() && node.Seq() == node1_seq3_subnet1.Seq() {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
require.Equal(t, true, found, "Should have node with highest seq")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Duplicate with equal seq in subnets - should skip",
|
||||
nodes: []*enode.Node{
|
||||
node1_seq2_subnet1, // First occurrence
|
||||
node1_seq2_subnet1, // Same exact node instance, should be skipped (continue branch)
|
||||
node2_seq1_subnet1, // Different node
|
||||
},
|
||||
defectiveSubnets: map[uint64]int{1: 2},
|
||||
expectedCount: 2,
|
||||
description: "Should skip duplicate with equal sequence number in subnet search",
|
||||
eval: func(t *testing.T, result []*enode.Node) {
|
||||
foundNode1 := false
|
||||
foundNode2 := false
|
||||
node1Count := 0
|
||||
for _, node := range result {
|
||||
if node.ID() == node1_seq2_subnet1.ID() {
|
||||
require.Equal(t, node1_seq2_subnet1.Seq(), node.Seq(), "Node1 should have expected seq")
|
||||
foundNode1 = true
|
||||
node1Count++
|
||||
}
|
||||
if node.ID() == node2_seq1_subnet1.ID() {
|
||||
foundNode2 = true
|
||||
}
|
||||
}
|
||||
require.Equal(t, true, foundNode1, "Should have node1")
|
||||
require.Equal(t, true, foundNode2, "Should have node2")
|
||||
require.Equal(t, 1, node1Count, "Should have exactly one instance of node1")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Mix with different subnets",
|
||||
nodes: []*enode.Node{
|
||||
node2_seq1_subnet1,
|
||||
node2_seq2_subnet2, // Higher seq but different subnet
|
||||
node3_seq1_subnet1_2,
|
||||
},
|
||||
defectiveSubnets: map[uint64]int{1: 2, 2: 1},
|
||||
expectedCount: 2, // node2 (latest) and node3
|
||||
description: "Should handle nodes with different subnet subscriptions",
|
||||
eval: nil, // Basic count validation is sufficient
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.MinimumPeersPerSubnet = 1
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(new(flags.GlobalFlags))
|
||||
|
||||
fakePeer := testp2p.NewTestP2P(t)
|
||||
|
||||
s := &Service{
|
||||
cfg: &Config{
|
||||
MaxPeers: 30,
|
||||
DB: db,
|
||||
},
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
peers: peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{},
|
||||
}),
|
||||
host: fakePeer.BHost,
|
||||
}
|
||||
|
||||
localNode := createTestNodeRandom(t)
|
||||
|
||||
mockIter := testp2p.NewMockIterator(tt.nodes)
|
||||
s.dv5Listener = testp2p.NewMockListener(localNode, mockIter)
|
||||
|
||||
digest, err := s.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
|
||||
ctxWithTimeout, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
|
||||
defer cancel()
|
||||
|
||||
result, err := s.findPeersWithSubnets(
|
||||
ctxWithTimeout,
|
||||
AttestationSubnetTopicFormat,
|
||||
digest,
|
||||
1,
|
||||
tt.defectiveSubnets,
|
||||
)
|
||||
|
||||
require.NoError(t, err, tt.description)
|
||||
require.Equal(t, tt.expectedCount, len(result), tt.description)
|
||||
|
||||
if tt.eval != nil {
|
||||
tt.eval(t, result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFindPeersWithSubnets_FilterPeerRemoval(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cache.SubnetIDs.EmptyAllCaches()
|
||||
defer cache.SubnetIDs.EmptyAllCaches()
|
||||
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
|
||||
localNode1 := createTestNodeWithID(t, "node1")
|
||||
localNode2 := createTestNodeWithID(t, "node2")
|
||||
localNode3 := createTestNodeWithID(t, "node3")
|
||||
|
||||
// Create versions of node1 with subnet 1
|
||||
setNodeSubnets(localNode1, []uint64{1})
|
||||
setNodeSeq(localNode1, 1)
|
||||
node1_seq1_valid_subnet1 := localNode1.Node()
|
||||
|
||||
// Create bad version (higher seq)
|
||||
setNodeSeq(localNode1, 2)
|
||||
node1_seq2_bad_subnet1 := localNode1.Node()
|
||||
|
||||
// Create another valid version
|
||||
setNodeSeq(localNode1, 3)
|
||||
node1_seq3_valid_subnet1 := localNode1.Node()
|
||||
|
||||
// Node2 with subnet 1
|
||||
setNodeSubnets(localNode2, []uint64{1})
|
||||
node2_seq1_valid_subnet1 := localNode2.Node()
|
||||
|
||||
// Node3 with subnet 1 and 2
|
||||
setNodeSubnets(localNode3, []uint64{1, 2})
|
||||
node3_seq1_valid_subnet1_2 := localNode3.Node()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
nodes []*enode.Node
|
||||
defectiveSubnets map[uint64]int
|
||||
expectedCount int
|
||||
description string
|
||||
eval func(t *testing.T, result []*enode.Node)
|
||||
}{
|
||||
{
|
||||
name: "Valid node in subnet followed by bad version - should remove",
|
||||
nodes: []*enode.Node{
|
||||
node1_seq1_valid_subnet1, // First add valid node with subnet 1
|
||||
node1_seq2_bad_subnet1, // Invalid version with higher seq - should delete
|
||||
node2_seq1_valid_subnet1, // Different valid node with subnet 1
|
||||
},
|
||||
defectiveSubnets: map[uint64]int{1: 2}, // Need 2 peers for subnet 1
|
||||
expectedCount: 1, // Only node2 should remain
|
||||
description: "Should remove node from map when bad version arrives, even if it has required subnet",
|
||||
eval: func(t *testing.T, result []*enode.Node) {
|
||||
foundNode1 := false
|
||||
foundNode2 := false
|
||||
for _, node := range result {
|
||||
if node.ID() == node1_seq1_valid_subnet1.ID() {
|
||||
foundNode1 = true
|
||||
}
|
||||
if node.ID() == node2_seq1_valid_subnet1.ID() {
|
||||
foundNode2 = true
|
||||
}
|
||||
}
|
||||
require.Equal(t, false, foundNode1, "Node1 should have been removed despite having subnet")
|
||||
require.Equal(t, true, foundNode2, "Node2 should be present")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Bad node with subnet stays bad even with higher seq",
|
||||
nodes: []*enode.Node{
|
||||
node1_seq2_bad_subnet1, // First bad node - not added
|
||||
node1_seq3_valid_subnet1, // Higher seq but same bad peer ID
|
||||
node2_seq1_valid_subnet1, // Different valid node
|
||||
},
|
||||
defectiveSubnets: map[uint64]int{1: 2},
|
||||
expectedCount: 1, // Only node2 (node1 remains bad)
|
||||
description: "Bad peer with subnet remains bad even with higher seq",
|
||||
eval: func(t *testing.T, result []*enode.Node) {
|
||||
foundNode1 := false
|
||||
foundNode2 := false
|
||||
for _, node := range result {
|
||||
if node.ID() == node1_seq3_valid_subnet1.ID() {
|
||||
foundNode1 = true
|
||||
}
|
||||
if node.ID() == node2_seq1_valid_subnet1.ID() {
|
||||
foundNode2 = true
|
||||
}
|
||||
}
|
||||
require.Equal(t, false, foundNode1, "Node1 should remain bad despite having subnet")
|
||||
require.Equal(t, true, foundNode2, "Node2 should be present")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Mixed valid and bad nodes with subnets",
|
||||
nodes: []*enode.Node{
|
||||
node1_seq1_valid_subnet1, // Add valid node1 with subnet
|
||||
node2_seq1_valid_subnet1, // Add valid node2 with subnet
|
||||
node1_seq2_bad_subnet1, // Invalid update for node1 - should remove
|
||||
node3_seq1_valid_subnet1_2, // Add valid node3 with multiple subnets
|
||||
},
|
||||
defectiveSubnets: map[uint64]int{1: 3}, // Need 3 peers for subnet 1
|
||||
expectedCount: 2, // Only node2 and node3 should remain
|
||||
description: "Should handle removal of nodes with subnets when they become bad",
|
||||
eval: func(t *testing.T, result []*enode.Node) {
|
||||
foundNode1 := false
|
||||
foundNode2 := false
|
||||
foundNode3 := false
|
||||
for _, node := range result {
|
||||
if node.ID() == node1_seq1_valid_subnet1.ID() {
|
||||
foundNode1 = true
|
||||
}
|
||||
if node.ID() == node2_seq1_valid_subnet1.ID() {
|
||||
foundNode2 = true
|
||||
}
|
||||
if node.ID() == node3_seq1_valid_subnet1_2.ID() {
|
||||
foundNode3 = true
|
||||
}
|
||||
}
|
||||
require.Equal(t, false, foundNode1, "Node1 should have been removed")
|
||||
require.Equal(t, true, foundNode2, "Node2 should be present")
|
||||
require.Equal(t, true, foundNode3, "Node3 should be present")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Node with subnet marked bad stays bad for all sequences",
|
||||
nodes: []*enode.Node{
|
||||
node1_seq1_valid_subnet1, // Add valid node1 with subnet
|
||||
node1_seq2_bad_subnet1, // Bad update - should remove and mark bad
|
||||
node1_seq3_valid_subnet1, // Higher seq but still same bad peer ID
|
||||
node2_seq1_valid_subnet1, // Different valid node
|
||||
},
|
||||
defectiveSubnets: map[uint64]int{1: 2},
|
||||
expectedCount: 1, // Only node2 (node1 stays bad)
|
||||
description: "Once marked bad, subnet peer stays bad for all sequences",
|
||||
eval: func(t *testing.T, result []*enode.Node) {
|
||||
foundNode1 := false
|
||||
foundNode2 := false
|
||||
for _, node := range result {
|
||||
if node.ID() == node1_seq3_valid_subnet1.ID() {
|
||||
foundNode1 = true
|
||||
}
|
||||
if node.ID() == node2_seq1_valid_subnet1.ID() {
|
||||
foundNode2 = true
|
||||
}
|
||||
}
|
||||
require.Equal(t, false, foundNode1, "Node1 should stay bad")
|
||||
require.Equal(t, true, foundNode2, "Node2 should be present")
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Initialize flags for subnet operations
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.MinimumPeersPerSubnet = 1
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(new(flags.GlobalFlags))
|
||||
|
||||
// Create test P2P instance
|
||||
fakePeer := testp2p.NewTestP2P(t)
|
||||
|
||||
// Create mock service
|
||||
s := &Service{
|
||||
cfg: &Config{
|
||||
MaxPeers: 30,
|
||||
DB: db,
|
||||
},
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
peers: peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{},
|
||||
}),
|
||||
host: fakePeer.BHost,
|
||||
}
|
||||
|
||||
// Mark specific node versions as "bad" to simulate filterPeer failures
|
||||
for _, node := range tt.nodes {
|
||||
if node == node1_seq2_bad_subnet1 {
|
||||
// Get peer ID from the node to mark it as bad
|
||||
peerData, _, _ := convertToAddrInfo(node)
|
||||
if peerData != nil {
|
||||
s.peers.Add(node.Record(), peerData.ID, nil, network.DirUnknown)
|
||||
// Mark as bad peer - this will make filterPeer return false
|
||||
s.peers.Scorers().BadResponsesScorer().Increment(peerData.ID)
|
||||
s.peers.Scorers().BadResponsesScorer().Increment(peerData.ID)
|
||||
s.peers.Scorers().BadResponsesScorer().Increment(peerData.ID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
localNode := createTestNodeRandom(t)
|
||||
|
||||
mockIter := testp2p.NewMockIterator(tt.nodes)
|
||||
s.dv5Listener = testp2p.NewMockListener(localNode, mockIter)
|
||||
|
||||
digest, err := s.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
|
||||
ctxWithTimeout, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
|
||||
defer cancel()
|
||||
|
||||
result, err := s.findPeersWithSubnets(
|
||||
ctxWithTimeout,
|
||||
AttestationSubnetTopicFormat,
|
||||
digest,
|
||||
1,
|
||||
tt.defectiveSubnets,
|
||||
)
|
||||
|
||||
require.NoError(t, err, tt.description)
|
||||
require.Equal(t, tt.expectedCount, len(result), tt.description)
|
||||
|
||||
if tt.eval != nil {
|
||||
tt.eval(t, result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// callbackIterator allows us to execute callbacks at specific points during iteration
|
||||
type callbackIteratorForSubnets struct {
|
||||
nodes []*enode.Node
|
||||
index int
|
||||
callbacks map[int]func() // map from index to callback function
|
||||
}
|
||||
|
||||
func (c *callbackIteratorForSubnets) Next() bool {
|
||||
// Execute callback before checking if we can continue (if one exists)
|
||||
if callback, exists := c.callbacks[c.index]; exists {
|
||||
callback()
|
||||
}
|
||||
|
||||
return c.index < len(c.nodes)
|
||||
}
|
||||
|
||||
func (c *callbackIteratorForSubnets) Node() *enode.Node {
|
||||
if c.index >= len(c.nodes) {
|
||||
return nil
|
||||
}
|
||||
|
||||
node := c.nodes[c.index]
|
||||
c.index++
|
||||
return node
|
||||
}
|
||||
|
||||
func (c *callbackIteratorForSubnets) Close() {
|
||||
// Nothing to clean up for this simple implementation
|
||||
}
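A minimal usage sketch (not part of the diff) of the iterator above: the callback registered for an index runs inside Next() just before that node is handed out, which is how the test below flips a peer to "bad" between two versions of the same node. The node values are assumed to come from helpers such as createTestNodeWithID.

iter := &callbackIteratorForSubnets{
	nodes: []*enode.Node{nodeA, nodeB}, // nodeA, nodeB built by test helpers (assumed)
	callbacks: map[int]func(){
		1: func() {
			// Runs before nodeB (index 1) is returned, e.g. to mark its peer
			// as bad so that filterPeer rejects the second version.
		},
	},
}
for iter.Next() {
	_ = iter.Node() // consume nodes exactly as the discovery loop would
}
iter.Close()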
|
||||
|
||||
func TestFindPeersWithSubnets_received_bad_existing_node(t *testing.T) {
|
||||
// This test successfully triggers delete(nodeByNodeID, node.ID()) in subnets.go by:
|
||||
// 1. Processing node1_seq1 first (passes filterPeer, gets added to the map)
|
||||
// 2. Callback marks the peer as bad before node1_seq2 is processed
|
||||
// 3. Processing node1_seq2 (fails filterPeer, triggers the delete since ok == true)
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cache.SubnetIDs.EmptyAllCaches()
|
||||
defer cache.SubnetIDs.EmptyAllCaches()
|
||||
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
|
||||
// Create LocalNode with same ID but different sequences
|
||||
localNode1 := createTestNodeWithID(t, "testnode")
|
||||
setNodeSubnets(localNode1, []uint64{1})
|
||||
node1_seq1 := localNode1.Node() // Get current node
|
||||
currentSeq := node1_seq1.Seq()
|
||||
setNodeSeq(localNode1, currentSeq+1) // Increment sequence by 1
|
||||
node1_seq2 := localNode1.Node() // This should have higher seq
|
||||
|
||||
// Additional node to ensure we have enough peers to process
|
||||
localNode2 := createTestNodeWithID(t, "othernode")
|
||||
setNodeSubnets(localNode2, []uint64{1})
|
||||
node2 := localNode2.Node()
|
||||
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.MinimumPeersPerSubnet = 1
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(new(flags.GlobalFlags))
|
||||
|
||||
fakePeer := testp2p.NewTestP2P(t)
|
||||
|
||||
service := &Service{
|
||||
cfg: &Config{
|
||||
MaxPeers: 30,
|
||||
DB: db,
|
||||
},
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
peers: peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{},
|
||||
}),
|
||||
host: fakePeer.BHost,
|
||||
}
|
||||
|
||||
// Create iterator with callback that marks peer as bad before processing node1_seq2
|
||||
iter := &callbackIteratorForSubnets{
|
||||
nodes: []*enode.Node{node1_seq1, node1_seq2, node2},
|
||||
index: 0,
|
||||
callbacks: map[int]func(){
|
||||
1: func() { // Before processing node1_seq2 (index 1)
|
||||
// Mark peer as bad before processing node1_seq2
|
||||
peerData, _, _ := convertToAddrInfo(node1_seq2)
|
||||
if peerData != nil {
|
||||
service.peers.Add(node1_seq2.Record(), peerData.ID, nil, network.DirUnknown)
|
||||
// Mark as bad peer - need enough increments to exceed threshold (6)
|
||||
for i := 0; i < 10; i++ {
|
||||
service.peers.Scorers().BadResponsesScorer().Increment(peerData.ID)
|
||||
}
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
localNode := createTestNodeRandom(t)
|
||||
service.dv5Listener = testp2p.NewMockListener(localNode, iter)
|
||||
|
||||
digest, err := service.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Run findPeersWithSubnets - node1_seq1 gets processed first, then callback marks peer bad, then node1_seq2 fails
|
||||
ctxWithTimeout, cancel := context.WithTimeout(ctx, 1*time.Second)
|
||||
defer cancel()
|
||||
|
||||
result, err := service.findPeersWithSubnets(
|
||||
ctxWithTimeout,
|
||||
AttestationSubnetTopicFormat,
|
||||
digest,
|
||||
1,
|
||||
map[uint64]int{1: 2}, // Need 2 peers for subnet 1
|
||||
)
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(result))
|
||||
require.Equal(t, localNode2.Node().ID(), result[0].ID()) // only node2 should remain
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ go_library(
|
||||
"fuzz_p2p.go",
|
||||
"mock_broadcaster.go",
|
||||
"mock_host.go",
|
||||
"mock_listener.go",
|
||||
"mock_metadataprovider.go",
|
||||
"mock_peermanager.go",
|
||||
"mock_peersprovider.go",
|
||||
|
||||
@@ -167,8 +167,8 @@ func (*FakeP2P) BroadcastLightClientFinalityUpdate(_ context.Context, _ interfac
|
||||
return nil
|
||||
}
|
||||
|
||||
// BroadcastDataColumn -- fake.
|
||||
func (*FakeP2P) BroadcastDataColumn(_ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar) error {
|
||||
// BroadcastDataColumnSidecar -- fake.
|
||||
func (*FakeP2P) BroadcastDataColumnSidecar(_ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -62,8 +62,8 @@ func (m *MockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
|
||||
return nil
|
||||
}
|
||||
|
||||
// BroadcastDataColumn broadcasts a data column for mock.
|
||||
func (m *MockBroadcaster) BroadcastDataColumn([fieldparams.RootLength]byte, uint64, *ethpb.DataColumnSidecar) error {
|
||||
// BroadcastDataColumnSidecar broadcasts a data column for mock.
|
||||
func (m *MockBroadcaster) BroadcastDataColumnSidecar([fieldparams.RootLength]byte, uint64, *ethpb.DataColumnSidecar) error {
|
||||
m.BroadcastCalled.Store(true)
|
||||
return nil
|
||||
}
|
||||
|
||||
128
beacon-chain/p2p/testing/mock_listener.go
Normal file
@@ -0,0 +1,128 @@
|
||||
package testing
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
)
|
||||
|
||||
// MockListener is a mock implementation of the Listener and ListenerRebooter interfaces
|
||||
// that can be used in tests. It provides configurable behavior for all methods.
|
||||
type MockListener struct {
|
||||
LocalNodeFunc func() *enode.LocalNode
|
||||
SelfFunc func() *enode.Node
|
||||
RandomNodesFunc func() enode.Iterator
|
||||
LookupFunc func(enode.ID) []*enode.Node
|
||||
ResolveFunc func(*enode.Node) *enode.Node
|
||||
PingFunc func(*enode.Node) error
|
||||
RequestENRFunc func(*enode.Node) (*enode.Node, error)
|
||||
RebootFunc func() error
|
||||
CloseFunc func()
|
||||
|
||||
// Default implementations
|
||||
localNode *enode.LocalNode
|
||||
iterator enode.Iterator
|
||||
}
|
||||
|
||||
// NewMockListener creates a new MockListener with default implementations
|
||||
func NewMockListener(localNode *enode.LocalNode, iterator enode.Iterator) *MockListener {
|
||||
return &MockListener{
|
||||
localNode: localNode,
|
||||
iterator: iterator,
|
||||
}
|
||||
}
|
||||
|
||||
func (m *MockListener) LocalNode() *enode.LocalNode {
|
||||
if m.LocalNodeFunc != nil {
|
||||
return m.LocalNodeFunc()
|
||||
}
|
||||
return m.localNode
|
||||
}
|
||||
|
||||
func (m *MockListener) Self() *enode.Node {
|
||||
if m.SelfFunc != nil {
|
||||
return m.SelfFunc()
|
||||
}
|
||||
if m.localNode != nil {
|
||||
return m.localNode.Node()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockListener) RandomNodes() enode.Iterator {
|
||||
if m.RandomNodesFunc != nil {
|
||||
return m.RandomNodesFunc()
|
||||
}
|
||||
return m.iterator
|
||||
}
|
||||
|
||||
func (m *MockListener) Lookup(id enode.ID) []*enode.Node {
|
||||
if m.LookupFunc != nil {
|
||||
return m.LookupFunc(id)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockListener) Resolve(node *enode.Node) *enode.Node {
|
||||
if m.ResolveFunc != nil {
|
||||
return m.ResolveFunc(node)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockListener) Ping(node *enode.Node) error {
|
||||
if m.PingFunc != nil {
|
||||
return m.PingFunc(node)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockListener) RequestENR(node *enode.Node) (*enode.Node, error) {
|
||||
if m.RequestENRFunc != nil {
|
||||
return m.RequestENRFunc(node)
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (m *MockListener) RebootListener() error {
|
||||
if m.RebootFunc != nil {
|
||||
return m.RebootFunc()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockListener) Close() {
|
||||
if m.CloseFunc != nil {
|
||||
m.CloseFunc()
|
||||
}
|
||||
}
|
||||
|
||||
// MockIterator is a mock implementation of enode.Iterator for testing
|
||||
type MockIterator struct {
|
||||
Nodes []*enode.Node
|
||||
Position int
|
||||
Closed bool
|
||||
}
|
||||
|
||||
func NewMockIterator(nodes []*enode.Node) *MockIterator {
|
||||
return &MockIterator{
|
||||
Nodes: nodes,
|
||||
}
|
||||
}
|
||||
|
||||
func (m *MockIterator) Next() bool {
|
||||
if m.Closed || m.Position >= len(m.Nodes) {
|
||||
return false
|
||||
}
|
||||
m.Position++
|
||||
return true
|
||||
}
|
||||
|
||||
func (m *MockIterator) Node() *enode.Node {
|
||||
if m.Position == 0 || m.Position > len(m.Nodes) {
|
||||
return nil
|
||||
}
|
||||
return m.Nodes[m.Position-1]
|
||||
}
|
||||
|
||||
func (m *MockIterator) Close() {
|
||||
m.Closed = true
|
||||
}
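A hedged usage sketch (not in the diff): every *Func field on MockListener overrides a single method, and anything left nil falls back to the localNode and iterator passed to NewMockListener. The localNode and nodes values are assumed to come from test helpers elsewhere in the suite.

iter := NewMockIterator(nodes)               // nodes []*enode.Node from a helper (assumed)
listener := NewMockListener(localNode, iter) // localNode *enode.LocalNode from a helper (assumed)

// Override one method; the rest keep the constructor defaults.
listener.LookupFunc = func(id enode.ID) []*enode.Node {
	return nodes // pretend every lookup finds the full set
}

_ = listener.Self()             // falls back to localNode.Node()
_ = listener.RandomNodes()      // falls back to iter
_ = listener.Lookup(enode.ID{}) // uses the override above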
|
||||
@@ -63,6 +63,7 @@ type TestP2P struct {
|
||||
custodyInfoMut sync.RWMutex // protects custodyGroupCount and earliestAvailableSlot
|
||||
earliestAvailableSlot primitives.Slot
|
||||
custodyGroupCount uint64
|
||||
enr *enr.Record
|
||||
}
|
||||
|
||||
// NewTestP2P initializes a new p2p test service.
|
||||
@@ -103,6 +104,7 @@ func NewTestP2P(t *testing.T, userOptions ...config.Option) *TestP2P {
|
||||
pubsub: ps,
|
||||
joinedTopics: map[string]*pubsub.Topic{},
|
||||
peers: peerStatuses,
|
||||
enr: new(enr.Record),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -228,8 +230,8 @@ func (p *TestP2P) BroadcastLightClientFinalityUpdate(_ context.Context, _ interf
|
||||
return nil
|
||||
}
|
||||
|
||||
// BroadcastDataColumn broadcasts a data column for mock.
|
||||
func (p *TestP2P) BroadcastDataColumn([fieldparams.RootLength]byte, uint64, *ethpb.DataColumnSidecar) error {
|
||||
// BroadcastDataColumnSidecar broadcasts a data column for mock.
|
||||
func (p *TestP2P) BroadcastDataColumnSidecar([fieldparams.RootLength]byte, uint64, *ethpb.DataColumnSidecar) error {
|
||||
p.BroadcastCalled.Store(true)
|
||||
return nil
|
||||
}
|
||||
@@ -310,8 +312,8 @@ func (p *TestP2P) Host() host.Host {
|
||||
}
|
||||
|
||||
// ENR returns the enr of the local peer.
|
||||
func (*TestP2P) ENR() *enr.Record {
|
||||
return new(enr.Record)
|
||||
func (p *TestP2P) ENR() *enr.Record {
|
||||
return p.enr
|
||||
}
|
||||
|
||||
// NodeID returns the node id of the local peer.
|
||||
|
||||
@@ -18,6 +18,7 @@ go_library(
|
||||
"//api/server:go_default_library",
|
||||
"//api/server/structs:go_default_library",
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/cache/depositsnapshot:go_default_library",
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
@@ -60,7 +61,6 @@ go_library(
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//crypto/kzg4844:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
@@ -72,6 +72,7 @@ go_library(
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"handlers_equivocation_test.go",
|
||||
"handlers_pool_test.go",
|
||||
"handlers_state_test.go",
|
||||
"handlers_test.go",
|
||||
@@ -83,6 +84,7 @@ go_test(
|
||||
"//api:go_default_library",
|
||||
"//api/server:go_default_library",
|
||||
"//api/server/structs:go_default_library",
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/cache/depositsnapshot:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
@@ -123,7 +125,6 @@ go_test(
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_fastssz//:go_default_library",
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/api"
|
||||
"github.com/OffchainLabs/prysm/v6/api/server/structs"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache/depositsnapshot"
|
||||
corehelpers "github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
|
||||
@@ -32,7 +33,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/crypto/kzg4844"
|
||||
"github.com/pkg/errors"
|
||||
ssz "github.com/prysmaticlabs/fastssz"
|
||||
"github.com/sirupsen/logrus"
|
||||
@@ -701,7 +701,7 @@ func (s *Server) publishBlockSSZ(ctx context.Context, w http.ResponseWriter, r *
|
||||
// Validate and optionally broadcast sidecars on equivocation.
|
||||
if err := s.validateBroadcast(ctx, r, genericBlock); err != nil {
|
||||
if errors.Is(err, errEquivocatedBlock) {
|
||||
b, err := blocks.NewSignedBeaconBlock(genericBlock)
|
||||
b, err := blocks.NewSignedBeaconBlock(genericBlock.Block)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
@@ -855,7 +855,7 @@ func (s *Server) publishBlock(ctx context.Context, w http.ResponseWriter, r *htt
|
||||
// Validate and optionally broadcast sidecars on equivocation.
|
||||
if err := s.validateBroadcast(ctx, r, genericBlock); err != nil {
|
||||
if errors.Is(err, errEquivocatedBlock) {
|
||||
b, err := blocks.NewSignedBeaconBlock(genericBlock)
|
||||
b, err := blocks.NewSignedBeaconBlock(genericBlock.Block)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
@@ -942,14 +942,13 @@ func decodePhase0JSON(body []byte) (*eth.GenericSignedBeaconBlock, error) {
|
||||
// broadcastSidecarsIfSupported broadcasts blob sidecars when an equivocated block occurs.
|
||||
func broadcastSidecarsIfSupported(ctx context.Context, s *Server, b interfaces.SignedBeaconBlock, gb *eth.GenericSignedBeaconBlock, versionHeader string) error {
|
||||
switch versionHeader {
|
||||
case version.String(version.Fulu):
|
||||
return s.broadcastSeenBlockSidecars(ctx, b, gb.GetFulu().Blobs, gb.GetFulu().KzgProofs)
|
||||
case version.String(version.Electra):
|
||||
return s.broadcastSeenBlockSidecars(ctx, b, gb.GetElectra().Blobs, gb.GetElectra().KzgProofs)
|
||||
case version.String(version.Deneb):
|
||||
return s.broadcastSeenBlockSidecars(ctx, b, gb.GetDeneb().Blobs, gb.GetDeneb().KzgProofs)
|
||||
default:
|
||||
// other forks before Deneb do not support blob sidecars
|
||||
// forks after Fulu do not support blob sidecars either; they use data column sidecars instead, so there is no need to rebroadcast
|
||||
return nil
|
||||
}
|
||||
}
|
||||
@@ -1053,7 +1052,7 @@ func (s *Server) validateConsensus(ctx context.Context, b *eth.GenericSignedBeac
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := s.validateBlobSidecars(blk, blobs, proofs); err != nil {
|
||||
if err := s.validateBlobs(blk, blobs, proofs); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -1067,23 +1066,41 @@ func (s *Server) validateEquivocation(blk interfaces.ReadOnlyBeaconBlock) error
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Server) validateBlobSidecars(blk interfaces.SignedBeaconBlock, blobs [][]byte, proofs [][]byte) error {
|
||||
func (s *Server) validateBlobs(blk interfaces.SignedBeaconBlock, blobs [][]byte, proofs [][]byte) error {
|
||||
if blk.Version() < version.Deneb {
|
||||
return nil
|
||||
}
|
||||
kzgs, err := blk.Block().Body().BlobKzgCommitments()
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
commitments, err := blk.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get blob kzg commitments")
|
||||
}
|
||||
if len(blobs) != len(proofs) || len(blobs) != len(kzgs) {
|
||||
return errors.New("number of blobs, proofs, and commitments do not match")
|
||||
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(blk.Block().Slot())
|
||||
if len(blobs) > maxBlobsPerBlock {
|
||||
return fmt.Errorf("number of blobs over max, %d > %d", len(blobs), maxBlobsPerBlock)
|
||||
}
|
||||
for i, blob := range blobs {
|
||||
b := kzg4844.Blob(blob)
|
||||
if err := kzg4844.VerifyBlobProof(&b, kzg4844.Commitment(kzgs[i]), kzg4844.Proof(proofs[i])); err != nil {
|
||||
return errors.Wrap(err, "could not verify blob proof")
|
||||
if blk.Version() >= version.Fulu {
|
||||
// For Fulu blocks, proofs are cell proofs (blobs * numberOfColumns)
|
||||
expectedProofsCount := uint64(len(blobs)) * numberOfColumns
|
||||
if uint64(len(proofs)) != expectedProofsCount || len(blobs) != len(commitments) {
|
||||
return fmt.Errorf("number of blobs (%d), cell proofs (%d), and commitments (%d) do not match (expected %d cell proofs)", len(blobs), len(proofs), len(commitments), expectedProofsCount)
|
||||
}
|
||||
// For Fulu blocks, proofs are cell proofs from execution client's BlobsBundleV2
|
||||
// Verify cell proofs directly without reconstructing data column sidecars
|
||||
if err := kzg.VerifyCellKZGProofBatchFromBlobData(blobs, commitments, proofs, numberOfColumns); err != nil {
|
||||
return errors.Wrap(err, "could not verify cell proofs")
|
||||
}
|
||||
} else {
|
||||
// For pre-Fulu blocks, proofs are blob proofs (1:1 with blobs)
|
||||
if len(blobs) != len(proofs) || len(blobs) != len(commitments) {
|
||||
return errors.Errorf("number of blobs (%d), proofs (%d), and commitments (%d) do not match", len(blobs), len(proofs), len(commitments))
|
||||
}
|
||||
// Use batch verification for better performance
|
||||
if err := kzg.VerifyBlobKZGProofBatch(blobs, commitments, proofs); err != nil {
|
||||
return errors.Wrap(err, "could not verify blob proofs")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
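The Fulu branch above only changes the proof bookkeeping: instead of one blob proof per blob, it expects one cell proof per (blob, column) pair, flattened blob-major, exactly as the Fulu test further down builds them. A small self-contained sketch of that arithmetic (the 128-column figure matches the test config and is an assumption here, not read from this hunk):

package main

import "fmt"

func main() {
	const numberOfColumns = 128 // PeerDAS column count assumed, as in the tests below
	blobs := 2

	// Fulu: expected proof count is blobs * numberOfColumns.
	fmt.Println(blobs * numberOfColumns) // 256

	// Index of the proof for blob b, column c in the flattened slice,
	// matching cellProofs[blobIdx*numberOfColumns + colIdx] in the test.
	b, c := 1, 5
	fmt.Println(b*numberOfColumns + c) // 133
}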
|
||||
|
||||
@@ -1220,7 +1237,7 @@ func (s *Server) GetStateFork(w http.ResponseWriter, r *http.Request) {
|
||||
fork := st.Fork()
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not check optimistic status"+err.Error(), http.StatusInternalServerError)
|
||||
helpers.HandleIsOptimisticError(w, err)
|
||||
return
|
||||
}
|
||||
blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
|
||||
@@ -1331,7 +1348,7 @@ func (s *Server) GetCommittees(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
|
||||
helpers.HandleIsOptimisticError(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1512,7 +1529,7 @@ func (s *Server) GetFinalityCheckpoints(w http.ResponseWriter, r *http.Request)
|
||||
}
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
|
||||
helpers.HandleIsOptimisticError(w, err)
|
||||
return
|
||||
}
|
||||
blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
|
||||
@@ -1627,6 +1644,8 @@ func (s *Server) broadcastSeenBlockSidecars(
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Broadcast blob sidecars with forkchoice checking
|
||||
for _, sc := range scs {
|
||||
r, err := sc.SignedBlockHeader.Header.HashTreeRoot()
|
||||
if err != nil {
|
||||
@@ -1686,7 +1705,7 @@ func (s *Server) GetPendingConsolidations(w http.ResponseWriter, r *http.Request
|
||||
} else {
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
|
||||
helpers.HandleIsOptimisticError(w, err)
|
||||
return
|
||||
}
|
||||
blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
|
||||
@@ -1742,7 +1761,7 @@ func (s *Server) GetPendingDeposits(w http.ResponseWriter, r *http.Request) {
|
||||
} else {
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
|
||||
helpers.HandleIsOptimisticError(w, err)
|
||||
return
|
||||
}
|
||||
blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
|
||||
@@ -1798,7 +1817,7 @@ func (s *Server) GetPendingPartialWithdrawals(w http.ResponseWriter, r *http.Req
|
||||
} else {
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
|
||||
helpers.HandleIsOptimisticError(w, err)
|
||||
return
|
||||
}
|
||||
blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
|
||||
@@ -1851,7 +1870,7 @@ func (s *Server) GetProposerLookahead(w http.ResponseWriter, r *http.Request) {
|
||||
} else {
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
|
||||
helpers.HandleIsOptimisticError(w, err)
|
||||
return
|
||||
}
|
||||
blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
|
||||
|
||||
35
beacon-chain/rpc/eth/beacon/handlers_equivocation_test.go
Normal file
@@ -0,0 +1,35 @@
|
||||
package beacon
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/api/server/structs"
|
||||
rpctesting "github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/eth/shared/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
)
|
||||
|
||||
// TestBlocks_NewSignedBeaconBlock_EquivocationFix tests that blocks.NewSignedBeaconBlock
|
||||
// correctly handles the fixed case where genericBlock.Block is passed instead of genericBlock
|
||||
func TestBlocks_NewSignedBeaconBlock_EquivocationFix(t *testing.T) {
|
||||
// Parse the Phase0 JSON block
|
||||
var block structs.SignedBeaconBlock
|
||||
err := json.Unmarshal([]byte(rpctesting.Phase0Block), &block)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Convert to generic format
|
||||
genericBlock, err := block.ToGeneric()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Test the FIX: pass genericBlock.Block instead of genericBlock
|
||||
// This is what our fix changed in handlers.go lines 704 and 858
|
||||
_, err = blocks.NewSignedBeaconBlock(genericBlock.Block)
|
||||
require.NoError(t, err, "NewSignedBeaconBlock should work with genericBlock.Block")
|
||||
|
||||
// Test the BROKEN version: pass genericBlock directly (this should fail)
|
||||
_, err = blocks.NewSignedBeaconBlock(genericBlock)
|
||||
if err == nil {
|
||||
t.Errorf("NewSignedBeaconBlock should fail with whole genericBlock but succeeded")
|
||||
}
|
||||
}
|
||||
@@ -56,7 +56,7 @@ func (s *Server) GetStateRoot(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
|
||||
helpers.HandleIsOptimisticError(w, err)
|
||||
return
|
||||
}
|
||||
blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
|
||||
@@ -125,7 +125,7 @@ func (s *Server) GetRandao(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
|
||||
helpers.HandleIsOptimisticError(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -227,7 +227,7 @@ func (s *Server) GetSyncCommittees(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
|
||||
helpers.HandleIsOptimisticError(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/api"
|
||||
"github.com/OffchainLabs/prysm/v6/api/server/structs"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
chainMock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache/depositsnapshot"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
|
||||
@@ -40,7 +41,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
GoKZG "github.com/crate-crypto/go-kzg-4844"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/pkg/errors"
|
||||
ssz "github.com/prysmaticlabs/fastssz"
|
||||
@@ -4781,25 +4781,329 @@ func TestServer_broadcastBlobSidecars(t *testing.T) {
|
||||
require.LogsContain(t, hook, "Broadcasted blob sidecar for already seen block")
|
||||
}
|
||||
|
||||
func Test_validateBlobSidecars(t *testing.T) {
|
||||
func Test_validateBlobs(t *testing.T) {
|
||||
require.NoError(t, kzg.Start())
|
||||
|
||||
blob := util.GetRandBlob(123)
|
||||
commitment := GoKZG.KZGCommitment{180, 218, 156, 194, 59, 20, 10, 189, 186, 254, 132, 93, 7, 127, 104, 172, 238, 240, 237, 70, 83, 89, 1, 152, 99, 0, 165, 65, 143, 62, 20, 215, 230, 14, 205, 95, 28, 245, 54, 25, 160, 16, 178, 31, 232, 207, 38, 85}
|
||||
proof := GoKZG.KZGProof{128, 110, 116, 170, 56, 111, 126, 87, 229, 234, 211, 42, 110, 150, 129, 206, 73, 142, 167, 243, 90, 149, 240, 240, 236, 204, 143, 182, 229, 249, 81, 27, 153, 171, 83, 70, 144, 250, 42, 1, 188, 215, 71, 235, 30, 7, 175, 86}
|
||||
// Generate proper commitment and proof for the blob
|
||||
var kzgBlob kzg.Blob
|
||||
copy(kzgBlob[:], blob[:])
|
||||
commitment, err := kzg.BlobToKZGCommitment(&kzgBlob)
|
||||
require.NoError(t, err)
|
||||
proof, err := kzg.ComputeBlobKZGProof(&kzgBlob, commitment)
|
||||
require.NoError(t, err)
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Body.BlobKzgCommitments = [][]byte{commitment[:]}
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
require.NoError(t, s.validateBlobSidecars(b, [][]byte{blob[:]}, [][]byte{proof[:]}))
|
||||
require.NoError(t, s.validateBlobs(b, [][]byte{blob[:]}, [][]byte{proof[:]}))
|
||||
|
||||
require.ErrorContains(t, "number of blobs, proofs, and commitments do not match", s.validateBlobSidecars(b, [][]byte{blob[:]}, [][]byte{}))
|
||||
require.ErrorContains(t, "number of blobs (1), proofs (0), and commitments (1) do not match", s.validateBlobs(b, [][]byte{blob[:]}, [][]byte{}))
|
||||
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
blk.Block.Body.BlobKzgCommitments = [][]byte{sk.PublicKey().Marshal()}
|
||||
b, err = blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.ErrorContains(t, "could not verify blob proof: can't verify opening proof", s.validateBlobSidecars(b, [][]byte{blob[:]}, [][]byte{proof[:]}))
|
||||
require.ErrorContains(t, "could not verify blob proofs", s.validateBlobs(b, [][]byte{blob[:]}, [][]byte{proof[:]}))
|
||||
|
||||
blobs := [][]byte{}
|
||||
commitments := [][]byte{}
|
||||
proofs := [][]byte{}
|
||||
for i := 0; i < 10; i++ {
|
||||
blobs = append(blobs, blob[:])
|
||||
commitments = append(commitments, commitment[:])
|
||||
proofs = append(proofs, proof[:])
|
||||
}
|
||||
t.Run("pre-Deneb block should return early", func(t *testing.T) {
|
||||
// Create a pre-Deneb block (e.g., Capella)
|
||||
blk := util.NewBeaconBlockCapella()
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
// Should return nil for pre-Deneb blocks regardless of blobs
|
||||
require.NoError(t, s.validateBlobs(b, [][]byte{}, [][]byte{}))
|
||||
require.NoError(t, s.validateBlobs(b, blobs[:1], proofs[:1]))
|
||||
})
|
||||
|
||||
t.Run("Deneb block with valid single blob", func(t *testing.T) {
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Body.BlobKzgCommitments = [][]byte{commitment[:]}
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
require.NoError(t, s.validateBlobs(b, [][]byte{blob[:]}, [][]byte{proof[:]}))
|
||||
})
|
||||
|
||||
t.Run("Deneb block with max blobs (6)", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 100
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Slot = 10 // Deneb slot
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:6]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
// Should pass with exactly 6 blobs
|
||||
require.NoError(t, s.validateBlobs(b, blobs[:6], proofs[:6]))
|
||||
})
|
||||
|
||||
t.Run("Deneb block exceeding max blobs", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 100
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Slot = 10 // Deneb slot
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:7]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
// Should fail with 7 blobs when max is 6
|
||||
err = s.validateBlobs(b, blobs[:7], proofs[:7])
|
||||
require.ErrorContains(t, "number of blobs over max, 7 > 6", err)
|
||||
})
|
||||
|
||||
t.Run("Electra block with valid blobs", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Set up Electra config with max 9 blobs
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 5
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
blk := util.NewBeaconBlockElectra()
|
||||
blk.Block.Slot = 160 // Electra slot (epoch 5+)
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:9]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
// Should pass with 9 blobs in Electra
|
||||
require.NoError(t, s.validateBlobs(b, blobs[:9], proofs[:9]))
|
||||
})
|
||||
|
||||
t.Run("Electra block exceeding max blobs", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Set up Electra config with max 9 blobs
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 5
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
blk := util.NewBeaconBlockElectra()
|
||||
blk.Block.Slot = 160 // Electra slot
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:10]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s := &Server{}
|
||||
// Should fail with 10 blobs when max is 9
|
||||
err = s.validateBlobs(b, blobs[:10], proofs[:10])
|
||||
require.ErrorContains(t, "number of blobs over max, 10 > 9", err)
|
||||
})
|
||||
|
||||
t.Run("Fulu block with valid cell proofs", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 5
|
||||
testCfg.FuluForkEpoch = 10
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
|
||||
testCfg.NumberOfColumns = 128 // Standard PeerDAS configuration
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
// Create Fulu block with proper cell proofs
|
||||
blk := util.NewBeaconBlockFulu()
|
||||
blk.Block.Slot = 320 // Epoch 10 (Fulu fork)
|
||||
|
||||
// Generate valid commitments and cell proofs for testing
|
||||
blobCount := 2
|
||||
commitments := make([][]byte, blobCount)
|
||||
fuluBlobs := make([][]byte, blobCount)
|
||||
var kzgBlobs []kzg.Blob
|
||||
|
||||
for i := 0; i < blobCount; i++ {
|
||||
blob := util.GetRandBlob(int64(i))
|
||||
fuluBlobs[i] = blob[:]
|
||||
var kzgBlob kzg.Blob
|
||||
copy(kzgBlob[:], blob[:])
|
||||
kzgBlobs = append(kzgBlobs, kzgBlob)
|
||||
|
||||
// Generate commitment
|
||||
commitment, err := kzg.BlobToKZGCommitment(&kzgBlob)
|
||||
require.NoError(t, err)
|
||||
commitments[i] = commitment[:]
|
||||
}
|
||||
|
||||
blk.Block.Body.BlobKzgCommitments = commitments
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Generate cell proofs for the blobs (flattened format like execution client)
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
cellProofs := make([][]byte, uint64(blobCount)*numberOfColumns)
|
||||
for blobIdx := 0; blobIdx < blobCount; blobIdx++ {
|
||||
cellsAndProofs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlobs[blobIdx])
|
||||
require.NoError(t, err)
|
||||
|
||||
for colIdx := uint64(0); colIdx < numberOfColumns; colIdx++ {
|
||||
cellProofIdx := uint64(blobIdx)*numberOfColumns + colIdx
|
||||
cellProofs[cellProofIdx] = cellsAndProofs.Proofs[colIdx][:]
|
||||
}
|
||||
}
|
||||
|
||||
s := &Server{}
|
||||
// Should use cell batch verification for Fulu blocks
|
||||
require.NoError(t, s.validateBlobs(b, fuluBlobs, cellProofs))
|
||||
})
|
||||
|
||||
t.Run("Fulu block with invalid cell proof count", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 5
|
||||
testCfg.FuluForkEpoch = 10
|
||||
testCfg.NumberOfColumns = 128
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
blk := util.NewBeaconBlockFulu()
|
||||
blk.Block.Slot = 320 // Epoch 10 (Fulu fork)
|
||||
|
||||
// Create valid commitments but wrong number of cell proofs
|
||||
blobCount := 2
|
||||
commitments := make([][]byte, blobCount)
|
||||
fuluBlobs := make([][]byte, blobCount)
|
||||
for i := 0; i < blobCount; i++ {
|
||||
blob := util.GetRandBlob(int64(i))
|
||||
fuluBlobs[i] = blob[:]
|
||||
|
||||
var kzgBlob kzg.Blob
|
||||
copy(kzgBlob[:], blob[:])
|
||||
commitment, err := kzg.BlobToKZGCommitment(&kzgBlob)
|
||||
require.NoError(t, err)
|
||||
commitments[i] = commitment[:]
|
||||
}
|
||||
|
||||
blk.Block.Body.BlobKzgCommitments = commitments
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Wrong number of cell proofs (should be blobCount * numberOfColumns)
|
||||
wrongCellProofs := make([][]byte, 10) // Too few proofs
|
||||
|
||||
s := &Server{}
|
||||
err = s.validateBlobs(b, fuluBlobs, wrongCellProofs)
|
||||
require.ErrorContains(t, "do not match", err)
|
||||
})
|
||||
|
||||
t.Run("Deneb block with invalid blob proof", func(t *testing.T) {
|
||||
blob := util.GetRandBlob(123)
|
||||
invalidProof := make([]byte, 48) // All zeros - invalid proof
|
||||
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Body.BlobKzgCommitments = [][]byte{sk.PublicKey().Marshal()}
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := &Server{}
|
||||
err = s.validateBlobs(b, [][]byte{blob[:]}, [][]byte{invalidProof})
|
||||
require.ErrorContains(t, "could not verify blob proofs", err)
|
||||
})
|
||||
|
||||
t.Run("empty blobs and proofs should pass", func(t *testing.T) {
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Body.BlobKzgCommitments = [][]byte{}
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := &Server{}
|
||||
require.NoError(t, s.validateBlobs(b, [][]byte{}, [][]byte{}))
|
||||
})
|
||||
|
||||
t.Run("BlobSchedule with progressive increases (BPO)", func(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
defer params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Set up config with BlobSchedule (BPO - Blob Parameter Only forks)
|
||||
testCfg := params.BeaconConfig().Copy()
|
||||
testCfg.DenebForkEpoch = 0
|
||||
testCfg.ElectraForkEpoch = 100
|
||||
testCfg.FuluForkEpoch = 200
|
||||
testCfg.DeprecatedMaxBlobsPerBlock = 6
|
||||
testCfg.DeprecatedMaxBlobsPerBlockElectra = 9
|
||||
// Define blob schedule with progressive increases
|
||||
testCfg.BlobSchedule = []params.BlobScheduleEntry{
|
||||
{Epoch: 0, MaxBlobsPerBlock: 3}, // Start with 3 blobs
|
||||
{Epoch: 10, MaxBlobsPerBlock: 5}, // Increase to 5 at epoch 10
|
||||
{Epoch: 20, MaxBlobsPerBlock: 7}, // Increase to 7 at epoch 20
|
||||
{Epoch: 30, MaxBlobsPerBlock: 9}, // Increase to 9 at epoch 30
|
||||
}
|
||||
params.OverrideBeaconConfig(testCfg)
|
||||
|
||||
s := &Server{}
|
||||
|
||||
// Test epoch 0-9: max 3 blobs
|
||||
t.Run("epoch 0-9: max 3 blobs", func(t *testing.T) {
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Slot = 5 // Epoch 0
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:3]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.validateBlobs(b, blobs[:3], proofs[:3]))
|
||||
|
||||
// Should fail with 4 blobs
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:4]
|
||||
b, err = blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
err = s.validateBlobs(b, blobs[:4], proofs[:4])
|
||||
require.ErrorContains(t, "number of blobs over max, 4 > 3", err)
|
||||
})
|
||||
|
||||
// Test epoch 30+: max 9 blobs
|
||||
t.Run("epoch 30+: max 9 blobs", func(t *testing.T) {
|
||||
blk := util.NewBeaconBlockDeneb()
|
||||
blk.Block.Slot = 960 // Epoch 30
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:9]
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.validateBlobs(b, blobs[:9], proofs[:9]))
|
||||
|
||||
// Should fail with 10 blobs
|
||||
blk.Block.Body.BlobKzgCommitments = commitments[:10]
|
||||
b, err = blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
err = s.validateBlobs(b, blobs[:10], proofs[:10])
|
||||
require.ErrorContains(t, "number of blobs over max, 10 > 9", err)
|
||||
})
|
||||
})
|
||||
}
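The BPO subtest above depends on the blob cap tracking whichever BlobSchedule entry is in force at the block's epoch. A minimal, self-contained sketch of that lookup, illustrative only: it is not Prysm's MaxBlobsPerBlock implementation, and it assumes the schedule is sorted by epoch.

package main

import "fmt"

type blobScheduleEntry struct {
	Epoch            uint64
	MaxBlobsPerBlock int
}

// maxBlobsAt returns the cap from the last schedule entry at or before epoch e,
// falling back to def when no entry applies.
func maxBlobsAt(schedule []blobScheduleEntry, e uint64, def int) int {
	limit := def
	for _, entry := range schedule {
		if entry.Epoch <= e {
			limit = entry.MaxBlobsPerBlock
		}
	}
	return limit
}

func main() {
	schedule := []blobScheduleEntry{
		{Epoch: 0, MaxBlobsPerBlock: 3},
		{Epoch: 10, MaxBlobsPerBlock: 5},
		{Epoch: 20, MaxBlobsPerBlock: 7},
		{Epoch: 30, MaxBlobsPerBlock: 9},
	}
	fmt.Println(maxBlobsAt(schedule, 0, 6))  // 3
	fmt.Println(maxBlobsAt(schedule, 15, 6)) // 5
	fmt.Println(maxBlobsAt(schedule, 30, 6)) // 9
}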
|
||||
|
||||
func TestGetPendingConsolidations(t *testing.T) {
|
||||
|
||||
@@ -44,7 +44,7 @@ func (s *Server) GetValidators(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
|
||||
helpers.HandleIsOptimisticError(w, err)
|
||||
return
|
||||
}
|
||||
blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
|
||||
@@ -222,7 +222,7 @@ func (s *Server) GetValidator(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
|
||||
helpers.HandleIsOptimisticError(w, err)
|
||||
return
|
||||
}
|
||||
blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
|
||||
@@ -258,7 +258,7 @@ func (s *Server) GetValidatorBalances(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
|
||||
helpers.HandleIsOptimisticError(w, err)
|
||||
return
|
||||
}
|
||||
blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
|
||||
@@ -419,7 +419,7 @@ func (s *Server) getValidatorIdentitiesJSON(
|
||||
) {
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateId), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
|
||||
helpers.HandleIsOptimisticError(w, err)
|
||||
return
|
||||
}
|
||||
blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
|
||||
|
||||
@@ -46,7 +46,7 @@ func (s *Server) getBeaconStateV2(ctx context.Context, w http.ResponseWriter, id
|
||||
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, id, s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not check if state is optimistic: "+err.Error(), http.StatusInternalServerError)
|
||||
helpers.HandleIsOptimisticError(w, err)
|
||||
return
|
||||
}
|
||||
blockRoot, err := st.LatestBlockHeader().HashTreeRoot()
|
||||
|
||||
@@ -12,6 +12,7 @@ go_library(
|
||||
deps = [
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/rpc/eth/shared:go_default_library",
|
||||
"//beacon-chain/rpc/lookup:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
@@ -21,6 +22,7 @@ go_library(
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/validator:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//network/httputil:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
@@ -40,6 +42,7 @@ go_test(
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
|
||||
"//beacon-chain/rpc/lookup:go_default_library",
|
||||
"//beacon-chain/rpc/testutil:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
@@ -57,5 +60,6 @@ go_test(
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -2,11 +2,14 @@ package helpers
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/http"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/eth/shared"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/lookup"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state/stategen"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/network/httputil"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
@@ -28,6 +31,22 @@ func PrepareStateFetchGRPCError(err error) error {
|
||||
return status.Errorf(codes.Internal, "Invalid state ID: %v", err)
|
||||
}
|
||||
|
||||
// HandleIsOptimisticError handles errors from IsOptimistic function calls and writes appropriate HTTP responses.
|
||||
func HandleIsOptimisticError(w http.ResponseWriter, err error) {
|
||||
var fetchErr *lookup.FetchStateError
|
||||
if errors.As(err, &fetchErr) {
|
||||
shared.WriteStateFetchError(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
var blockRootsNotFoundErr *lookup.BlockRootsNotFoundError
|
||||
if errors.As(err, &blockRootsNotFoundErr) {
|
||||
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
// IndexedVerificationFailure represents a collection of verification failures.
|
||||
type IndexedVerificationFailure struct {
|
||||
Failures []*SingleIndexedVerificationFailure `json:"failures"`
|
||||
|
||||
@@ -56,7 +56,8 @@ func IsOptimistic(
|
||||
if bytesutil.IsHex(stateId) {
|
||||
id, err := hexutil.Decode(stateIdString)
|
||||
if err != nil {
|
||||
return false, err
|
||||
e := lookup.NewStateIdParseError(err)
|
||||
return false, &e
|
||||
}
|
||||
return isStateRootOptimistic(ctx, id, optimisticModeFetcher, stateFetcher, chainInfo, database)
|
||||
} else if len(stateId) == 32 {
|
||||
@@ -127,7 +128,7 @@ func isStateRootOptimistic(
|
||||
) (bool, error) {
|
||||
st, err := stateFetcher.State(ctx, stateId)
|
||||
if err != nil {
|
||||
return true, errors.Wrap(err, "could not fetch state")
|
||||
return true, lookup.NewFetchStateError(err)
|
||||
}
|
||||
if st.Slot() == chainInfo.HeadSlot() {
|
||||
return optimisticModeFetcher.IsOptimistic(ctx)
|
||||
@@ -137,7 +138,7 @@ func isStateRootOptimistic(
|
||||
return true, errors.Wrapf(err, "could not get block roots for slot %d", st.Slot())
|
||||
}
|
||||
if !has {
|
||||
return true, errors.New("no block roots returned from the database")
|
||||
return true, lookup.NewBlockRootsNotFoundError()
|
||||
}
|
||||
for _, r := range roots {
|
||||
b, err := database.Block(ctx, r)
|
||||
|
||||
@@ -1,12 +1,15 @@
|
||||
package helpers
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
chainmock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
dbtest "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
doublylinkedtree "github.com/OffchainLabs/prysm/v6/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/lookup"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/testutil"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
|
||||
state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
|
||||
@@ -21,6 +24,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func TestIsOptimistic(t *testing.T) {
|
||||
@@ -226,7 +230,67 @@ func TestIsOptimistic(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, o)
|
||||
})
|
||||
t.Run("State not found", func(t *testing.T) {
|
||||
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
|
||||
require.NoError(t, err)
|
||||
b.SetStateRoot(bytesutil.PadTo([]byte("root"), 32))
|
||||
db := dbtest.SetupDB(t)
|
||||
require.NoError(t, db.SaveBlock(ctx, b))
|
||||
chainSt, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, chainSt.SetSlot(fieldparams.SlotsPerEpoch))
|
||||
bRoot, err := b.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
cs := &chainmock.ChainService{State: chainSt, OptimisticRoots: map[[32]byte]bool{bRoot: true}}
|
||||
mf := &testutil.MockStater{
|
||||
CustomError: lookup.NewFetchStateError(nil),
|
||||
}
|
||||
_, err = IsOptimistic(ctx, []byte(hexutil.Encode(bytesutil.PadTo([]byte("root"), 32))), cs, mf, cs, db)
|
||||
var fetchErr *lookup.FetchStateError
|
||||
require.Equal(t, true, errors.As(err, &fetchErr))
|
||||
})
|
||||
|
||||
t.Run("stateId invalid", func(t *testing.T) {
|
||||
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
|
||||
require.NoError(t, err)
|
||||
b.SetStateRoot(bytesutil.PadTo([]byte("root"), 32))
|
||||
db := dbtest.SetupDB(t)
|
||||
require.NoError(t, db.SaveBlock(ctx, b))
|
||||
chainSt, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, chainSt.SetSlot(fieldparams.SlotsPerEpoch))
|
||||
bRoot, err := b.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
cs := &chainmock.ChainService{State: chainSt, OptimisticRoots: map[[32]byte]bool{bRoot: true}}
|
||||
mf := &testutil.MockStater{
|
||||
CustomError: lookup.NewFetchStateError(nil),
|
||||
}
|
||||
_, err = IsOptimistic(ctx, []byte("0xabc"), cs, mf, cs, db)
|
||||
var fetchErr *lookup.FetchStateError
|
||||
require.Equal(t, false, errors.As(err, &fetchErr))
|
||||
})
|
||||
t.Run("block roots not found", func(t *testing.T) {
|
||||
b, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
|
||||
require.NoError(t, err)
|
||||
b.SetStateRoot(bytesutil.PadTo([]byte("root"), 32))
|
||||
db := dbtest.SetupDB(t)
|
||||
require.NoError(t, db.SaveBlock(ctx, b))
|
||||
chainSt, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, chainSt.SetSlot(fieldparams.SlotsPerEpoch))
|
||||
bRoot, err := b.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
cs := &chainmock.ChainService{State: chainSt, OptimisticRoots: map[[32]byte]bool{bRoot: true}}
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, st.SetSlot(primitives.Slot(fieldparams.SlotsPerEpoch+1)))
|
||||
require.NoError(t, err)
|
||||
mf := &testutil.MockStater{BeaconState: st}
|
||||
_, err = IsOptimistic(ctx, []byte(hexutil.Encode(bytesutil.PadTo([]byte("root"), 32))), cs, mf, cs, db)
|
||||
var blockRootsNotFoundErr *lookup.BlockRootsNotFoundError
|
||||
require.Equal(t, true, errors.As(err, &blockRootsNotFoundErr))
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("slot", func(t *testing.T) {
|
||||
t.Run("head is not optimistic", func(t *testing.T) {
|
||||
cs := &chainmock.ChainService{Optimistic: false}
|
||||
@@ -319,6 +383,36 @@ func TestIsOptimistic(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestHandleIsOptimisticError(t *testing.T) {
|
||||
t.Run("fetch-state error handled as 404", func(t *testing.T) {
|
||||
rr := httptest.NewRecorder()
|
||||
notFoundErr := lookup.StateNotFoundError{}
|
||||
fetchErr := lookup.NewFetchStateError(¬FoundErr)
|
||||
HandleIsOptimisticError(rr, fetchErr)
|
||||
|
||||
require.Equal(t, http.StatusNotFound, rr.Code)
|
||||
require.StringContains(t, notFoundErr.Error(), rr.Body.String())
|
||||
})
|
||||
t.Run("no block roots error handled as 404", func(t *testing.T) {
|
||||
rr := httptest.NewRecorder()
|
||||
blockRootsErr := lookup.NewBlockRootsNotFoundError()
|
||||
HandleIsOptimisticError(rr, blockRootsErr)
|
||||
|
||||
require.Equal(t, http.StatusNotFound, rr.Code)
|
||||
require.StringContains(t, blockRootsErr.Error(), rr.Body.String())
|
||||
})
|
||||
|
||||
t.Run("generic error handled as 500", func(t *testing.T) {
|
||||
rr := httptest.NewRecorder()
|
||||
genericErr := errors.New("boom")
|
||||
|
||||
HandleIsOptimisticError(rr, genericErr)
|
||||
|
||||
require.Equal(t, http.StatusInternalServerError, rr.Code)
|
||||
require.StringContains(t, "Could not check optimistic status: boom", rr.Body.String())
|
||||
})
|
||||
}
|
||||
|
||||
// prepareForkchoiceState prepares a beacon state with the given data to mock
|
||||
// insert into forkchoice
|
||||
func prepareForkchoiceState(
|
||||
|
||||
@@ -23,6 +23,20 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type BlockRootsNotFoundError struct {
|
||||
message string
|
||||
}
|
||||
|
||||
func NewBlockRootsNotFoundError() *BlockRootsNotFoundError {
|
||||
return &BlockRootsNotFoundError{
|
||||
message: "no block roots returned from the database",
|
||||
}
|
||||
}
|
||||
|
||||
func (e BlockRootsNotFoundError) Error() string {
|
||||
return e.message
|
||||
}
|
||||
|
||||
// BlockIdParseError represents an error scenario where a block ID could not be parsed.
|
||||
type BlockIdParseError struct {
|
||||
message string
|
||||
@@ -341,7 +355,7 @@ func (p *BeaconDbBlocker) blobsFromStoredDataColumns(block blocks.ROBlock, indic
|
||||
stored := summary.Stored()
|
||||
count := uint64(len(stored))
|
||||
|
||||
if count < peerdas.MinimumColumnsCountToReconstruct() {
|
||||
if count < peerdas.MinimumColumnCountToReconstruct() {
|
||||
// There is no way to reconstruct the data columns.
|
||||
return nil, &core.RpcError{
|
||||
Err: errors.Errorf("the node does not custody enough data columns to reconstruct blobs - please start the beacon node with the `--%s` flag to ensure this call succeeds, or retry later if it is already the case", flags.SubscribeAllDataSubnets.Name),
|
||||
|
||||
@@ -443,7 +443,7 @@ func TestGetBlob(t *testing.T) {
|
||||
setupFulu(t)
|
||||
|
||||
_, dataColumnStorage := filesystem.NewEphemeralDataColumnStorageAndFs(t)
|
||||
err = dataColumnStorage.Save(verifiedRoDataColumnSidecars[1 : peerdas.MinimumColumnsCountToReconstruct()+1])
|
||||
err = dataColumnStorage.Save(verifiedRoDataColumnSidecars[1 : peerdas.MinimumColumnCountToReconstruct()+1])
|
||||
require.NoError(t, err)
|
||||
|
||||
blocker := &BeaconDbBlocker{
|
||||
|
||||
@@ -21,6 +21,27 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type FetchStateError struct {
|
||||
message string
|
||||
cause error
|
||||
}
|
||||
|
||||
func NewFetchStateError(cause error) *FetchStateError {
|
||||
return &FetchStateError{
|
||||
message: "could not fetch state",
|
||||
cause: cause,
|
||||
}
|
||||
}
|
||||
|
||||
func (e *FetchStateError) Error() string {
|
||||
if e.cause != nil {
|
||||
return e.message + ": " + e.cause.Error()
|
||||
}
|
||||
return e.message
|
||||
}
|
||||
|
||||
func (e *FetchStateError) Unwrap() error { return e.cause }
|
||||
|
||||
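A minimal sketch (using the standard library errors package) of how callers can match and unwrap the new error type; the wrapped cause below is a hypothetical example.

err := lookup.NewFetchStateError(errors.New("state not found for the requested root"))
var fetchErr *lookup.FetchStateError
if errors.As(err, &fetchErr) {
	cause := errors.Unwrap(fetchErr) // the original error passed to NewFetchStateError
	_ = cause
}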
// StateIdParseError represents an error scenario where a state ID could not be parsed.
|
||||
type StateIdParseError struct {
|
||||
message string
|
||||
|
||||
@@ -56,11 +56,7 @@ func (s *Server) GetValidatorCount(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
isOptimistic, err := helpers.IsOptimistic(ctx, []byte(stateID), s.OptimisticModeFetcher, s.Stater, s.ChainInfoFetcher, s.BeaconDB)
|
||||
if err != nil {
|
||||
errJson := &httputil.DefaultJsonError{
|
||||
Message: fmt.Sprintf("could not check if slot's block is optimistic: %v", err),
|
||||
Code: http.StatusInternalServerError,
|
||||
}
|
||||
httputil.WriteError(w, errJson)
|
||||
helpers.HandleIsOptimisticError(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
# gazelle:ignore
|
||||
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
@@ -37,6 +39,7 @@ go_library(
|
||||
"//api/client/builder:go_default_library",
|
||||
"//async/event:go_default_library",
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/builder:go_default_library",
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/cache/depositsnapshot:go_default_library",
|
||||
@@ -47,6 +50,7 @@ go_library(
|
||||
"//beacon-chain/core/feed/operation:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
@@ -63,8 +67,8 @@ go_library(
|
||||
"//beacon-chain/rpc/core:go_default_library",
|
||||
"//beacon-chain/startup:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
"//beacon-chain/sync:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
@@ -81,7 +85,7 @@ go_library(
|
||||
"//crypto/rand:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//encoding/ssz:go_default_library",
|
||||
"//genesis:go_default_library",
|
||||
"//genesis:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//monitoring/tracing:go_default_library",
|
||||
"//monitoring/tracing/trace:go_default_library",
|
||||
@@ -181,7 +185,6 @@ common_deps = [
|
||||
"@org_golang_google_protobuf//types/known/emptypb:go_default_library",
|
||||
]
|
||||
|
||||
# gazelle:ignore
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
timeout = "moderate",
|
||||
|
||||
@@ -248,6 +248,8 @@ func (vs *Server) sendBlocks(stream ethpb.BeaconNodeValidator_StreamBlocksAltair
|
||||
b.Block = ðpb.StreamBlocksResponse_DenebBlock{DenebBlock: p}
|
||||
case *ethpb.SignedBeaconBlockElectra:
|
||||
b.Block = ðpb.StreamBlocksResponse_ElectraBlock{ElectraBlock: p}
|
||||
case *ethpb.SignedBeaconBlockFulu:
|
||||
b.Block = ðpb.StreamBlocksResponse_FuluBlock{FuluBlock: p}
|
||||
default:
|
||||
log.Errorf("Unknown block type %T", p)
|
||||
}
|
||||
|
||||
@@ -381,3 +381,92 @@ func TestServer_StreamSlotsVerified_OnHeadUpdated(t *testing.T) {
|
||||
}
|
||||
<-exitRoutine
|
||||
}
|
||||
|
||||
func TestServer_StreamBlocksVerified_FuluBlock(t *testing.T) {
|
||||
db := dbTest.SetupDB(t)
|
||||
ctx := t.Context()
|
||||
beaconState, privs := util.DeterministicGenesisStateFulu(t, 32)
|
||||
c, err := altair.NextSyncCommittee(ctx, beaconState)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconState.SetCurrentSyncCommittee(c))
|
||||
|
||||
b, err := util.GenerateFullBlockFulu(beaconState, privs, util.DefaultBlockGenConfig(), 1)
|
||||
require.NoError(t, err)
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wrappedBlk := util.SaveBlock(t, ctx, db, b)
|
||||
chainService := &chainMock.ChainService{State: beaconState}
|
||||
server := &Server{
|
||||
Ctx: ctx,
|
||||
StateNotifier: chainService.StateNotifier(),
|
||||
HeadFetcher: chainService,
|
||||
}
|
||||
exitRoutine := make(chan bool)
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
mockStream := mock.NewMockBeaconNodeValidatorAltair_StreamBlocksServer(ctrl)
|
||||
mockStream.EXPECT().Send(ðpb.StreamBlocksResponse{Block: ðpb.StreamBlocksResponse_FuluBlock{FuluBlock: b}}).Do(func(arg0 interface{}) {
|
||||
exitRoutine <- true
|
||||
})
|
||||
mockStream.EXPECT().Context().Return(ctx).AnyTimes()
|
||||
|
||||
go func(tt *testing.T) {
|
||||
err := server.StreamBlocksAltair(ðpb.StreamBlocksRequest{VerifiedOnly: true}, mockStream)
|
||||
if s, _ := status.FromError(err); s.Code() != codes.Canceled {
|
||||
assert.NoError(tt, err)
|
||||
}
|
||||
}(t)
|
||||
// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
|
||||
for sent := 0; sent == 0; {
|
||||
sent = server.StateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{Slot: b.Block.Slot, BlockRoot: r, SignedBlock: wrappedBlk},
|
||||
})
|
||||
}
|
||||
<-exitRoutine
|
||||
}
|
||||
|
||||
func TestServer_StreamBlocks_FuluBlock(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.OverrideBeaconConfig(params.BeaconConfig())
|
||||
ctx := t.Context()
|
||||
beaconState, privs := util.DeterministicGenesisStateFulu(t, 64)
|
||||
c, err := altair.NextSyncCommittee(ctx, beaconState)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconState.SetCurrentSyncCommittee(c))
|
||||
|
||||
b, err := util.GenerateFullBlockFulu(beaconState, privs, util.DefaultBlockGenConfig(), 1)
|
||||
require.NoError(t, err)
|
||||
chainService := &chainMock.ChainService{State: beaconState}
|
||||
server := &Server{
|
||||
Ctx: ctx,
|
||||
BlockNotifier: chainService.BlockNotifier(),
|
||||
HeadFetcher: chainService,
|
||||
}
|
||||
exitRoutine := make(chan bool)
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
mockStream := mock.NewMockBeaconNodeValidatorAltair_StreamBlocksServer(ctrl)
|
||||
|
||||
mockStream.EXPECT().Send(ðpb.StreamBlocksResponse{Block: ðpb.StreamBlocksResponse_FuluBlock{FuluBlock: b}}).Do(func(arg0 interface{}) {
|
||||
exitRoutine <- true
|
||||
})
|
||||
mockStream.EXPECT().Context().Return(ctx).AnyTimes()
|
||||
|
||||
go func(tt *testing.T) {
|
||||
err := server.StreamBlocksAltair(ðpb.StreamBlocksRequest{}, mockStream)
|
||||
if s, _ := status.FromError(err); s.Code() != codes.Canceled {
|
||||
assert.NoError(tt, err)
|
||||
}
|
||||
}(t)
|
||||
wrappedBlk, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the block feed).
|
||||
for sent := 0; sent == 0; {
|
||||
sent = server.BlockNotifier.BlockFeed().Send(&feed.Event{
|
||||
Type: blockfeed.ReceivedBlock,
|
||||
Data: &blockfeed.ReceivedBlockData{SignedBlock: wrappedBlk},
|
||||
})
|
||||
}
|
||||
<-exitRoutine
|
||||
}
|
||||
|
||||
@@ -29,12 +29,19 @@ func TestConstructGenericBeaconBlock(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
r1, err := eb.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
result, err := vs.constructGenericBeaconBlock(b, nil, primitives.ZeroWei())
|
||||
bundle := &enginev1.BlobsBundleV2{
|
||||
KzgCommitments: [][]byte{{1, 2, 3}},
|
||||
Proofs: [][]byte{{4, 5, 6}},
|
||||
Blobs: [][]byte{{7, 8, 9}},
|
||||
}
|
||||
result, err := vs.constructGenericBeaconBlock(b, bundle, primitives.ZeroWei())
|
||||
require.NoError(t, err)
|
||||
r2, err := result.GetFulu().Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, r1, r2)
|
||||
require.Equal(t, result.IsBlinded, false)
|
||||
require.DeepEqual(t, bundle.Blobs, result.GetFulu().GetBlobs())
|
||||
require.DeepEqual(t, bundle.Proofs, result.GetFulu().GetKzgProofs())
|
||||
})
|
||||
|
||||
// Test for Electra version
|
||||
|
||||
@@ -15,9 +15,12 @@ import (
|
||||
blockfeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/block"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/operation"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/kv"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v6/config/features"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
@@ -58,28 +61,31 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not convert slot to time")
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": req.Slot,
|
||||
"sinceSlotStartTime": time.Since(t),
|
||||
}).Info("Begin building block")
|
||||
|
||||
log := log.WithField("slot", req.Slot)
|
||||
log.WithField("sinceSlotStartTime", time.Since(t)).Info("Begin building block")
|
||||
|
||||
// A syncing validator should not produce a block.
|
||||
if vs.SyncChecker.Syncing() {
|
||||
log.Error("Fail to build block: node is syncing")
|
||||
return nil, status.Error(codes.Unavailable, "Syncing to latest head, not ready to respond")
|
||||
}
|
||||
// An optimistic validator MUST NOT produce a block (i.e., sign across the DOMAIN_BEACON_PROPOSER domain).
|
||||
if slots.ToEpoch(req.Slot) >= params.BeaconConfig().BellatrixForkEpoch {
|
||||
if err := vs.optimisticStatus(ctx); err != nil {
|
||||
log.WithError(err).Error("Fail to build block: node is optimistic")
|
||||
return nil, status.Errorf(codes.Unavailable, "Validator is not ready to propose: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
head, parentRoot, err := vs.getParentState(ctx, req.Slot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Fail to build block: could not get parent state")
|
||||
return nil, err
|
||||
}
|
||||
sBlk, err := getEmptyBlock(req.Slot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Fail to build block: could not get empty block")
|
||||
return nil, status.Errorf(codes.Internal, "Could not prepare block: %v", err)
|
||||
}
|
||||
// Set slot, graffiti, randao reveal, and parent root.
|
||||
@@ -101,8 +107,7 @@ func (vs *Server) GetBeaconBlock(ctx context.Context, req *ethpb.BlockRequest) (
|
||||
}
|
||||
|
||||
resp, err := vs.BuildBlockParallel(ctx, sBlk, head, req.SkipMevBoost, builderBoostFactor)
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"slot": req.Slot,
|
||||
log = log.WithFields(logrus.Fields{
|
||||
"sinceSlotStartTime": time.Since(t),
|
||||
"validator": sBlk.Block().ProposerIndex(),
|
||||
})
|
||||
@@ -275,6 +280,11 @@ func (vs *Server) BuildBlockParallel(ctx context.Context, sBlk interfaces.Signed
|
||||
//
|
||||
// ProposeBeaconBlock handles the proposal of beacon blocks.
|
||||
func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSignedBeaconBlock) (*ethpb.ProposeResponse, error) {
|
||||
var (
|
||||
blobSidecars []*ethpb.BlobSidecar
|
||||
dataColumnSidecars []*ethpb.DataColumnSidecar
|
||||
)
|
||||
|
||||
ctx, span := trace.StartSpan(ctx, "ProposerServer.ProposeBeaconBlock")
|
||||
defer span.End()
|
||||
|
||||
@@ -287,24 +297,31 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
|
||||
return nil, status.Errorf(codes.InvalidArgument, "%s: %v", "decode block failed", err)
|
||||
}
|
||||
|
||||
var sidecars []*ethpb.BlobSidecar
|
||||
if block.IsBlinded() {
|
||||
block, sidecars, err = vs.handleBlindedBlock(ctx, block)
|
||||
} else if block.Version() >= version.Deneb {
|
||||
sidecars, err = vs.blobSidecarsFromUnblindedBlock(block, req)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "%s: %v", "handle block failed", err)
|
||||
}
|
||||
|
||||
root, err := block.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not hash tree root: %v", err)
|
||||
}
|
||||
|
||||
// For post-Fulu blinded blocks, submit to relay and return early
|
||||
if block.IsBlinded() && slots.ToEpoch(block.Block().Slot()) >= params.BeaconConfig().FuluForkEpoch {
|
||||
err := vs.BlockBuilder.SubmitBlindedBlockPostFulu(ctx, block)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not submit blinded block post-Fulu: %v", err)
|
||||
}
|
||||
return ðpb.ProposeResponse{BlockRoot: root[:]}, nil
|
||||
}
|
||||
|
||||
if block.IsBlinded() {
|
||||
block, blobSidecars, err = vs.handleBlindedBlock(ctx, block)
|
||||
} else if block.Version() >= version.Deneb {
|
||||
blobSidecars, dataColumnSidecars, err = vs.handleUnblindedBlock(block, req)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "%s: %v", "handle block failed", err)
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
errChan := make(chan error, 1)
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
@@ -315,10 +332,9 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
|
||||
errChan <- nil
|
||||
}()
|
||||
|
||||
if err := vs.broadcastAndReceiveBlobs(ctx, sidecars, root); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive blobs: %v", err)
|
||||
if err := vs.broadcastAndReceiveSidecars(ctx, block, root, blobSidecars, dataColumnSidecars); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive sidecars: %v", err)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
if err := <-errChan; err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "Could not broadcast/receive block: %v", err)
|
||||
@@ -327,11 +343,35 @@ func (vs *Server) ProposeBeaconBlock(ctx context.Context, req *ethpb.GenericSign
|
||||
return ðpb.ProposeResponse{BlockRoot: root[:]}, nil
|
||||
}
|
||||
|
||||
// handleBlindedBlock processes blinded beacon blocks.
|
||||
// broadcastAndReceiveSidecars broadcasts and receives sidecars.
|
||||
func (vs *Server) broadcastAndReceiveSidecars(
|
||||
ctx context.Context,
|
||||
block interfaces.SignedBeaconBlock,
|
||||
root [fieldparams.RootLength]byte,
|
||||
blobSidecars []*ethpb.BlobSidecar,
|
||||
dataColumnSideCars []*ethpb.DataColumnSidecar,
|
||||
) error {
|
||||
if block.Version() >= version.Fulu {
|
||||
if err := vs.broadcastAndReceiveDataColumns(ctx, dataColumnSideCars, root, block.Block().Slot()); err != nil {
|
||||
return errors.Wrap(err, "broadcast and receive data columns")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := vs.broadcastAndReceiveBlobs(ctx, blobSidecars, root); err != nil {
|
||||
return errors.Wrap(err, "broadcast and receive blobs")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// handleBlindedBlock processes blinded beacon blocks (pre-Fulu only).
|
||||
// Post-Fulu blinded blocks are handled directly in ProposeBeaconBlock.
|
||||
func (vs *Server) handleBlindedBlock(ctx context.Context, block interfaces.SignedBeaconBlock) (interfaces.SignedBeaconBlock, []*ethpb.BlobSidecar, error) {
|
||||
if block.Version() < version.Bellatrix {
|
||||
return nil, nil, errors.New("pre-Bellatrix blinded block")
|
||||
}
|
||||
|
||||
if vs.BlockBuilder == nil || !vs.BlockBuilder.Configured() {
|
||||
return nil, nil, errors.New("unconfigured block builder")
|
||||
}
|
||||
@@ -358,16 +398,34 @@ func (vs *Server) handleBlindedBlock(ctx context.Context, block interfaces.Signe
|
||||
return copiedBlock, sidecars, nil
|
||||
}
|
||||
|
||||
func (vs *Server) blobSidecarsFromUnblindedBlock(block interfaces.SignedBeaconBlock, req *ethpb.GenericSignedBeaconBlock) ([]*ethpb.BlobSidecar, error) {
|
||||
func (vs *Server) handleUnblindedBlock(
|
||||
block interfaces.SignedBeaconBlock,
|
||||
req *ethpb.GenericSignedBeaconBlock,
|
||||
) ([]*ethpb.BlobSidecar, []*ethpb.DataColumnSidecar, error) {
|
||||
rawBlobs, proofs, err := blobsAndProofs(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
return BuildBlobSidecars(block, rawBlobs, proofs)
|
||||
|
||||
if block.Version() >= version.Fulu {
|
||||
dataColumnSideCars, err := peerdas.ConstructDataColumnSidecars(block, rawBlobs, proofs)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "construct data column sidecars")
|
||||
}
|
||||
|
||||
return nil, dataColumnSideCars, nil
|
||||
}
|
||||
|
||||
blobSidecars, err := BuildBlobSidecars(block, rawBlobs, proofs)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "build blob sidecars")
|
||||
}
|
||||
|
||||
return blobSidecars, nil, nil
|
||||
}
|
||||
|
||||
// broadcastReceiveBlock broadcasts a block and handles its reception.
|
||||
func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, root [32]byte) error {
|
||||
func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.SignedBeaconBlock, root [fieldparams.RootLength]byte) error {
|
||||
protoBlock, err := block.Proto()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "protobuf conversion failed")
|
||||
@@ -383,18 +441,14 @@ func (vs *Server) broadcastReceiveBlock(ctx context.Context, block interfaces.Si
|
||||
}
|
||||
|
||||
// broadcastAndReceiveBlobs handles the broadcasting and reception of blob sidecars.
|
||||
func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethpb.BlobSidecar, root [32]byte) error {
|
||||
func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethpb.BlobSidecar, root [fieldparams.RootLength]byte) error {
|
||||
eg, eCtx := errgroup.WithContext(ctx)
|
||||
for i, sc := range sidecars {
|
||||
// Copy the iteration instance to a local variable to give each go-routine its own copy to play with.
|
||||
// See https://golang.org/doc/faq#closures_and_goroutines for more details.
|
||||
subIdx := i
|
||||
sCar := sc
|
||||
for subIdx, sc := range sidecars {
|
||||
eg.Go(func() error {
|
||||
if err := vs.P2P.BroadcastBlob(eCtx, uint64(subIdx), sCar); err != nil {
|
||||
if err := vs.P2P.BroadcastBlob(eCtx, uint64(subIdx), sc); err != nil {
|
||||
return errors.Wrap(err, "broadcast blob failed")
|
||||
}
|
||||
readOnlySc, err := blocks.NewROBlobWithRoot(sCar, root)
|
||||
readOnlySc, err := blocks.NewROBlobWithRoot(sc, root)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "ROBlob creation failed")
|
||||
}
|
||||
@@ -412,6 +466,69 @@ func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethp
|
||||
return eg.Wait()
|
||||
}
|
||||
|
||||
// broadcastAndReceiveDataColumns handles the broadcasting and reception of data column sidecars.
|
||||
func (vs *Server) broadcastAndReceiveDataColumns(
|
||||
ctx context.Context,
|
||||
sidecars []*ethpb.DataColumnSidecar,
|
||||
root [fieldparams.RootLength]byte,
|
||||
slot primitives.Slot,
|
||||
) error {
|
||||
dataColumnsWithholdCount := features.Get().DataColumnsWithholdCount
|
||||
verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(sidecars))
|
||||
|
||||
eg, _ := errgroup.WithContext(ctx)
|
||||
for _, sd := range sidecars {
|
||||
roDataColumn, err := blocks.NewRODataColumnWithRoot(sd, root)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "new read-only data column with root")
|
||||
}
|
||||
|
||||
// We build this block ourselves, so we can upgrade the read only data column sidecar into a verified one.
|
||||
verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
|
||||
verifiedRODataColumns = append(verifiedRODataColumns, verifiedRODataColumn)
|
||||
|
||||
// Copy the iteration instance to a local variable to give each go-routine its own copy to play with.
|
||||
// See https://golang.org/doc/faq#closures_and_goroutines for more details.
|
||||
sidecar := sd
|
||||
eg.Go(func() error {
|
||||
if sidecar.Index < dataColumnsWithholdCount {
|
||||
log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"slot": slot,
|
||||
"index": sidecar.Index,
|
||||
}).Warning("Withholding data column")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Compute the subnet index based on the column index.
|
||||
subnet := peerdas.ComputeSubnetForDataColumnSidecar(sidecar.Index)
|
||||
|
||||
if err := vs.P2P.BroadcastDataColumnSidecar(root, subnet, sidecar); err != nil {
|
||||
return errors.Wrap(err, "broadcast data column")
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if err := eg.Wait(); err != nil {
|
||||
return errors.Wrap(err, "wait for data columns to be broadcasted")
|
||||
}
|
||||
|
||||
if err := vs.DataColumnReceiver.ReceiveDataColumns(verifiedRODataColumns); err != nil {
|
||||
return errors.Wrap(err, "receive data column")
|
||||
}
|
||||
|
||||
for _, verifiedRODataColumn := range verifiedRODataColumns {
|
||||
vs.OperationNotifier.OperationFeed().Send(&feed.Event{
|
||||
Type: operation.DataColumnSidecarReceived,
|
||||
Data: &operation.DataColumnSidecarReceivedData{DataColumn: &verifiedRODataColumn}, // #nosec G601
|
||||
})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Deprecated: The gRPC API will remain the default and fully supported through v8 (expected in 2026) but will be eventually removed in favor of REST API.
|
||||
//
|
||||
// PrepareBeaconProposer caches and updates the fee recipient for the given proposer.
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/builder"
|
||||
builderTest "github.com/OffchainLabs/prysm/v6/beacon-chain/builder/testing"
|
||||
@@ -894,6 +895,9 @@ func injectSlashings(t *testing.T, st state.BeaconState, keys []bls.SecretKey, s
|
||||
}
|
||||
|
||||
func TestProposer_ProposeBlock_OK(t *testing.T) {
|
||||
// Initialize KZG for Fulu blocks
|
||||
require.NoError(t, kzg.Start())
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
block func([32]byte) *ethpb.GenericSignedBeaconBlock
|
||||
@@ -1098,6 +1102,131 @@ func TestProposer_ProposeBlock_OK(t *testing.T) {
|
||||
},
|
||||
err: "blob KZG commitments don't match number of blobs or KZG proofs",
|
||||
},
|
||||
{
|
||||
name: "fulu block no blob",
|
||||
block: func(parent [32]byte) *ethpb.GenericSignedBeaconBlock {
|
||||
sb := ðpb.SignedBeaconBlockContentsFulu{
|
||||
Block: ðpb.SignedBeaconBlockFulu{
|
||||
Block: ðpb.BeaconBlockElectra{Slot: 5, ParentRoot: parent[:], Body: util.HydrateBeaconBlockBodyElectra(ðpb.BeaconBlockBodyElectra{})},
|
||||
},
|
||||
}
|
||||
blk := ðpb.GenericSignedBeaconBlock_Fulu{Fulu: sb}
|
||||
return ðpb.GenericSignedBeaconBlock{Block: blk, IsBlinded: false}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "fulu block with single blob and cell proofs",
|
||||
block: func(parent [32]byte) *ethpb.GenericSignedBeaconBlock {
|
||||
numberOfColumns := uint64(128)
|
||||
// For Fulu, we have cell proofs (blobs * numberOfColumns)
|
||||
cellProofs := make([][]byte, numberOfColumns)
|
||||
for i := uint64(0); i < numberOfColumns; i++ {
|
||||
cellProofs[i] = bytesutil.PadTo([]byte{byte(i)}, 48)
|
||||
}
|
||||
// Blob must be exactly 131072 bytes
|
||||
blob := make([]byte, 131072)
|
||||
blob[0] = 0x01
|
||||
sb := ðpb.SignedBeaconBlockContentsFulu{
|
||||
Block: ðpb.SignedBeaconBlockFulu{
|
||||
Block: ðpb.BeaconBlockElectra{
|
||||
Slot: 5, ParentRoot: parent[:],
|
||||
Body: util.HydrateBeaconBlockBodyElectra(ðpb.BeaconBlockBodyElectra{
|
||||
BlobKzgCommitments: [][]byte{bytesutil.PadTo([]byte("kc"), 48)},
|
||||
}),
|
||||
},
|
||||
},
|
||||
KzgProofs: cellProofs,
|
||||
Blobs: [][]byte{blob},
|
||||
}
|
||||
blk := ðpb.GenericSignedBeaconBlock_Fulu{Fulu: sb}
|
||||
return ðpb.GenericSignedBeaconBlock{Block: blk, IsBlinded: false}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "fulu block with multiple blobs and cell proofs",
|
||||
block: func(parent [32]byte) *ethpb.GenericSignedBeaconBlock {
|
||||
numberOfColumns := uint64(128)
|
||||
blobCount := 3
|
||||
// For Fulu, we have cell proofs (blobs * numberOfColumns)
|
||||
cellProofs := make([][]byte, uint64(blobCount)*numberOfColumns)
|
||||
for i := range cellProofs {
|
||||
cellProofs[i] = bytesutil.PadTo([]byte{byte(i % 256)}, 48)
|
||||
}
|
||||
// Create properly sized blobs (131072 bytes each)
|
||||
blobs := make([][]byte, blobCount)
|
||||
for i := 0; i < blobCount; i++ {
|
||||
blob := make([]byte, 131072)
|
||||
blob[0] = byte(i + 1)
|
||||
blobs[i] = blob
|
||||
}
|
||||
sb := ðpb.SignedBeaconBlockContentsFulu{
|
||||
Block: ðpb.SignedBeaconBlockFulu{
|
||||
Block: ðpb.BeaconBlockElectra{
|
||||
Slot: 5, ParentRoot: parent[:],
|
||||
Body: util.HydrateBeaconBlockBodyElectra(ðpb.BeaconBlockBodyElectra{
|
||||
BlobKzgCommitments: [][]byte{
|
||||
bytesutil.PadTo([]byte("kc"), 48),
|
||||
bytesutil.PadTo([]byte("kc1"), 48),
|
||||
bytesutil.PadTo([]byte("kc2"), 48),
|
||||
},
|
||||
}),
|
||||
},
|
||||
},
|
||||
KzgProofs: cellProofs,
|
||||
Blobs: blobs,
|
||||
}
|
||||
blk := ðpb.GenericSignedBeaconBlock_Fulu{Fulu: sb}
|
||||
return ðpb.GenericSignedBeaconBlock{Block: blk, IsBlinded: false}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "fulu block wrong cell proof count (should be blobs * 128)",
|
||||
block: func(parent [32]byte) *ethpb.GenericSignedBeaconBlock {
|
||||
// Wrong number of cell proofs - should be 2 * 128 = 256, but providing only 2
|
||||
// Create properly sized blobs
|
||||
blob1 := make([]byte, 131072)
|
||||
blob1[0] = 0x01
|
||||
blob2 := make([]byte, 131072)
|
||||
blob2[0] = 0x02
|
||||
sb := ðpb.SignedBeaconBlockContentsFulu{
|
||||
Block: ðpb.SignedBeaconBlockFulu{
|
||||
Block: ðpb.BeaconBlockElectra{
|
||||
Slot: 5, ParentRoot: parent[:],
|
||||
Body: util.HydrateBeaconBlockBodyElectra(ðpb.BeaconBlockBodyElectra{
|
||||
BlobKzgCommitments: [][]byte{
|
||||
bytesutil.PadTo([]byte("kc"), 48),
|
||||
bytesutil.PadTo([]byte("kc1"), 48),
|
||||
},
|
||||
}),
|
||||
},
|
||||
},
|
||||
KzgProofs: [][]byte{{0x01}, {0x02}}, // Wrong: should be 256 cell proofs
|
||||
Blobs: [][]byte{blob1, blob2},
|
||||
}
|
||||
blk := ðpb.GenericSignedBeaconBlock_Fulu{Fulu: sb}
|
||||
return ðpb.GenericSignedBeaconBlock{Block: blk, IsBlinded: false}
|
||||
},
|
||||
err: "blobs and cells proofs mismatch",
|
||||
},
|
||||
{
|
||||
name: "blind fulu block with blob commitments",
|
||||
block: func(parent [32]byte) *ethpb.GenericSignedBeaconBlock {
|
||||
blockToPropose := util.NewBlindedBeaconBlockFulu()
|
||||
blockToPropose.Message.Slot = 5
|
||||
blockToPropose.Message.ParentRoot = parent[:]
|
||||
txRoot, err := ssz.TransactionsRoot([][]byte{})
|
||||
require.NoError(t, err)
|
||||
withdrawalsRoot, err := ssz.WithdrawalSliceRoot([]*enginev1.Withdrawal{}, fieldparams.MaxWithdrawalsPerPayload)
|
||||
require.NoError(t, err)
|
||||
blockToPropose.Message.Body.ExecutionPayloadHeader.TransactionsRoot = txRoot[:]
|
||||
blockToPropose.Message.Body.ExecutionPayloadHeader.WithdrawalsRoot = withdrawalsRoot[:]
|
||||
blockToPropose.Message.Body.BlobKzgCommitments = [][]byte{bytesutil.PadTo([]byte{0x01}, 48)}
|
||||
blk := ðpb.GenericSignedBeaconBlock_BlindedFulu{BlindedFulu: blockToPropose}
|
||||
return ðpb.GenericSignedBeaconBlock{Block: blk}
|
||||
},
|
||||
useBuilder: true,
|
||||
err: "commitment value doesn't match block", // Known issue with mock builder cell proof mismatch
|
||||
},
|
||||
}
|
||||
|
||||
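// A minimal sanity sketch (not part of the original change), assuming 128 columns per blob
// as in the Fulu cases above: a valid payload carries len(Blobs) * 128 cell proofs, so the
// "wrong cell proof count" case with two blobs needs 2*128 = 256 proofs, and supplying only
// 2 is what triggers the "blobs and cells proofs mismatch" error.
const columnsPerBlob = 128
require.Equal(t, 256, 2*columnsPerBlob)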
for _, tt := range tests {
|
||||
@@ -1111,15 +1240,29 @@ func TestProposer_ProposeBlock_OK(t *testing.T) {
|
||||
|
||||
c := &mock.ChainService{Root: bsRoot[:], State: beaconState}
|
||||
db := dbutil.SetupDB(t)
|
||||
// Create cell proofs for Fulu blocks (128 proofs per blob)
|
||||
numberOfColumns := uint64(128)
|
||||
cellProofs := make([][]byte, numberOfColumns)
|
||||
for i := uint64(0); i < numberOfColumns; i++ {
|
||||
cellProofs[i] = bytesutil.PadTo([]byte{byte(i)}, 48)
|
||||
}
|
||||
// Create properly sized blob for mock builder
|
||||
mockBlob := make([]byte, 131072)
|
||||
mockBlob[0] = 0x03
|
||||
// Use the same commitment as in the blind block test
|
||||
mockCommitment := bytesutil.PadTo([]byte{0x01}, 48)
|
||||
|
||||
proposerServer := &Server{
|
||||
BlockReceiver: c,
|
||||
BlockNotifier: c.BlockNotifier(),
|
||||
P2P: mockp2p.NewTestP2P(t),
|
||||
BlockBuilder: &builderTest.MockBuilderService{HasConfigured: tt.useBuilder, PayloadCapella: emptyPayloadCapella(), PayloadDeneb: emptyPayloadDeneb(),
|
||||
BlobBundle: &enginev1.BlobsBundle{KzgCommitments: [][]byte{bytesutil.PadTo([]byte{0x01}, 48)}, Proofs: [][]byte{{0x02}}, Blobs: [][]byte{{0x03}}}},
|
||||
BeaconDB: db,
|
||||
BlobReceiver: c,
|
||||
OperationNotifier: c.OperationNotifier(),
|
||||
BlobBundle: &enginev1.BlobsBundle{KzgCommitments: [][]byte{mockCommitment}, Proofs: [][]byte{{0x02}}, Blobs: [][]byte{{0x03}}},
|
||||
BlobBundleV2: &enginev1.BlobsBundleV2{KzgCommitments: [][]byte{mockCommitment}, Proofs: cellProofs, Blobs: [][]byte{mockBlob}}},
|
||||
BeaconDB: db,
|
||||
BlobReceiver: c,
|
||||
DataColumnReceiver: c, // Add DataColumnReceiver for Fulu blocks
|
||||
OperationNotifier: c.OperationNotifier(),
|
||||
}
|
||||
blockToPropose := tt.block(bsRoot)
|
||||
res, err := proposerServer.ProposeBeaconBlock(t.Context(), blockToPropose)
|
||||
@@ -3286,3 +3429,252 @@ func TestProposer_ElectraBlobsAndProofs(t *testing.T) {
|
||||
require.Equal(t, 10, len(blobs))
|
||||
require.Equal(t, 10, len(proofs))
|
||||
}
|
||||
|
||||
func TestServer_ProposeBeaconBlock_PostFuluBlindedBlock(t *testing.T) {
|
||||
db := dbutil.SetupDB(t)
|
||||
ctx := t.Context()
|
||||
|
||||
beaconState, parentRoot, _ := util.DeterministicGenesisStateWithGenesisBlock(t, ctx, db, 100)
|
||||
require.NoError(t, beaconState.SetSlot(1))
|
||||
|
||||
t.Run("post-Fulu blinded block - early return success", func(t *testing.T) {
|
||||
// Set up config with Fulu fork at epoch 5
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.FuluForkEpoch = 5
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
mockBuilder := &builderTest.MockBuilderService{
|
||||
HasConfigured: true,
|
||||
Cfg: &builderTest.Config{BeaconDB: db},
|
||||
ErrSubmitBlindedBlockPostFulu: nil, // Success case
|
||||
}
|
||||
|
||||
c := &mock.ChainService{State: beaconState, Root: parentRoot[:]}
|
||||
proposerServer := &Server{
|
||||
ChainStartFetcher: &mockExecution.Chain{},
|
||||
Eth1InfoFetcher: &mockExecution.Chain{},
|
||||
Eth1BlockFetcher: &mockExecution.Chain{},
|
||||
BlockReceiver: c,
|
||||
BlobReceiver: c,
|
||||
HeadFetcher: c,
|
||||
BlockNotifier: c.BlockNotifier(),
|
||||
OperationNotifier: c.OperationNotifier(),
|
||||
StateGen: stategen.New(db, doublylinkedtree.New()),
|
||||
TimeFetcher: c,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BeaconDB: db,
|
||||
BlockBuilder: mockBuilder,
|
||||
P2P: &mockp2p.MockBroadcaster{},
|
||||
}
|
||||
|
||||
// Create a blinded block at slot 160 (epoch 5, which is >= FuluForkEpoch)
|
||||
blindedBlock := util.NewBlindedBeaconBlockDeneb()
|
||||
blindedBlock.Message.Slot = 160 // This puts us at epoch 5 (160/32 = 5)
|
||||
blindedBlock.Message.ProposerIndex = 0
|
||||
blindedBlock.Message.ParentRoot = parentRoot[:]
|
||||
blindedBlock.Message.StateRoot = make([]byte, 32)
|
||||
|
||||
req := ðpb.GenericSignedBeaconBlock{
|
||||
Block: ðpb.GenericSignedBeaconBlock_BlindedDeneb{BlindedDeneb: blindedBlock},
|
||||
}
|
||||
|
||||
// This should trigger the post-Fulu early return path
|
||||
res, err := proposerServer.ProposeBeaconBlock(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, res)
|
||||
require.NotEmpty(t, res.BlockRoot)
|
||||
})
|
||||
|
||||
t.Run("post-Fulu blinded block - builder submission error", func(t *testing.T) {
|
||||
// Set up config with Fulu fork at epoch 5
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.FuluForkEpoch = 5
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
mockBuilder := &builderTest.MockBuilderService{
|
||||
HasConfigured: true,
|
||||
Cfg: &builderTest.Config{BeaconDB: db},
|
||||
ErrSubmitBlindedBlockPostFulu: errors.New("post-Fulu builder submission failed"),
|
||||
}
|
||||
|
||||
c := &mock.ChainService{State: beaconState, Root: parentRoot[:]}
|
||||
proposerServer := &Server{
|
||||
ChainStartFetcher: &mockExecution.Chain{},
|
||||
Eth1InfoFetcher: &mockExecution.Chain{},
|
||||
Eth1BlockFetcher: &mockExecution.Chain{},
|
||||
BlockReceiver: c,
|
||||
BlobReceiver: c,
|
||||
HeadFetcher: c,
|
||||
BlockNotifier: c.BlockNotifier(),
|
||||
OperationNotifier: c.OperationNotifier(),
|
||||
StateGen: stategen.New(db, doublylinkedtree.New()),
|
||||
TimeFetcher: c,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BeaconDB: db,
|
||||
BlockBuilder: mockBuilder,
|
||||
P2P: &mockp2p.MockBroadcaster{},
|
||||
}
|
||||
|
||||
// Create a blinded block at slot 160 (epoch 5)
|
||||
blindedBlock := util.NewBlindedBeaconBlockDeneb()
|
||||
blindedBlock.Message.Slot = 160
|
||||
blindedBlock.Message.ProposerIndex = 0
|
||||
blindedBlock.Message.ParentRoot = parentRoot[:]
|
||||
blindedBlock.Message.StateRoot = make([]byte, 32)
|
||||
|
||||
req := ðpb.GenericSignedBeaconBlock{
|
||||
Block: ðpb.GenericSignedBeaconBlock_BlindedDeneb{BlindedDeneb: blindedBlock},
|
||||
}
|
||||
|
||||
_, err := proposerServer.ProposeBeaconBlock(ctx, req)
|
||||
require.ErrorContains(t, "Could not submit blinded block post-Fulu", err)
|
||||
require.ErrorContains(t, "post-Fulu builder submission failed", err)
|
||||
})
|
||||
|
||||
t.Run("pre-Fulu blinded block - uses regular handleBlindedBlock path", func(t *testing.T) {
|
||||
// Set up config with Fulu fork at epoch 10 (future)
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.FuluForkEpoch = 10
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
mockBuilder := &builderTest.MockBuilderService{
|
||||
HasConfigured: true,
|
||||
Cfg: &builderTest.Config{BeaconDB: db},
|
||||
PayloadDeneb: &enginev1.ExecutionPayloadDeneb{},
|
||||
BlobBundle: &enginev1.BlobsBundle{},
|
||||
}
|
||||
|
||||
c := &mock.ChainService{State: beaconState, Root: parentRoot[:]}
|
||||
proposerServer := &Server{
|
||||
ChainStartFetcher: &mockExecution.Chain{},
|
||||
Eth1InfoFetcher: &mockExecution.Chain{},
|
||||
Eth1BlockFetcher: &mockExecution.Chain{},
|
||||
BlockReceiver: c,
|
||||
BlobReceiver: c,
|
||||
HeadFetcher: c,
|
||||
BlockNotifier: c.BlockNotifier(),
|
||||
OperationNotifier: c.OperationNotifier(),
|
||||
StateGen: stategen.New(db, doublylinkedtree.New()),
|
||||
TimeFetcher: c,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BeaconDB: db,
|
||||
BlockBuilder: mockBuilder,
|
||||
P2P: &mockp2p.MockBroadcaster{},
|
||||
}
|
||||
|
||||
// Create a blinded block at slot 160 (epoch 5, which is < FuluForkEpoch=10)
|
||||
blindedBlock := util.NewBlindedBeaconBlockDeneb()
|
||||
blindedBlock.Message.Slot = 160
|
||||
blindedBlock.Message.ProposerIndex = 0
|
||||
blindedBlock.Message.ParentRoot = parentRoot[:]
|
||||
blindedBlock.Message.StateRoot = make([]byte, 32)
|
||||
|
||||
req := ðpb.GenericSignedBeaconBlock{
|
||||
Block: ðpb.GenericSignedBeaconBlock_BlindedDeneb{BlindedDeneb: blindedBlock},
|
||||
}
|
||||
|
||||
// This should NOT trigger the post-Fulu early return path, but use handleBlindedBlock instead
|
||||
res, err := proposerServer.ProposeBeaconBlock(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, res)
|
||||
require.NotEmpty(t, res.BlockRoot)
|
||||
})
|
||||
|
||||
t.Run("boundary test - exactly at Fulu fork epoch", func(t *testing.T) {
|
||||
// Set up config with Fulu fork at epoch 5
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.FuluForkEpoch = 5
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
mockBuilder := &builderTest.MockBuilderService{
|
||||
HasConfigured: true,
|
||||
Cfg: &builderTest.Config{BeaconDB: db},
|
||||
ErrSubmitBlindedBlockPostFulu: nil,
|
||||
}
|
||||
|
||||
c := &mock.ChainService{State: beaconState, Root: parentRoot[:]}
|
||||
proposerServer := &Server{
|
||||
ChainStartFetcher: &mockExecution.Chain{},
|
||||
Eth1InfoFetcher: &mockExecution.Chain{},
|
||||
Eth1BlockFetcher: &mockExecution.Chain{},
|
||||
BlockReceiver: c,
|
||||
BlobReceiver: c,
|
||||
HeadFetcher: c,
|
||||
BlockNotifier: c.BlockNotifier(),
|
||||
OperationNotifier: c.OperationNotifier(),
|
||||
StateGen: stategen.New(db, doublylinkedtree.New()),
|
||||
TimeFetcher: c,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BeaconDB: db,
|
||||
BlockBuilder: mockBuilder,
|
||||
P2P: &mockp2p.MockBroadcaster{},
|
||||
}
|
||||
|
||||
// Create a blinded block at slot 160 (exactly epoch 5)
|
||||
blindedBlock := util.NewBlindedBeaconBlockDeneb()
|
||||
blindedBlock.Message.Slot = 160 // 160/32 = 5 (exactly at FuluForkEpoch)
|
||||
blindedBlock.Message.ProposerIndex = 0
|
||||
blindedBlock.Message.ParentRoot = parentRoot[:]
|
||||
blindedBlock.Message.StateRoot = make([]byte, 32)
|
||||
|
||||
req := ðpb.GenericSignedBeaconBlock{
|
||||
Block: ðpb.GenericSignedBeaconBlock_BlindedDeneb{BlindedDeneb: blindedBlock},
|
||||
}
|
||||
|
||||
// Should trigger post-Fulu path since epoch 5 >= FuluForkEpoch (5)
|
||||
res, err := proposerServer.ProposeBeaconBlock(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, res)
|
||||
require.NotEmpty(t, res.BlockRoot)
|
||||
})
|
||||
|
||||
t.Run("unblinded block - not affected by post-Fulu condition", func(t *testing.T) {
|
||||
// Set up config with Fulu fork at epoch 5
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.FuluForkEpoch = 5
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
c := &mock.ChainService{State: beaconState, Root: parentRoot[:]}
|
||||
proposerServer := &Server{
|
||||
ChainStartFetcher: &mockExecution.Chain{},
|
||||
Eth1InfoFetcher: &mockExecution.Chain{},
|
||||
Eth1BlockFetcher: &mockExecution.Chain{},
|
||||
BlockReceiver: c,
|
||||
BlobReceiver: c,
|
||||
HeadFetcher: c,
|
||||
BlockNotifier: c.BlockNotifier(),
|
||||
OperationNotifier: c.OperationNotifier(),
|
||||
StateGen: stategen.New(db, doublylinkedtree.New()),
|
||||
TimeFetcher: c,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BeaconDB: db,
|
||||
P2P: &mockp2p.MockBroadcaster{},
|
||||
}
|
||||
|
||||
// Create an unblinded block at slot 160 (epoch 5)
|
||||
unblindeBlock := util.NewBeaconBlockDeneb()
|
||||
unblindeBlock.Block.Slot = 160
|
||||
unblindeBlock.Block.ProposerIndex = 0
|
||||
unblindeBlock.Block.ParentRoot = parentRoot[:]
|
||||
unblindeBlock.Block.StateRoot = make([]byte, 32)
|
||||
|
||||
req := ðpb.GenericSignedBeaconBlock{
|
||||
Block: ðpb.GenericSignedBeaconBlock_Deneb{
|
||||
Deneb: ðpb.SignedBeaconBlockContentsDeneb{
|
||||
Block: unblindeBlock,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Unblinded blocks should not trigger post-Fulu condition, even at epoch >= FuluForkEpoch
|
||||
res, err := proposerServer.ProposeBeaconBlock(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, res)
|
||||
require.NotEmpty(t, res.BlockRoot)
|
||||
})
|
||||
}
|
||||
|
||||
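The scenarios above all hinge on the same boundary arithmetic; a minimal sketch (isPostFulu is a hypothetical helper, not part of the change), assuming the 32-slots-per-epoch value used by these configs:

func isPostFulu(slot primitives.Slot) bool {
	// Slot 160 maps to epoch 160/32 = 5, so it is post-Fulu exactly when FuluForkEpoch <= 5.
	return slots.ToEpoch(slot) >= params.BeaconConfig().FuluForkEpoch
}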
@@ -69,6 +69,7 @@ type Server struct {
|
||||
SyncCommitteePool synccommittee.Pool
|
||||
BlockReceiver blockchain.BlockReceiver
|
||||
BlobReceiver blockchain.BlobReceiver
|
||||
DataColumnReceiver blockchain.DataColumnReceiver
|
||||
MockEth1Votes bool
|
||||
Eth1BlockFetcher execution.POWBlockFetcher
|
||||
PendingDepositsFetcher depositsnapshot.PendingDepositsFetcher
|
||||
|
||||
@@ -89,6 +89,7 @@ type Config struct {
|
||||
AttestationReceiver blockchain.AttestationReceiver
|
||||
BlockReceiver blockchain.BlockReceiver
|
||||
BlobReceiver blockchain.BlobReceiver
|
||||
DataColumnReceiver blockchain.DataColumnReceiver
|
||||
ExecutionChainService execution.Chain
|
||||
ChainStartFetcher execution.ChainStartFetcher
|
||||
ExecutionChainInfoFetcher execution.ChainInfoFetcher
|
||||
@@ -120,6 +121,7 @@ type Config struct {
|
||||
Router *http.ServeMux
|
||||
ClockWaiter startup.ClockWaiter
|
||||
BlobStorage *filesystem.BlobStorage
|
||||
DataColumnStorage *filesystem.DataColumnStorage
|
||||
TrackedValidatorsCache *cache.TrackedValidatorsCache
|
||||
PayloadIDCache *cache.PayloadIDCache
|
||||
LCStore *lightClient.Store
|
||||
@@ -196,6 +198,7 @@ func NewService(ctx context.Context, cfg *Config) *Service {
|
||||
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
|
||||
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
|
||||
BlobStorage: s.cfg.BlobStorage,
|
||||
DataColumnStorage: s.cfg.DataColumnStorage,
|
||||
}
|
||||
rewardFetcher := &rewards.BlockRewardService{Replayer: ch, DB: s.cfg.BeaconDB}
|
||||
coreService := &core.Service{
|
||||
@@ -236,6 +239,7 @@ func NewService(ctx context.Context, cfg *Config) *Service {
|
||||
P2P: s.cfg.Broadcaster,
|
||||
BlockReceiver: s.cfg.BlockReceiver,
|
||||
BlobReceiver: s.cfg.BlobReceiver,
|
||||
DataColumnReceiver: s.cfg.DataColumnReceiver,
|
||||
MockEth1Votes: s.cfg.MockEth1Votes,
|
||||
Eth1BlockFetcher: s.cfg.ExecutionChainService,
|
||||
PendingDepositsFetcher: s.cfg.PendingDepositFetcher,
|
||||
|
||||
@@ -8,6 +8,7 @@ go_library(
|
||||
"mock_blocker.go",
|
||||
"mock_exec_chain_info_fetcher.go",
|
||||
"mock_genesis_timefetcher.go",
|
||||
"mock_sidecars.go",
|
||||
"mock_stater.go",
|
||||
],
|
||||
importpath = "github.com/OffchainLabs/prysm/v6/beacon-chain/rpc/testutil",
|
||||
|
||||
44
beacon-chain/rpc/testutil/mock_sidecars.go
Normal file
@@ -0,0 +1,44 @@
|
||||
package testutil
|
||||
|
||||
import ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
|
||||
// CreateDataColumnSidecar generates a filled dummy data column sidecar
|
||||
func CreateDataColumnSidecar(index uint64, data []byte) *ethpb.DataColumnSidecar {
|
||||
return ðpb.DataColumnSidecar{
|
||||
Index: index,
|
||||
Column: [][]byte{data},
|
||||
SignedBlockHeader: ðpb.SignedBeaconBlockHeader{
|
||||
Header: ðpb.BeaconBlockHeader{
|
||||
Slot: 1,
|
||||
ProposerIndex: 1,
|
||||
ParentRoot: make([]byte, 32),
|
||||
StateRoot: make([]byte, 32),
|
||||
BodyRoot: make([]byte, 32),
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
KzgCommitments: [][]byte{make([]byte, 48)},
|
||||
KzgProofs: [][]byte{make([]byte, 48)},
|
||||
KzgCommitmentsInclusionProof: [][]byte{make([]byte, 32)},
|
||||
}
|
||||
}
|
||||
|
||||
// CreateBlobSidecar generates a filled dummy blob sidecar
|
||||
func CreateBlobSidecar(index uint64, blob []byte) *ethpb.BlobSidecar {
|
||||
return ðpb.BlobSidecar{
|
||||
Index: index,
|
||||
Blob: blob,
|
||||
SignedBlockHeader: ðpb.SignedBeaconBlockHeader{
|
||||
Header: ðpb.BeaconBlockHeader{
|
||||
Slot: 1,
|
||||
ProposerIndex: 1,
|
||||
ParentRoot: make([]byte, 32),
|
||||
StateRoot: make([]byte, 32),
|
||||
BodyRoot: make([]byte, 32),
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
KzgCommitment: make([]byte, 48),
|
||||
KzgProof: make([]byte, 48),
|
||||
}
|
||||
}
|
||||
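A brief usage sketch for the new test helpers above; the index and payload values are arbitrary illustrations.

column := testutil.CreateDataColumnSidecar(3, []byte{0xde, 0xad, 0xbe, 0xef})
blob := testutil.CreateBlobSidecar(0, make([]byte, 131072)) // blobs are 131072 bytes, as in the proposer tests
_, _ = column, blob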
@@ -15,10 +15,14 @@ type MockStater struct {
|
||||
BeaconStateRoot []byte
|
||||
StatesBySlot map[primitives.Slot]state.BeaconState
|
||||
StatesByRoot map[[32]byte]state.BeaconState
|
||||
CustomError error
|
||||
}
|
||||
|
||||
// State --
|
||||
func (m *MockStater) State(ctx context.Context, id []byte) (state.BeaconState, error) {
|
||||
if m.CustomError != nil {
|
||||
return nil, m.CustomError
|
||||
}
|
||||
if m.StateProviderFunc != nil {
|
||||
return m.StateProviderFunc(ctx, id)
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@ go_library(
|
||||
"broadcast_bls_changes.go",
|
||||
"context.go",
|
||||
"custody.go",
|
||||
"data_column_sidecars.go",
|
||||
"data_columns_reconstruct.go",
|
||||
"deadlines.go",
|
||||
"decode_pubsub.go",
|
||||
@@ -167,6 +168,7 @@ go_test(
|
||||
"broadcast_bls_changes_test.go",
|
||||
"context_test.go",
|
||||
"custody_test.go",
|
||||
"data_column_sidecars_test.go",
|
||||
"data_columns_reconstruct_test.go",
|
||||
"decode_pubsub_test.go",
|
||||
"error_test.go",
|
||||
@@ -281,6 +283,7 @@ go_test(
|
||||
"@com_github_golang_snappy//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/crypto:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/network:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/protocol:go_default_library",
|
||||
|
||||
@@ -91,9 +91,7 @@ func (bs *blobSync) validateNext(rb blocks.ROBlob) error {
|
||||
return err
|
||||
}
|
||||
|
||||
sc := blocks.NewSidecarFromBlobSidecar(rb)
|
||||
|
||||
if err := bs.store.Persist(bs.current, sc); err != nil {
|
||||
if err := bs.store.Persist(bs.current, rb); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
950
beacon-chain/sync/data_column_sidecars.go
Normal file
@@ -0,0 +1,950 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"slices"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
prysmP2P "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
p2ptypes "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
|
||||
"github.com/OffchainLabs/prysm/v6/crypto/rand"
|
||||
eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
goPeer "github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// DataColumnSidecarsParams stores the common parameters needed to
|
||||
// fetch data column sidecars from peers.
|
||||
type DataColumnSidecarsParams struct {
|
||||
Ctx context.Context // Context
|
||||
Tor blockchain.TemporalOracle // Temporal oracle, useful to get the current slot
|
||||
P2P prysmP2P.P2P // P2P network interface
|
||||
RateLimiter *leakybucket.Collector // Rate limiter for outgoing requests
|
||||
CtxMap ContextByteVersions // Context map, useful to know if a message is mapped to the correct fork
|
||||
Storage filesystem.DataColumnStorageReader // Data columns storage
|
||||
NewVerifier verification.NewDataColumnsVerifier // Data columns verifier to check the conformity of incoming data column sidecars
|
||||
DownscorePeerOnRPCFault bool // Downscore a peer if it commits an RPC fault. Not responding with sidecars at all is considered a fault.
|
||||
}
|
||||
|
||||
// FetchDataColumnSidecars retrieves data column sidecars from storage and peers for the given
|
||||
// blocks and requested data column indices. It employs a multi-step strategy:
|
||||
//
|
||||
// 1. Direct retrieval: If all requested columns are available in storage, they are
|
||||
// retrieved directly without reconstruction.
|
||||
// 2. Reconstruction-based retrieval: If some requested columns are missing but sufficient
|
||||
// stored columns exist (at least the minimum required for reconstruction), the function
|
||||
// reconstructs all columns and extracts the requested indices.
|
||||
// 3. Peer retrieval: If storage and reconstruction fail, missing columns are requested
|
||||
// from connected peers that are expected to custody the required data.
|
||||
//
|
||||
// The function returns a map of block roots to their corresponding verified read-only data
|
||||
// columns. It returns an error if data column storage is unavailable, if storage/reconstruction
|
||||
// operations fail unexpectedly, or if not all requested columns could be retrieved from peers.
|
||||
func FetchDataColumnSidecars(
|
||||
params DataColumnSidecarsParams,
|
||||
roBlocks []blocks.ROBlock,
|
||||
indicesMap map[uint64]bool,
|
||||
) (map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, error) {
|
||||
if len(roBlocks) == 0 || len(indicesMap) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
indices := sortedSliceFromMap(indicesMap)
|
||||
slotsWithCommitments := make(map[primitives.Slot]bool)
|
||||
missingIndicesByRoot := make(map[[fieldparams.RootLength]byte]map[uint64]bool)
|
||||
indicesByRootStored := make(map[[fieldparams.RootLength]byte]map[uint64]bool)
|
||||
result := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn)
|
||||
|
||||
for _, roBlock := range roBlocks {
|
||||
// Filter out blocks without commitments.
|
||||
block := roBlock.Block()
|
||||
commitments, err := block.Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "get blob kzg commitments for block root %#x", roBlock.Root())
|
||||
}
|
||||
if len(commitments) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
slotsWithCommitments[block.Slot()] = true
|
||||
root := roBlock.Root()
|
||||
|
||||
// Step 1: Get the requested sidecars for this root if available in storage
|
||||
requestedColumns, err := tryGetStoredColumns(params.Storage, root, indices)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "try get direct columns for root %#x", root)
|
||||
}
|
||||
if requestedColumns != nil {
|
||||
result[root] = requestedColumns
|
||||
continue
|
||||
}
|
||||
|
||||
// Step 2: If step 1 failed, reconstruct the requested sidecars from what is available in storage
|
||||
requestedColumns, err = tryGetReconstructedColumns(params.Storage, root, indices)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "try get reconstructed columns for root %#x", root)
|
||||
}
|
||||
if requestedColumns != nil {
|
||||
result[root] = requestedColumns
|
||||
continue
|
||||
}
|
||||
|
||||
// Step 3a: If steps 1 and 2 failed, keep track of the sidecars that need to be queried from peers
|
||||
// and those that are already stored.
|
||||
indicesToQueryMap, indicesStoredMap := categorizeIndices(params.Storage, root, indices)
|
||||
|
||||
if len(indicesToQueryMap) > 0 {
|
||||
missingIndicesByRoot[root] = indicesToQueryMap
|
||||
}
|
||||
if len(indicesStoredMap) > 0 {
|
||||
indicesByRootStored[root] = indicesStoredMap
|
||||
}
|
||||
}
|
||||
|
||||
// Early return if no sidecars need to be queried from peers.
|
||||
if len(missingIndicesByRoot) == 0 {
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Step 3b: Request missing sidecars from peers.
|
||||
start, count := time.Now(), computeTotalCount(missingIndicesByRoot)
|
||||
fromPeersResult, err := tryRequestingColumnsFromPeers(params, roBlocks, slotsWithCommitments, missingIndicesByRoot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "request from peers")
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{"duration": time.Since(start), "count": count}).Debug("Requested data column sidecars from peers")
|
||||
|
||||
// Step 3c: If needed, try to reconstruct missing sidecars from storage and fetched data.
|
||||
fromReconstructionResult, err := tryReconstructFromStorageAndPeers(params.Storage, fromPeersResult, indicesMap, missingIndicesByRoot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "reconstruct from storage and peers")
|
||||
}
|
||||
|
||||
for root, verifiedSidecars := range fromReconstructionResult {
|
||||
result[root] = verifiedSidecars
|
||||
}
|
||||
|
||||
for root := range fromPeersResult {
|
||||
if _, ok := fromReconstructionResult[root]; ok {
|
||||
// We already have what we need from peers + reconstruction
|
||||
continue
|
||||
}
|
||||
|
||||
result[root] = append(result[root], fromPeersResult[root]...)
|
||||
|
||||
storedIndices := indicesByRootStored[root]
|
||||
if len(storedIndices) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
storedColumns, err := tryGetStoredColumns(params.Storage, root, sortedSliceFromMap(storedIndices))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "try get direct columns for root %#x", root)
|
||||
}
|
||||
|
||||
result[root] = append(result[root], storedColumns...)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
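
// NOTE: Illustrative sketch, not part of the original change. It shows how a caller might request a
// fixed set of column indices for a batch of blocks. `exampleFetchColumns` and the chosen indices are
// hypothetical.
func exampleFetchColumns(
	p DataColumnSidecarsParams,
	roBlocks []blocks.ROBlock,
) (map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, error) {
	// Request the same three column indices for every block in the batch.
	indices := map[uint64]bool{31: true, 81: true, 106: true}

	columnsByRoot, err := FetchDataColumnSidecars(p, roBlocks, indices)
	if err != nil {
		return nil, errors.Wrap(err, "fetch data column sidecars")
	}

	return columnsByRoot, nil
}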
|
||||
|
||||
// tryGetStoredColumns attempts to retrieve all requested data column sidecars directly from storage
|
||||
// if they are all available. Returns the sidecars if successful, and nil if at least one
|
||||
// requested sidecar is not available in the storage.
|
||||
func tryGetStoredColumns(storage filesystem.DataColumnStorageReader, blockRoot [fieldparams.RootLength]byte, indices []uint64) ([]blocks.VerifiedRODataColumn, error) {
|
||||
// Check if all requested indices are present in storage
|
||||
storedIndices := storage.Summary(blockRoot).Stored()
|
||||
allRequestedPresent := true
|
||||
for _, requestedIndex := range indices {
|
||||
if !storedIndices[requestedIndex] {
|
||||
allRequestedPresent = false
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !allRequestedPresent {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// All requested data is present, retrieve directly from DB
|
||||
requestedColumns, err := storage.Get(blockRoot, indices)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get data columns for block root %#x", blockRoot)
|
||||
}
|
||||
|
||||
return requestedColumns, nil
|
||||
}
|
||||
|
||||
// tryGetReconstructedColumns attempts to retrieve columns using reconstruction
// if sufficient columns are available. Returns the requested columns if successful,
// (nil, nil) if there are not enough stored columns to reconstruct, or (nil, err) if an error occurs.
|
||||
func tryGetReconstructedColumns(storage filesystem.DataColumnStorageReader, blockRoot [fieldparams.RootLength]byte, indices []uint64) ([]blocks.VerifiedRODataColumn, error) {
|
||||
// Check if we have enough columns for reconstruction
|
||||
summary := storage.Summary(blockRoot)
|
||||
if summary.Count() < peerdas.MinimumColumnCountToReconstruct() {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Retrieve all stored columns for reconstruction
|
||||
allStoredColumns, err := storage.Get(blockRoot, nil)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get all stored columns for reconstruction for block root %#x", blockRoot)
|
||||
}
|
||||
|
||||
// Attempt reconstruction
|
||||
reconstructedColumns, err := peerdas.ReconstructDataColumnSidecars(allStoredColumns)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to reconstruct data columns for block root %#x", blockRoot)
|
||||
}
|
||||
|
||||
// Health check: ensure we have the expected number of columns
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
if uint64(len(reconstructedColumns)) != numberOfColumns {
|
||||
return nil, errors.Errorf("reconstructed %d columns but expected %d for block root %#x", len(reconstructedColumns), numberOfColumns, blockRoot)
|
||||
}
|
||||
|
||||
// Extract only the requested indices from reconstructed data using direct indexing
|
||||
requestedColumns := make([]blocks.VerifiedRODataColumn, 0, len(indices))
|
||||
for _, requestedIndex := range indices {
|
||||
if requestedIndex >= numberOfColumns {
|
||||
return nil, errors.Errorf("requested column index %d exceeds maximum %d for block root %#x", requestedIndex, numberOfColumns-1, blockRoot)
|
||||
}
|
||||
requestedColumns = append(requestedColumns, reconstructedColumns[requestedIndex])
|
||||
}
|
||||
|
||||
return requestedColumns, nil
|
||||
}
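
// NOTE: Illustrative sketch, not part of the original change. It isolates the gating condition used
// above: a block can be reconstructed from storage alone only when at least
// peerdas.MinimumColumnCountToReconstruct() columns are stored. `canReconstructFromStorage` is hypothetical.
func canReconstructFromStorage(storage filesystem.DataColumnStorageReader, blockRoot [fieldparams.RootLength]byte) bool {
	return storage.Summary(blockRoot).Count() >= peerdas.MinimumColumnCountToReconstruct()
}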
|
||||
|
||||
// categorizeIndices separates indices into those that need to be queried from peers
|
||||
// and those that are already stored.
|
||||
func categorizeIndices(storage filesystem.DataColumnStorageReader, blockRoot [fieldparams.RootLength]byte, indices []uint64) (map[uint64]bool, map[uint64]bool) {
|
||||
indicesToQuery := make(map[uint64]bool, len(indices))
|
||||
indicesStored := make(map[uint64]bool, len(indices))
|
||||
|
||||
allStoredIndices := storage.Summary(blockRoot).Stored()
|
||||
for _, index := range indices {
|
||||
if allStoredIndices[index] {
|
||||
indicesStored[index] = true
|
||||
continue
|
||||
}
|
||||
indicesToQuery[index] = true
|
||||
}
|
||||
|
||||
return indicesToQuery, indicesStored
|
||||
}
|
||||
|
||||
// tryRequestingColumnsFromPeers attempts to request missing data column sidecars from connected peers.
// It explores the connected peers to find those that are expected to custody the requested columns,
// and returns only once every requested column has either been retrieved or been attempted from all
// eligible peers.
// WARNING: This function alters `missingIndicesByRoot` by removing successfully retrieved columns.
// After running this function, the caller can inspect the (modified) `missingIndicesByRoot` map
// to determine whether some sidecars are still missing.
|
||||
func tryRequestingColumnsFromPeers(
|
||||
p DataColumnSidecarsParams,
|
||||
roBlocks []blocks.ROBlock,
|
||||
slotsWithCommitments map[primitives.Slot]bool,
|
||||
missingIndicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
|
||||
) (map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, error) {
|
||||
// Create a new random source for peer selection.
|
||||
randomSource := rand.NewGenerator()
|
||||
|
||||
// Compute slots by block root.
|
||||
slotByRoot := computeSlotByBlockRoot(roBlocks)
|
||||
|
||||
// Determine all sidecars each peer is expected to custody.
|
||||
connectedPeersSlice := p.P2P.Peers().Connected()
|
||||
connectedPeers := make(map[goPeer.ID]bool, len(connectedPeersSlice))
|
||||
for _, peer := range connectedPeersSlice {
|
||||
connectedPeers[peer] = true
|
||||
}
|
||||
|
||||
indicesByRootByPeer, err := computeIndicesByRootByPeer(p.P2P, slotByRoot, missingIndicesByRoot, connectedPeers)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "explore peers")
|
||||
}
|
||||
|
||||
verifiedColumnsByRoot := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn)
|
||||
for len(missingIndicesByRoot) > 0 && len(indicesByRootByPeer) > 0 {
|
||||
// Select peers to query the missing sidecars from.
|
||||
indicesByRootByPeerToQuery, err := selectPeers(p, randomSource, len(missingIndicesByRoot), indicesByRootByPeer)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "select peers")
|
||||
}
|
||||
|
||||
// Remove selected peers from the maps.
|
||||
for peer := range indicesByRootByPeerToQuery {
|
||||
delete(connectedPeers, peer)
|
||||
}
|
||||
|
||||
// Fetch the sidecars from the chosen peers.
|
||||
roDataColumnsByPeer := fetchDataColumnSidecarsFromPeers(p, slotByRoot, slotsWithCommitments, indicesByRootByPeerToQuery)
|
||||
|
||||
// Verify the received data column sidecars.
|
||||
verifiedRoDataColumnSidecars, err := verifyDataColumnSidecarsByPeer(p.P2P, p.NewVerifier, roDataColumnsByPeer)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "verify data columns sidecars by peer")
|
||||
}
|
||||
|
||||
// Remove the verified sidecars from the missing indices map and compute the new verified columns by root.
|
||||
localVerifiedColumnsByRoot := updateResults(verifiedRoDataColumnSidecars, missingIndicesByRoot)
|
||||
for root, verifiedRoDataColumns := range localVerifiedColumnsByRoot {
|
||||
verifiedColumnsByRoot[root] = append(verifiedColumnsByRoot[root], verifiedRoDataColumns...)
|
||||
}
|
||||
|
||||
// Compute indices by root by peers with the updated missing indices and connected peers.
|
||||
indicesByRootByPeer, err = computeIndicesByRootByPeer(p.P2P, slotByRoot, missingIndicesByRoot, connectedPeers)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "explore peers")
|
||||
}
|
||||
}
|
||||
|
||||
return verifiedColumnsByRoot, nil
|
||||
}
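
// NOTE: Illustrative sketch, not part of the original change. Because tryRequestingColumnsFromPeers
// mutates `missingIndicesByRoot` in place, a caller can detect how many sidecars remain unretrieved by
// inspecting the map afterwards. `exampleRemainingAfterPeerFetch` is hypothetical.
func exampleRemainingAfterPeerFetch(
	p DataColumnSidecarsParams,
	roBlocks []blocks.ROBlock,
	slotsWithCommitments map[primitives.Slot]bool,
	missingIndicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
) (int, error) {
	if _, err := tryRequestingColumnsFromPeers(p, roBlocks, slotsWithCommitments, missingIndicesByRoot); err != nil {
		return 0, errors.Wrap(err, "try requesting columns from peers")
	}

	// Whatever is left in the map could not be fetched from any connected peer.
	return computeTotalCount(missingIndicesByRoot), nil
}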
|
||||
|
||||
// tryReconstructFromStorageAndPeers attempts to reconstruct missing data column sidecars
|
||||
// using the data available in the storage and the data fetched from peers.
|
||||
// If, for at least one root, the reconstruction is not possible, an error is returned.
|
||||
func tryReconstructFromStorageAndPeers(
|
||||
storage filesystem.DataColumnStorageReader,
|
||||
fromPeersByRoot map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn,
|
||||
indices map[uint64]bool,
|
||||
missingIndicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
|
||||
) (map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, error) {
|
||||
if len(missingIndicesByRoot) == 0 {
|
||||
// Nothing to do, return early.
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
|
||||
|
||||
start := time.Now()
|
||||
result := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, len(missingIndicesByRoot))
|
||||
for root := range missingIndicesByRoot {
|
||||
// Check if a reconstruction is possible based on what we have from the store and fetched from peers.
|
||||
summary := storage.Summary(root)
|
||||
storedCount := summary.Count()
|
||||
fetchedCount := uint64(len(fromPeersByRoot[root]))
|
||||
|
||||
if storedCount+fetchedCount < minimumColumnsCountToReconstruct {
|
||||
return nil, errors.Errorf("cannot reconstruct all needed columns for root %#x. stored: %d, fetched: %d, minimum: %d", root, storedCount, fetchedCount, minimumColumnsCountToReconstruct)
|
||||
}
|
||||
|
||||
// Load all we have in the store.
|
||||
storedSidecars, err := storage.Get(root, nil)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get stored sidecars for root %#x", root)
|
||||
}
|
||||
|
||||
sidecars := make([]blocks.VerifiedRODataColumn, 0, storedCount+fetchedCount)
|
||||
sidecars = append(sidecars, storedSidecars...)
|
||||
sidecars = append(sidecars, fromPeersByRoot[root]...)
|
||||
|
||||
// Attempt reconstruction.
|
||||
reconstructedSidecars, err := peerdas.ReconstructDataColumnSidecars(sidecars)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to reconstruct data columns for root %#x", root)
|
||||
}
|
||||
|
||||
// Select only sidecars we need.
|
||||
for _, sidecar := range reconstructedSidecars {
|
||||
if indices[sidecar.Index] {
|
||||
result[root] = append(result[root], sidecar)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"rootCount": len(missingIndicesByRoot),
|
||||
"elapsed": time.Since(start),
|
||||
}).Debug("Reconstructed from storage and peers")
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// selectPeers selects the peers to query the sidecars from.
// It begins by randomly selecting a peer in `origIndicesByRootByPeer` that has enough bandwidth
// and assigns to it all the sidecars it can provide. It then randomly selects another peer, and so on,
// until all sidecars in `missingIndicesByRoot` are covered.
|
||||
func selectPeers(
|
||||
p DataColumnSidecarsParams,
|
||||
randomSource *rand.Rand,
|
||||
count int,
|
||||
origIndicesByRootByPeer map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool,
|
||||
) (map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool, error) {
|
||||
const randomPeerTimeout = 2 * time.Minute
|
||||
|
||||
// Select peers to query the missing sidecars from.
|
||||
indicesByRootByPeer := copyIndicesByRootByPeer(origIndicesByRootByPeer)
|
||||
internalIndicesByRootByPeer := copyIndicesByRootByPeer(indicesByRootByPeer)
|
||||
indicesByRootByPeerToQuery := make(map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool)
|
||||
for len(internalIndicesByRootByPeer) > 0 {
|
||||
// Randomly select a peer with enough bandwidth.
|
||||
peer, err := func() (goPeer.ID, error) {
|
||||
ctx, cancel := context.WithTimeout(p.Ctx, randomPeerTimeout)
|
||||
defer cancel()
|
||||
|
||||
peer, err := randomPeer(ctx, randomSource, p.RateLimiter, count, internalIndicesByRootByPeer)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "select random peer")
|
||||
}
|
||||
|
||||
return peer, err
|
||||
}()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Query all the sidecars that peer can offer us.
|
||||
newIndicesByRoot, ok := internalIndicesByRootByPeer[peer]
|
||||
if !ok {
|
||||
return nil, errors.Errorf("peer %s not found in internal indices by root by peer map", peer)
|
||||
}
|
||||
|
||||
indicesByRootByPeerToQuery[peer] = newIndicesByRoot
|
||||
|
||||
// Remove this peer from the maps to avoid re-selection.
|
||||
delete(indicesByRootByPeer, peer)
|
||||
delete(internalIndicesByRootByPeer, peer)
|
||||
|
||||
// Delete the corresponding sidecars from other peers in the internal map
|
||||
// to avoid re-selection during this iteration.
|
||||
for peer, indicesByRoot := range internalIndicesByRootByPeer {
|
||||
for root, indices := range indicesByRoot {
|
||||
newIndices := newIndicesByRoot[root]
|
||||
for index := range newIndices {
|
||||
delete(indices, index)
|
||||
}
|
||||
if len(indices) == 0 {
|
||||
delete(indicesByRoot, root)
|
||||
}
|
||||
}
|
||||
if len(indicesByRoot) == 0 {
|
||||
delete(internalIndicesByRootByPeer, peer)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return indicesByRootByPeerToQuery, nil
|
||||
}
|
||||
|
||||
// updateResults removes the newly verified sidecars from the missing indices map and returns them grouped by block root.
// WARNING: This function alters `missingIndicesByRoot` by removing verified sidecars.
// After running this function, the caller can inspect the (modified) `missingIndicesByRoot` map
// to determine whether some sidecars are still missing.
|
||||
func updateResults(
|
||||
verifiedSidecars []blocks.VerifiedRODataColumn,
|
||||
missingIndicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
|
||||
) map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn {
|
||||
// Group the verified sidecars by block root.
|
||||
verifiedSidecarsByRoot := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn)
|
||||
for _, verifiedSidecar := range verifiedSidecars {
|
||||
blockRoot := verifiedSidecar.BlockRoot()
|
||||
index := verifiedSidecar.Index
|
||||
|
||||
// Add to the result map grouped by block root
|
||||
verifiedSidecarsByRoot[blockRoot] = append(verifiedSidecarsByRoot[blockRoot], verifiedSidecar)
|
||||
|
||||
if indices, ok := missingIndicesByRoot[blockRoot]; ok {
|
||||
delete(indices, index)
|
||||
if len(indices) == 0 {
|
||||
delete(missingIndicesByRoot, blockRoot)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return verifiedSidecarsByRoot
|
||||
}
|
||||
|
||||
// fetchDataColumnSidecarsFromPeers retrieves data column sidecars from peers.
|
||||
func fetchDataColumnSidecarsFromPeers(
|
||||
params DataColumnSidecarsParams,
|
||||
slotByRoot map[[fieldparams.RootLength]byte]primitives.Slot,
|
||||
slotsWithCommitments map[primitives.Slot]bool,
|
||||
indicesByRootByPeer map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool,
|
||||
) map[goPeer.ID][]blocks.RODataColumn {
|
||||
var (
|
||||
wg sync.WaitGroup
|
||||
mut sync.Mutex
|
||||
)
|
||||
|
||||
roDataColumnsByPeer := make(map[goPeer.ID][]blocks.RODataColumn)
|
||||
wg.Add(len(indicesByRootByPeer))
|
||||
for peerID, indicesByRoot := range indicesByRootByPeer {
|
||||
go func(peerID goPeer.ID, indicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool) {
|
||||
defer wg.Done()
|
||||
|
||||
requestedCount := 0
|
||||
for _, indices := range indicesByRoot {
|
||||
requestedCount += len(indices)
|
||||
}
|
||||
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"peerID": peerID,
|
||||
"agent": agentString(peerID, params.P2P.Host()),
|
||||
"blockCount": len(indicesByRoot),
|
||||
"totalRequestedCount": requestedCount,
|
||||
})
|
||||
|
||||
roDataColumns, err := sendDataColumnSidecarsRequest(params, slotByRoot, slotsWithCommitments, peerID, indicesByRoot)
|
||||
if err != nil {
|
||||
log.WithError(err).Warning("Failed to send data column sidecars request")
|
||||
return
|
||||
}
|
||||
|
||||
mut.Lock()
|
||||
defer mut.Unlock()
|
||||
roDataColumnsByPeer[peerID] = roDataColumns
|
||||
}(peerID, indicesByRoot)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
return roDataColumnsByPeer
|
||||
}
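
// NOTE: Illustrative sketch, not part of the original change. It reduces the concurrency pattern used
// above to its core: one goroutine per peer, a WaitGroup to join them, and a mutex guarding the shared
// result map. `exampleFanOut` and its `fetch` callback are hypothetical.
func exampleFanOut(peerIDs []goPeer.ID, fetch func(goPeer.ID) []blocks.RODataColumn) map[goPeer.ID][]blocks.RODataColumn {
	var (
		wg  sync.WaitGroup
		mut sync.Mutex
	)

	results := make(map[goPeer.ID][]blocks.RODataColumn, len(peerIDs))
	wg.Add(len(peerIDs))
	for _, id := range peerIDs {
		go func(id goPeer.ID) {
			defer wg.Done()
			columns := fetch(id)

			mut.Lock()
			defer mut.Unlock()
			results[id] = columns
		}(id)
	}

	wg.Wait()
	return results
}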
|
||||
|
||||
func sendDataColumnSidecarsRequest(
|
||||
params DataColumnSidecarsParams,
|
||||
slotByRoot map[[fieldparams.RootLength]byte]primitives.Slot,
|
||||
slotsWithCommitments map[primitives.Slot]bool,
|
||||
peerID goPeer.ID,
|
||||
indicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
|
||||
) ([]blocks.RODataColumn, error) {
|
||||
const batchSize = 32
|
||||
|
||||
rootCount := int64(len(indicesByRoot))
|
||||
requestedSidecarsCount := 0
|
||||
for _, indices := range indicesByRoot {
|
||||
requestedSidecarsCount += len(indices)
|
||||
}
|
||||
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"peerID": peerID,
|
||||
"agent": agentString(peerID, params.P2P.Host()),
|
||||
"requestedSidecars": requestedSidecarsCount,
|
||||
})
|
||||
|
||||
// Try to build by-range requests first.
|
||||
byRangeRequests, err := buildByRangeRequests(slotByRoot, slotsWithCommitments, indicesByRoot, batchSize)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "craft by range request")
|
||||
}
|
||||
|
||||
// If we have a valid by range request, send it.
|
||||
if len(byRangeRequests) > 0 {
|
||||
count := 0
|
||||
for _, indices := range indicesByRoot {
|
||||
count += len(indices)
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
roDataColumns := make([]blocks.RODataColumn, 0, count)
|
||||
for _, request := range byRangeRequests {
|
||||
if params.RateLimiter != nil {
|
||||
params.RateLimiter.Add(peerID.String(), rootCount)
|
||||
}
|
||||
|
||||
localRoDataColumns, err := SendDataColumnSidecarsByRangeRequest(params, peerID, request)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "send data column sidecars by range request to peer %s", peerID)
|
||||
}
|
||||
|
||||
roDataColumns = append(roDataColumns, localRoDataColumns...)
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"respondedSidecars": len(roDataColumns),
|
||||
"requests": len(byRangeRequests),
|
||||
"type": "byRange",
|
||||
"duration": time.Since(start),
|
||||
}).Debug("Received data column sidecars")
|
||||
|
||||
return roDataColumns, nil
|
||||
}
|
||||
|
||||
// Build identifiers for the by root request.
|
||||
byRootRequest := buildByRootRequest(indicesByRoot)
|
||||
|
||||
// Send the by root request.
|
||||
start := time.Now()
|
||||
if params.RateLimiter != nil {
|
||||
params.RateLimiter.Add(peerID.String(), rootCount)
|
||||
}
|
||||
roDataColumns, err := SendDataColumnSidecarsByRootRequest(params, peerID, byRootRequest)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "send data column sidecars by root request to peer %s", peerID)
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"respondedSidecars": len(roDataColumns),
|
||||
"requests": 1,
|
||||
"type": "byRoot",
|
||||
"duration": time.Since(start),
|
||||
}).Debug("Received data column sidecars")
|
||||
|
||||
return roDataColumns, nil
|
||||
}
|
||||
|
||||
// buildByRangeRequests constructs by range requests from the given indices,
// only if the indices are the same for all blocks and the blocks are contiguous.
// (Missing blocks or blocks without commitments still count as contiguous.)
// If one of these conditions is not met, it returns nil.
|
||||
func buildByRangeRequests(
|
||||
slotByRoot map[[fieldparams.RootLength]byte]primitives.Slot,
|
||||
slotsWithCommitments map[primitives.Slot]bool,
|
||||
indicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
|
||||
batchSize uint64,
|
||||
) ([]*ethpb.DataColumnSidecarsByRangeRequest, error) {
|
||||
if len(indicesByRoot) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var reference map[uint64]bool
|
||||
slots := make([]primitives.Slot, 0, len(slotByRoot))
|
||||
for root, indices := range indicesByRoot {
|
||||
if reference == nil {
|
||||
reference = indices
|
||||
}
|
||||
|
||||
if !compareIndices(reference, indices) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
slot, ok := slotByRoot[root]
|
||||
if !ok {
|
||||
return nil, errors.Errorf("slot not found for block root %#x", root)
|
||||
}
|
||||
|
||||
slots = append(slots, slot)
|
||||
}
|
||||
|
||||
slices.Sort(slots)
|
||||
|
||||
for i := 1; i < len(slots); i++ {
|
||||
previous, current := slots[i-1], slots[i]
|
||||
if current == previous+1 {
|
||||
continue
|
||||
}
|
||||
|
||||
for j := previous + 1; j < current; j++ {
|
||||
if slotsWithCommitments[j] {
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
columns := sortedSliceFromMap(reference)
|
||||
startSlot, endSlot := slots[0], slots[len(slots)-1]
|
||||
totalCount := uint64(endSlot - startSlot + 1)
|
||||
|
||||
requests := make([]*ethpb.DataColumnSidecarsByRangeRequest, 0, totalCount/batchSize)
|
||||
for start := startSlot; start <= endSlot; start += primitives.Slot(batchSize) {
|
||||
end := min(start+primitives.Slot(batchSize)-1, endSlot)
|
||||
request := ðpb.DataColumnSidecarsByRangeRequest{
|
||||
StartSlot: start,
|
||||
Count: uint64(end - start + 1),
|
||||
Columns: columns,
|
||||
}
|
||||
|
||||
requests = append(requests, request)
|
||||
}
|
||||
|
||||
return requests, nil
|
||||
}
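
// NOTE: Illustrative sketch, not part of the original change. It isolates the batching arithmetic used
// above: the span of slots [startSlot, endSlot] is split into windows of at most batchSize slots each.
// `exampleBatchWindows` is hypothetical and assumes batchSize > 0.
func exampleBatchWindows(startSlot, endSlot primitives.Slot, batchSize uint64) [][2]primitives.Slot {
	windows := make([][2]primitives.Slot, 0, uint64(endSlot-startSlot+1)/batchSize+1)
	for start := startSlot; start <= endSlot; start += primitives.Slot(batchSize) {
		end := min(start+primitives.Slot(batchSize)-1, endSlot)
		windows = append(windows, [2]primitives.Slot{start, end})
	}
	return windows
}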
|
||||
|
||||
// buildByRootRequest constructs a by root request from the given indices.
|
||||
func buildByRootRequest(indicesByRoot map[[fieldparams.RootLength]byte]map[uint64]bool) p2ptypes.DataColumnsByRootIdentifiers {
|
||||
identifiers := make(p2ptypes.DataColumnsByRootIdentifiers, 0, len(indicesByRoot))
|
||||
for root, indices := range indicesByRoot {
|
||||
identifier := ð.DataColumnsByRootIdentifier{
|
||||
BlockRoot: root[:],
|
||||
Columns: sortedSliceFromMap(indices),
|
||||
}
|
||||
identifiers = append(identifiers, identifier)
|
||||
}
|
||||
|
||||
// Sort identifiers to have a deterministic output.
|
||||
slices.SortFunc(identifiers, func(left, right *eth.DataColumnsByRootIdentifier) int {
|
||||
if cmp := bytes.Compare(left.BlockRoot, right.BlockRoot); cmp != 0 {
|
||||
return cmp
|
||||
}
|
||||
return slices.Compare(left.Columns, right.Columns)
|
||||
})
|
||||
|
||||
return identifiers
|
||||
}
|
||||
|
||||
// verifyDataColumnSidecarsByPeer verifies the received data column sidecars.
|
||||
// If at least one sidecar from a peer is invalid, the peer is downscored and
|
||||
// all its sidecars are rejected. (Sidecars from other peers are still accepted.)
|
||||
func verifyDataColumnSidecarsByPeer(
|
||||
p2p prysmP2P.P2P,
|
||||
newVerifier verification.NewDataColumnsVerifier,
|
||||
roDataColumnsByPeer map[goPeer.ID][]blocks.RODataColumn,
|
||||
) ([]blocks.VerifiedRODataColumn, error) {
|
||||
// First optimistically verify all received data columns in a single batch.
|
||||
count := 0
|
||||
for _, columns := range roDataColumnsByPeer {
|
||||
count += len(columns)
|
||||
}
|
||||
|
||||
roDataColumnSidecars := make([]blocks.RODataColumn, 0, count)
|
||||
for _, columns := range roDataColumnsByPeer {
|
||||
roDataColumnSidecars = append(roDataColumnSidecars, columns...)
|
||||
}
|
||||
|
||||
verifiedRoDataColumnSidecars, err := verifyByRootDataColumnSidecars(newVerifier, roDataColumnSidecars)
|
||||
if err == nil {
|
||||
// This is the happy path where all sidecars are verified.
|
||||
return verifiedRoDataColumnSidecars, nil
|
||||
}
|
||||
|
||||
// An error occurred during verification, which means that at least one sidecar is invalid.
// Re-verify peer by peer to identify the faulty peer(s), reject all of their sidecars, and downscore them.
|
||||
verifiedRoDataColumnSidecars = make([]blocks.VerifiedRODataColumn, 0, count)
|
||||
for peer, columns := range roDataColumnsByPeer {
|
||||
peerVerifiedRoDataColumnSidecars, err := verifyByRootDataColumnSidecars(newVerifier, columns)
|
||||
if err != nil {
|
||||
// This peer has invalid sidecars.
|
||||
log := log.WithError(err).WithField("peerID", peer)
|
||||
newScore := p2p.Peers().Scorers().BadResponsesScorer().Increment(peer)
|
||||
log.Warning("Peer returned invalid data column sidecars")
|
||||
log.WithFields(logrus.Fields{"reason": "invalidDataColumnSidecars", "newScore": newScore}).Debug("Downscore peer")
|
||||
}
|
||||
|
||||
verifiedRoDataColumnSidecars = append(verifiedRoDataColumnSidecars, peerVerifiedRoDataColumnSidecars...)
|
||||
}
|
||||
|
||||
return verifiedRoDataColumnSidecars, nil
|
||||
}
|
||||
|
||||
// verifyByRootDataColumnSidecars verifies the provided read-only data columns against the
|
||||
// requirements for data column sidecars received via the by root request.
|
||||
func verifyByRootDataColumnSidecars(newVerifier verification.NewDataColumnsVerifier, roDataColumns []blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error) {
|
||||
verifier := newVerifier(roDataColumns, verification.ByRootRequestDataColumnSidecarRequirements)
|
||||
|
||||
if err := verifier.ValidFields(); err != nil {
|
||||
return nil, errors.Wrap(err, "valid fields")
|
||||
}
|
||||
|
||||
if err := verifier.SidecarInclusionProven(); err != nil {
|
||||
return nil, errors.Wrap(err, "sidecar inclusion proven")
|
||||
}
|
||||
|
||||
if err := verifier.SidecarKzgProofVerified(); err != nil {
|
||||
return nil, errors.Wrap(err, "sidecar KZG proof verified")
|
||||
}
|
||||
|
||||
verifiedRoDataColumns, err := verifier.VerifiedRODataColumns()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "verified RO data columns - should never happen")
|
||||
}
|
||||
|
||||
return verifiedRoDataColumns, nil
|
||||
}
|
||||
|
||||
// computeIndicesByRootByPeer returns a peer->root->indices map only for the
// roots and indices given in `indicesByBlockRoot`. A peer is selected for a
// given root only if its advertised head slot is at least the block's slot.
|
||||
func computeIndicesByRootByPeer(
|
||||
p2p prysmP2P.P2P,
|
||||
slotByBlockRoot map[[fieldparams.RootLength]byte]primitives.Slot,
|
||||
indicesByBlockRoot map[[fieldparams.RootLength]byte]map[uint64]bool,
|
||||
peers map[goPeer.ID]bool,
|
||||
) (map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool, error) {
|
||||
// First, compute custody columns for all peers
|
||||
peersByIndex := make(map[uint64]map[goPeer.ID]bool)
|
||||
headSlotByPeer := make(map[goPeer.ID]primitives.Slot)
|
||||
for peer := range peers {
|
||||
// Computes the custody columns for each peer
|
||||
nodeID, err := prysmP2P.ConvertPeerIDToNodeID(peer)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "convert peer ID to node ID for peer %s", peer)
|
||||
}
|
||||
|
||||
custodyGroupCount := p2p.CustodyGroupCountFromPeer(peer)
|
||||
|
||||
dasInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "peerdas info for peer %s", peer)
|
||||
}
|
||||
|
||||
for column := range dasInfo.CustodyColumns {
|
||||
if _, exists := peersByIndex[column]; !exists {
|
||||
peersByIndex[column] = make(map[goPeer.ID]bool)
|
||||
}
|
||||
peersByIndex[column][peer] = true
|
||||
}
|
||||
|
||||
// Compute the head slot for each peer
|
||||
peerChainState, err := p2p.Peers().ChainState(peer)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "get chain state for peer %s", peer)
|
||||
}
|
||||
|
||||
if peerChainState == nil {
|
||||
return nil, errors.Errorf("chain state is nil for peer %s", peer)
|
||||
}
|
||||
|
||||
headSlotByPeer[peer] = peerChainState.HeadSlot
|
||||
}
|
||||
|
||||
// For each block root and its indices, find suitable peers
|
||||
indicesByRootByPeer := make(map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool)
|
||||
for blockRoot, indices := range indicesByBlockRoot {
|
||||
blockSlot, ok := slotByBlockRoot[blockRoot]
|
||||
if !ok {
|
||||
return nil, errors.Errorf("slot not found for block root %#x", blockRoot)
|
||||
}
|
||||
|
||||
for index := range indices {
|
||||
peers := peersByIndex[index]
|
||||
for peer := range peers {
|
||||
peerHeadSlot, ok := headSlotByPeer[peer]
|
||||
if !ok {
|
||||
return nil, errors.Errorf("head slot not found for peer %s", peer)
|
||||
}
|
||||
|
||||
if peerHeadSlot < blockSlot {
|
||||
continue
|
||||
}
|
||||
|
||||
// Build peers->root->indices map
|
||||
if _, exists := indicesByRootByPeer[peer]; !exists {
|
||||
indicesByRootByPeer[peer] = make(map[[fieldparams.RootLength]byte]map[uint64]bool)
|
||||
}
|
||||
if _, exists := indicesByRootByPeer[peer][blockRoot]; !exists {
|
||||
indicesByRootByPeer[peer][blockRoot] = make(map[uint64]bool)
|
||||
}
|
||||
indicesByRootByPeer[peer][blockRoot][index] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return indicesByRootByPeer, nil
|
||||
}
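
// NOTE: Illustrative sketch, not part of the original change. It mirrors the calls used above to
// derive the custody columns of a single peer from its node ID and advertised custody group count.
// `examplePeerCustodyColumns` is hypothetical.
func examplePeerCustodyColumns(p2p prysmP2P.P2P, peerID goPeer.ID) ([]uint64, error) {
	nodeID, err := prysmP2P.ConvertPeerIDToNodeID(peerID)
	if err != nil {
		return nil, errors.Wrapf(err, "convert peer ID to node ID for peer %s", peerID)
	}

	custodyGroupCount := p2p.CustodyGroupCountFromPeer(peerID)
	dasInfo, _, err := peerdas.Info(nodeID, custodyGroupCount)
	if err != nil {
		return nil, errors.Wrapf(err, "peerdas info for peer %s", peerID)
	}

	columns := make([]uint64, 0, len(dasInfo.CustodyColumns))
	for column := range dasInfo.CustodyColumns {
		columns = append(columns, column)
	}
	slices.Sort(columns)

	return columns, nil
}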
|
||||
|
||||
// randomPeer selects a random peer. If no peer has enough bandwidth, it waits and retries.
// Returns the selected peer ID and any error.
|
||||
func randomPeer(
|
||||
ctx context.Context,
|
||||
randomSource *rand.Rand,
|
||||
rateLimiter *leakybucket.Collector,
|
||||
count int,
|
||||
indicesByRootByPeer map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool,
|
||||
) (goPeer.ID, error) {
|
||||
const waitPeriod = 5 * time.Second
|
||||
|
||||
peerCount := len(indicesByRootByPeer)
|
||||
if peerCount == 0 {
|
||||
return "", errors.New("no peers available")
|
||||
}
|
||||
|
||||
for ctx.Err() == nil {
|
||||
nonRateLimitedPeers := make([]goPeer.ID, 0, len(indicesByRootByPeer))
|
||||
for peer := range indicesByRootByPeer {
|
||||
if rateLimiter == nil || rateLimiter.Remaining(peer.String()) >= int64(count) {
|
||||
nonRateLimitedPeers = append(nonRateLimitedPeers, peer)
|
||||
}
|
||||
}
|
||||
|
||||
slices.Sort(nonRateLimitedPeers)
|
||||
|
||||
if len(nonRateLimitedPeers) == 0 {
|
||||
log.WithFields(logrus.Fields{
|
||||
"peerCount": peerCount,
|
||||
"delay": waitPeriod,
|
||||
}).Debug("Waiting for a peer with enough bandwidth for data column sidecars")
|
||||
time.Sleep(waitPeriod)
|
||||
continue
|
||||
}
|
||||
|
||||
randomIndex := randomSource.Intn(len(nonRateLimitedPeers))
|
||||
return nonRateLimitedPeers[randomIndex], nil
|
||||
}
|
||||
|
||||
return "", ctx.Err()
|
||||
}
|
||||
|
||||
// copyIndicesByRootByPeer creates a deep copy of the given nested map.
|
||||
// Returns a new map with the same structure and contents.
|
||||
func copyIndicesByRootByPeer(original map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool) map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool {
|
||||
copied := make(map[goPeer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool, len(original))
|
||||
for peer, indicesByRoot := range original {
|
||||
copied[peer] = copyIndicesByRoot(indicesByRoot)
|
||||
}
|
||||
|
||||
return copied
|
||||
}
|
||||
|
||||
// copyIndicesByRoot creates a deep copy of the given nested map.
|
||||
// Returns a new map with the same structure and contents.
|
||||
func copyIndicesByRoot(original map[[fieldparams.RootLength]byte]map[uint64]bool) map[[fieldparams.RootLength]byte]map[uint64]bool {
|
||||
copied := make(map[[fieldparams.RootLength]byte]map[uint64]bool, len(original))
|
||||
for root, indexMap := range original {
|
||||
copied[root] = make(map[uint64]bool, len(indexMap))
|
||||
for index, value := range indexMap {
|
||||
copied[root][index] = value
|
||||
}
|
||||
}
|
||||
return copied
|
||||
}
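
// NOTE: Illustrative sketch, not part of the original change. It shows why a deep copy is required:
// selectPeers deletes indices from the copied map while iterating, and a shallow copy would leak those
// deletions back into the caller's map. `exampleDeepCopySemantics` is hypothetical.
func exampleDeepCopySemantics() bool {
	original := map[[fieldparams.RootLength]byte]map[uint64]bool{
		{1}: {12: true, 13: true},
	}

	copied := copyIndicesByRoot(original)
	delete(copied[[fieldparams.RootLength]byte{1}], 12)

	// The original map is untouched: index 12 is still present.
	return original[[fieldparams.RootLength]byte{1}][12]
}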
|
||||
|
||||
// compareIndices compares two map[uint64]bool and returns true if they are equal.
|
||||
func compareIndices(left, right map[uint64]bool) bool {
|
||||
if len(left) != len(right) {
|
||||
return false
|
||||
}
|
||||
|
||||
for key, leftValue := range left {
|
||||
rightValue, exists := right[key]
|
||||
if !exists || leftValue != rightValue {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// sortedSliceFromMap converts a map[uint64]bool to a sorted slice of keys.
|
||||
func sortedSliceFromMap(m map[uint64]bool) []uint64 {
|
||||
result := make([]uint64, 0, len(m))
|
||||
for k := range m {
|
||||
result = append(result, k)
|
||||
}
|
||||
|
||||
slices.Sort(result)
|
||||
return result
|
||||
}
|
||||
|
||||
// computeSlotByBlockRoot maps each block root to its corresponding slot.
|
||||
func computeSlotByBlockRoot(roBlocks []blocks.ROBlock) map[[fieldparams.RootLength]byte]primitives.Slot {
|
||||
slotByBlockRoot := make(map[[fieldparams.RootLength]byte]primitives.Slot, len(roBlocks))
|
||||
for _, roBlock := range roBlocks {
|
||||
slotByBlockRoot[roBlock.Root()] = roBlock.Block().Slot()
|
||||
}
|
||||
return slotByBlockRoot
|
||||
}
|
||||
|
||||
// computeTotalCount calculates the total count of indices across all roots.
|
||||
func computeTotalCount(input map[[fieldparams.RootLength]byte]map[uint64]bool) int {
|
||||
totalCount := 0
|
||||
for _, indices := range input {
|
||||
totalCount += len(indices)
|
||||
}
|
||||
return totalCount
|
||||
}
|
||||
beacon-chain/sync/data_column_sidecars_test.go (new file, 998 lines)
@@ -0,0 +1,998 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
testp2p "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
p2ptypes "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
leakybucket "github.com/OffchainLabs/prysm/v6/container/leaky-bucket"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
"github.com/libp2p/go-libp2p"
|
||||
"github.com/libp2p/go-libp2p/core/crypto"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
)
|
||||
|
||||
func TestFetchDataColumnSidecars(t *testing.T) {
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
// Slot 1: All needed sidecars are available in storage.
// Slot 2: No commitments.
// Slot 3: All sidecars are saved except the needed ones.
// Slot 4: Some sidecars are in storage, the others have to be retrieved from peers.
// Slot 5: Some sidecars are in storage, the others have to be retrieved from peers, but peers do not deliver all requested sidecars.
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.FuluForkEpoch = 0
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Start the trusted setup.
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
storage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
|
||||
ctxMap, err := ContextByteVersionsForValRoot(params.BeaconConfig().GenesisValidatorsRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
const blobCount = 3
|
||||
indices := map[uint64]bool{31: true, 81: true, 106: true}
|
||||
|
||||
// Block 1
|
||||
block1, _, verifiedSidecars1 := util.GenerateTestFuluBlockWithSidecars(t, blobCount, util.WithSlot(1))
|
||||
root1 := block1.Root()
|
||||
|
||||
toStore1 := make([]blocks.VerifiedRODataColumn, 0, len(indices))
|
||||
for index := range indices {
|
||||
sidecar := verifiedSidecars1[index]
|
||||
toStore1 = append(toStore1, sidecar)
|
||||
}
|
||||
|
||||
err = storage.Save(toStore1)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Block 2
|
||||
block2, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 0, util.WithSlot(2))
|
||||
|
||||
// Block 3
|
||||
block3, _, verifiedSidecars3 := util.GenerateTestFuluBlockWithSidecars(t, blobCount, util.WithSlot(3))
|
||||
root3 := block3.Root()
|
||||
|
||||
toStore3 := make([]blocks.VerifiedRODataColumn, 0, numberOfColumns-uint64(len(indices)))
|
||||
for i := range numberOfColumns {
|
||||
if !indices[i] {
|
||||
sidecar := verifiedSidecars3[i]
|
||||
toStore3 = append(toStore3, sidecar)
|
||||
}
|
||||
}
|
||||
|
||||
err = storage.Save(toStore3)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Block 4
|
||||
block4, _, verifiedSidecars4 := util.GenerateTestFuluBlockWithSidecars(t, blobCount, util.WithSlot(4))
|
||||
root4 := block4.Root()
|
||||
toStore4 := []blocks.VerifiedRODataColumn{verifiedSidecars4[106]}
|
||||
|
||||
err = storage.Save(toStore4)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Block 5
|
||||
minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
|
||||
block5, _, verifiedSidecars5 := util.GenerateTestFuluBlockWithSidecars(t, blobCount, util.WithSlot(5))
|
||||
root5 := block5.Root()
|
||||
|
||||
toStoreCount := minimumColumnsCountToReconstruct - 1
|
||||
toStore5 := make([]blocks.VerifiedRODataColumn, 0, toStoreCount)
|
||||
|
||||
for i := uint64(0); uint64(len(toStore5)) < toStoreCount; i++ {
|
||||
sidecar := verifiedSidecars5[minimumColumnsCountToReconstruct+i]
|
||||
if sidecar.Index == 81 {
|
||||
continue
|
||||
}
|
||||
|
||||
toStore5 = append(toStore5, sidecar)
|
||||
}
|
||||
|
||||
err = storage.Save(toStore5)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Custody columns with this private key and 4-cgc: 31, 81, 97, 105
|
||||
privateKeyBytes := [32]byte{1}
|
||||
privateKey, err := crypto.UnmarshalSecp256k1PrivateKey(privateKeyBytes[:])
|
||||
require.NoError(t, err)
|
||||
|
||||
// Peers
|
||||
protocol := fmt.Sprintf("%s/ssz_snappy", p2p.RPCDataColumnSidecarsByRangeTopicV1)
|
||||
|
||||
p2p, other := testp2p.NewTestP2P(t), testp2p.NewTestP2P(t, libp2p.Identity(privateKey))
|
||||
p2p.Peers().SetConnectionState(other.PeerID(), peers.Connected)
|
||||
p2p.Connect(other)
|
||||
|
||||
p2p.Peers().SetChainState(other.PeerID(), ðpb.StatusV2{
|
||||
HeadSlot: 5,
|
||||
})
|
||||
|
||||
expectedRequest := ðpb.DataColumnSidecarsByRangeRequest{
|
||||
StartSlot: 4,
|
||||
Count: 2,
|
||||
Columns: []uint64{31, 81},
|
||||
}
|
||||
|
||||
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
|
||||
|
||||
gs := startup.NewClockSynchronizer()
|
||||
err = gs.SetClock(startup.NewClock(time.Unix(4113849600, 0), [fieldparams.RootLength]byte{}))
|
||||
require.NoError(t, err)
|
||||
|
||||
waiter := verification.NewInitializerWaiter(gs, nil, nil)
|
||||
initializer, err := waiter.WaitForInitializer(t.Context())
|
||||
require.NoError(t, err)
|
||||
|
||||
newDataColumnsVerifier := newDataColumnsVerifierFromInitializer(initializer)
|
||||
|
||||
other.SetStreamHandler(protocol, func(stream network.Stream) {
|
||||
actualRequest := new(ethpb.DataColumnSidecarsByRangeRequest)
|
||||
err := other.Encoding().DecodeWithMaxLength(stream, actualRequest)
|
||||
assert.NoError(t, err)
|
||||
assert.DeepEqual(t, expectedRequest, actualRequest)
|
||||
|
||||
err = WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), verifiedSidecars4[31].DataColumnSidecar)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), verifiedSidecars4[81].DataColumnSidecar)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), verifiedSidecars5[81].DataColumnSidecar)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = stream.CloseWrite()
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
params := DataColumnSidecarsParams{
|
||||
Ctx: t.Context(),
|
||||
Tor: clock,
|
||||
P2P: p2p,
|
||||
RateLimiter: leakybucket.NewCollector(1., 10, time.Second, false /* deleteEmptyBuckets */),
|
||||
CtxMap: ctxMap,
|
||||
Storage: storage,
|
||||
NewVerifier: newDataColumnsVerifier,
|
||||
}
|
||||
|
||||
expected := map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn{
|
||||
root1: {verifiedSidecars1[31], verifiedSidecars1[81], verifiedSidecars1[106]},
|
||||
// no root2 (no commitments in this block)
|
||||
root3: {verifiedSidecars3[31], verifiedSidecars3[81], verifiedSidecars3[106]},
|
||||
root4: {verifiedSidecars4[31], verifiedSidecars4[81], verifiedSidecars4[106]},
|
||||
root5: {verifiedSidecars5[31], verifiedSidecars5[81], verifiedSidecars5[106]},
|
||||
}
|
||||
|
||||
blocks := []blocks.ROBlock{block1, block2, block3, block4, block5}
|
||||
actual, err := FetchDataColumnSidecars(params, blocks, indices)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, len(expected), len(actual))
|
||||
for root := range expected {
|
||||
require.Equal(t, len(expected[root]), len(actual[root]))
|
||||
for i := range expected[root] {
|
||||
require.DeepSSZEqual(t, expected[root][i], actual[root][i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCategorizeIndices(t *testing.T) {
|
||||
storage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
|
||||
_, verifiedRoSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{
|
||||
{Slot: 1, Index: 12, Column: [][]byte{{1}, {2}, {3}}},
|
||||
{Slot: 1, Index: 14, Column: [][]byte{{1}, {2}, {3}}},
|
||||
})
|
||||
|
||||
err := storage.Save(verifiedRoSidecars)
|
||||
require.NoError(t, err)
|
||||
|
||||
expectedToQuery := map[uint64]bool{13: true}
|
||||
expectedStored := map[uint64]bool{12: true, 14: true}
|
||||
|
||||
actualToQuery, actualStored := categorizeIndices(storage, verifiedRoSidecars[0].BlockRoot(), []uint64{12, 13, 14})
|
||||
|
||||
require.Equal(t, len(expectedToQuery), len(actualToQuery))
|
||||
require.Equal(t, len(expectedStored), len(actualStored))
|
||||
|
||||
for index := range expectedToQuery {
|
||||
require.Equal(t, true, actualToQuery[index])
|
||||
}
|
||||
for index := range expectedStored {
|
||||
require.Equal(t, true, actualStored[index])
|
||||
}
|
||||
}
|
||||
|
||||
func TestSelectPeers(t *testing.T) {
|
||||
const (
|
||||
count = 3
|
||||
seed = 42
|
||||
)
|
||||
|
||||
params := DataColumnSidecarsParams{
|
||||
Ctx: t.Context(),
|
||||
RateLimiter: leakybucket.NewCollector(1., 10, time.Second, false /* deleteEmptyBuckets */),
|
||||
}
|
||||
|
||||
randomSource := rand.New(rand.NewSource(seed))
|
||||
|
||||
indicesByRootByPeer := map[peer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||
"peer1": {
|
||||
{1}: {12: true, 13: true},
|
||||
{2}: {13: true, 14: true, 15: true},
|
||||
{3}: {14: true, 15: true},
|
||||
},
|
||||
"peer2": {
|
||||
{1}: {13: true, 14: true},
|
||||
{2}: {13: true, 14: true, 15: true},
|
||||
{3}: {14: true, 16: true},
|
||||
},
|
||||
}
|
||||
|
||||
expected := map[peer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||
"peer1": {
|
||||
{1}: {12: true},
|
||||
{3}: {15: true},
|
||||
},
|
||||
"peer2": {
|
||||
{1}: {13: true, 14: true},
|
||||
{2}: {13: true, 14: true, 15: true},
|
||||
{3}: {14: true, 16: true},
|
||||
},
|
||||
}
|
||||
|
||||
actual, err := selectPeers(params, randomSource, count, indicesByRootByPeer)
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(expected), len(actual))
|
||||
for peerID := range expected {
|
||||
require.Equal(t, len(expected[peerID]), len(actual[peerID]))
|
||||
for root := range expected[peerID] {
|
||||
require.Equal(t, len(expected[peerID][root]), len(actual[peerID][root]))
|
||||
for indices := range expected[peerID][root] {
|
||||
require.Equal(t, expected[peerID][root][indices], actual[peerID][root][indices])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateResults(t *testing.T) {
|
||||
_, verifiedSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{
|
||||
{Slot: 1, Index: 12, Column: [][]byte{{1}, {2}, {3}}},
|
||||
{Slot: 1, Index: 13, Column: [][]byte{{1}, {2}, {3}}},
|
||||
{Slot: 2, Index: 13, Column: [][]byte{{1}, {2}, {3}}},
|
||||
{Slot: 2, Index: 14, Column: [][]byte{{1}, {2}, {3}}},
|
||||
})
|
||||
|
||||
missingIndicesByRoot := map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||
verifiedSidecars[0].BlockRoot(): {12: true, 13: true},
|
||||
verifiedSidecars[2].BlockRoot(): {13: true, 14: true, 15: true},
|
||||
}
|
||||
|
||||
expectedMissingIndicesByRoot := map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||
verifiedSidecars[2].BlockRoot(): {15: true},
|
||||
}
|
||||
|
||||
expectedVerifiedSidecarsByRoot := map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn{
|
||||
verifiedSidecars[0].BlockRoot(): {verifiedSidecars[0], verifiedSidecars[1]},
|
||||
verifiedSidecars[2].BlockRoot(): {verifiedSidecars[2], verifiedSidecars[3]},
|
||||
}
|
||||
|
||||
actualVerifiedSidecarsByRoot := updateResults(verifiedSidecars, missingIndicesByRoot)
|
||||
require.DeepEqual(t, expectedMissingIndicesByRoot, missingIndicesByRoot)
|
||||
require.DeepEqual(t, expectedVerifiedSidecarsByRoot, actualVerifiedSidecarsByRoot)
|
||||
}
|
||||
|
||||
func TestFetchDataColumnSidecarsFromPeers(t *testing.T) {
|
||||
const count = 4
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.FuluForkEpoch = 0
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
clock := startup.NewClock(time.Now(), [fieldparams.RootLength]byte{})
|
||||
ctxMap, err := ContextByteVersionsForValRoot(params.BeaconConfig().GenesisValidatorsRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
kzgCommitmentsInclusionProof := make([][]byte, 0, count)
|
||||
for range count {
|
||||
kzgCommitmentsInclusionProof = append(kzgCommitmentsInclusionProof, make([]byte, 32))
|
||||
}
|
||||
|
||||
expectedResponseSidecarPb := ðpb.DataColumnSidecar{
|
||||
Index: 2,
|
||||
SignedBlockHeader: ðpb.SignedBeaconBlockHeader{
|
||||
Header: ðpb.BeaconBlockHeader{
|
||||
Slot: 1,
|
||||
ParentRoot: make([]byte, fieldparams.RootLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
BodyRoot: make([]byte, fieldparams.RootLength),
|
||||
},
|
||||
Signature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
},
|
||||
KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
|
||||
}
|
||||
|
||||
expectedResponseSidecar, err := blocks.NewRODataColumn(expectedResponseSidecarPb)
|
||||
require.NoError(t, err)
|
||||
|
||||
slotByRoot := map[[fieldparams.RootLength]byte]primitives.Slot{
|
||||
{1}: 1,
|
||||
{3}: 3,
|
||||
{4}: 4,
|
||||
{7}: 7,
|
||||
}
|
||||
|
||||
slotsWithCommitments := map[primitives.Slot]bool{
|
||||
1: true,
|
||||
3: true,
|
||||
4: true,
|
||||
7: true,
|
||||
}
|
||||
|
||||
expectedRequest := ðpb.DataColumnSidecarsByRangeRequest{
|
||||
StartSlot: 1,
|
||||
Count: 7,
|
||||
Columns: []uint64{1, 2},
|
||||
}
|
||||
|
||||
protocol := fmt.Sprintf("%s/ssz_snappy", p2p.RPCDataColumnSidecarsByRangeTopicV1)
|
||||
p2p, other := testp2p.NewTestP2P(t), testp2p.NewTestP2P(t)
|
||||
p2p.Connect(other)
|
||||
|
||||
other.SetStreamHandler(protocol, func(stream network.Stream) {
|
||||
receivedRequest := new(ethpb.DataColumnSidecarsByRangeRequest)
|
||||
err := other.Encoding().DecodeWithMaxLength(stream, receivedRequest)
|
||||
assert.NoError(t, err)
|
||||
assert.DeepEqual(t, expectedRequest, receivedRequest)
|
||||
|
||||
err = WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), expectedResponseSidecarPb)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = stream.CloseWrite()
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
indicesByRootByPeer := map[peer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||
other.PeerID(): {
|
||||
{1}: {1: true, 2: true},
|
||||
{3}: {1: true, 2: true},
|
||||
{4}: {1: true, 2: true},
|
||||
{7}: {1: true, 2: true},
|
||||
},
|
||||
}
|
||||
|
||||
params := DataColumnSidecarsParams{
|
||||
Ctx: t.Context(),
|
||||
Tor: clock,
|
||||
P2P: p2p,
|
||||
CtxMap: ctxMap,
|
||||
RateLimiter: leakybucket.NewCollector(1., 1, time.Second, false /* deleteEmptyBuckets */),
|
||||
}
|
||||
|
||||
expectedResponse := map[peer.ID][]blocks.RODataColumn{
|
||||
other.PeerID(): {expectedResponseSidecar},
|
||||
}
|
||||
|
||||
actualResponse := fetchDataColumnSidecarsFromPeers(params, slotByRoot, slotsWithCommitments, indicesByRootByPeer)
|
||||
require.Equal(t, len(expectedResponse), len(actualResponse))
|
||||
|
||||
for peerID := range expectedResponse {
|
||||
require.DeepSSZEqual(t, expectedResponse[peerID], actualResponse[peerID])
|
||||
}
|
||||
}
|
||||
|
||||
func TestSendDataColumnSidecarsRequest(t *testing.T) {
|
||||
const count = 4
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.FuluForkEpoch = 0
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
kzgCommitmentsInclusionProof := make([][]byte, 0, count)
|
||||
for range count {
|
||||
kzgCommitmentsInclusionProof = append(kzgCommitmentsInclusionProof, make([]byte, 32))
|
||||
}
|
||||
|
||||
expectedResponsePb := ðpb.DataColumnSidecar{
|
||||
Index: 2,
|
||||
SignedBlockHeader: ðpb.SignedBeaconBlockHeader{
|
||||
Header: ðpb.BeaconBlockHeader{
|
||||
Slot: 1,
|
||||
ParentRoot: make([]byte, fieldparams.RootLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
BodyRoot: make([]byte, fieldparams.RootLength),
|
||||
},
|
||||
Signature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
},
|
||||
KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
|
||||
}
|
||||
|
||||
expectedResponse, err := blocks.NewRODataColumn(expectedResponsePb)
|
||||
require.NoError(t, err)
|
||||
|
||||
clock := startup.NewClock(time.Now(), params.BeaconConfig().GenesisValidatorsRoot)
|
||||
ctxMap, err := ContextByteVersionsForValRoot(params.BeaconConfig().GenesisValidatorsRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("contiguous", func(t *testing.T) {
|
||||
indicesByRoot := map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||
{1}: {1: true, 2: true},
|
||||
{3}: {1: true, 2: true},
|
||||
{4}: {1: true, 2: true},
|
||||
{7}: {1: true, 2: true},
|
||||
}
|
||||
|
||||
slotByRoot := map[[fieldparams.RootLength]byte]primitives.Slot{
|
||||
{1}: 1,
|
||||
{3}: 3,
|
||||
{4}: 4,
|
||||
{7}: 7,
|
||||
}
|
||||
|
||||
slotsWithCommitments := map[primitives.Slot]bool{
|
||||
1: true,
|
||||
3: true,
|
||||
4: true,
|
||||
7: true,
|
||||
}
|
||||
|
||||
expectedRequest := ðpb.DataColumnSidecarsByRangeRequest{
|
||||
StartSlot: 1,
|
||||
Count: 7,
|
||||
Columns: []uint64{1, 2},
|
||||
}
|
||||
|
||||
protocol := fmt.Sprintf("%s/ssz_snappy", p2p.RPCDataColumnSidecarsByRangeTopicV1)
|
||||
p2p, other := testp2p.NewTestP2P(t), testp2p.NewTestP2P(t)
|
||||
p2p.Connect(other)
|
||||
|
||||
other.SetStreamHandler(protocol, func(stream network.Stream) {
|
||||
receivedRequest := new(ethpb.DataColumnSidecarsByRangeRequest)
|
||||
err := other.Encoding().DecodeWithMaxLength(stream, receivedRequest)
|
||||
assert.NoError(t, err)
|
||||
assert.DeepEqual(t, expectedRequest, receivedRequest)
|
||||
|
||||
err = WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), expectedResponsePb)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = stream.CloseWrite()
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
params := DataColumnSidecarsParams{
|
||||
Ctx: t.Context(),
|
||||
Tor: clock,
|
||||
P2P: p2p,
|
||||
CtxMap: ctxMap,
|
||||
RateLimiter: leakybucket.NewCollector(1., 1, time.Second, false /* deleteEmptyBuckets */),
|
||||
}
|
||||
|
||||
actualResponse, err := sendDataColumnSidecarsRequest(params, slotByRoot, slotsWithCommitments, other.PeerID(), indicesByRoot)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, expectedResponse, actualResponse[0])
|
||||
})
|
||||
|
||||
t.Run("non contiguous", func(t *testing.T) {
|
||||
indicesByRoot := map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||
expectedResponse.BlockRoot(): {1: true, 2: true},
|
||||
{4}: {1: true, 2: true},
|
||||
{7}: {1: true, 2: true},
|
||||
}
|
||||
|
||||
slotByRoot := map[[fieldparams.RootLength]byte]primitives.Slot{
|
||||
expectedResponse.BlockRoot(): 1,
|
||||
{4}: 4,
|
||||
{7}: 7,
|
||||
}
|
||||
|
||||
slotsWithCommitments := map[primitives.Slot]bool{
|
||||
1: true,
|
||||
3: true,
|
||||
4: true,
|
||||
7: true,
|
||||
}
|
||||
|
||||
roots := [...][fieldparams.RootLength]byte{expectedResponse.BlockRoot(), {4}, {7}}
|
||||
|
||||
expectedRequest := &p2ptypes.DataColumnsByRootIdentifiers{
|
||||
{
|
||||
BlockRoot: roots[1][:],
|
||||
Columns: []uint64{1, 2},
|
||||
},
|
||||
{
|
||||
BlockRoot: roots[2][:],
|
||||
Columns: []uint64{1, 2},
|
||||
},
|
||||
{
|
||||
BlockRoot: roots[0][:],
|
||||
Columns: []uint64{1, 2},
|
||||
},
|
||||
}
|
||||
|
||||
protocol := fmt.Sprintf("%s/ssz_snappy", p2p.RPCDataColumnSidecarsByRootTopicV1)
|
||||
p2p, other := testp2p.NewTestP2P(t), testp2p.NewTestP2P(t)
|
||||
p2p.Connect(other)
|
||||
|
||||
other.SetStreamHandler(protocol, func(stream network.Stream) {
|
||||
receivedRequest := new(p2ptypes.DataColumnsByRootIdentifiers)
|
||||
err := other.Encoding().DecodeWithMaxLength(stream, receivedRequest)
|
||||
assert.NoError(t, err)
|
||||
assert.DeepSSZEqual(t, *expectedRequest, *receivedRequest)
|
||||
|
||||
err = WriteDataColumnSidecarChunk(stream, clock, other.Encoding(), expectedResponsePb)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = stream.CloseWrite()
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
|
||||
params := DataColumnSidecarsParams{
|
||||
Ctx: t.Context(),
|
||||
Tor: clock,
|
||||
P2P: p2p,
|
||||
CtxMap: ctxMap,
|
||||
RateLimiter: leakybucket.NewCollector(1., 1, time.Second, false /* deleteEmptyBuckets */),
|
||||
}
|
||||
|
||||
actualResponse, err := sendDataColumnSidecarsRequest(params, slotByRoot, slotsWithCommitments, other.PeerID(), indicesByRoot)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, expectedResponse, actualResponse[0])
|
||||
})
|
||||
}
|
||||
|
||||
func TestBuildByRangeRequests(t *testing.T) {
|
||||
const nullBatchSize = 0
|
||||
|
||||
t.Run("empty", func(t *testing.T) {
|
||||
actual, err := buildByRangeRequests(nil, nil, nil, nullBatchSize)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, 0, len(actual))
|
||||
})
|
||||
|
||||
t.Run("missing Root", func(t *testing.T) {
|
||||
indicesByRoot := map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||
{1}: {1: true, 2: true},
|
||||
}
|
||||
|
||||
_, err := buildByRangeRequests(nil, nil, indicesByRoot, nullBatchSize)
|
||||
require.NotNil(t, err)
|
||||
})
|
||||
|
||||
t.Run("indices differ", func(t *testing.T) {
|
||||
indicesByRoot := map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||
{1}: {1: true, 2: true},
|
||||
{2}: {1: true, 2: true},
|
||||
{3}: {2: true, 3: true},
|
||||
}
|
||||
|
||||
slotByRoot := map[[fieldparams.RootLength]byte]primitives.Slot{
|
||||
{1}: 1,
|
||||
{2}: 2,
|
||||
{3}: 3,
|
||||
}
|
||||
|
||||
actual, err := buildByRangeRequests(slotByRoot, nil, indicesByRoot, nullBatchSize)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(actual))
|
||||
})
|
||||
|
||||
t.Run("slots non contiguous", func(t *testing.T) {
|
||||
indicesByRoot := map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||
{1}: {1: true, 2: true},
|
||||
{2}: {1: true, 2: true},
|
||||
}
|
||||
|
||||
slotByRoot := map[[fieldparams.RootLength]byte]primitives.Slot{
|
||||
{1}: 1,
|
||||
{2}: 3,
|
||||
}
|
||||
|
||||
slotsWithCommitments := map[primitives.Slot]bool{
|
||||
1: true,
|
||||
2: true,
|
||||
3: true,
|
||||
}
|
||||
|
||||
actual, err := buildByRangeRequests(slotByRoot, slotsWithCommitments, indicesByRoot, nullBatchSize)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(actual))
|
||||
})
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
const batchSize = 3
|
||||
|
||||
indicesByRoot := map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||
{1}: {1: true, 2: true},
|
||||
{3}: {1: true, 2: true},
|
||||
{4}: {1: true, 2: true},
|
||||
{7}: {1: true, 2: true},
|
||||
}
|
||||
|
||||
slotByRoot := map[[fieldparams.RootLength]byte]primitives.Slot{
|
||||
{1}: 1,
|
||||
{3}: 3,
|
||||
{4}: 4,
|
||||
{7}: 7,
|
||||
}
|
||||
|
||||
slotsWithCommitments := map[primitives.Slot]bool{
|
||||
1: true,
|
||||
3: true,
|
||||
4: true,
|
||||
7: true,
|
||||
}
|
||||
|
||||
expected := []*ethpb.DataColumnSidecarsByRangeRequest{
|
||||
{
|
||||
StartSlot: 1,
|
||||
Count: 3,
|
||||
Columns: []uint64{1, 2},
|
||||
},
|
||||
{
|
||||
StartSlot: 4,
|
||||
Count: 3,
|
||||
Columns: []uint64{1, 2},
|
||||
},
|
||||
{
|
||||
StartSlot: 7,
|
||||
Count: 1,
|
||||
Columns: []uint64{1, 2},
|
||||
},
|
||||
}
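// With batchSize = 3, the requested slots 1, 3, 4 and 7 are split into the ranges
// [1..3], [4..6] and [7..7], each asking for the same column set {1, 2}.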
|
||||
|
||||
actual, err := buildByRangeRequests(slotByRoot, slotsWithCommitments, indicesByRoot, batchSize)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, expected, actual)
|
||||
})
|
||||
}
|
||||
|
||||
func TestBuildByRootRequest(t *testing.T) {
|
||||
root1 := [fieldparams.RootLength]byte{1}
|
||||
root2 := [fieldparams.RootLength]byte{2}
|
||||
|
||||
input := map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||
root1: {1: true, 2: true},
|
||||
root2: {3: true},
|
||||
}
|
||||
|
||||
expected := p2ptypes.DataColumnsByRootIdentifiers{
|
||||
{
|
||||
BlockRoot: root1[:],
|
||||
Columns: []uint64{1, 2},
|
||||
},
|
||||
{
|
||||
BlockRoot: root2[:],
|
||||
Columns: []uint64{3},
|
||||
},
|
||||
}
|
||||
|
||||
actual := buildByRootRequest(input)
|
||||
require.DeepSSZEqual(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestVerifyDataColumnSidecarsByPeer(t *testing.T) {
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
const (
|
||||
start, stop = 0, 15
|
||||
blobCount = 1
|
||||
)
|
||||
|
||||
p2p := testp2p.NewTestP2P(t)
|
||||
|
||||
// Setup test data and expectations
|
||||
_, roDataColumnSidecars, expected := util.GenerateTestFuluBlockWithSidecars(t, blobCount)
|
||||
|
||||
roDataColumnsByPeer := map[peer.ID][]blocks.RODataColumn{
|
||||
"peer1": roDataColumnSidecars[start:5],
|
||||
"peer2": roDataColumnSidecars[5:9],
|
||||
"peer3": roDataColumnSidecars[9:stop],
|
||||
}
|
||||
gs := startup.NewClockSynchronizer()
|
||||
err := gs.SetClock(startup.NewClock(time.Unix(4113849600, 0), [fieldparams.RootLength]byte{}))
|
||||
require.NoError(t, err)
|
||||
|
||||
waiter := verification.NewInitializerWaiter(gs, nil, nil)
|
||||
initializer, err := waiter.WaitForInitializer(t.Context())
|
||||
require.NoError(t, err)
|
||||
|
||||
newDataColumnsVerifier := newDataColumnsVerifierFromInitializer(initializer)
|
||||
actual, err := verifyDataColumnSidecarsByPeer(p2p, newDataColumnsVerifier, roDataColumnsByPeer)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, stop-start, len(actual))
|
||||
|
||||
for i := range actual {
|
||||
actualSidecar := actual[i]
|
||||
index := actualSidecar.Index
|
||||
expectedSidecar := expected[index]
|
||||
require.DeepEqual(t, expectedSidecar, actualSidecar)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("one rogue peer", func(t *testing.T) {
|
||||
const (
|
||||
start, middle, stop = 0, 5, 15
|
||||
blobCount = 1
|
||||
)
|
||||
|
||||
p2p := testp2p.NewTestP2P(t)
|
||||
|
||||
// Setup test data and expectations
|
||||
_, roDataColumnSidecars, expected := util.GenerateTestFuluBlockWithSidecars(t, blobCount)
|
||||
|
||||
// Modify one sidecar to ensure proof verification fails.
|
||||
if roDataColumnSidecars[middle].KzgProofs[0][0] == 0 {
|
||||
roDataColumnSidecars[middle].KzgProofs[0][0]++
|
||||
} else {
|
||||
roDataColumnSidecars[middle].KzgProofs[0][0]--
|
||||
}
|
||||
|
||||
roDataColumnsByPeer := map[peer.ID][]blocks.RODataColumn{
|
||||
"peer1": roDataColumnSidecars[start:middle],
|
||||
"peer2": roDataColumnSidecars[5:middle],
|
||||
"peer3": roDataColumnSidecars[middle:stop],
|
||||
}
|
||||
gs := startup.NewClockSynchronizer()
|
||||
err := gs.SetClock(startup.NewClock(time.Unix(4113849600, 0), [fieldparams.RootLength]byte{}))
|
||||
require.NoError(t, err)
|
||||
|
||||
waiter := verification.NewInitializerWaiter(gs, nil, nil)
|
||||
initializer, err := waiter.WaitForInitializer(t.Context())
|
||||
require.NoError(t, err)
|
||||
|
||||
newDataColumnsVerifier := newDataColumnsVerifierFromInitializer(initializer)
|
||||
actual, err := verifyDataColumnSidecarsByPeer(p2p, newDataColumnsVerifier, roDataColumnsByPeer)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, middle-start, len(actual))
|
||||
|
||||
for i := range actual {
|
||||
actualSidecar := actual[i]
|
||||
index := actualSidecar.Index
|
||||
expectedSidecar := expected[index]
|
||||
require.DeepEqual(t, expectedSidecar, actualSidecar)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestComputeIndicesByRootByPeer(t *testing.T) {
|
||||
peerIdStrs := []string{
|
||||
"16Uiu2HAm3k5Npu6EaYWxiEvzsdLseEkjVyoVhvbxWEuyqdBgBBbq", // Custodies 89, 94, 97 & 122
|
||||
"16Uiu2HAmTwQPAwzTr6hTgBmKNecCfH6kP3Kbzxj36ZRyyQ46L6gf", // Custodies 1, 11, 37 & 86
|
||||
"16Uiu2HAmMDB5uUePTpN7737m78ehePfWPtBL9qMGdH8kCygjzNA8", // Custodies 2, 37, 38 & 68
|
||||
"16Uiu2HAmTAE5Vxf7Pgfk7eWpmCvVJdSba4C9xg4xkYuuvnVbgfFx", // Custodies 10, 29, 36 & 108
|
||||
}
|
||||
|
||||
headSlotByPeer := map[string]primitives.Slot{
|
||||
"16Uiu2HAm3k5Npu6EaYWxiEvzsdLseEkjVyoVhvbxWEuyqdBgBBbq": 89,
|
||||
"16Uiu2HAmTwQPAwzTr6hTgBmKNecCfH6kP3Kbzxj36ZRyyQ46L6gf": 10,
|
||||
"16Uiu2HAmMDB5uUePTpN7737m78ehePfWPtBL9qMGdH8kCygjzNA8": 12,
|
||||
"16Uiu2HAmTAE5Vxf7Pgfk7eWpmCvVJdSba4C9xg4xkYuuvnVbgfFx": 9,
|
||||
}
|
||||
|
||||
p2p := testp2p.NewTestP2P(t)
|
||||
peers := p2p.Peers()
|
||||
|
||||
peerIDs := make([]peer.ID, 0, len(peerIdStrs))
|
||||
for _, peerIdStr := range peerIdStrs {
|
||||
peerID, err := peer.Decode(peerIdStr)
|
||||
require.NoError(t, err)
|
||||
|
||||
peers.SetChainState(peerID, ðpb.StatusV2{
|
||||
HeadSlot: headSlotByPeer[peerIdStr],
|
||||
})
|
||||
|
||||
peerIDs = append(peerIDs, peerID)
|
||||
}
|
||||
|
||||
slotByBlockRoot := map[[fieldparams.RootLength]byte]primitives.Slot{
|
||||
[fieldparams.RootLength]byte{1}: 8,
|
||||
[fieldparams.RootLength]byte{2}: 10,
|
||||
[fieldparams.RootLength]byte{3}: 9,
|
||||
[fieldparams.RootLength]byte{4}: 50,
|
||||
}
|
||||
|
||||
indicesByBlockRoot := map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||
[fieldparams.RootLength]byte{1}: {3: true, 4: true, 5: true},
|
||||
[fieldparams.RootLength]byte{2}: {1: true, 10: true, 37: true, 80: true},
|
||||
[fieldparams.RootLength]byte{3}: {10: true, 38: true, 39: true, 40: true},
|
||||
[fieldparams.RootLength]byte{4}: {89: true, 108: true, 122: true},
|
||||
}
|
||||
|
||||
expected := map[peer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||
peerIDs[0]: {
|
||||
[fieldparams.RootLength]byte{4}: {89: true, 122: true},
|
||||
},
|
||||
peerIDs[1]: {
|
||||
[fieldparams.RootLength]byte{2}: {1: true, 37: true},
|
||||
},
|
||||
peerIDs[2]: {
|
||||
[fieldparams.RootLength]byte{2}: {37: true},
|
||||
[fieldparams.RootLength]byte{3}: {38: true},
|
||||
},
|
||||
peerIDs[3]: {
|
||||
[fieldparams.RootLength]byte{3}: {10: true},
|
||||
},
|
||||
}
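// The expected fixture reflects two filters (as inferred from the inputs above): a peer is
// asked for a (root, column) pair only if it custodies that column and its advertised head
// slot is at least the block's slot; e.g. root {4} at slot 50 is never assigned to the peer
// whose head slot is 9, even though it custodies column 108.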
|
||||
|
||||
peerIDsMap := make(map[peer.ID]bool, len(peerIDs))
|
||||
for _, id := range peerIDs {
|
||||
peerIDsMap[id] = true
|
||||
}
|
||||
|
||||
actual, err := computeIndicesByRootByPeer(p2p, slotByBlockRoot, indicesByBlockRoot, peerIDsMap)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(expected), len(actual))
|
||||
|
||||
for peer, indicesByRoot := range expected {
|
||||
require.Equal(t, len(indicesByRoot), len(actual[peer]))
|
||||
for root, indices := range indicesByRoot {
|
||||
require.Equal(t, len(indices), len(actual[peer][root]))
|
||||
for index := range indices {
|
||||
require.Equal(t, actual[peer][root][index], true)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRandomPeer(t *testing.T) {
|
||||
// Fixed seed.
|
||||
const seed = 43
|
||||
randomSource := rand.New(rand.NewSource(seed))
|
||||
|
||||
t.Run("no peers", func(t *testing.T) {
|
||||
pid, err := randomPeer(t.Context(), randomSource, leakybucket.NewCollector(4, 8, time.Second, false /* deleteEmptyBuckets */), 1, nil)
|
||||
require.NotNil(t, err)
|
||||
require.Equal(t, peer.ID(""), pid)
|
||||
})
|
||||
|
||||
t.Run("context cancelled", func(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
cancel()
|
||||
|
||||
indicesByRootByPeer := map[peer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool{peer.ID("peer1"): {}}
|
||||
pid, err := randomPeer(ctx, randomSource, leakybucket.NewCollector(4, 8, time.Second, false /* deleteEmptyBuckets */), 1, indicesByRootByPeer)
|
||||
require.NotNil(t, err)
|
||||
require.Equal(t, peer.ID(""), pid)
|
||||
})
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
const count = 1
|
||||
collector := leakybucket.NewCollector(4, 8, time.Second, false /* deleteEmptyBuckets */)
|
||||
peer1, peer2, peer3 := peer.ID("peer1"), peer.ID("peer2"), peer.ID("peer3")
|
||||
|
||||
indicesByRootByPeer := map[peer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||
peer1: {},
|
||||
peer2: {},
|
||||
peer3: {},
|
||||
}
|
||||
|
||||
pid, err := randomPeer(t.Context(), randomSource, collector, count, indicesByRootByPeer)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, peer1, pid)
|
||||
|
||||
pid, err = randomPeer(t.Context(), randomSource, collector, count, indicesByRootByPeer)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, peer2, pid)
|
||||
})
|
||||
}
|
||||
|
||||
func TestCopyIndicesByRootByPeer(t *testing.T) {
|
||||
original := map[peer.ID]map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||
peer.ID("peer1"): {
|
||||
[fieldparams.RootLength]byte{1}: {1: true, 3: true},
|
||||
[fieldparams.RootLength]byte{2}: {2: true},
|
||||
},
|
||||
peer.ID("peer2"): {
|
||||
[fieldparams.RootLength]byte{1}: {1: true},
|
||||
},
|
||||
}
|
||||
|
||||
copied := copyIndicesByRootByPeer(original)
|
||||
|
||||
require.Equal(t, len(original), len(copied))
|
||||
for peer, indicesByRoot := range original {
|
||||
require.Equal(t, len(indicesByRoot), len(copied[peer]))
|
||||
for root, indices := range indicesByRoot {
|
||||
require.Equal(t, len(indices), len(copied[peer][root]))
|
||||
for index := range indices {
|
||||
require.Equal(t, copied[peer][root][index], true)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCompareIndices(t *testing.T) {
|
||||
left := map[uint64]bool{3: true, 5: true, 7: true}
|
||||
right := map[uint64]bool{5: true}
|
||||
require.Equal(t, false, compareIndices(left, right))
|
||||
|
||||
left = map[uint64]bool{3: true, 5: true, 7: true}
|
||||
right = map[uint64]bool{3: true, 6: true, 7: true}
|
||||
require.Equal(t, false, compareIndices(left, right))
|
||||
|
||||
left = map[uint64]bool{3: true, 5: true, 7: true}
|
||||
right = map[uint64]bool{5: true, 7: true, 3: true}
|
||||
require.Equal(t, true, compareIndices(left, right))
|
||||
}
|
||||
|
||||
func TestSortedSliceFromMap(t *testing.T) {
|
||||
input := map[uint64]bool{54: true, 23: true, 35: true}
|
||||
expected := []uint64{23, 35, 54}
|
||||
actual := sortedSliceFromMap(input)
|
||||
require.DeepEqual(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestComputeSlotByBlockRoot(t *testing.T) {
|
||||
const (
|
||||
count = 3
|
||||
multiplier = 10
|
||||
)
|
||||
|
||||
roBlocks := make([]blocks.ROBlock, 0, count)
|
||||
for i := range count {
|
||||
signedBlock := util.NewBeaconBlock()
|
||||
signedBlock.Block.Slot = primitives.Slot(i).Mul(multiplier)
|
||||
roSignedBlock, err := blocks.NewSignedBeaconBlock(signedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
roBlock, err := blocks.NewROBlockWithRoot(roSignedBlock, [fieldparams.RootLength]byte{byte(i)})
|
||||
require.NoError(t, err)
|
||||
|
||||
roBlocks = append(roBlocks, roBlock)
|
||||
}
|
||||
|
||||
expected := map[[fieldparams.RootLength]byte]primitives.Slot{
|
||||
[fieldparams.RootLength]byte{0}: primitives.Slot(0),
|
||||
[fieldparams.RootLength]byte{1}: primitives.Slot(10),
|
||||
[fieldparams.RootLength]byte{2}: primitives.Slot(20),
|
||||
}
|
||||
|
||||
actual := computeSlotByBlockRoot(roBlocks)
|
||||
|
||||
require.Equal(t, len(expected), len(actual))
|
||||
for k, v := range expected {
|
||||
require.Equal(t, v, actual[k])
|
||||
}
|
||||
}
|
||||
|
||||
func TestComputeTotalCount(t *testing.T) {
|
||||
input := map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||
[fieldparams.RootLength]byte{1}: {1: true, 3: true},
|
||||
[fieldparams.RootLength]byte{2}: {2: true},
|
||||
}
|
||||
|
||||
const expected = 3
|
||||
actual := computeTotalCount(input)
|
||||
require.Equal(t, expected, actual)
|
||||
}
|
||||
@@ -44,7 +44,7 @@ func (s *Service) reconstructSaveBroadcastDataColumnSidecars(
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
// If reconstruction is not possible or if all columns are already stored, exit early.
- if storedColumnsCount < peerdas.MinimumColumnsCountToReconstruct() || storedColumnsCount == numberOfColumns {
+ if storedColumnsCount < peerdas.MinimumColumnCountToReconstruct() || storedColumnsCount == numberOfColumns {
return nil
|
||||
}
|
||||
|
||||
@@ -198,7 +198,7 @@ func (s *Service) broadcastMissingDataColumnSidecars(
|
||||
subnet := peerdas.ComputeSubnetForDataColumnSidecar(verifiedRODataColumn.Index)
|
||||
|
||||
// Broadcast the missing data column.
|
||||
- if err := s.cfg.p2p.BroadcastDataColumn(root, subnet, verifiedRODataColumn.DataColumnSidecar); err != nil {
+ if err := s.cfg.p2p.BroadcastDataColumnSidecar(root, subnet, verifiedRODataColumn.DataColumnSidecar); err != nil {
|
||||
log.WithError(err).Error("Broadcast data column")
|
||||
}
|
||||
|
||||
|
||||
@@ -30,7 +30,7 @@ func TestReconstructDataColumns(t *testing.T) {
|
||||
root, block := roBlock.Root(), roBlock.Block()
|
||||
slot, proposerIndex := block.Slot(), block.ProposerIndex()
|
||||
|
||||
- minimumCount := peerdas.MinimumColumnsCountToReconstruct()
+ minimumCount := peerdas.MinimumColumnCountToReconstruct()
|
||||
|
||||
t.Run("not enough stored sidecars", func(t *testing.T) {
|
||||
storage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
@@ -61,7 +61,7 @@ func TestReconstructDataColumns(t *testing.T) {
|
||||
const cgc = 8
|
||||
|
||||
storage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
- minimumCount := peerdas.MinimumColumnsCountToReconstruct()
+ minimumCount := peerdas.MinimumColumnCountToReconstruct()
|
||||
err := storage.Save(verifiedRoDataColumns[:minimumCount])
|
||||
require.NoError(t, err)
|
||||
|
||||
|
||||
@@ -20,6 +20,7 @@ go_library(
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/core/feed/block:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/das:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
@@ -33,6 +34,7 @@ go_library(
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
@@ -72,7 +74,9 @@ go_test(
|
||||
deps = [
|
||||
"//async/abool:go_default_library",
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/das:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
@@ -89,6 +93,7 @@ go_test(
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
@@ -104,7 +109,9 @@ go_test(
|
||||
"//time:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/crypto:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/network:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
|
||||
"@com_github_paulbellamy_ratecounter//:go_default_library",
|
||||
|
||||
@@ -3,11 +3,13 @@ package initialsync
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
@@ -15,6 +17,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
prysmsync "github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync/verify"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
|
||||
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
||||
"github.com/OffchainLabs/prysm/v6/config/features"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
@@ -34,7 +37,6 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
|
||||
// maxPendingRequests limits how many concurrent fetch requests one can initiate.
|
||||
maxPendingRequests = 64
|
||||
// peersPercentagePerRequest caps percentage of peers to be used in a request.
|
||||
@@ -78,6 +80,8 @@ type blocksFetcherConfig struct {
|
||||
peerFilterCapacityWeight float64
|
||||
mode syncMode
|
||||
bs filesystem.BlobStorageSummarizer
|
||||
dcs filesystem.DataColumnStorageReader
|
||||
cv verification.NewDataColumnsVerifier
|
||||
}
|
||||
|
||||
// blocksFetcher is a service to fetch chain data from peers.
|
||||
@@ -94,6 +98,8 @@ type blocksFetcher struct {
|
||||
p2p p2p.P2P
|
||||
db db.ReadOnlyDatabase
|
||||
bs filesystem.BlobStorageSummarizer
|
||||
dcs filesystem.DataColumnStorageReader
|
||||
cv verification.NewDataColumnsVerifier
|
||||
blocksPerPeriod uint64
|
||||
rateLimiter *leakybucket.Collector
|
||||
peerLocks map[peer.ID]*peerLock
|
||||
@@ -124,7 +130,7 @@ type fetchRequestResponse struct {
|
||||
blobsFrom peer.ID
|
||||
start primitives.Slot
|
||||
count uint64
|
||||
- bwb []blocks.BlockWithROBlobs
+ bwb []blocks.BlockWithROSidecars
|
||||
err error
|
||||
}
|
||||
|
||||
@@ -162,6 +168,8 @@ func newBlocksFetcher(ctx context.Context, cfg *blocksFetcherConfig) *blocksFetc
|
||||
p2p: cfg.p2p,
|
||||
db: cfg.db,
|
||||
bs: cfg.bs,
|
||||
dcs: cfg.dcs,
|
||||
cv: cfg.cv,
|
||||
blocksPerPeriod: uint64(blocksPerPeriod),
|
||||
rateLimiter: rateLimiter,
|
||||
peerLocks: make(map[peer.ID]*peerLock),
|
||||
@@ -298,7 +306,7 @@ func (f *blocksFetcher) handleRequest(ctx context.Context, start primitives.Slot
|
||||
response := &fetchRequestResponse{
|
||||
start: start,
|
||||
count: count,
|
||||
- bwb: []blocks.BlockWithROBlobs{},
+ bwb: []blocks.BlockWithROSidecars{},
|
||||
err: nil,
|
||||
}
|
||||
|
||||
@@ -317,30 +325,114 @@ func (f *blocksFetcher) handleRequest(ctx context.Context, start primitives.Slot
|
||||
if f.mode == modeStopOnFinalizedEpoch {
|
||||
highestFinalizedSlot := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(targetEpoch + 1))
|
||||
if start > highestFinalizedSlot {
|
||||
response.err = fmt.Errorf("%w, slot: %d, highest finalized slot: %d",
|
||||
errSlotIsTooHigh, start, highestFinalizedSlot)
|
||||
response.err = fmt.Errorf(
|
||||
"%w, slot: %d, highest finalized slot: %d",
|
||||
errSlotIsTooHigh, start, highestFinalizedSlot,
|
||||
)
|
||||
|
||||
return response
|
||||
}
|
||||
}
|
||||
|
||||
response.bwb, response.blocksFrom, response.err = f.fetchBlocksFromPeer(ctx, start, count, peers)
|
||||
if response.err == nil {
|
||||
- pid, bwb, err := f.fetchBlobsFromPeer(ctx, response.bwb, response.blocksFrom, peers)
+ pid, err := f.fetchSidecars(ctx, response.blocksFrom, peers, response.bwb)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to fetch sidecars")
|
||||
response.err = err
|
||||
}
|
||||
- response.bwb = bwb
|
||||
|
||||
response.blobsFrom = pid
|
||||
}
|
||||
|
||||
return response
|
||||
}
|
||||
|
||||
- // fetchBlocksFromPeer fetches blocks from a single randomly selected peer.
+ // fetchSidecars fetches the sidecars corresponding to the blocks in `bwScs`.
+ // It mutates the `Blobs` and `Columns` fields of `bwScs` with the fetched sidecars.
+ // `pid` is the initial peer to request blobs from (usually the peer from which the blocks originated),
+ // `peers` is a list of peers to fall back on for the request if `pid` fails.
+ // `bwScs` must be sorted by slot.
+ // It returns the peer ID from which blobs were fetched (if any).
func (f *blocksFetcher) fetchSidecars(ctx context.Context, pid peer.ID, peers []peer.ID, bwScs []blocks.BlockWithROSidecars) (peer.ID, error) {
|
||||
samplesPerSlot := params.BeaconConfig().SamplesPerSlot
|
||||
|
||||
if len(bwScs) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
firstFuluIndex, err := findFirstFuluIndex(bwScs)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "find first Fulu index")
|
||||
}
|
||||
|
||||
preFulu := bwScs[:firstFuluIndex]
|
||||
postFulu := bwScs[firstFuluIndex:]
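// Pre-Fulu blocks carry blob sidecars while Fulu-and-later blocks carry data column
// sidecars, so the slot-sorted slice is handled in two halves.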
|
||||
|
||||
var blobsPid peer.ID
|
||||
|
||||
if len(preFulu) > 0 {
|
||||
// Fetch blob sidecars.
|
||||
blobsPid, err = f.fetchBlobsFromPeer(ctx, preFulu, pid, peers)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "fetch blobs from peer")
|
||||
}
|
||||
}
|
||||
|
||||
if len(postFulu) == 0 {
|
||||
return blobsPid, nil
|
||||
}
|
||||
|
||||
// Compute the columns to request.
|
||||
custodyGroupCount, err := f.p2p.CustodyGroupCount()
|
||||
if err != nil {
|
||||
return blobsPid, errors.Wrap(err, "custody group count")
|
||||
}
|
||||
|
||||
samplingSize := max(custodyGroupCount, samplesPerSlot)
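// The sampling size is the larger of the node's custody group count and SamplesPerSlot,
// so the column request never covers fewer columns than the per-slot sampling requirement
// (reading of the surrounding code; the exact PeerDAS parameter semantics may differ).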
|
||||
info, _, err := peerdas.Info(f.p2p.NodeID(), samplingSize)
|
||||
if err != nil {
|
||||
return blobsPid, errors.Wrap(err, "custody info")
|
||||
}
|
||||
|
||||
params := prysmsync.DataColumnSidecarsParams{
|
||||
Ctx: ctx,
|
||||
Tor: f.clock,
|
||||
P2P: f.p2p,
|
||||
RateLimiter: f.rateLimiter,
|
||||
CtxMap: f.ctxMap,
|
||||
Storage: f.dcs,
|
||||
NewVerifier: f.cv,
|
||||
}
|
||||
|
||||
roBlocks := make([]blocks.ROBlock, 0, len(postFulu))
|
||||
for _, block := range postFulu {
|
||||
roBlocks = append(roBlocks, block.Block)
|
||||
}
|
||||
|
||||
verifiedRoDataColumnsByRoot, err := prysmsync.FetchDataColumnSidecars(params, roBlocks, info.CustodyColumns)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "fetch data column sidecars")
|
||||
}
|
||||
|
||||
// Populate the response.
|
||||
for i := range bwScs {
|
||||
bwSc := &bwScs[i]
|
||||
root := bwSc.Block.Root()
|
||||
if columns, ok := verifiedRoDataColumnsByRoot[root]; ok {
|
||||
bwSc.Columns = columns
|
||||
}
|
||||
}
|
||||
|
||||
return blobsPid, nil
|
||||
}
|
||||
|
||||
// fetchBlocksFromPeer fetches blocks from a single randomly selected peer, sorted by slot.
|
||||
func (f *blocksFetcher) fetchBlocksFromPeer(
|
||||
ctx context.Context,
|
||||
start primitives.Slot, count uint64,
|
||||
peers []peer.ID,
|
||||
) ([]blocks.BlockWithROBlobs, peer.ID, error) {
|
||||
) ([]blocks.BlockWithROSidecars, peer.ID, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "initialsync.fetchBlocksFromPeer")
|
||||
defer span.End()
|
||||
|
||||
@@ -355,8 +447,7 @@ func (f *blocksFetcher) fetchBlocksFromPeer(
|
||||
// peers are dialed first.
|
||||
peers = append(bestPeers, peers...)
|
||||
peers = dedupPeers(peers)
|
||||
for i := 0; i < len(peers); i++ {
|
||||
p := peers[i]
|
||||
for _, p := range peers {
|
||||
blocks, err := f.requestBlocks(ctx, req, p)
|
||||
if err != nil {
|
||||
log.WithField("peer", p).WithError(err).Debug("Could not request blocks by range from peer")
|
||||
@@ -380,14 +471,14 @@ func (f *blocksFetcher) fetchBlocksFromPeer(
|
||||
return nil, "", errNoPeersAvailable
|
||||
}
|
||||
|
||||
func sortedBlockWithVerifiedBlobSlice(bs []interfaces.ReadOnlySignedBeaconBlock) ([]blocks.BlockWithROBlobs, error) {
|
||||
rb := make([]blocks.BlockWithROBlobs, len(bs))
|
||||
for i, b := range bs {
|
||||
func sortedBlockWithVerifiedBlobSlice(blks []interfaces.ReadOnlySignedBeaconBlock) ([]blocks.BlockWithROSidecars, error) {
|
||||
rb := make([]blocks.BlockWithROSidecars, len(blks))
|
||||
for i, b := range blks {
|
||||
ro, err := blocks.NewROBlock(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rb[i] = blocks.BlockWithROBlobs{Block: ro}
|
||||
rb[i] = blocks.BlockWithROSidecars{Block: ro}
|
||||
}
|
||||
sort.Sort(blocks.BlockWithROBlobsSlice(rb))
|
||||
return rb, nil
|
||||
@@ -403,7 +494,8 @@ type commitmentCountList []commitmentCount
|
||||
|
||||
// countCommitments makes a list of all blocks that have commitments that need to be satisfied.
|
||||
// This gives us a representation to finish building the request that is lightweight and readable for testing.
|
||||
func countCommitments(bwb []blocks.BlockWithROBlobs, retentionStart primitives.Slot) commitmentCountList {
|
||||
// `bwb` must be sorted by slot.
|
||||
func countCommitments(bwb []blocks.BlockWithROSidecars, retentionStart primitives.Slot) commitmentCountList {
|
||||
if len(bwb) == 0 {
|
||||
return nil
|
||||
}
|
||||
@@ -485,7 +577,9 @@ func (r *blobRange) Request() *p2ppb.BlobSidecarsByRangeRequest {
|
||||
var errBlobVerification = errors.New("peer unable to serve aligned BlobSidecarsByRange and BeaconBlockSidecarsByRange responses")
|
||||
var errMissingBlobsForBlockCommitments = errors.Wrap(errBlobVerification, "blobs unavailable for processing block with kzg commitments")
|
||||
|
||||
func verifyAndPopulateBlobs(bwb []blocks.BlockWithROBlobs, blobs []blocks.ROBlob, req *p2ppb.BlobSidecarsByRangeRequest, bss filesystem.BlobStorageSummarizer) ([]blocks.BlockWithROBlobs, error) {
|
||||
// verifyAndPopulateBlobs verifies the received blobs and mutates the input `bwb` argument by adding the verified blobs.
|
||||
func verifyAndPopulateBlobs(bwb []blocks.BlockWithROSidecars, blobs []blocks.ROBlob, req *p2ppb.BlobSidecarsByRangeRequest, bss filesystem.BlobStorageSummarizer) error {
|
||||
blobsByRoot := make(map[[32]byte][]blocks.ROBlob)
|
||||
for i := range blobs {
|
||||
if blobs[i].Slot() < req.StartSlot {
|
||||
@@ -495,46 +589,53 @@ func verifyAndPopulateBlobs(bwb []blocks.BlockWithROBlobs, blobs []blocks.ROBlob
|
||||
blobsByRoot[br] = append(blobsByRoot[br], blobs[i])
|
||||
}
|
||||
for i := range bwb {
|
||||
bwi, err := populateBlock(bwb[i], blobsByRoot[bwb[i].Block.Root()], req, bss)
|
||||
err := populateBlock(&bwb[i], blobsByRoot[bwb[i].Block.Root()], req, bss)
|
||||
if err != nil {
|
||||
if errors.Is(err, errDidntPopulate) {
|
||||
continue
|
||||
}
|
||||
return bwb, err
|
||||
return err
|
||||
}
|
||||
bwb[i] = bwi
|
||||
}
|
||||
return bwb, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
var errDidntPopulate = errors.New("skipping population of block")
|
||||
|
||||
func populateBlock(bw blocks.BlockWithROBlobs, blobs []blocks.ROBlob, req *p2ppb.BlobSidecarsByRangeRequest, bss filesystem.BlobStorageSummarizer) (blocks.BlockWithROBlobs, error) {
|
||||
// populateBlock verifies and populates blobs for a block.
|
||||
// This function mutates the input `bw` argument.
|
||||
func populateBlock(bw *blocks.BlockWithROSidecars, blobs []blocks.ROBlob, req *p2ppb.BlobSidecarsByRangeRequest, bss filesystem.BlobStorageSummarizer) error {
|
||||
blk := bw.Block
|
||||
if blk.Version() < version.Deneb || blk.Block().Slot() < req.StartSlot {
|
||||
return bw, errDidntPopulate
|
||||
return errDidntPopulate
|
||||
}
|
||||
|
||||
commits, err := blk.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return bw, errDidntPopulate
|
||||
return errDidntPopulate
|
||||
}
|
||||
|
||||
if len(commits) == 0 {
|
||||
return bw, errDidntPopulate
|
||||
return errDidntPopulate
|
||||
}
|
||||
|
||||
// Drop blobs on the floor if we already have them.
|
||||
if bss != nil && bss.Summary(blk.Root()).AllAvailable(len(commits)) {
|
||||
return bw, errDidntPopulate
|
||||
return errDidntPopulate
|
||||
}
|
||||
|
||||
if len(commits) != len(blobs) {
|
||||
return bw, missingCommitError(blk.Root(), blk.Block().Slot(), commits)
|
||||
return missingCommitError(blk.Root(), blk.Block().Slot(), commits)
|
||||
}
|
||||
|
||||
for ci := range commits {
|
||||
if err := verify.BlobAlignsWithBlock(blobs[ci], blk); err != nil {
|
||||
return bw, err
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
bw.Blobs = blobs
|
||||
return bw, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func missingCommitError(root [32]byte, slot primitives.Slot, missing [][]byte) error {
|
||||
@@ -547,29 +648,38 @@ func missingCommitError(root [32]byte, slot primitives.Slot, missing [][]byte) e
|
||||
}
|
||||
|
||||
// fetchBlobsFromPeer fetches blocks from a single randomly selected peer.
|
||||
func (f *blocksFetcher) fetchBlobsFromPeer(ctx context.Context, bwb []blocks.BlockWithROBlobs, pid peer.ID, peers []peer.ID) (peer.ID, []blocks.BlockWithROBlobs, error) {
|
||||
// This function mutates the input `bwb` argument.
|
||||
// `pid` is the initial peer to request blobs from (usually the peer from which the block originated),
|
||||
// `peers` is a list of peers to use for the request if `pid` fails.
|
||||
// `bwb` must be sorted by slot.
|
||||
// It returns the peer ID from which blobs were fetched.
|
||||
func (f *blocksFetcher) fetchBlobsFromPeer(ctx context.Context, bwb []blocks.BlockWithROSidecars, pid peer.ID, peers []peer.ID) (peer.ID, error) {
|
||||
if len(bwb) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
ctx, span := trace.StartSpan(ctx, "initialsync.fetchBlobsFromPeer")
|
||||
defer span.End()
|
||||
if slots.ToEpoch(f.clock.CurrentSlot()) < params.BeaconConfig().DenebForkEpoch {
|
||||
return "", bwb, nil
|
||||
return "", nil
|
||||
}
|
||||
blobWindowStart, err := prysmsync.BlobRPCMinValidSlot(f.clock.CurrentSlot())
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
return "", err
|
||||
}
|
||||
// Construct request message based on observed interval of blocks in need of blobs.
|
||||
req := countCommitments(bwb, blobWindowStart).blobRange(f.bs).Request()
|
||||
if req == nil {
|
||||
return "", bwb, nil
|
||||
return "", nil
|
||||
}
|
||||
peers = f.filterPeers(ctx, peers, peersPercentagePerRequest)
|
||||
// We dial the initial peer first to ensure that we get the desired set of blobs.
|
||||
wantedPeers := append([]peer.ID{pid}, peers...)
|
||||
bestPeers := f.hasSufficientBandwidth(wantedPeers, req.Count)
|
||||
peers = append([]peer.ID{pid}, peers...)
|
||||
peers = f.hasSufficientBandwidth(peers, req.Count)
|
||||
// We append the best peers to the front so that higher capacity
|
||||
// peers are dialed first. If all of them fail, we fallback to the
|
||||
// initial peer we wanted to request blobs from.
|
||||
peers = append(bestPeers, pid)
|
||||
peers = append(peers, pid)
|
||||
for i := 0; i < len(peers); i++ {
|
||||
p := peers[i]
|
||||
blobs, err := f.requestBlobs(ctx, req, p)
|
||||
@@ -578,14 +688,24 @@ func (f *blocksFetcher) fetchBlobsFromPeer(ctx context.Context, bwb []blocks.Blo
|
||||
continue
|
||||
}
|
||||
f.p2p.Peers().Scorers().BlockProviderScorer().Touch(p)
|
||||
robs, err := verifyAndPopulateBlobs(bwb, blobs, req, f.bs)
|
||||
if err != nil {
|
||||
if err := verifyAndPopulateBlobs(bwb, blobs, req, f.bs); err != nil {
|
||||
log.WithField("peer", p).WithError(err).Debug("Invalid BeaconBlobsByRange response")
|
||||
continue
|
||||
}
|
||||
return p, robs, err
|
||||
return p, err
|
||||
}
|
||||
return "", nil, errNoPeersAvailable
|
||||
return "", errNoPeersAvailable
|
||||
}
|
||||
|
||||
// sortedSliceFromMap returns a sorted slice of keys from a map.
|
||||
func sortedSliceFromMap(m map[uint64]bool) []uint64 {
|
||||
result := make([]uint64, 0, len(m))
|
||||
for k := range m {
|
||||
result = append(result, k)
|
||||
}
|
||||
|
||||
slices.Sort(result)
|
||||
return result
|
||||
}
|
||||
|
||||
// requestBlocks is a wrapper for handling BeaconBlocksByRangeRequest requests/streams.
|
||||
@@ -642,6 +762,7 @@ func (f *blocksFetcher) requestBlobs(ctx context.Context, req *p2ppb.BlobSidecar
|
||||
}
|
||||
f.rateLimiter.Add(pid.String(), int64(req.Count))
|
||||
l.Unlock()
|
||||
|
||||
return prysmsync.SendBlobsByRangeRequest(ctx, f.clock, f.p2p, pid, f.ctxMap, req)
|
||||
}
|
||||
|
||||
@@ -699,13 +820,17 @@ func (f *blocksFetcher) waitForBandwidth(pid peer.ID, count uint64) error {
|
||||
}
|
||||
|
||||
func (f *blocksFetcher) hasSufficientBandwidth(peers []peer.ID, count uint64) []peer.ID {
|
||||
filteredPeers := []peer.ID{}
|
||||
for _, p := range peers {
|
||||
if uint64(f.rateLimiter.Remaining(p.String())) < count {
|
||||
filteredPeers := make([]peer.ID, 0, len(peers))
|
||||
|
||||
for _, peer := range peers {
|
||||
remaining := uint64(0)
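// Clamp negative Remaining values to zero before the unsigned comparison below;
// converting a negative balance straight to uint64 would wrap around to a huge value.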
|
||||
if remainingInt := f.rateLimiter.Remaining(peer.String()); remainingInt > 0 {
|
||||
remaining = uint64(remainingInt)
|
||||
}
|
||||
if remaining < count {
|
||||
continue
|
||||
}
|
||||
copiedP := p
|
||||
filteredPeers = append(filteredPeers, copiedP)
|
||||
filteredPeers = append(filteredPeers, peer)
|
||||
}
|
||||
return filteredPeers
|
||||
}
|
||||
@@ -745,3 +870,23 @@ func dedupPeers(peers []peer.ID) []peer.ID {
|
||||
}
|
||||
return newPeerList
|
||||
}
|
||||
|
||||
// findFirstFuluIndex returns the index of the first block with a version >= Fulu.
|
||||
// It returns an error if the blocks are not sorted by version with respect to Fulu.
|
||||
func findFirstFuluIndex(bwScs []blocks.BlockWithROSidecars) (int, error) {
|
||||
firstFuluIndex := len(bwScs)
|
||||
|
||||
for i, bwSc := range bwScs {
|
||||
blockVersion := bwSc.Block.Version()
|
||||
if blockVersion >= version.Fulu && firstFuluIndex > i {
|
||||
firstFuluIndex = i
|
||||
continue
|
||||
}
|
||||
|
||||
if blockVersion < version.Fulu && firstFuluIndex <= i {
|
||||
return 0, errors.New("blocks are not sorted by version")
|
||||
}
|
||||
}
|
||||
|
||||
return firstFuluIndex, nil
|
||||
}
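// Illustrative behaviour (assuming version.Deneb < version.Fulu), cf. TestFirstFuluIndex:
//
//	[Deneb, Deneb, Deneb]      -> 3 (no Fulu block: returns the length of the slice)
//	[Deneb, Deneb, Fulu, Fulu] -> 2 (index of the first Fulu block)
//	[Fulu, Deneb]              -> error (blocks not sorted by version)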
|
||||
|
||||
@@ -12,11 +12,12 @@ import (
|
||||
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
dbtest "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
p2pm "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
p2pt "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
beaconsync "github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
|
||||
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
@@ -266,7 +267,7 @@ func TestBlocksFetcher_RoundRobin(t *testing.T) {
|
||||
|
||||
beaconDB := dbtest.SetupDB(t)
|
||||
|
||||
p := p2pt.NewTestP2P(t)
|
||||
p := p2ptest.NewTestP2P(t)
|
||||
connectPeers(t, p, tt.peers, p.Peers())
|
||||
cache.RLock()
|
||||
genesisRoot := cache.rootCache[0]
|
||||
@@ -307,9 +308,9 @@ func TestBlocksFetcher_RoundRobin(t *testing.T) {
|
||||
fetcher.stop()
|
||||
}()
|
||||
|
||||
processFetchedBlocks := func() ([]blocks.BlockWithROBlobs, error) {
|
||||
processFetchedBlocks := func() ([]blocks.BlockWithROSidecars, error) {
|
||||
defer cancel()
|
||||
var unionRespBlocks []blocks.BlockWithROBlobs
|
||||
var unionRespBlocks []blocks.BlockWithROSidecars
|
||||
|
||||
for {
|
||||
select {
|
||||
@@ -398,6 +399,7 @@ func TestBlocksFetcher_scheduleRequest(t *testing.T) {
|
||||
fetcher.scheduleRequest(t.Context(), 1, blockBatchLimit))
|
||||
})
|
||||
}
|
||||
|
||||
func TestBlocksFetcher_handleRequest(t *testing.T) {
|
||||
blockBatchLimit := flags.Get().BlockBatchLimit
|
||||
chainConfig := struct {
|
||||
@@ -455,7 +457,7 @@ func TestBlocksFetcher_handleRequest(t *testing.T) {
|
||||
}
|
||||
}()
|
||||
|
||||
var bwb []blocks.BlockWithROBlobs
|
||||
var bwb []blocks.BlockWithROSidecars
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
t.Error(ctx.Err())
|
||||
@@ -531,9 +533,9 @@ func TestBlocksFetcher_requestBeaconBlocksByRange(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestBlocksFetcher_RequestBlocksRateLimitingLocks(t *testing.T) {
|
||||
p1 := p2pt.NewTestP2P(t)
|
||||
p2 := p2pt.NewTestP2P(t)
|
||||
p3 := p2pt.NewTestP2P(t)
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
p2 := p2ptest.NewTestP2P(t)
|
||||
p3 := p2ptest.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
p1.Connect(p3)
|
||||
require.Equal(t, 2, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
||||
@@ -543,7 +545,7 @@ func TestBlocksFetcher_RequestBlocksRateLimitingLocks(t *testing.T) {
|
||||
Count: 64,
|
||||
}
|
||||
|
||||
topic := p2pm.RPCBlocksByRangeTopicV1
|
||||
topic := p2p.RPCBlocksByRangeTopicV1
|
||||
protocol := libp2pcore.ProtocolID(topic + p2.Encoding().ProtocolSuffix())
|
||||
streamHandlerFn := func(stream network.Stream) {
|
||||
assert.NoError(t, stream.Close())
|
||||
@@ -602,15 +604,15 @@ func TestBlocksFetcher_RequestBlocksRateLimitingLocks(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestBlocksFetcher_WaitForBandwidth(t *testing.T) {
|
||||
p1 := p2pt.NewTestP2P(t)
|
||||
p2 := p2pt.NewTestP2P(t)
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
p2 := p2ptest.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
require.Equal(t, 1, len(p1.BHost.Network().Peers()), "Expected peers to be connected")
|
||||
req := ðpb.BeaconBlocksByRangeRequest{
|
||||
Count: 64,
|
||||
}
|
||||
|
||||
topic := p2pm.RPCBlocksByRangeTopicV1
|
||||
topic := p2p.RPCBlocksByRangeTopicV1
|
||||
protocol := libp2pcore.ProtocolID(topic + p2.Encoding().ProtocolSuffix())
|
||||
streamHandlerFn := func(stream network.Stream) {
|
||||
assert.NoError(t, stream.Close())
|
||||
@@ -638,7 +640,7 @@ func TestBlocksFetcher_WaitForBandwidth(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T) {
|
||||
p1 := p2pt.NewTestP2P(t)
|
||||
p1 := p2ptest.NewTestP2P(t)
|
||||
tests := []struct {
|
||||
name string
|
||||
req *ethpb.BeaconBlocksByRangeRequest
|
||||
@@ -883,7 +885,7 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T)
|
||||
},
|
||||
}
|
||||
|
||||
topic := p2pm.RPCBlocksByRangeTopicV1
|
||||
topic := p2p.RPCBlocksByRangeTopicV1
|
||||
protocol := libp2pcore.ProtocolID(topic + p1.Encoding().ProtocolSuffix())
|
||||
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
@@ -893,7 +895,7 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T)
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
p2 := p2pt.NewTestP2P(t)
|
||||
p2 := p2ptest.NewTestP2P(t)
|
||||
p1.Connect(p2)
|
||||
|
||||
p2.BHost.SetStreamHandler(protocol, tt.handlerGenFn(tt.req))
|
||||
@@ -993,7 +995,7 @@ func TestBlobRangeForBlocks(t *testing.T) {
|
||||
func TestBlobRequest(t *testing.T) {
|
||||
var nilReq *ethpb.BlobSidecarsByRangeRequest
|
||||
// no blocks
|
||||
req := countCommitments([]blocks.BlockWithROBlobs{}, 0).blobRange(nil).Request()
|
||||
req := countCommitments([]blocks.BlockWithROSidecars{}, 0).blobRange(nil).Request()
|
||||
require.Equal(t, nilReq, req)
|
||||
blks, _ := util.ExtendBlocksPlusBlobs(t, []blocks.ROBlock{}, 10)
|
||||
sbbs := make([]interfaces.ReadOnlySignedBeaconBlock, len(blks))
|
||||
@@ -1026,22 +1028,16 @@ func TestBlobRequest(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCountCommitments(t *testing.T) {
|
||||
// no blocks
|
||||
// blocks before retention start filtered
|
||||
// blocks without commitments filtered
|
||||
// pre-deneb filtered
|
||||
// variety of commitment counts are accurate, from 1 to max
|
||||
type testcase struct {
|
||||
name string
|
||||
bwb func(t *testing.T, c testcase) []blocks.BlockWithROBlobs
|
||||
numBlocks int
|
||||
retStart primitives.Slot
|
||||
resCount int
|
||||
name string
|
||||
bwb func(t *testing.T, c testcase) []blocks.BlockWithROSidecars
|
||||
retStart primitives.Slot
|
||||
resCount int
|
||||
}
|
||||
cases := []testcase{
|
||||
{
|
||||
name: "nil blocks is safe",
|
||||
bwb: func(t *testing.T, c testcase) []blocks.BlockWithROBlobs {
|
||||
bwb: func(t *testing.T, c testcase) []blocks.BlockWithROSidecars {
|
||||
return nil
|
||||
},
|
||||
retStart: 0,
|
||||
@@ -1179,7 +1175,7 @@ func TestCommitmentCountList(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func testSequenceBlockWithBlob(t *testing.T, nblocks int) ([]blocks.BlockWithROBlobs, []blocks.ROBlob) {
|
||||
func testSequenceBlockWithBlob(t *testing.T, nblocks int) ([]blocks.BlockWithROSidecars, []blocks.ROBlob) {
|
||||
blks, blobs := util.ExtendBlocksPlusBlobs(t, []blocks.ROBlock{}, nblocks)
|
||||
sbbs := make([]interfaces.ReadOnlySignedBeaconBlock, len(blks))
|
||||
for i := range blks {
|
||||
@@ -1190,7 +1186,7 @@ func testSequenceBlockWithBlob(t *testing.T, nblocks int) ([]blocks.BlockWithROB
|
||||
return bwb, blobs
|
||||
}
|
||||
|
||||
func testReqFromResp(bwb []blocks.BlockWithROBlobs) *ethpb.BlobSidecarsByRangeRequest {
|
||||
func testReqFromResp(bwb []blocks.BlockWithROSidecars) *ethpb.BlobSidecarsByRangeRequest {
|
||||
return ðpb.BlobSidecarsByRangeRequest{
|
||||
StartSlot: bwb[0].Block.Block().Slot(),
|
||||
Count: uint64(bwb[len(bwb)-1].Block.Block().Slot()-bwb[0].Block.Block().Slot()) + 1,
|
||||
@@ -1207,7 +1203,7 @@ func TestVerifyAndPopulateBlobs(t *testing.T) {
|
||||
}
|
||||
require.Equal(t, len(blobs), len(expectedCommits))
|
||||
|
||||
bwb, err := verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), nil)
|
||||
err := verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), nil)
|
||||
require.NoError(t, err)
|
||||
for _, bw := range bwb {
|
||||
commits, err := bw.Block.Block().Body().BlobKzgCommitments()
|
||||
@@ -1228,7 +1224,7 @@ func TestVerifyAndPopulateBlobs(t *testing.T) {
|
||||
})
|
||||
t.Run("missing blobs", func(t *testing.T) {
|
||||
bwb, blobs := testSequenceBlockWithBlob(t, 10)
|
||||
_, err := verifyAndPopulateBlobs(bwb, blobs[1:], testReqFromResp(bwb), nil)
|
||||
err := verifyAndPopulateBlobs(bwb, blobs[1:], testReqFromResp(bwb), nil)
|
||||
require.ErrorIs(t, err, errMissingBlobsForBlockCommitments)
|
||||
})
|
||||
t.Run("no blobs for last block", func(t *testing.T) {
|
||||
@@ -1240,7 +1236,7 @@ func TestVerifyAndPopulateBlobs(t *testing.T) {
|
||||
blobs = blobs[0 : len(blobs)-len(cmts)]
|
||||
lastBlk, _ = util.GenerateTestDenebBlockWithSidecar(t, lastBlk.Block().ParentRoot(), lastBlk.Block().Slot(), 0)
|
||||
bwb[lastIdx].Block = lastBlk
|
||||
_, err = verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), nil)
|
||||
err = verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), nil)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
t.Run("blobs not copied if all locally available", func(t *testing.T) {
|
||||
@@ -1254,7 +1250,7 @@ func TestVerifyAndPopulateBlobs(t *testing.T) {
|
||||
r7: {0, 1, 2, 3, 4, 5},
|
||||
}
|
||||
bss := filesystem.NewMockBlobStorageSummarizer(t, onDisk)
|
||||
bwb, err := verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), bss)
|
||||
err := verifyAndPopulateBlobs(bwb, blobs, testReqFromResp(bwb), bss)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 6, len(bwb[i1].Blobs))
|
||||
require.Equal(t, 0, len(bwb[i7].Blobs))
|
||||
@@ -1302,3 +1298,203 @@ func TestBlockFetcher_HasSufficientBandwidth(t *testing.T) {
|
||||
}
|
||||
assert.Equal(t, 2, len(receivedPeers))
|
||||
}
|
||||
|
||||
func TestSortedSliceFromMap(t *testing.T) {
|
||||
m := map[uint64]bool{1: true, 3: true, 2: true, 4: true}
|
||||
expected := []uint64{1, 2, 3, 4}
|
||||
|
||||
actual := sortedSliceFromMap(m)
|
||||
require.DeepSSZEqual(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestFetchSidecars(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
t.Run("No blocks", func(t *testing.T) {
|
||||
fetcher := new(blocksFetcher)
|
||||
|
||||
pid, err := fetcher.fetchSidecars(ctx, "", nil, []blocks.BlockWithROSidecars{})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, peer.ID(""), pid)
|
||||
})
|
||||
|
||||
t.Run("Nominal", func(t *testing.T) {
|
||||
beaconConfig := params.BeaconConfig()
|
||||
numberOfColumns := beaconConfig.NumberOfColumns
|
||||
samplesPerSlot := beaconConfig.SamplesPerSlot
|
||||
|
||||
// Define "now" to be one epoch after genesis time + retention period.
|
||||
genesisTime := time.Date(2025, time.August, 10, 0, 0, 0, 0, time.UTC)
|
||||
secondsPerSlot := beaconConfig.SecondsPerSlot
|
||||
slotsPerEpoch := beaconConfig.SlotsPerEpoch
|
||||
secondsPerEpoch := uint64(slotsPerEpoch.Mul(secondsPerSlot))
|
||||
retentionEpochs := beaconConfig.MinEpochsForDataColumnSidecarsRequest
|
||||
nowWrtGenesisSecs := retentionEpochs.Add(1).Mul(secondsPerEpoch)
|
||||
now := genesisTime.Add(time.Duration(nowWrtGenesisSecs) * time.Second)
|
||||
|
||||
genesisValidatorRoot := [fieldparams.RootLength]byte{}
|
||||
nower := func() time.Time { return now }
|
||||
clock := startup.NewClock(genesisTime, genesisValidatorRoot, startup.WithNower(nower))
|
||||
|
||||
// Define a Deneb block with blobs out of retention period.
|
||||
denebBlock := util.NewBeaconBlockDeneb()
|
||||
denebBlock.Block.Slot = 0 // Genesis slot, out of retention period.
|
||||
signedDenebBlock, err := blocks.NewSignedBeaconBlock(denebBlock)
|
||||
require.NoError(t, err)
|
||||
roDenebBlock, err := blocks.NewROBlock(signedDenebBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Define a Fulu block with blobs in the retention period.
|
||||
fuluBlock := util.NewBeaconBlockFulu()
|
||||
fuluBlock.Block.Slot = slotsPerEpoch // Within retention period.
|
||||
fuluBlock.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, fieldparams.KzgCommitmentSize)} // Dummy commitment.
|
||||
signedFuluBlock, err := blocks.NewSignedBeaconBlock(fuluBlock)
|
||||
require.NoError(t, err)
|
||||
roFuluBlock, err := blocks.NewROBlock(signedFuluBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
bodyRoot, err := fuluBlock.Block.Body.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create and save data column sidecars for this fulu block in the database.
|
||||
params := make([]util.DataColumnParam, 0, numberOfColumns)
|
||||
for i := range numberOfColumns {
|
||||
param := util.DataColumnParam{Index: i, Slot: slotsPerEpoch, BodyRoot: bodyRoot[:]}
|
||||
params = append(params, param)
|
||||
}
|
||||
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, params)
|
||||
|
||||
// Create a data columns storage.
|
||||
dir := t.TempDir()
|
||||
dataColumnStorage, err := filesystem.NewDataColumnStorage(ctx, filesystem.WithDataColumnBasePath(dir))
|
||||
require.NoError(t, err)
|
||||
|
||||
// Save the data column sidecars to the storage.
|
||||
err = dataColumnStorage.Save(verifiedRoDataColumnSidecars)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a blocks fetcher.
|
||||
fetcher := &blocksFetcher{
|
||||
clock: clock,
|
||||
p2p: p2ptest.NewTestP2P(t),
|
||||
dcs: dataColumnStorage,
|
||||
}
|
||||
|
||||
// Fetch sidecars.
|
||||
blocksWithSidecars := []blocks.BlockWithROSidecars{
|
||||
{Block: roDenebBlock},
|
||||
{Block: roFuluBlock},
|
||||
}
|
||||
pid, err := fetcher.fetchSidecars(ctx, "", nil, blocksWithSidecars)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, peer.ID(""), pid)
|
||||
|
||||
// Verify that the blocks with sidecars were modified correctly.
|
||||
require.Equal(t, 0, len(blocksWithSidecars[0].Blobs))
|
||||
require.Equal(t, 0, len(blocksWithSidecars[0].Columns))
|
||||
require.Equal(t, 0, len(blocksWithSidecars[1].Blobs))
|
||||
|
||||
// We don't check the content of the columns here. The extensive test is done
|
||||
// in TestFetchDataColumnsSidecars.
|
||||
require.Equal(t, samplesPerSlot, uint64(len(blocksWithSidecars[1].Columns)))
|
||||
})
|
||||
}
|
||||
func TestFirstFuluIndex(t *testing.T) {
|
||||
bellatrix := util.NewBeaconBlockBellatrix()
|
||||
signedBellatrix, err := blocks.NewSignedBeaconBlock(bellatrix)
|
||||
require.NoError(t, err)
|
||||
roBellatrix, err := blocks.NewROBlock(signedBellatrix)
|
||||
require.NoError(t, err)
|
||||
|
||||
capella := util.NewBeaconBlockCapella()
|
||||
signedCapella, err := blocks.NewSignedBeaconBlock(capella)
|
||||
require.NoError(t, err)
|
||||
roCapella, err := blocks.NewROBlock(signedCapella)
|
||||
require.NoError(t, err)
|
||||
|
||||
deneb := util.NewBeaconBlockDeneb()
|
||||
signedDeneb, err := blocks.NewSignedBeaconBlock(deneb)
|
||||
require.NoError(t, err)
|
||||
roDeneb, err := blocks.NewROBlock(signedDeneb)
|
||||
require.NoError(t, err)
|
||||
|
||||
fulu := util.NewBeaconBlockFulu()
|
||||
signedFulu, err := blocks.NewSignedBeaconBlock(fulu)
|
||||
require.NoError(t, err)
|
||||
roFulu, err := blocks.NewROBlock(signedFulu)
|
||||
require.NoError(t, err)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
setupBlocks func(t *testing.T) []blocks.BlockWithROSidecars
|
||||
expectedIndex int
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
name: "all blocks are pre-Fulu",
|
||||
setupBlocks: func(t *testing.T) []blocks.BlockWithROSidecars {
|
||||
return []blocks.BlockWithROSidecars{
|
||||
{Block: roBellatrix},
|
||||
{Block: roCapella},
|
||||
{Block: roDeneb},
|
||||
}
|
||||
},
|
||||
expectedIndex: 3, // Should be the length of the slice
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "all blocks are Fulu or later",
|
||||
setupBlocks: func(t *testing.T) []blocks.BlockWithROSidecars {
|
||||
return []blocks.BlockWithROSidecars{
|
||||
{Block: roFulu},
|
||||
{Block: roFulu},
|
||||
}
|
||||
},
|
||||
expectedIndex: 0,
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "mixed blocks correctly sorted",
|
||||
setupBlocks: func(t *testing.T) []blocks.BlockWithROSidecars {
|
||||
|
||||
return []blocks.BlockWithROSidecars{
|
||||
{Block: roBellatrix},
|
||||
{Block: roCapella},
|
||||
{Block: roDeneb},
|
||||
{Block: roFulu},
|
||||
{Block: roFulu},
|
||||
}
|
||||
},
|
||||
expectedIndex: 3, // Index where Fulu blocks start
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "mixed blocks incorrectly sorted",
|
||||
setupBlocks: func(t *testing.T) []blocks.BlockWithROSidecars {
|
||||
return []blocks.BlockWithROSidecars{
|
||||
{Block: roBellatrix},
|
||||
{Block: roCapella},
|
||||
{Block: roFulu},
|
||||
{Block: roDeneb},
|
||||
{Block: roFulu},
|
||||
}
|
||||
},
|
||||
expectedIndex: 0,
|
||||
expectError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
blocks := tt.setupBlocks(t)
|
||||
index, err := findFirstFuluIndex(blocks)
|
||||
|
||||
if tt.expectError {
|
||||
require.NotNil(t, err)
|
||||
return
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tt.expectedIndex, index)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -24,7 +24,7 @@ import (
|
||||
type forkData struct {
|
||||
blocksFrom peer.ID
|
||||
blobsFrom peer.ID
|
||||
bwb []blocks.BlockWithROBlobs
|
||||
bwb []blocks.BlockWithROSidecars
|
||||
}
|
||||
|
||||
// nonSkippedSlotAfter checks slots after the given one in an attempt to find a non-empty future slot.
|
||||
@@ -275,16 +275,18 @@ func (f *blocksFetcher) findForkWithPeer(ctx context.Context, pid peer.ID, slot
|
||||
"slot": block.Block().Slot(),
|
||||
"root": fmt.Sprintf("%#x", parentRoot),
|
||||
}).Debug("Block with unknown parent root has been found")
|
||||
altBlocks, err := sortedBlockWithVerifiedBlobSlice(blocks[i-1:])
|
||||
bwb, err := sortedBlockWithVerifiedBlobSlice(blocks[i-1:])
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "invalid blocks received in findForkWithPeer")
|
||||
}
|
||||
|
||||
// We need to fetch the blobs for the given alt-chain if any exist, so that we can try to verify and import
|
||||
// the blocks.
|
||||
bpid, bwb, err := f.fetchBlobsFromPeer(ctx, altBlocks, pid, []peer.ID{pid})
|
||||
bpid, err := f.fetchSidecars(ctx, pid, []peer.ID{pid}, bwb)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to retrieve blobs for blocks found in findForkWithPeer")
|
||||
return nil, errors.Wrap(err, "fetch sidecars")
|
||||
}
|
||||
|
||||
// The caller will use the blocks with verified sidecars in bwb as the starting point for
|
||||
// round-robin syncing the alternate chain.
|
||||
return &forkData{blocksFrom: pid, blobsFrom: bpid, bwb: bwb}, nil
|
||||
@@ -303,10 +305,9 @@ func (f *blocksFetcher) findAncestor(ctx context.Context, pid peer.ID, b interfa
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "received invalid blocks in findAncestor")
|
||||
}
|
||||
var bpid peer.ID
|
||||
bpid, bwb, err = f.fetchBlobsFromPeer(ctx, bwb, pid, []peer.ID{pid})
|
||||
bpid, err := f.fetchSidecars(ctx, pid, []peer.ID{pid}, bwb)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to retrieve blobs for blocks found in findAncestor")
|
||||
return nil, errors.Wrap(err, "fetch sidecars")
|
||||
}
|
||||
return &forkData{
|
||||
blocksFrom: pid,
|
||||
@@ -350,9 +351,12 @@ func (f *blocksFetcher) calculateHeadAndTargetEpochs() (headEpoch, targetEpoch p
|
||||
cp := f.chain.FinalizedCheckpt()
|
||||
headEpoch = cp.Epoch
|
||||
targetEpoch, peers = f.p2p.Peers().BestFinalized(params.BeaconConfig().MaxPeersToSync, headEpoch)
|
||||
} else {
|
||||
headEpoch = slots.ToEpoch(f.chain.HeadSlot())
|
||||
targetEpoch, peers = f.p2p.Peers().BestNonFinalized(flags.Get().MinimumSyncPeers, headEpoch)
|
||||
|
||||
return headEpoch, targetEpoch, peers
|
||||
}
|
||||
|
||||
headEpoch = slots.ToEpoch(f.chain.HeadSlot())
|
||||
targetEpoch, peers = f.p2p.Peers().BestNonFinalized(flags.Get().MinimumSyncPeers, headEpoch)
|
||||
|
||||
return headEpoch, targetEpoch, peers
|
||||
}
|
||||
|
||||
@@ -72,6 +72,8 @@ type blocksQueueConfig struct {
|
||||
db db.ReadOnlyDatabase
|
||||
mode syncMode
|
||||
bs filesystem.BlobStorageSummarizer
|
||||
dcs filesystem.DataColumnStorageReader
|
||||
cv verification.NewDataColumnsVerifier
|
||||
}
|
||||
|
||||
// blocksQueue is a priority queue that serves as an intermediary between block fetchers (producers)
@@ -96,7 +98,7 @@ type blocksQueue struct {
type blocksQueueFetchedData struct {
blocksFrom peer.ID
blobsFrom peer.ID
bwb []blocks.BlockWithROBlobs
bwb []blocks.BlockWithROSidecars
}

// newBlocksQueue creates initialized priority queue.
@@ -115,6 +117,8 @@ func newBlocksQueue(ctx context.Context, cfg *blocksQueueConfig) *blocksQueue {
db: cfg.db,
clock: cfg.clock,
bs: cfg.bs,
dcs: cfg.dcs,
cv: cfg.cv,
})
}
highestExpectedSlot := cfg.highestExpectedSlot

@@ -263,7 +263,7 @@ func TestBlocksQueue_Loop(t *testing.T) {
highestExpectedSlot: tt.highestExpectedSlot,
})
assert.NoError(t, queue.start())
processBlock := func(b blocks.BlockWithROBlobs) error {
processBlock := func(b blocks.BlockWithROSidecars) error {
block := b.Block
if !beaconDB.HasBlock(ctx, block.Block().ParentRoot()) {
return fmt.Errorf("%w: %#x", errParentDoesNotExist, block.Block().ParentRoot())
@@ -275,7 +275,7 @@ func TestBlocksQueue_Loop(t *testing.T) {
return mc.ReceiveBlock(ctx, block, root, nil)
}

var blocks []blocks.BlockWithROBlobs
var blocks []blocks.BlockWithROSidecars
for data := range queue.fetchedData {
for _, b := range data.bwb {
if err := processBlock(b); err != nil {
@@ -538,7 +538,7 @@ func TestBlocksQueue_onDataReceivedEvent(t *testing.T) {
require.NoError(t, err)
response := &fetchRequestResponse{
blocksFrom: "abc",
bwb: []blocks.BlockWithROBlobs{
bwb: []blocks.BlockWithROSidecars{
{Block: blocks.ROBlock{ReadOnlySignedBeaconBlock: wsb}},
{Block: blocks.ROBlock{ReadOnlySignedBeaconBlock: wsbCopy}},
},
@@ -640,7 +640,7 @@ func TestBlocksQueue_onReadyToSendEvent(t *testing.T) {
queue.smm.machines[256].fetched.blocksFrom = pidDataParsed
rwsb, err := blocks.NewROBlock(wsb)
require.NoError(t, err)
queue.smm.machines[256].fetched.bwb = []blocks.BlockWithROBlobs{
queue.smm.machines[256].fetched.bwb = []blocks.BlockWithROSidecars{
{Block: rwsb},
}

@@ -674,7 +674,7 @@ func TestBlocksQueue_onReadyToSendEvent(t *testing.T) {
queue.smm.machines[320].fetched.blocksFrom = pidDataParsed
rwsb, err := blocks.NewROBlock(wsb)
require.NoError(t, err)
queue.smm.machines[320].fetched.bwb = []blocks.BlockWithROBlobs{
queue.smm.machines[320].fetched.bwb = []blocks.BlockWithROSidecars{
{Block: rwsb},
}

@@ -705,7 +705,7 @@ func TestBlocksQueue_onReadyToSendEvent(t *testing.T) {
queue.smm.machines[320].fetched.blocksFrom = pidDataParsed
rwsb, err := blocks.NewROBlock(wsb)
require.NoError(t, err)
queue.smm.machines[320].fetched.bwb = []blocks.BlockWithROBlobs{
queue.smm.machines[320].fetched.bwb = []blocks.BlockWithROSidecars{
{Block: rwsb},
}

@@ -4,6 +4,7 @@ import (
"context"
"encoding/hex"
"fmt"
"sort"
"time"

"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition"
@@ -13,6 +14,7 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/paulbellamy/ratecounter"
@@ -78,6 +80,8 @@ func (s *Service) startBlocksQueue(ctx context.Context, highestSlot primitives.S
highestExpectedSlot: highestSlot,
mode: mode,
bs: s.cfg.BlobStorage,
dcs: s.cfg.DataColumnStorage,
cv: s.newDataColumnsVerifier,
}
queue := newBlocksQueue(ctx, cfg)
if err := queue.start(); err != nil {
@@ -157,31 +161,82 @@ func (s *Service) processFetchedDataRegSync(ctx context.Context, data *blocksQue
log.WithError(err).Debug("Batch did not contain a valid sequence of unprocessed blocks")
return 0, err
}

if len(bwb) == 0 {
return 0, nil
}
bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncBlobSidecarRequirements)
avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv)
batchFields := logrus.Fields{
"firstSlot": data.bwb[0].Block.Block().Slot(),
"firstUnprocessed": bwb[0].Block.Block().Slot(),

// Separate blocks with blobs from blocks with data columns.
firstDataColumnIndex := sort.Search(len(bwb), func(i int) bool {
return bwb[i].Block.Version() >= version.Fulu
})

blocksWithBlobs := bwb[:firstDataColumnIndex]
blocksWithDataColumns := bwb[firstDataColumnIndex:]

blobBatchVerifier := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncBlobSidecarRequirements)
lazilyPersistentStoreBlobs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, blobBatchVerifier)

log := log.WithField("firstSlot", data.bwb[0].Block.Block().Slot())
logBlobs, logDataColumns := log, log

if len(blocksWithBlobs) > 0 {
logBlobs = logBlobs.WithField("firstUnprocessed", blocksWithBlobs[0].Block.Block().Slot())
}
for i, b := range bwb {
sidecars := blocks.NewSidecarsFromBlobSidecars(b.Blobs)
if err := avs.Persist(s.clock.CurrentSlot(), sidecars...); err != nil {
log.WithError(err).WithFields(batchFields).WithFields(syncFields(b.Block)).Warn("Batch failure due to BlobSidecar issues")

for i, b := range blocksWithBlobs {
if err := lazilyPersistentStoreBlobs.Persist(s.clock.CurrentSlot(), b.Blobs...); err != nil {
logBlobs.WithError(err).WithFields(syncFields(b.Block)).Warning("Batch failure due to BlobSidecar issues")
return uint64(i), err
}
if err := s.processBlock(ctx, s.genesisTime, b, s.cfg.Chain.ReceiveBlock, avs); err != nil {

if err := s.processBlock(ctx, s.genesisTime, b, s.cfg.Chain.ReceiveBlock, lazilyPersistentStoreBlobs); err != nil {
if errors.Is(err, errParentDoesNotExist) {
log.WithFields(batchFields).WithField("missingParent", fmt.Sprintf("%#x", b.Block.Block().ParentRoot())).
logBlobs.WithField("missingParent", fmt.Sprintf("%#x", b.Block.Block().ParentRoot())).
WithFields(syncFields(b.Block)).Debug("Could not process batch blocks due to missing parent")
} else {
log.WithError(err).WithFields(batchFields).WithFields(syncFields(b.Block)).Warn("Block processing failure")
logBlobs.WithError(err).WithFields(syncFields(b.Block)).Warn("Block processing failure")
}

return uint64(i), err
}
}

if len(blocksWithDataColumns) == 0 {
return uint64(len(bwb)), nil
}

// Save data column sidecars.
count := 0
for _, b := range blocksWithDataColumns {
count += len(b.Columns)
}

sidecarsToSave := make([]blocks.VerifiedRODataColumn, 0, count)
for _, blockWithDataColumns := range blocksWithDataColumns {
sidecarsToSave = append(sidecarsToSave, blockWithDataColumns.Columns...)
}

if err := s.cfg.DataColumnStorage.Save(sidecarsToSave); err != nil {
return 0, errors.Wrap(err, "save data column sidecars")
}

for i, b := range blocksWithDataColumns {
logDataColumns := logDataColumns.WithFields(syncFields(b.Block))

if err := s.processBlock(ctx, s.genesisTime, b, s.cfg.Chain.ReceiveBlock, nil); err != nil {
switch {
case errors.Is(err, errParentDoesNotExist):
logDataColumns.
WithField("missingParent", fmt.Sprintf("%#x", b.Block.Block().ParentRoot())).
Debug("Could not process batch blocks due to missing parent")
return uint64(i), err
default:
logDataColumns.WithError(err).Warning("Block processing failure")
return uint64(i), err
}
}
}
return uint64(len(bwb)), nil
}
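Editor's note: the sort.Search call above assumes the batch is already ordered by fork version (all pre-Fulu blocks before the first Fulu block), because the predicate must flip from false to true exactly once. The standalone snippet below, with ints standing in for version.Deneb and version.Fulu, illustrates the resulting split; it is an illustration of the technique, not code from this change.

package main

import (
	"fmt"
	"sort"
)

func main() {
	// 1 stands in for a pre-Fulu fork version (e.g. Deneb), 2 for Fulu.
	versions := []int{1, 1, 2, 2}
	const fulu = 2

	// Same predicate shape as in processFetchedDataRegSync above.
	boundary := sort.Search(len(versions), func(i int) bool {
		return versions[i] >= fulu
	})

	fmt.Println(boundary)            // 2
	fmt.Println(versions[:boundary]) // blob path: [1 1]
	fmt.Println(versions[boundary:]) // data-column path: [2 2]
}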

@@ -193,12 +248,18 @@ func syncFields(b blocks.ROBlock) logrus.Fields {
}

// highestFinalizedEpoch returns the absolute highest finalized epoch of all connected peers.
// Note this can be lower than our finalized epoch if we have no peers or peers that are all behind us.
// It returns `0` if no peers are connected.
// Note this can be lower than our finalized epoch if our connected peers are all behind us.
func (s *Service) highestFinalizedEpoch() primitives.Epoch {
highest := primitives.Epoch(0)
for _, pid := range s.cfg.P2P.Peers().Connected() {
peerChainState, err := s.cfg.P2P.Peers().ChainState(pid)
if err == nil && peerChainState != nil && peerChainState.FinalizedEpoch > highest {

if err != nil || peerChainState == nil {
continue
}

if peerChainState.FinalizedEpoch > highest {
highest = peerChainState.FinalizedEpoch
}
}
@@ -250,7 +311,7 @@ func (s *Service) logBatchSyncStatus(firstBlk blocks.ROBlock, nBlocks int) {
func (s *Service) processBlock(
ctx context.Context,
genesis time.Time,
bwb blocks.BlockWithROBlobs,
bwb blocks.BlockWithROSidecars,
blockReceiver blockReceiverFn,
avs das.AvailabilityStore,
) error {
@@ -269,7 +330,7 @@ func (s *Service) processBlock(

type processedChecker func(context.Context, blocks.ROBlock) bool

func validUnprocessed(ctx context.Context, bwb []blocks.BlockWithROBlobs, headSlot primitives.Slot, isProc processedChecker) ([]blocks.BlockWithROBlobs, error) {
func validUnprocessed(ctx context.Context, bwb []blocks.BlockWithROSidecars, headSlot primitives.Slot, isProc processedChecker) ([]blocks.BlockWithROSidecars, error) {
// use a pointer to avoid confusing the zero-value with the case where the first element is processed.
var processed *int
for i := range bwb {
@@ -299,43 +360,100 @@ func validUnprocessed(ctx context.Context, bwb []blocks.BlockWithROBlobs, headSl
return bwb[nonProcessedIdx:], nil
}

func (s *Service) processBatchedBlocks(ctx context.Context, bwb []blocks.BlockWithROBlobs, bFunc batchBlockReceiverFn) (uint64, error) {
if len(bwb) == 0 {
func (s *Service) processBatchedBlocks(ctx context.Context, bwb []blocks.BlockWithROSidecars, bFunc batchBlockReceiverFn) (uint64, error) {
bwbCount := uint64(len(bwb))
if bwbCount == 0 {
return 0, errors.New("0 blocks provided into method")
}

headSlot := s.cfg.Chain.HeadSlot()
var err error
bwb, err = validUnprocessed(ctx, bwb, headSlot, s.isProcessedBlock)
bwb, err := validUnprocessed(ctx, bwb, headSlot, s.isProcessedBlock)
if err != nil {
return 0, err
}
if len(bwb) == 0 {
return 0, nil
}

first := bwb[0].Block
if !s.cfg.Chain.HasBlock(ctx, first.Block().ParentRoot()) {
firstBlock := bwb[0].Block
if !s.cfg.Chain.HasBlock(ctx, firstBlock.Block().ParentRoot()) {
return 0, fmt.Errorf("%w: %#x (in processBatchedBlocks, slot=%d)",
errParentDoesNotExist, first.Block().ParentRoot(), first.Block().Slot())
errParentDoesNotExist, firstBlock.Block().ParentRoot(), firstBlock.Block().Slot())
}

bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncBlobSidecarRequirements)
avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv)
s.logBatchSyncStatus(first, len(bwb))
for _, bb := range bwb {
if len(bb.Blobs) == 0 {
firstFuluIndex, err := findFirstFuluIndex(bwb)
if err != nil {
return 0, errors.Wrap(err, "finding first Fulu index")
}

blocksWithBlobs := bwb[:firstFuluIndex]
blocksWithDataColumns := bwb[firstFuluIndex:]

if err := s.processBlocksWithBlobs(ctx, blocksWithBlobs, bFunc, firstBlock); err != nil {
return 0, errors.Wrap(err, "processing blocks with blobs")
}

if err := s.processBlocksWithDataColumns(ctx, blocksWithDataColumns, bFunc, firstBlock); err != nil {
return 0, errors.Wrap(err, "processing blocks with data columns")
}

return bwbCount, nil
}

func (s *Service) processBlocksWithBlobs(ctx context.Context, bwbs []blocks.BlockWithROSidecars, bFunc batchBlockReceiverFn, firstBlock blocks.ROBlock) error {
bwbCount := len(bwbs)
if bwbCount == 0 {
return nil
}

batchVerifier := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncBlobSidecarRequirements)
persistentStore := das.NewLazilyPersistentStore(s.cfg.BlobStorage, batchVerifier)
s.logBatchSyncStatus(firstBlock, bwbCount)

for _, bwb := range bwbs {
if len(bwb.Blobs) == 0 {
continue
}

sidecars := blocks.NewSidecarsFromBlobSidecars(bb.Blobs)

if err := avs.Persist(s.clock.CurrentSlot(), sidecars...); err != nil {
return 0, err
if err := persistentStore.Persist(s.clock.CurrentSlot(), bwb.Blobs...); err != nil {
return errors.Wrap(err, "persisting blobs")
}
}

robs := blocks.BlockWithROBlobsSlice(bwb).ROBlocks()
return uint64(len(bwb)), bFunc(ctx, robs, avs)
robs := blocks.BlockWithROBlobsSlice(bwbs).ROBlocks()
if err := bFunc(ctx, robs, persistentStore); err != nil {
return errors.Wrap(err, "processing blocks with blobs")
}

return nil
}

func (s *Service) processBlocksWithDataColumns(ctx context.Context, bwbs []blocks.BlockWithROSidecars, bFunc batchBlockReceiverFn, firstBlock blocks.ROBlock) error {
bwbCount := len(bwbs)
if bwbCount == 0 {
return nil
}

s.logBatchSyncStatus(firstBlock, bwbCount)

// Save data column sidecars.
count := 0
for _, bwb := range bwbs {
count += len(bwb.Columns)
}

sidecarsToSave := make([]blocks.VerifiedRODataColumn, 0, count)
for _, blockWithDataColumns := range bwbs {
sidecarsToSave = append(sidecarsToSave, blockWithDataColumns.Columns...)
}

if err := s.cfg.DataColumnStorage.Save(sidecarsToSave); err != nil {
return errors.Wrap(err, "save data column sidecars")
}

robs := blocks.BlockWithROBlobsSlice(bwbs).ROBlocks()
if err := bFunc(ctx, robs, nil); err != nil {
return errors.Wrap(err, "process post-Fulu blocks")
}

return nil
}

func isPunishableError(err error) bool {

@@ -8,9 +8,11 @@ import (
"github.com/OffchainLabs/prysm/v6/async/abool"
mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/das"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
dbtest "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
p2pt "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
@@ -373,7 +375,7 @@ func TestService_processBlock(t *testing.T) {
require.NoError(t, err)
rowsb, err := blocks.NewROBlock(wsb)
require.NoError(t, err)
err = s.processBlock(ctx, genesis, blocks.BlockWithROBlobs{Block: rowsb}, func(
err = s.processBlock(ctx, genesis, blocks.BlockWithROSidecars{Block: rowsb}, func(
ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, _ das.AvailabilityStore) error {
assert.NoError(t, s.cfg.Chain.ReceiveBlock(ctx, block, blockRoot, nil))
return nil
@@ -385,7 +387,7 @@ func TestService_processBlock(t *testing.T) {
require.NoError(t, err)
rowsb, err = blocks.NewROBlock(wsb)
require.NoError(t, err)
err = s.processBlock(ctx, genesis, blocks.BlockWithROBlobs{Block: rowsb}, func(
err = s.processBlock(ctx, genesis, blocks.BlockWithROSidecars{Block: rowsb}, func(
ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, _ das.AvailabilityStore) error {
return nil
}, nil)
@@ -396,7 +398,7 @@ func TestService_processBlock(t *testing.T) {
require.NoError(t, err)
rowsb, err = blocks.NewROBlock(wsb)
require.NoError(t, err)
err = s.processBlock(ctx, genesis, blocks.BlockWithROBlobs{Block: rowsb}, func(
err = s.processBlock(ctx, genesis, blocks.BlockWithROSidecars{Block: rowsb}, func(
ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, _ das.AvailabilityStore) error {
assert.NoError(t, s.cfg.Chain.ReceiveBlock(ctx, block, blockRoot, nil))
return nil
@@ -432,7 +434,7 @@ func TestService_processBlockBatch(t *testing.T) {
s.genesisTime = genesis

t.Run("process non-linear batch", func(t *testing.T) {
var batch []blocks.BlockWithROBlobs
var batch []blocks.BlockWithROSidecars
currBlockRoot := genesisBlkRoot
for i := primitives.Slot(1); i < 10; i++ {
parentRoot := currBlockRoot
@@ -446,11 +448,11 @@ func TestService_processBlockBatch(t *testing.T) {
require.NoError(t, err)
rowsb, err := blocks.NewROBlock(wsb)
require.NoError(t, err)
batch = append(batch, blocks.BlockWithROBlobs{Block: rowsb})
batch = append(batch, blocks.BlockWithROSidecars{Block: rowsb})
currBlockRoot = blk1Root
}

var batch2 []blocks.BlockWithROBlobs
var batch2 []blocks.BlockWithROSidecars
for i := primitives.Slot(10); i < 20; i++ {
parentRoot := currBlockRoot
blk1 := util.NewBeaconBlock()
@@ -463,7 +465,7 @@ func TestService_processBlockBatch(t *testing.T) {
require.NoError(t, err)
rowsb, err := blocks.NewROBlock(wsb)
require.NoError(t, err)
batch2 = append(batch2, blocks.BlockWithROBlobs{Block: rowsb})
batch2 = append(batch2, blocks.BlockWithROSidecars{Block: rowsb})
currBlockRoot = blk1Root
}

@@ -485,7 +487,7 @@ func TestService_processBlockBatch(t *testing.T) {
assert.ErrorContains(t, "block is already processed", err)
require.Equal(t, uint64(0), count)

var badBatch2 []blocks.BlockWithROBlobs
var badBatch2 []blocks.BlockWithROSidecars
for i, b := range batch2 {
// create a non-linear batch
if i%3 == 0 && i != 0 {
@@ -685,7 +687,7 @@ func TestService_ValidUnprocessed(t *testing.T) {
require.NoError(t, err)
util.SaveBlock(t, t.Context(), beaconDB, genesisBlk)

var batch []blocks.BlockWithROBlobs
var batch []blocks.BlockWithROSidecars
currBlockRoot := genesisBlkRoot
for i := primitives.Slot(1); i < 10; i++ {
parentRoot := currBlockRoot
@@ -699,7 +701,7 @@ func TestService_ValidUnprocessed(t *testing.T) {
require.NoError(t, err)
rowsb, err := blocks.NewROBlock(wsb)
require.NoError(t, err)
batch = append(batch, blocks.BlockWithROBlobs{Block: rowsb})
batch = append(batch, blocks.BlockWithROSidecars{Block: rowsb})
currBlockRoot = blk1Root
}

@@ -712,3 +714,155 @@ func TestService_ValidUnprocessed(t *testing.T) {
// Ensure that the unprocessed batch is returned correctly.
assert.Equal(t, len(retBlocks), len(batch)-2)
}

func TestService_ProcessFetchedDataRegSync(t *testing.T) {
ctx := t.Context()

// Create a data columns storage.
dir := t.TempDir()
dataColumnStorage, err := filesystem.NewDataColumnStorage(ctx, filesystem.WithDataColumnBasePath(dir))
require.NoError(t, err)

// Create Fulu blocks.
fuluBlock1 := util.NewBeaconBlockFulu()
signedFuluBlock1, err := blocks.NewSignedBeaconBlock(fuluBlock1)
require.NoError(t, err)
roFuluBlock1, err := blocks.NewROBlock(signedFuluBlock1)
require.NoError(t, err)
block1Root := roFuluBlock1.Root()

fuluBlock2 := util.NewBeaconBlockFulu()
fuluBlock2.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, fieldparams.KzgCommitmentSize)} // Dummy commitment.
fuluBlock2.Block.Slot = 1
fuluBlock2.Block.ParentRoot = block1Root[:]
signedFuluBlock2, err := blocks.NewSignedBeaconBlock(fuluBlock2)
require.NoError(t, err)

roFuluBlock2, err := blocks.NewROBlock(signedFuluBlock2)
require.NoError(t, err)
block2Root := roFuluBlock2.Root()
parentRoot2 := roFuluBlock2.Block().ParentRoot()
bodyRoot2, err := roFuluBlock2.Block().Body().HashTreeRoot()
require.NoError(t, err)

// Create a mock chain service.
const validatorCount = uint64(64)
state, _ := util.DeterministicGenesisState(t, validatorCount)
chain := &mock.ChainService{
FinalizedCheckPoint: &eth.Checkpoint{},
DB: dbtest.SetupDB(t),
State: state,
Root: block1Root[:],
}

// Create a new service instance.
service := &Service{
cfg: &Config{
Chain: chain,
DataColumnStorage: dataColumnStorage,
},
counter: ratecounter.NewRateCounter(counterSeconds * time.Second),
}

// Save the parent block in the database.
err = chain.DB.SaveBlock(ctx, roFuluBlock1)
require.NoError(t, err)

// Create data column sidecars.
const count = uint64(3)
params := make([]util.DataColumnParam, 0, count)
for i := range count {
param := util.DataColumnParam{Index: i, BodyRoot: bodyRoot2[:], ParentRoot: parentRoot2[:], Slot: roFuluBlock2.Block().Slot()}
params = append(params, param)
}
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, params)

blocksWithSidecars := []blocks.BlockWithROSidecars{
{Block: roFuluBlock2, Columns: verifiedRoDataColumnSidecars},
}

data := &blocksQueueFetchedData{
bwb: blocksWithSidecars,
}

actual, err := service.processFetchedDataRegSync(ctx, data)
require.NoError(t, err)
require.Equal(t, uint64(1), actual)

// Check block and data column sidecars were saved correctly.
require.Equal(t, true, chain.DB.HasBlock(ctx, block2Root))

summary := dataColumnStorage.Summary(block2Root)
for i := range count {
require.Equal(t, true, summary.HasIndex(i))
}
}

func TestService_processBlocksWithDataColumns(t *testing.T) {
ctx := t.Context()

t.Run("no blocks", func(t *testing.T) {
fuluBlock := util.NewBeaconBlockFulu()

signedFuluBlock, err := blocks.NewSignedBeaconBlock(fuluBlock)
require.NoError(t, err)
roFuluBlock, err := blocks.NewROBlock(signedFuluBlock)
require.NoError(t, err)

service := new(Service)
err = service.processBlocksWithDataColumns(ctx, nil, nil, roFuluBlock)
require.NoError(t, err)
})

t.Run("nominal", func(t *testing.T) {
fuluBlock := util.NewBeaconBlockFulu()
fuluBlock.Block.Body.BlobKzgCommitments = [][]byte{make([]byte, fieldparams.KzgCommitmentSize)} // Dummy commitment.
signedFuluBlock, err := blocks.NewSignedBeaconBlock(fuluBlock)
require.NoError(t, err)
roFuluBlock, err := blocks.NewROBlock(signedFuluBlock)
require.NoError(t, err)
bodyRoot, err := roFuluBlock.Block().Body().HashTreeRoot()
require.NoError(t, err)

// Create data column sidecars.
const count = uint64(3)
params := make([]util.DataColumnParam, 0, count)
for i := range count {
param := util.DataColumnParam{Index: i, BodyRoot: bodyRoot[:]}
params = append(params, param)
}
_, verifiedRoDataColumnSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, params)

blocksWithSidecars := []blocks.BlockWithROSidecars{
{Block: roFuluBlock, Columns: verifiedRoDataColumnSidecars},
}

// Create a data columns storage.
dir := t.TempDir()
dataColumnStorage, err := filesystem.NewDataColumnStorage(ctx, filesystem.WithDataColumnBasePath(dir))
require.NoError(t, err)

// Create a service.
service := &Service{
cfg: &Config{
P2P: p2pt.NewTestP2P(t),
DataColumnStorage: dataColumnStorage,
},
counter: ratecounter.NewRateCounter(counterSeconds * time.Second),
}

receiverFunc := func(ctx context.Context, blks []blocks.ROBlock, avs das.AvailabilityStore) error {
require.Equal(t, 1, len(blks))
return nil
}

err = service.processBlocksWithDataColumns(ctx, blocksWithSidecars, receiverFunc, roFuluBlock)
require.NoError(t, err)

// Verify that the data columns were saved correctly.
summary := dataColumnStorage.Summary(roFuluBlock.Root())
for i := range count {
require.Equal(t, true, summary.HasIndex(i))
}
})
}

@@ -12,6 +12,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
blockfeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/block"
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/das"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
@@ -21,6 +22,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/sync"
"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/crypto/rand"
@@ -53,22 +55,24 @@ type Config struct {
ClockWaiter startup.ClockWaiter
InitialSyncComplete chan struct{}
BlobStorage *filesystem.BlobStorage
DataColumnStorage *filesystem.DataColumnStorage
}

// Service service.
type Service struct {
cfg *Config
ctx context.Context
cancel context.CancelFunc
synced *abool.AtomicBool
chainStarted *abool.AtomicBool
counter *ratecounter.RateCounter
genesisChan chan time.Time
clock *startup.Clock
verifierWaiter *verification.InitializerWaiter
newBlobVerifier verification.NewBlobVerifier
ctxMap sync.ContextByteVersions
genesisTime time.Time
cfg *Config
ctx context.Context
cancel context.CancelFunc
synced *abool.AtomicBool
chainStarted *abool.AtomicBool
counter *ratecounter.RateCounter
genesisChan chan time.Time
clock *startup.Clock
verifierWaiter *verification.InitializerWaiter
newBlobVerifier verification.NewBlobVerifier
newDataColumnsVerifier verification.NewDataColumnsVerifier
ctxMap sync.ContextByteVersions
genesisTime time.Time
}

// Option is a functional option for the initial-sync Service.
@@ -149,6 +153,7 @@ func (s *Service) Start() {
return
}
s.newBlobVerifier = newBlobVerifierFromInitializer(v)
s.newDataColumnsVerifier = newDataColumnsVerifierFromInitializer(v)

gt := clock.GenesisTime()
if gt.IsZero() {
@@ -175,19 +180,22 @@ func (s *Service) Start() {
}
s.chainStarted.Set()
log.Info("Starting initial chain sync...")

// Are we already in sync, or close to it?
if slots.ToEpoch(s.cfg.Chain.HeadSlot()) == slots.ToEpoch(currentSlot) {
log.Info("Already synced to the current chain head")
s.markSynced()
return
}

peers, err := s.waitForMinimumPeers()
if err != nil {
log.WithError(err).Error("Error waiting for minimum number of peers")
return
}
if err := s.fetchOriginBlobs(peers); err != nil {
log.WithError(err).Error("Failed to fetch missing blobs for checkpoint origin")

if err := s.fetchOriginSidecars(peers); err != nil {
log.WithError(err).Error("Error fetching origin sidecars")
return
}
if err := s.roundRobinSync(); err != nil {
@@ -200,6 +208,50 @@ func (s *Service) Start() {
s.markSynced()
}

// fetchOriginSidecars fetches the sidecars (blob or data column, depending on the block's fork) for the checkpoint sync origin block.
func (s *Service) fetchOriginSidecars(peers []peer.ID) error {
const delay = 10 * time.Second // The delay between each attempt to fetch origin data column sidecars

blockRoot, err := s.cfg.DB.OriginCheckpointBlockRoot(s.ctx)
if errors.Is(err, db.ErrNotFoundOriginBlockRoot) {
return nil
}

block, err := s.cfg.DB.Block(s.ctx, blockRoot)
if err != nil {
return errors.Wrap(err, "block")
}

currentSlot, blockSlot := s.clock.CurrentSlot(), block.Block().Slot()
currentEpoch, blockEpoch := slots.ToEpoch(currentSlot), slots.ToEpoch(blockSlot)

if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
return nil
}

roBlock, err := blocks.NewROBlockWithRoot(block, blockRoot)
if err != nil {
return errors.Wrap(err, "new ro block with root")
}

blockVersion := roBlock.Version()

if blockVersion >= version.Fulu {
if err := s.fetchOriginColumns(roBlock, delay); err != nil {
return errors.Wrap(err, "fetch origin columns")
}
return nil
}

if blockVersion >= version.Deneb {
if err := s.fetchOriginBlobs(peers, roBlock); err != nil {
return errors.Wrap(err, "fetch origin blobs")
}
}

return nil
}

// Stop initial sync.
func (s *Service) Stop() error {
s.cancel()
@@ -304,23 +356,9 @@ func missingBlobRequest(blk blocks.ROBlock, store *filesystem.BlobStorage) (p2pt
return req, nil
}

func (s *Service) fetchOriginBlobs(pids []peer.ID) error {
r, err := s.cfg.DB.OriginCheckpointBlockRoot(s.ctx)
if errors.Is(err, db.ErrNotFoundOriginBlockRoot) {
return nil
}
blk, err := s.cfg.DB.Block(s.ctx, r)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", r)).Error("Block for checkpoint sync origin root not found in db")
return err
}
if !params.WithinDAPeriod(slots.ToEpoch(blk.Block().Slot()), slots.ToEpoch(s.clock.CurrentSlot())) {
return nil
}
rob, err := blocks.NewROBlockWithRoot(blk, r)
if err != nil {
return err
}
func (s *Service) fetchOriginBlobs(pids []peer.ID, rob blocks.ROBlock) error {
r := rob.Root()

req, err := missingBlobRequest(rob, s.cfg.BlobStorage)
if err != nil {
return err
@@ -335,16 +373,17 @@ func (s *Service) fetchOriginBlobs(pids []peer.ID) error {
if err != nil {
continue
}

if len(blobSidecars) != len(req) {
continue
}
bv := verification.NewBlobBatchVerifier(s.newBlobVerifier, verification.InitsyncBlobSidecarRequirements)
avs := das.NewLazilyPersistentStore(s.cfg.BlobStorage, bv)
current := s.clock.CurrentSlot()
sidecars := blocks.NewSidecarsFromBlobSidecars(blobSidecars)
if err := avs.Persist(current, sidecars...); err != nil {
if err := avs.Persist(current, blobSidecars...); err != nil {
return err
}

if err := avs.IsDataAvailable(s.ctx, current, rob); err != nil {
log.WithField("root", fmt.Sprintf("%#x", r)).WithField("peerID", pids[i]).Warn("Blobs from peer for origin block were unusable")
continue
@@ -355,6 +394,90 @@ func (s *Service) fetchOriginBlobs(pids []peer.ID) error {
return fmt.Errorf("no connected peer able to provide blobs for checkpoint sync block %#x", r)
}

func (s *Service) fetchOriginColumns(roBlock blocks.ROBlock, delay time.Duration) error {
const (
errorMessage = "Failed to fetch origin data column sidecars"
warningIteration = 10
)
samplesPerSlot := params.BeaconConfig().SamplesPerSlot

// Return early if the origin block has no blob commitments.
commitments, err := roBlock.Block().Body().BlobKzgCommitments()
if err != nil {
return errors.Wrap(err, "fetch blob commitments")
}

if len(commitments) == 0 {
return nil
}

// Compute the columns to request.
custodyGroupCount, err := s.cfg.P2P.CustodyGroupCount()
if err != nil {
return errors.Wrap(err, "custody group count")
}

samplingSize := max(custodyGroupCount, samplesPerSlot)
info, _, err := peerdas.Info(s.cfg.P2P.NodeID(), samplingSize)
if err != nil {
return errors.Wrap(err, "fetch peer info")
}

// Fetch origin data column sidecars.
root := roBlock.Root()

params := sync.DataColumnSidecarsParams{
Ctx: s.ctx,
Tor: s.clock,
P2P: s.cfg.P2P,
CtxMap: s.ctxMap,
Storage: s.cfg.DataColumnStorage,
NewVerifier: s.newDataColumnsVerifier,
DownscorePeerOnRPCFault: true,
}

var verifiedRoDataColumnsByRoot map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn
for attempt := uint64(0); ; attempt++ {
verifiedRoDataColumnsByRoot, err = sync.FetchDataColumnSidecars(params, []blocks.ROBlock{roBlock}, info.CustodyColumns)
if err == nil {
break
}

log := log.WithError(err).WithFields(logrus.Fields{
"attempt": attempt,
"delay": delay,
})

if attempt%warningIteration == 0 && attempt > 0 {
log.Warning(errorMessage)
time.Sleep(delay)

continue
}

log.Debug(errorMessage)
time.Sleep(delay)
}

// Save origin data columns to disk.
verifiedRoDataColumnsSidecars, ok := verifiedRoDataColumnsByRoot[root]
if !ok {
return fmt.Errorf("cannot extract origins data column sidecars for block root %#x - should never happen", root)
}

if err := s.cfg.DataColumnStorage.Save(verifiedRoDataColumnsSidecars); err != nil {
return errors.Wrap(err, "save data column sidecars")
}

log.WithFields(logrus.Fields{
"blockRoot": fmt.Sprintf("%#x", roBlock.Root()),
"blobCount": len(commitments),
"columnCount": len(verifiedRoDataColumnsSidecars),
}).Info("Successfully downloaded data column sidecars for checkpoint sync block")

return nil
}

func shufflePeers(pids []peer.ID) {
rg := rand.NewGenerator()
rg.Shuffle(len(pids), func(i, j int) {
@@ -367,3 +490,9 @@ func newBlobVerifierFromInitializer(ini *verification.Initializer) verification.
return ini.NewBlobVerifier(b, reqs)
}
}

func newDataColumnsVerifierFromInitializer(ini *verification.Initializer) verification.NewDataColumnsVerifier {
return func(roDataColumns []blocks.RODataColumn, reqs []verification.Requirement) verification.DataColumnsVerifier {
return ini.NewDataColumnsVerifier(roDataColumns, reqs)
}
}